ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
1,100 | cpp | tensorflow/tensorflow | functionalize_cond | tensorflow/compiler/tf2xla/functionalize_cond.cc | tensorflow/compiler/tf2xla/functionalize_cond_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_FUNCTIONALIZE_COND_H_
#define TENSORFLOW_COMPILER_TF2XLA_FUNCTIONALIZE_COND_H_
#include <deque>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
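// Rewrites each eligible Switch/Merge conditional cluster in `graph` into a
// functional "If" node, registering the extracted branch functions in
// `library`. When `node_filter` is set, only nodes accepted by the filter are
// transformed. Typically run after while-loop functionalization, so the
// remaining Switch/Merge clusters are acyclic.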
Status FunctionalizeCond(Graph* graph, FunctionLibraryDefinition* library,
const NodeFilter& node_filter = {});
namespace functionalize_cond {
enum class BranchType {
kElseBranch = 0,
kThenBranch = 1,
kBoth = 2,
kNeither = 3,
};
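// One control-flow ancestor of a node: either a switch predicate, a switch
// node, or a merge node. Sets of these (AncestorState) are used to group
// nodes that depend on the same control-flow decisions.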
struct AncestorNode {
enum class AncestorNodeType {
kPred = 0,
kSwitch = 1,
kMerge = 2,
};
OutputTensor output_tensor;
AncestorNodeType type;
bool operator<(const AncestorNode& other) const;
bool operator==(const AncestorNode& other) const;
struct Hash {
size_t operator()(const AncestorNode&) const;
};
};
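// Tracks, per node, a CondState (the predicate/branch assignments under which
// the node executes) and an AncestorState (the switch/merge/predicate nodes it
// depends on). States are interned in hash sets, so identical states share a
// single CondId/AncestorId and can be compared by pointer.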
class StateMap {
public:
explicit StateMap(Graph* graph);
struct OutputTensorLess {
bool operator()(const OutputTensor& lhs, const OutputTensor& rhs) const;
};
using CondState = std::map<OutputTensor, BranchType, OutputTensorLess>;
using CondId = const CondState*;
using AncestorState = std::set<AncestorNode>;
using AncestorId = const AncestorState*;
CondId LookupCondId(const Node* node) const;
CondId GetCondId(const CondState& state);
void ResetCondId(const Node* node, CondId id);
AncestorId LookupAncestorId(const Node* node) const;
AncestorId GetAncestorId(const AncestorState& state);
void ResetAncestorId(const Node* node, AncestorId id);
void MarkDead(const Node* node);
BranchType FindBranchOf(CondId id, OutputTensor predicate) const;
string CondStateToString(const Node* node) const;
string CondStateToString(CondId id) const;
string AncestorStateToString(const Node* node) const;
bool IsDead(CondId id) const;
bool IsEmpty(CondId id) const;
private:
struct Hash {
size_t operator()(const CondState& map) const;
size_t operator()(const AncestorState& map) const;
};
std::unordered_set<CondState, Hash> condstate_set_;
std::vector<CondId> node_to_condid_map_;
std::unordered_map<int, CondId> added_node_condid_mapping_;
std::unordered_set<AncestorState, Hash> ancestorstate_set_;
std::vector<AncestorId> node_to_ancestorid_map_;
std::unordered_map<int, AncestorId> added_node_ancestorid_mapping_;
CondId dead_id_;
};
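// Orchestrates the conversion: determines the CondState and AncestorState of
// every node in reverse topological order, removes redundant Switch/Merge
// nodes, groups the remaining Merge nodes by predicate into conditional
// clusters, and replaces each cluster with an "If" node whose branch bodies
// are extracted into functions in `library_`.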
class FunctionalizeCond {
public:
static Status Functionalize(Graph* graph, FunctionLibraryDefinition* library,
const NodeFilter& node_filter);
Status AddIdentityNode(const Node* replacee, Node* if_node, int port);
absl::StatusOr<Node*> AddIfNode(const NodeDef& def, const Node* replacee,
const OutputTensor& predicate);
Status PropagateUpdatedState(const Node* replacee);
void DumpGraphWithCondState(const string& name);
void AddSwitchId(int switch_id);
private:
FunctionalizeCond(Graph* graph, FunctionLibraryDefinition* library,
const NodeFilter& node_filter);
Status FunctionalizeInternal();
StateMap::CondId StateAlongEdge(const Edge* e);
Status DetermineStates(std::vector<Node*> rev_topo_order);
Status DetermineCondState(Node* dst) {
if (IsMerge(dst)) return DetermineCondStateMerge(dst);
return DetermineCondStateNonMerge(dst);
}
Status DetermineCondStateNonMerge(Node* dst);
Status DetermineCondStateMerge(Node* dst);
absl::StatusOr<StateMap::CondId> JoinCondStatesMerge(Node* merge,
StateMap::CondId src,
StateMap::CondId dst);
absl::StatusOr<StateMap::CondId> JoinCondStatesNonMerge(StateMap::CondId src,
StateMap::CondId dst);
Status DetermineAncestorState(Node* dst);
Status RemoveRedundantMerge(Node* node);
Status RemoveRedundantSwitch(Node* node);
void SortMergeNodes(std::vector<Node*>* merge_order);
void DeleteReachableAndDeadNodes(const std::vector<Node*>& merge_order);
StateMap state_map_;
std::unordered_map<Node*, OutputTensor> merge_to_predicate_;
std::unordered_map<Node*, OutputTensor> merge_to_replacement_;
FunctionLibraryDefinition* library_;
Graph* graph_;
friend class FunctionalizeCondTest;
std::vector<int> switch_ids_;
NodeFilter node_filter_ = {};
};
}
}
#endif
#include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include <algorithm>
#include <deque>
#include <stack>
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/frontend_attributes_util.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/union_find.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace functionalize_cond {
bool AncestorNode::operator<(const AncestorNode& other) const {
return (output_tensor.node->id() < other.output_tensor.node->id()) ||
(output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index < other.output_tensor.index) ||
(output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index == other.output_tensor.index &&
type < other.type);
}
bool AncestorNode::operator==(const AncestorNode& other) const {
return output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index == other.output_tensor.index && type == other.type;
}
size_t AncestorNode::Hash::operator()(const AncestorNode& ancestor) const {
size_t h = std::hash<int>()(ancestor.output_tensor.node->id());
h = Hash64Combine(h, std::hash<int>()(ancestor.output_tensor.index));
return Hash64Combine(h, std::hash<int>()(static_cast<int>(ancestor.type)));
}
typedef std::tuple<StateMap::CondId, StateMap::AncestorId, OutputTensor>
ClusterTuple;
struct ClusterTupleLessThan {
bool operator()(const ClusterTuple& a, const ClusterTuple& b) const {
if (std::tie(std::get<0>(a), std::get<1>(a)) <
std::tie(std::get<0>(b), std::get<1>(b))) {
return true;
} else if (std::tie(std::get<0>(a), std::get<1>(a)) ==
std::tie(std::get<0>(b), std::get<1>(b))) {
return StateMap::OutputTensorLess()(std::get<2>(a), std::get<2>(b));
} else {
return false;
}
}
};
string DebugString(const OutputTensor& tensor) {
return absl::StrCat(tensor.node->name(), ":", tensor.index);
}
string Branch_Name(BranchType b) {
switch (b) {
case BranchType::kElseBranch:
return "else";
case BranchType::kThenBranch:
return "then";
case BranchType::kBoth:
return "both";
case BranchType::kNeither:
return "neither";
}
}
string DebugString(StateMap::CondId cond_state) {
if (cond_state == nullptr || cond_state->empty()) return "{}";
using value_type = StateMap::CondState::value_type;
return absl::StrCat(
"{",
absl::StrJoin(*cond_state, ", ",
[](string* output, const value_type& pred_branch) {
const OutputTensor& pred = pred_branch.first;
const BranchType& branch = pred_branch.second;
if (branch == BranchType::kNeither)
absl::StrAppend(output, "d");
else
absl::StrAppend(output, "s(", DebugString(pred), ",",
Branch_Name(branch), ")");
}),
"}");
}
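// Returns in `pred` the tensor feeding the predicate input (input 1) of
// `switch_node`, skipping over any intervening Identity nodes.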
Status GetSwitchPredicate(const Node& switch_node, OutputTensor* pred) {
const Edge* pred_edge;
TF_RETURN_IF_ERROR(switch_node.input_edge(1, &pred_edge));
while (pred_edge->src()->IsIdentity()) {
TF_RETURN_IF_ERROR(pred_edge->src()->input_edge(0, &pred_edge));
}
*pred = OutputTensor(pred_edge->src(), pred_edge->src_output());
return absl::OkStatus();
}
Status GetSwitchValue(const Node& switch_node, OutputTensor* val) {
const Edge* val_edge;
TF_RETURN_IF_ERROR(switch_node.input_edge(0, &val_edge));
*val = OutputTensor(val_edge->src(), val_edge->src_output());
return absl::OkStatus();
}
bool StateMap::OutputTensorLess::operator()(const OutputTensor& lhs,
const OutputTensor& rhs) const {
return (lhs.node->id() < rhs.node->id()) ||
(lhs.node->id() == rhs.node->id() && lhs.index < rhs.index);
}
struct CondStateLess {
bool operator()(const StateMap::CondState::value_type& lhs,
const StateMap::CondState::value_type& rhs) const {
if (StateMap::OutputTensorLess().operator()(lhs.first, rhs.first))
return true;
if (lhs.first.node->id() == rhs.first.node->id() &&
lhs.first.index == rhs.first.index)
return lhs.second < rhs.second;
return false;
}
};
StateMap::StateMap(Graph* graph) {
node_to_condid_map_.resize(graph->num_node_ids());
node_to_ancestorid_map_.resize(graph->num_node_ids());
dead_id_ = GetCondId(
{std::make_pair(OutputTensor(nullptr, -1), BranchType::kNeither)});
}
bool StateMap::IsDead(StateMap::CondId id) const { return id == dead_id_; }
bool StateMap::IsEmpty(StateMap::CondId id) const { return id == nullptr; }
size_t StateMap::Hash::operator()(const StateMap::CondState& map) const {
if (map.empty()) return 0;
auto it = map.begin();
size_t h = Hash64Combine(OutputTensor::Hash()(it->first),
hash<BranchType>()(it->second));
for (++it; it != map.end(); ++it) {
h = Hash64Combine(h, Hash64Combine(OutputTensor::Hash()(it->first),
hash<BranchType>()(it->second)));
}
return h;
}
size_t StateMap::Hash::operator()(const StateMap::AncestorState& map) const {
if (map.empty()) return 0;
auto it = map.begin();
size_t h = AncestorNode::Hash()(*it);
for (++it; it != map.end(); ++it) {
h = Hash64Combine(h, AncestorNode::Hash()(*it));
}
return h;
}
struct CondArgNode {
explicit CondArgNode(Node* src, int src_output)
: src(src), src_output(src_output) {}
string ToString() const {
return absl::StrCat("src=", src->name(), ":", src_output,
" switches=", NodesToString(switches));
}
Node* src;
int src_output;
std::array<Node*, 2> branch_copy;
std::vector<Node*> switches;
};
using CondArgNodes = std::vector<CondArgNode>;
string DebugString(const CondArgNodes& nodes) {
return absl::StrCat(
"[",
absl::StrJoin(nodes, ", ",
[](string* output, const CondArgNode& node) {
absl::StrAppend(output, node.ToString());
}),
"]");
}
StateMap::CondId StateMap::LookupCondId(const Node* node) const {
const int64_t map_size = node_to_condid_map_.size();
if (node->id() < map_size) return node_to_condid_map_[node->id()];
return added_node_condid_mapping_.at(node->id());
}
StateMap::CondId StateMap::GetCondId(const StateMap::CondState& state) {
if (state.empty()) return nullptr;
return &*condstate_set_.insert(state).first;
}
void StateMap::ResetCondId(const Node* node, StateMap::CondId id) {
const int64_t map_size = node_to_condid_map_.size();
if (node->id() < map_size)
node_to_condid_map_[node->id()] = id;
else
added_node_condid_mapping_[node->id()] = id;
}
StateMap::AncestorId StateMap::LookupAncestorId(const Node* node) const {
const int64_t map_size = node_to_ancestorid_map_.size();
if (node->id() < map_size) return node_to_ancestorid_map_[node->id()];
return added_node_ancestorid_mapping_.at(node->id());
}
StateMap::AncestorId StateMap::GetAncestorId(
const StateMap::AncestorState& state) {
if (state.empty()) return nullptr;
return &*ancestorstate_set_.insert(state).first;
}
void StateMap::ResetAncestorId(const Node* node, StateMap::AncestorId id) {
const int64_t map_size = node_to_ancestorid_map_.size();
if (node->id() < map_size)
node_to_ancestorid_map_[node->id()] = id;
else
added_node_ancestorid_mapping_[node->id()] = id;
}
void StateMap::MarkDead(const Node* node) { ResetCondId(node, dead_id_); }
string StateMap::CondStateToString(const Node* node) const {
return CondStateToString(LookupCondId(node));
}
string StateMap::CondStateToString(StateMap::CondId id) const {
return DebugString(id);
}
string StateMap::AncestorStateToString(const Node* node) const {
if (auto id = LookupAncestorId(node)) {
return absl::StrCat(
"{",
absl::StrJoin(*id, ",",
[](string* output, const AncestorNode& ancestor) {
absl::StrAppend(output,
ancestor.output_tensor.node->name(),
":", ancestor.output_tensor.index);
}),
"}");
}
return "{}";
}
FunctionalizeCond::FunctionalizeCond(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter)
: state_map_(graph),
library_(library),
graph_(graph),
node_filter_(node_filter) {}
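// A cluster of Switch and Merge nodes governed by a single predicate.
// Conditional extracts the then/else bodies lying between the switches and
// merges into separate graphs, turns them into functions, and replaces the
// cluster with one "If" node wired to the original inputs and outputs.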
class Conditional {
public:
Conditional(OutputTensor predicate, FunctionalizeCond* parent,
StateMap* cond_state_map, const ShapeRefiner& refiner);
Status AddMerge(Node* m);
Status BuildAndReplace(
Graph* graph, FunctionLibraryDefinition* library,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement);
private:
Status ExtractBodies(Graph* graph);
Status BuildArgumentNodes();
Status BuildIfNode(Graph* graph, FunctionLibraryDefinition* library);
Status AddInputEdges(
Graph* graph,
const std::unordered_map<Node*, OutputTensor>& merge_to_replacement);
Status AddOutputEdges(
Graph* graph,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement);
Status AddSwitch(Node* s);
Status AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Graph* graph);
string name() const;
FunctionalizeCond* parent_;
StateMap* state_map_;
OutputTensor predicate_;
const ShapeRefiner& refiner_;
OutputTensor switch_predicate_;
std::set<Node*, NodeCmpByNameResourcesLast> switches_;
std::set<Node*, NodeCmpByNameResourcesLast> merges_;
std::vector<Node*> external_control_inputs_;
std::vector<Node*> external_control_outputs_;
std::array<std::unique_ptr<Graph>, 2> bodies_;
std::array<std::vector<Node*>, 2> node_maps_;
CondArgNodes cond_arg_nodes_;
Node* if_node_ = nullptr;
bool replaced_ = false;
};
Conditional::Conditional(OutputTensor predicate, FunctionalizeCond* parent,
StateMap* cond_state_map, const ShapeRefiner& refiner)
: parent_(parent),
state_map_(cond_state_map),
predicate_(predicate),
refiner_(refiner) {}
Status Conditional::AddMerge(Node* m) {
merges_.insert(m);
return absl::OkStatus();
}
Status Conditional::AddSwitch(Node* s) {
VLOG(5) << "Adding switch " << s->DebugString();
OutputTensor predicate;
TF_RETURN_IF_ERROR(GetSwitchPredicate(*s, &predicate));
if (switch_predicate_.node == nullptr) switch_predicate_ = predicate;
if (!(switch_predicate_ == predicate)) {
return errors::InvalidArgument(
"Merge nodes ", NodesToString(merges_),
" directly dominated by switch nodes with different predicates (",
DebugString(switch_predicate_), " vs ", DebugString(predicate), ").");
}
switches_.insert(s);
parent_->AddSwitchId(s->id());
return absl::OkStatus();
}
Status Conditional::BuildArgumentNodes() {
VLOG(1) << "Build function arguments";
struct Hash {
size_t operator()(const std::pair<Node*, int>& item) const {
return Hash64Combine(hash<Node*>()(item.first),
std::hash<int>()(item.second));
}
};
std::unordered_map<std::pair<Node*, int>, int, Hash> input_index;
for (Node* switch_node : switches_) {
const Edge* e;
TF_RETURN_IF_ERROR(switch_node->input_edge(0, &e));
std::pair<Node*, int> key = std::make_pair(e->src(), e->src_output());
if (input_index.find(key) == input_index.end()) {
input_index[key] = cond_arg_nodes_.size();
cond_arg_nodes_.emplace_back(key.first, key.second);
}
cond_arg_nodes_.at(input_index.at(key)).switches.push_back(switch_node);
}
VLOG(5) << "CondArg nodes created: " << DebugString(cond_arg_nodes_);
int arg_count = 0;
for (CondArgNode& cond_arg_node : cond_arg_nodes_) {
DataType dtype = cond_arg_node.src->output_type(cond_arg_node.src_output);
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
TF_RETURN_IF_ERROR(
NodeBuilder(absl::StrCat("_Arg", arg_count),
FunctionLibraryDefinition::kArgOp)
.Attr("T", dtype)
.Attr("index", arg_count)
.Finalize(bodies_[branch_index].get(),
&cond_arg_node.branch_copy[branch_index]));
}
for (Node* node : cond_arg_node.switches) {
for (const Edge* e : node->out_edges()) {
if (e->IsControlEdge()) continue;
int branch_index = e->src_output();
Node* src_copy = cond_arg_node.branch_copy[branch_index];
Node* dst_copy = node_maps_[branch_index][e->dst()->id()];
if (dst_copy == nullptr) continue;
TF_RET_CHECK(dst_copy != nullptr)
<< "Unable to find copied node for " << e->dst()->DebugString()
<< " on branch " << Branch_Name(BranchType(branch_index));
int dst_input = IsMerge(e->dst()) ? 0 : e->dst_input();
bodies_[branch_index]->AddEdge(src_copy, 0, dst_copy, dst_input);
}
}
++arg_count;
}
for (Node* m : merges_) {
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bool has_input = false;
for (auto e : node_maps_[static_cast<int>(branch)][m->id()]->in_edges()) {
if (!e->IsControlEdge()) {
has_input = true;
break;
}
}
if (!has_input) {
return errors::Internal(
"Failed to functionalize control flow with merge ",
FormatNodeForError(*m), " that doesn't have input on ",
Branch_Name(branch), " branch.");
}
}
}
return absl::OkStatus();
}
Status Conditional::AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Graph* graph) {
Node* switch_node;
Node* src = edge->src();
int src_output = edge->src_output();
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName(absl::StrCat(src->name(), "_added_switch")),
"Switch")
.Input(src, src_output)
.Input(const_cast<Node*>(predicate_.node), predicate_.index)
.Finalize(graph, &switch_node));
state_map_->ResetCondId(switch_node, state_map_->LookupCondId(src));
state_map_->ResetAncestorId(switch_node, state_map_->LookupAncestorId(src));
Node* dst = edge->dst();
int dst_input = edge->dst_input();
graph->RemoveEdge(edge);
graph->AddEdge(switch_node, static_cast<int>(branch), dst, dst_input);
return AddSwitch(switch_node);
}
Status Conditional::ExtractBodies(Graph* graph) {
VLOG(2) << "Extracting bodies for " << name();
for (auto b : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bodies_[static_cast<int>(b)] =
std::make_unique<Graph>(graph->op_registry());
}
auto find_branch = [&](const Edge* e) {
const auto& id = state_map_->LookupCondId(e->src());
return IsSwitch(e->src()) ? BranchType(e->src_output())
: state_map_->FindBranchOf(id, predicate_);
};
std::array<std::vector<Node*>, 2> stacks;
VLOG(5) << "Merges: " << NodesToString(merges_);
for (Node* m : merges_) {
VLOG(5) << "For merge: " << m->DebugString() << " "
<< state_map_->CondStateToString(m);
for (auto e : m->in_edges()) {
if (e->IsControlEdge()) continue;
BranchType branch = find_branch(e);
TF_RET_CHECK(branch == BranchType::kThenBranch ||
branch == BranchType::kElseBranch)
<< "Error: " << e->src()->name()
<< " is not on either then or else branch (" << Branch_Name(branch)
<< ") for predicate " << DebugString(predicate_) << " ["
<< DebugString(state_map_->LookupCondId(e->src())) << "].";
Node* src = e->src();
if (IsSwitch(src)) {
TF_RETURN_IF_ERROR(AddSwitch(src));
} else {
stacks[static_cast<int>(branch)].push_back(src);
}
}
}
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto output = bodies_[branch_index].get();
auto& stack = stacks[branch_index];
VLOG(5) << "In branch: " << Branch_Name(branch) << " "
<< NodesToString(stack);
std::vector<bool> visited(graph->num_node_ids(), false);
node_maps_[branch_index].resize(graph->num_node_ids(), nullptr);
auto& node_map = node_maps_[branch_index];
while (!stack.empty()) {
Node* n = stack.back();
stack.pop_back();
if (visited.at(n->id())) continue;
visited[n->id()] = true;
for (const Edge* e : n->out_edges()) {
Node* dst = e->dst();
if (IsMerge(dst)) continue;
Node* src = e->src();
auto dst_id = state_map_->LookupCondId(dst);
auto src_id = state_map_->LookupCondId(src);
if (dst_id != src_id) {
if (e->IsControlEdge()) {
external_control_outputs_.push_back(e->src());
} | #include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace functionalize_cond {
class FunctionalizeCondTest : public ::testing::Test {
protected:
FunctionalizeCondTest() {
graph_.reset(new Graph(OpRegistry::Global()));
flib_def_.reset(
new FunctionLibraryDefinition(OpRegistry::Global(), fdef_lib_));
fc_.reset(new functionalize_cond::FunctionalizeCond(
graph_.get(), flib_def_.get(), NodeFilter{}));
}
StateMap::CondId GetUniqueId(const StateMap::StateMap::CondState& state) {
return fc_->state_map_.GetCondId(state);
}
string GetString(const StateMap::StateMap::CondId id) {
return fc_->state_map_.CondStateToString(id);
}
absl::StatusOr<StateMap::CondId> JoinCondStatesNonMerge(
StateMap::CondId src, StateMap::CondId dst) {
return fc_->JoinCondStatesNonMerge(src, dst);
}
absl::StatusOr<StateMap::CondId> JoinCondStatesMerge(Node* n,
StateMap::CondId src,
StateMap::CondId dst) {
return fc_->JoinCondStatesMerge(n, src, dst);
}
FunctionDefLibrary fdef_lib_;
std::unique_ptr<functionalize_cond::FunctionalizeCond> fc_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<Graph> graph_;
};
namespace {
TEST_F(FunctionalizeCondTest, JoinCondStates) {
Tensor pred_tensor(DT_BOOL, TensorShape());
pred_tensor.flat<bool>().setZero();
Node* pred = test::graph::Constant(graph_.get(), pred_tensor, "pred");
Tensor val_tensor(DT_INT32, TensorShape());
val_tensor.flat<int>().setZero();
Node* val = test::graph::Constant(graph_.get(), val_tensor, "val");
Node* m = test::graph::Merge(graph_.get(), val, val);
StateMap::CondId then_branch;
{
StateMap::CondState ss;
ss.insert(std::make_pair(OutputTensor(pred, 0), BranchType::kThenBranch));
then_branch = GetUniqueId(ss);
}
StateMap::CondId else_branch;
{
StateMap::CondState ss;
ss.insert(std::make_pair(OutputTensor(pred, 0), BranchType::kElseBranch));
else_branch = GetUniqueId(ss);
}
Status status = JoinCondStatesNonMerge(then_branch, else_branch).status();
EXPECT_TRUE(errors::IsInvalidArgument(status));
auto joined_or = JoinCondStatesMerge(m, then_branch, else_branch);
TF_EXPECT_OK(joined_or.status());
StateMap::CondId joined = joined_or.value();
auto t = JoinCondStatesNonMerge(then_branch, joined);
TF_EXPECT_OK(t.status());
}
TEST_F(FunctionalizeCondTest, JoinCondStatesMergeWithInputNotInCondContext) {
Tensor val_tensor(DT_INT32, TensorShape());
val_tensor.flat<int>().setZero();
Node* val = test::graph::Constant(graph_.get(), val_tensor, "val");
Node* m = test::graph::Merge(graph_.get(), val, val);
StateMap::CondState cond_state;
auto joined_or = JoinCondStatesMerge(m, nullptr, &cond_state);
EXPECT_FALSE(joined_or.ok());
}
TEST(FunctionalizeCond, DuplicateConstNodes) {
Scope root = Scope::NewRootScope().ExitOnError();
auto const_op = ops::Const(root.WithOpName("const"), 1);
auto arg_0_op = ops::_Arg(root.WithOpName("arg_0"), DT_BOOL, 0);
auto arg_1_op = ops::_Arg(root.WithOpName("arg_1"), DT_INT32, 1);
auto switch_op = ops::Switch(root.WithOpName("switch"), arg_1_op, arg_0_op);
auto identity_n_false_op =
ops::IdentityN(root.WithOpName("identity_n_0"),
{switch_op.output_false, const_op, const_op});
auto identity_n_true_op =
ops::IdentityN(root.WithOpName("identity_n_1"),
{switch_op.output_true, const_op, const_op});
auto merge_op = ops::Merge(
root.WithOpName("merge"),
{identity_n_false_op.output.front(), identity_n_true_op.output.front()});
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
Graph graph(OpRegistry::Global());
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, &graph));
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
auto status = tensorflow::FunctionalizeCond(&graph, &flib_def);
TF_ASSERT_OK(status);
FunctionDefLibrary flib_def_proto = flib_def.ToProto();
for (const auto& fdef : flib_def_proto.function()) {
absl::flat_hash_set<absl::string_view> node_names;
for (const auto& node : fdef.node_def()) {
EXPECT_TRUE(node_names.insert(node.name()).second)
<< node.op() << " with duplicate node name '" << node.name()
<< "' found.";
}
}
}
}
}
} |
1,101 | cpp | tensorflow/tensorflow | resource_util | tensorflow/compiler/tf2xla/resource_util.cc | tensorflow/compiler/tf2xla/resource_util_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_RESOURCE_UTIL_H_
#define TENSORFLOW_COMPILER_TF2XLA_RESOURCE_UTIL_H_
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
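// Statically analyzes a graph to map every Stack/TensorArray resource created
// in it to the set of nodes that use that resource, following the resource
// through Identity/IdentityN nodes and across function call boundaries.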
class ResourceUsageAnalysis {
public:
class NodeInfo {
public:
std::optional<std::string> function_name_;
std::string node_name_;
std::string op_;
NodeInfo() {}
NodeInfo(const std::optional<std::string>& function_name,
std::string node_name, std::string op)
: function_name_(function_name),
node_name_(std::move(node_name)),
op_(std::move(op)) {}
std::string DebugString() const {
return absl::StrJoin({function_name_.value_or(""), node_name_, op_}, ":");
}
bool operator==(const NodeInfo& o) const {
return function_name_ == o.function_name_ && node_name_ == o.node_name_ &&
op_ == o.op_;
}
template <typename H>
friend H AbslHashValue(H h, const NodeInfo& o) {
return H::combine(std::move(h), o.function_name_, o.node_name_, o.op_);
}
};
static Status Analyze(
const Graph* graph, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<NodeInfo, absl::flat_hash_set<NodeInfo>>*
source_to_path);
};
}
#endif
#include "tensorflow/compiler/tf2xla/resource_util.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "xla/status_macros.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using tsl::StatusOr;
const char kIdentityNOp[] = "IdentityN";
const char kIfOp[] = "If";
const char kWhileOp[] = "While";
const char kArgOp[] = "_Arg";
const char kRetvalOp[] = "_Retval";
const int kMaxCallDepth = 100;
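// Core analysis over one graph or function body. `function_name` is nullopt
// for the top-level graph, `resource_arg_indices` lists the _Arg inputs that
// carry resources, and recursion through function calls is limited to
// kMaxCallDepth.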
Status AnalyzeResourceUsage(
const Graph* graph, const std::optional<std::string>& function_name,
const int call_depth, const absl::flat_hash_set<int>& resource_arg_indices,
FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path);
bool IsControlFlowV1Node(const Node* n) {
return (n->IsEnter() || n->IsExit() || n->IsSwitch() || n->IsMerge() ||
n->IsNextIteration());
}
absl::StatusOr<absl::InlinedVector<const Edge*, 1>> OutputEdgesByIndex(
const Node& n, int idx) {
absl::InlinedVector<const Edge*, 1> res;
if (idx >= n.num_outputs()) {
return errors::InvalidArgument("Invalid out_edge index: ", idx, ", Node ",
n.name(), " only has ", n.num_outputs(),
" outputs.");
}
for (const Edge* o : n.out_edges()) {
if (o->src_output() == idx) res.emplace_back(o);
}
return res;
}
bool IsStackOrTensorArraySource(const Node& n) {
const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(n.type_string());
if (!op_info) return false;
if (op_info->resource_kind() != XlaResourceKind::kStack &&
op_info->resource_kind() != XlaResourceKind::kTensorArray)
return false;
return n.num_outputs() > 0 && n.output_type(0) == DataType::DT_RESOURCE;
}
void PropagateFromStackOrTensorArraySourceOp(
const Node& n, const std::optional<std::string>& function_name,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
ResourceUsageAnalysis::NodeInfo src_node_info(function_name, n.name(),
n.type_string());
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
if (o->dst()->input_type(o->dst_input()) != DataType::DT_RESOURCE) {
continue;
}
(*user_to_source)[o] = src_node_info;
}
}
Status PropagateFromArgOp(
const Node& n, const std::optional<std::string>& function_name,
const absl::flat_hash_set<int>& resource_arg_indices,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
TF_RET_CHECK(n.type_string() == kArgOp);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", &index));
if (!resource_arg_indices.contains(index)) return absl::OkStatus();
TF_RET_CHECK(function_name.has_value())
<< "ResourceUsageAnalysis does not support analyzing _Arg nodes "
"carrying Stack/TensorArray resource in given graph unless they "
"are in function calls.";
const ResourceUsageAnalysis::NodeInfo src_node_info(function_name, n.name(),
n.type_string());
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
if (o->dst()->input_type(o->dst_input()) != DataType::DT_RESOURCE) {
continue;
}
(*user_to_source)[o] = src_node_info;
}
return absl::OkStatus();
}
Status UpdateResourceUsageFromFunctionBodyAnalysis(
const Node& call_node,
const std::optional<absl::string_view>& caller_function_name,
const FunctionBody& fbody,
const absl::flat_hash_map<
ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>&
called_function_source_to_path,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
caller_source_to_path) {
std::unordered_map<std::string, Node*> node_name_index =
fbody.graph->BuildNodeNameIndex();
for (const auto& it : called_function_source_to_path) {
ResourceUsageAnalysis::NodeInfo src_node_info = it.first;
if (src_node_info.op_ == kArgOp) {
const Node* arg_src = node_name_index[src_node_info.node_name_];
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(arg_src->attrs(), "index", &index));
const Edge* e;
TF_RETURN_IF_ERROR(call_node.input_edge(index, &e));
src_node_info = (*user_to_source)[e];
}
for (const auto& dst_node_info : it.second) {
if (dst_node_info.op_ == kRetvalOp) {
const Node* ret_user = node_name_index[dst_node_info.node_name_];
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(ret_user->attrs(), "index", &index));
absl::InlinedVector<const Edge*, 1> outs;
TF_ASSIGN_OR_RETURN(outs, OutputEdgesByIndex(call_node, index));
for (const Edge* o : outs) (*user_to_source)[o] = src_node_info;
} else {
(*caller_source_to_path)[src_node_info].emplace(dst_node_info);
}
}
}
return absl::OkStatus();
}
Status PropagateThroughCallOp(
const Node& n, const std::optional<std::string>& function_name,
const int call_depth, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path) {
if (call_depth > kMaxCallDepth) {
return errors::InvalidArgument(
"Function call stack in given graph is too deep, last function ",
"name is: ", function_name.value());
}
absl::flat_hash_set<int> resource_arg_indices;
for (const Edge* e : n.in_edges()) {
if (user_to_source->contains(e)) {
resource_arg_indices.emplace(e->dst_input());
}
}
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(InstantiateFunctionCall(n.def(), lib_runtime, &handle));
auto release_handle_on_return = gtl::MakeCleanup(
[&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); });
const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle);
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
called_function_source_to_path;
TF_RETURN_IF_ERROR(AnalyzeResourceUsage(
fbody->graph, n.type_string(), call_depth + 1, resource_arg_indices,
lib_runtime, &called_function_source_to_path));
TF_RETURN_IF_ERROR(UpdateResourceUsageFromFunctionBodyAnalysis(
n, function_name, *fbody, called_function_source_to_path, user_to_source,
source_to_path));
return absl::OkStatus();
}
Status PropagateThroughIdentityOp(
const Node& n,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
TF_RET_CHECK(n.IsIdentity() || n.type_string() == kIdentityNOp);
if (n.IsIdentity()) {
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
const Edge* in;
TF_RETURN_IF_ERROR(n.input_edge(0, &in));
if (!user_to_source->contains(in)) continue;
user_to_source->emplace(std::make_pair(o, (*user_to_source)[in]));
}
} else {
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
const Edge* in;
TF_RETURN_IF_ERROR(n.input_edge(o->src_output(), &in));
if (!user_to_source->contains(in)) continue;
user_to_source->emplace(std::make_pair(o, (*user_to_source)[in]));
}
}
return absl::OkStatus();
}
Status AnalyzeResourceUsage(
const Graph* graph, const std::optional<std::string>& function_name,
const int call_depth, const absl::flat_hash_set<int>& resource_arg_indices,
FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path) {
source_to_path->clear();
std::vector<Node*> reverse_post_order;
GetReversePostOrder(*graph, &reverse_post_order, NodeComparatorName{});
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>
user_to_source;
for (const Node* n : reverse_post_order) {
if (IsControlFlowV1Node(n)) {
return errors::InvalidArgument(
"AnalyzeResourceUsage does not support control flow v1 node: ",
n->DebugString());
}
if (n->type_string() == kIfOp || n->type_string() == kWhileOp) {
return errors::InvalidArgument(
"AnalyzeResourceUsage does not yet support control flow v2 "
"node: ",
n->DebugString());
}
if (IsStackOrTensorArraySource(*n)) {
PropagateFromStackOrTensorArraySourceOp(*n, function_name,
&user_to_source);
continue;
}
if (n->IsArg()) {
TF_RETURN_IF_ERROR(PropagateFromArgOp(
*n, function_name, resource_arg_indices, &user_to_source));
continue;
}
if (IsFunctionCall(*lib_runtime->GetFunctionLibraryDefinition(), *n)) {
TF_RETURN_IF_ERROR(PropagateThroughCallOp(*n, function_name, call_depth,
lib_runtime, &user_to_source,
source_to_path));
continue;
}
if (n->IsIdentity() || n->type_string() == kIdentityNOp) {
TF_RETURN_IF_ERROR(PropagateThroughIdentityOp(*n, &user_to_source));
}
}
for (const auto& it : user_to_source) {
(*source_to_path)[it.second].emplace(function_name, it.first->dst()->name(),
it.first->dst()->type_string());
}
return absl::OkStatus();
}
}
Status ResourceUsageAnalysis::Analyze(
const Graph* graph, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<NodeInfo, absl::flat_hash_set<NodeInfo>>*
source_to_path) {
return AnalyzeResourceUsage(
graph, {}, 0,
absl::flat_hash_set<int>(), lib_runtime,
source_to_path);
}
} | #include "tensorflow/compiler/tf2xla/resource_util.h"
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
ResourceUsageAnalysis::NodeInfo node_info_from_string(absl::string_view s) {
std::vector<std::string> tokens = absl::StrSplit(s, ':');
EXPECT_EQ(tokens.size(), 3);
ResourceUsageAnalysis::NodeInfo node_info;
if (tokens[0].empty()) {
node_info.function_name_ = std::nullopt;
} else {
node_info.function_name_ = std::move(tokens[0]);
}
node_info.node_name_ = std::move(tokens[1]);
node_info.op_ = std::move(tokens[2]);
return node_info;
}
void AnalyzeAndVerify(
const GraphDef& graphdef, FunctionLibraryDefinition* flib_def,
const absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>>&
expected) {
auto graph = std::make_unique<Graph>(flib_def);
TF_EXPECT_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), graphdef, graph.get()));
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION,
flib_def, OptimizerOptions());
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
source_to_path;
TF_EXPECT_OK(ResourceUsageAnalysis::Analyze(graph.get(), lib_runtime,
&source_to_path));
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
expected_source_to_path;
for (auto it : expected) {
auto src_node_info = node_info_from_string(it.first);
for (const std::string& user : it.second) {
expected_source_to_path[src_node_info].emplace(
node_info_from_string(user));
}
}
EXPECT_EQ(source_to_path, expected_source_to_path);
}
}
TEST(ResourceOpAnalyzerTest, SingleResourceSingleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] =
absl::flat_hash_set<std::string>({":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceSingleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_identity_builder("resource_identity", "Identity",
op_reg);
resource_identity_builder.Input(stack_op);
Node* resource_identity = opts.FinalizeBuilder(&resource_identity_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_identity:Identity", ":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceMultipleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close1_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceMultipleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_identity_builder("resource_identity", "Identity",
op_reg);
resource_identity_builder.Input(stack_op);
Node* resource_identity = opts.FinalizeBuilder(&resource_identity_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close1_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_identity:Identity", ":stack_close0:StackCloseV2",
":stack_close1:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, MultipleResourceMultipleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op0_builder("stack_op0", "StackV2", op_reg);
stack_op0_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op0 = opts.FinalizeBuilder(&stack_op0_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close1_builder);
NodeBuilder stack_op1_builder("stack_op1", "StackV2", op_reg);
stack_op1_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op1 = opts.FinalizeBuilder(&stack_op1_builder);
NodeBuilder stack_close2_builder("stack_close2", "StackCloseV2", op_reg);
stack_close2_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close2_builder);
NodeBuilder stack_close3_builder("stack_close3", "StackCloseV2", op_reg);
stack_close3_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close3_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op0:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
expected[":stack_op1:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close2:StackCloseV2", ":stack_close3:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, MultipleResourceMultipleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op0_builder("stack_op0", "StackV2", op_reg);
stack_op0_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op0 = opts.FinalizeBuilder(&stack_op0_builder);
NodeBuilder stack_op1_builder("stack_op1", "StackV2", op_reg);
stack_op1_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op1 = opts.FinalizeBuilder(&stack_op1_builder);
NodeBuilder identity_n_builder("identity_n", "IdentityN", op_reg);
identity_n_builder.Input({stack_op0, stack_size_placeholder, stack_op1});
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close1_builder);
NodeBuilder stack_close2_builder("stack_close2", "StackCloseV2", op_reg);
stack_close2_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close2_builder);
NodeBuilder stack_close3_builder("stack_close3", "StackCloseV2", op_reg);
stack_close3_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close3_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op0:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
expected[":stack_op1:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close2:StackCloseV2", ":stack_close3:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourcePassThroughFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"pass_through_function",
{"in: resource"},
{"out: resource"},
{},
{{{"out"}, "Identity", {"in"}, {{"T", DataType::DT_RESOURCE}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder pass_through_fn_builder("pass_through_fn",
"pass_through_function", op_reg);
pass_through_fn_builder.Input(stack_op);
Node* pass_through_fn = opts.FinalizeBuilder(&pass_through_fn_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(pass_through_fn);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close:StackCloseV2", ":pass_through_fn:pass_through_function",
"pass_through_function:out:Identity"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourceUserInFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"resource_user_function",
{"in: resource"},
{},
{},
{{{"stack_close"},
"StackCloseV2",
{"in"},
{{"T", DataType::DT_RESOURCE}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_user_fn_builder("resource_user_function",
"resource_user_function", op_reg);
resource_user_fn_builder.Input(stack_op);
opts.FinalizeBuilder(&resource_user_fn_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_user_function:resource_user_function",
"resource_user_function:stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourceSourceInFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"resource_source_function",
{"in: int32"},
{"out: resource"},
{},
{{{"out"}, "StackV2", {"in"}, {{"elem_type", DataType::DT_FLOAT}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder resource_source_fn_builder("resource_source_function",
"resource_source_function", op_reg);
resource_source_fn_builder.Input(stack_size_placeholder);
Node* resource_source_function =
opts.FinalizeBuilder(&resource_source_fn_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(resource_source_function);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected["resource_source_function:out:StackV2"] =
absl::flat_hash_set<std::string>({":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
} |
1,102 | cpp | tensorflow/tensorflow | functionalize_control_flow | tensorflow/compiler/tf2xla/functionalize_control_flow.cc | tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_FUNCTIONALIZE_CONTROL_FLOW_H_
#define TENSORFLOW_COMPILER_TF2XLA_FUNCTIONALIZE_CONTROL_FLOW_H_
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
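// Converts control-flow v1 constructs (Switch/Merge and the frame ops used by
// while loops) in `graph` into functional "If" and "While" nodes. When
// `include_functions` is true, functions associated with the graph's nodes are
// functionalized too and re-registered in `library`. `node_filter`, if set,
// restricts which nodes are transformed.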
Status FunctionalizeControlFlow(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter = {},
bool include_functions = false);
Status FunctionalizeControlFlowForGraphDef(GraphDef* graph_def,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter = {},
bool include_functions = false);
class FunctionalizeControlFlowForXlaPass : public GraphOptimizationPass {
public:
Status Run(const GraphOptimizationPassOptions& options) override;
};
}
#endif
#include "tensorflow/compiler/tf2xla/functionalize_control_flow.h"
#include <algorithm>
#include <deque>
#include <stack>
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/functionalize_while.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "xla/union_find.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
using FuncMap = std::map<string, std::optional<string>>;
using FuncMapIter = std::map<string, std::optional<string>>::const_iterator;
bool FunctionHasBeenProcessed(FuncMapIter func_iter, const FuncMap* func_map) {
return func_iter != func_map->end();
}
bool FunctionHasBeenModified(FuncMapIter func_iter) {
return func_iter->second.has_value();
}
string GetNewFunctionName(
const string& func_name, Node* n,
AssociatedFunctionInfo::AssociatedFunctionType func_type,
FunctionLibraryDefinition* fld) {
return (
func_type ==
AssociatedFunctionInfo::AssociatedFunctionType::kSymbolicGradient
? fld->UniqueFunctionName(absl::StrCat(n->name(), "_f15n_"))
: fld->UniqueFunctionName(absl::StrCat(func_name, "_f15n_")));
}
const string& GetMappedFunctionName(FuncMapIter func_iter) {
DCHECK(func_iter->second.has_value());
return func_iter->second.value();
}
void UpdateFunctionMap(FuncMap* func_map, const string& canonicalized_name,
const string& new_func_name, bool function_modified) {
(*func_map)[canonicalized_name] =
function_modified ? absl::make_optional(new_func_name) : std::nullopt;
}
Status AddFunctionDefToGraphLibrary(
const string& func_name, const AssociatedFunctionInfo& associated_function,
Graph* graph, FunctionLibraryDefinition* fld) {
const OpRegistrationData* op_reg_data;
if (graph->flib_def().LookUp(func_name, &op_reg_data).ok())
return absl::OkStatus();
const FunctionDef* new_fdef = fld->Find(func_name);
DCHECK(new_fdef != nullptr);
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = *new_fdef;
return graph->AddFunctionLibrary(fdef_lib);
}
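// Functionalizes control flow inside the function instantiated from
// `func_name` with `attrs`. Sets `*function_modified` to true if the body was
// changed; a modified body is written back to `fld` under `new_func_name` (or
// replaces the original when the names match).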
Status FunctionalizeControlFlowForFunction(
const string& func_name, const string& new_func_name,
const protobuf::Map<string, tensorflow::AttrValue>& attrs,
FunctionLibraryDefinition* fld, FunctionLibraryRuntime* flr,
FuncMap* func_map, bool* function_modified,
const NodeFilter& node_filter = {});
Status FunctionalizeControlFlowForNodeAssociatedFunctions(
FuncMap* func_map, Graph* graph, FunctionLibraryDefinition* fld,
FunctionLibraryRuntime* flr, bool* any_function_modified,
const NodeFilter& node_filter) {
std::vector<std::pair<Node*, std::vector<AssociatedFunctionInfo>>>
nodes_to_associated_functions;
for (auto* n : graph->nodes()) {
auto associated_functions = GetAssociatedFunctions(*n, fld);
if (!associated_functions.empty()) {
nodes_to_associated_functions.push_back({n, associated_functions});
}
}
for (const auto& pair : nodes_to_associated_functions) {
Node* n = pair.first;
auto associated_functions = pair.second;
for (auto& associated_function : associated_functions) {
DCHECK(associated_function.type() !=
AssociatedFunctionInfo::kFunctionCallNode ||
associated_functions.size() == 1);
string func_name = associated_function.func_name();
string canonicalized_name =
Canonicalize(func_name, AttrSlice(&associated_function.attrs()));
auto func_iter = func_map->find(canonicalized_name);
string new_func_name;
if (FunctionHasBeenProcessed(func_iter, func_map)) {
if (FunctionHasBeenModified(func_iter)) {
*any_function_modified = true;
new_func_name = GetMappedFunctionName(func_iter);
TF_RETURN_IF_ERROR(RewriteAssociatedFunction(
graph, n, fld, associated_function, new_func_name));
}
continue;
}
bool function_modified = false;
new_func_name =
GetNewFunctionName(func_name, n, associated_function.type(), fld);
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForFunction(
func_name, new_func_name, associated_function.attrs(), fld, flr,
func_map, &function_modified, node_filter));
UpdateFunctionMap(func_map, canonicalized_name, new_func_name,
function_modified);
if (function_modified) {
*any_function_modified = true;
TF_RETURN_IF_ERROR(AddFunctionDefToGraphLibrary(
new_func_name, associated_function, graph, fld));
TF_RETURN_IF_ERROR(RewriteAssociatedFunction(
graph, n, fld, associated_function, new_func_name));
}
}
}
return absl::OkStatus();
}
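// Functionalizes control flow inside the function `func_name`: it first
// recurses into any functions the body itself references, then rewrites V1
// (Switch/Merge-based) control flow into functional ops if present. If
// anything changed, the result is stored in `fld` under `new_func_name` (or
// replaces the original when the names match) and `*function_modified` is
// set to true.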
Status FunctionalizeControlFlowForFunction(
const string& func_name, const string& new_func_name,
const protobuf::Map<string, tensorflow::AttrValue>& attrs,
FunctionLibraryDefinition* fld, FunctionLibraryRuntime* flr,
FuncMap* func_map, bool* function_modified, const NodeFilter& node_filter) {
*function_modified = false;
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(flr->Instantiate(func_name, AttrSlice(&attrs), &handle));
Status ret_status = absl::OkStatus();
auto cleanup_handle = gtl::MakeCleanup([&]() {
auto s = flr->ReleaseHandle(handle);
if (!s.ok()) {
ret_status.Update(s);
}
});
const FunctionBody* body = flr->GetFunctionBody(handle);
Graph* g = body->graph;
bool has_switch_or_merge = false;
for (Node* n : body->graph->nodes()) {
if (node_filter && !node_filter(n)) continue;
if (n->type_string() == "Switch" || n->type_string() == "Merge") {
has_switch_or_merge = true;
break;
}
}
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForNodeAssociatedFunctions(
func_map, g, fld, flr, function_modified, node_filter));
if (has_switch_or_merge) {
*function_modified = true;
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("functionalize_control_flow_before_fdef_", func_name),
*g, fld);
}
TF_RETURN_IF_ERROR(FunctionalizeControlFlow(g, fld, node_filter));
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("functionalize_control_flow_after_fdef_", func_name), *g,
fld);
}
}
if (*function_modified) {
FunctionDef functionalized_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, new_func_name, &functionalized_fdef));
if (func_name == new_func_name) {
VLOG(2) << "Replacing function " << func_name;
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(new_func_name, functionalized_fdef));
} else {
VLOG(2) << "Adding function " << new_func_name;
TF_RETURN_IF_ERROR(fld->AddFunctionDef(functionalized_fdef));
}
}
return ret_status;
}
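// Transforms V1 control flow in `graph` into functional ops: while loops are
// functionalized first, then conditionals. When `include_functions` is set,
// functions reachable from the graph's nodes are functionalized as well.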
Status FunctionalizeControlFlow(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter,
bool include_functions) {
VLOG(2) << "FunctionalizeControlFlow (initial): "
<< DumpGraphToFile("functionalize_initial", *graph, library);
if (include_functions) {
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, tensorflow::Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, library,
tensorflow::OptimizerOptions());
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
FuncMap func_map;
bool modified = false;
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForNodeAssociatedFunctions(
&func_map, graph, library, flr, &modified, node_filter));
}
TF_RETURN_IF_ERROR(FunctionalizeWhileLoop(graph, library, node_filter));
TF_RETURN_IF_ERROR(FunctionalizeCond(graph, library, node_filter));
VLOG(2) << "FunctionalizeControlFlow (final): "
<< DumpGraphToFile("functionalize_final", *graph, library);
return absl::OkStatus();
}
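// GraphDef wrapper: converts `graph_def` to a Graph, functionalizes it, and
// converts back. Newly created functions end up in `library`, while the
// GraphDef keeps its original function library.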
Status FunctionalizeControlFlowForGraphDef(GraphDef* graph_def,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter,
bool include_functions) {
FunctionDefLibrary function_lib = graph_def->library();
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph({}, *graph_def, &graph));
TF_RETURN_IF_ERROR(FunctionalizeControlFlow(&graph, library, node_filter,
include_functions));
graph.ToGraphDef(graph_def);
std::swap(*graph_def->mutable_library(), function_lib);
return absl::OkStatus();
}
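// Optimization pass that functionalizes control flow in the functions invoked
// by XLA launch nodes. For each _TPUReplicate ("computation" attr) or
// XlaLaunch ("function" attr) node, the referenced function is rewritten and,
// if it was modified, the node's attribute is re-pointed at the new function.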
Status FunctionalizeControlFlowForXlaPass::Run(
const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
if (VLOG_IS_ON(4)) {
DumpGraphToFile("functionalize_control_flow_before", *graph,
options.flib_def);
}
const auto* config = &options.session_options->config;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
nullptr, options.session_options->env, config,
TF_GRAPH_DEF_VERSION, options.flib_def,
config->graph_options().optimizer_options()));
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
static std::map<string, string>* kNodeTypeToFunctionAttrMapping =
new std::map<string, string>{
{"_TPUReplicate", "computation"},
{"XlaLaunch", "function"},
};
FuncMap func_map;
bool fld_modified = false;
for (Node* n : graph->nodes()) {
auto it = kNodeTypeToFunctionAttrMapping->find(n->type_string());
if (it == kNodeTypeToFunctionAttrMapping->end()) {
continue;
}
const string func_attr = it->second;
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), func_attr, &func));
VLOG(2) << "Graph has node " << n->type_string()
<< ". Corresponding function: " << func.name();
string new_func_name = options.flib_def->UniqueFunctionName(
absl::StrCat(func.name(), "_f15n_"));
bool modified;
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForFunction(
func.name(), new_func_name, func.attr(), options.flib_def, flr,
&func_map, &modified));
if (modified) {
n->ClearAttr(func_attr);
func.set_name(new_func_name);
n->AddAttr(func_attr, func);
fld_modified = true;
}
}
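  // Pruning of functions that became unreachable is disabled here (note the
  // constant-false condition below), so this block never runs.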
if (false) {
if (VLOG_IS_ON(4)) {
DumpGraphToFile("functionalize_control_flow_before_prune", *graph,
options.flib_def);
}
TF_RETURN_IF_ERROR(
PruneUnreachableFunctionsFromGraph(*graph, options.flib_def));
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile("functionalize_control_flow_after", *graph,
options.flib_def);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/tf2xla/functionalize_control_flow.h"
#include <string>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h"
#include "tensorflow/compiler/tf2xla/test_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
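// Scans `graph` for the first "If" node and returns its name together with
// the NameAttrLists of its then/else branch functions.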
Status FindIfThenAndElse(const GraphDef& graph, string* op_name,
NameAttrList* then_fn, NameAttrList* else_fn) {
for (const NodeDef& node : graph.node()) {
if (node.op() == "If") {
*op_name = node.name();
const NameAttrList* result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "then_branch", &result));
*then_fn = *result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "else_branch", &result));
*else_fn = *result;
return absl::OkStatus();
}
}
return errors::NotFound("No If node found in graph");
}
class ConditionalTestFixture
: public ::testing::TestWithParam<std::tuple<bool, bool>> {
protected:
void SetUp() override {
restrict_to_tpu_nodes_ = std::get<0>(GetParam());
wrap_condition_in_function_ = std::get<1>(GetParam());
}
void RunTest();
private:
void BuildCondGraph(Graph* cond_graph);
void CheckGraphDef(const GraphDef& graph_def,
const FunctionLibraryDefinition& library);
bool restrict_to_tpu_nodes_ = false;
bool wrap_condition_in_function_ = false;
};
TEST_P(ConditionalTestFixture, ConditionalTests) { RunTest(); }
INSTANTIATE_TEST_SUITE_P(
FunctionalizeControlFlow, ConditionalTestFixture,
::testing::Combine(::testing::Bool(), ::testing::Bool()),
[](const ::testing::TestParamInfo<ConditionalTestFixture::ParamType>&
info) {
bool restrict_to_tpu_nodes = std::get<0>(info.param);
bool wrap_cond_in_function = std::get<1>(info.param);
string name =
absl::StrCat(restrict_to_tpu_nodes ? "with_filter" : "without_filter",
wrap_cond_in_function ? "_in_function" : "_in_graph");
return name;
});
void ConditionalTestFixture::BuildCondGraph(Graph* cond_graph) {
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32);
auto less = ops::Less(scope.WithOpName("cond/Less"), y, x);
auto switch_1 = ops::Switch(scope.WithOpName("cond/Switch"), less, less);
auto identity_t =
ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_true);
auto seventeen = ops::Const<int32>(
scope.WithOpName("cond").WithControlDependencies(identity_t), 17);
auto switch_2 = ops::Switch(scope.WithOpName("cond/Switch"), y, less);
auto mul = ops::Multiply(scope.WithOpName("cond/Mul"), switch_2.output_true,
seventeen);
auto identity_f =
ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_false);
auto twenty_three = ops::Const<int32>(
scope.WithOpName("cond").WithControlDependencies(identity_f), 23);
auto switch_3 = ops::Switch(scope.WithOpName("cond/Switch"), x, less);
auto add = ops::Add(scope.WithOpName("cond/false/add"),
switch_3.output_false, twenty_three);
auto merge = ops::Merge(scope.WithOpName("cond/Merge"),
std::initializer_list<Input>{add, mul});
TF_EXPECT_OK(scope.ToGraph(cond_graph));
for (Node* n : cond_graph->nodes()) {
std::string dummy_value = "value";
for (absl::string_view attr_name : kAttrsToPropagate) {
n->AddAttr(std::string(attr_name), dummy_value);
}
}
}
}
void ConditionalTestFixture::CheckGraphDef(
const GraphDef& graph_def, const FunctionLibraryDefinition& library) {
string op_name;
NameAttrList then_fn;
NameAttrList else_fn;
TF_EXPECT_OK(FindIfThenAndElse(graph_def, &op_name, &then_fn, &else_fn));
InstantiationResultForTest else_result;
TF_EXPECT_OK(
InstantiateFunctionForTest(else_fn.name(), library, &else_result));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32);
auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
auto less = ops::Less(scope.WithOpName("cond/Less"), y, x);
auto if_op =
ops::If(scope.WithOpName(op_name), less,
std::initializer_list<Input>{less, y, x}, {DT_INT32}, then_fn,
else_fn, ops::If::OutputShapes({PartialTensorShape()}));
auto id = ops::Identity(scope.WithOpName("cond/Merge"), if_op.output[0]);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg_0 = ops::_Arg(scope.WithOpName("arg0"), DT_BOOL, 0);
auto arg_1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg_2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto identity = ops::Identity(scope.WithOpName("cond/Identity"), arg_0);
auto cond = ops::Const(
scope.WithOpName("cond").WithControlDependencies(identity), 17);
auto mul = ops::Mul(scope.WithOpName("cond/Mul"), arg_1, cond);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), mul, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(InstantiateFunctionForTest(then_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
EXPECT_EQ((DataTypeVector{DT_BOOL, DT_INT32, DT_INT32}), result.arg_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg_0 = ops::_Arg(scope.WithOpName("arg0"), DT_BOOL, 0);
auto arg_1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg_2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto identity = ops::Identity(scope.WithOpName("cond/Identity_1"), arg_0);
auto cond_1 = ops::Const(
scope.WithOpName("cond_1").WithControlDependencies(identity), 23);
auto add = ops::Add(scope.WithOpName("cond/false/add"), arg_2, cond_1);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(InstantiateFunctionForTest(else_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
EXPECT_EQ((DataTypeVector{DT_BOOL, DT_INT32, DT_INT32}), result.arg_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
for (const NodeDef& node : graph_def.node()) {
if (node.op() == "If") {
for (absl::string_view attr_name : kAttrsToPropagate) {
std::string attr_val;
TF_EXPECT_OK(GetNodeAttr(node, attr_name, &attr_val));
EXPECT_EQ(attr_val, "value");
}
}
}
}
}
void ConditionalTestFixture::RunTest() {
Graph graph(OpRegistry::Global());
if (wrap_condition_in_function_) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
Graph cond_graph(OpRegistry::Global());
BuildCondGraph(&cond_graph);
FunctionDef cond_fdef;
TF_ASSERT_OK(GraphToFunctionDef(cond_graph, "cond_fn", &cond_fdef));
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = cond_fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
NodeDef cond_fn;
cond_fn.set_name("cond_node");
cond_fn.set_op("cond_fn");
*(cond_fn.add_input()) = "source";
Status status;
scope.graph()->AddNode(cond_fn, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(scope.ToGraph(&graph));
} else {
BuildCondGraph(&graph);
}
FunctionLibraryDefinition library(graph.flib_def());
NodeFilter node_filter =
restrict_to_tpu_nodes_
? [](const Node* n) { return n->attrs().Find("_tpu_replicate"); }
: NodeFilter{};
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(FunctionalizeControlFlowForGraphDef(
&optimized_graph_def, &library, node_filter,
wrap_condition_in_function_));
TF_ASSERT_OK(FunctionalizeControlFlow(
&graph, &library, node_filter,
wrap_condition_in_function_));
if (wrap_condition_in_function_) {
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, tensorflow::Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, &library,
tensorflow::OptimizerOptions());
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
FunctionLibraryRuntime::Handle handle;
string func_name;
for (Node* n : graph.nodes()) {
if (n->name() == "cond_node") {
func_name = n->type_string();
break;
}
}
TF_ASSERT_OK(flr->Instantiate(func_name, AttrSlice(), &handle));
const FunctionBody* body = flr->GetFunctionBody(handle);
GraphDef graph_def;
body->graph->ToGraphDef(&graph_def);
CheckGraphDef(graph_def, library);
} else {
CheckGraphDef(optimized_graph_def, library);
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
CheckGraphDef(converted_graph_def, library);
}
}
Status FindWhileCondAndBody(const GraphDef& graph, NameAttrList* cond,
NameAttrList* body) {
for (const NodeDef& node : graph.node()) {
if (node.op() == "While") {
const NameAttrList* result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "cond", &result));
*cond = *result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "body", &result));
*body = *result;
return absl::OkStatus();
}
}
return errors::NotFound("No While node found in graph");
}
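// Builds a single-variable V1 while loop (Enter/Merge/Switch/Exit/
// NextIteration) and checks that functionalization produces a While op whose
// condition compares the loop variable against 10 and whose body adds 1.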
TEST(FunctionalizeControlFlow, OneLoopVar) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), source, "aloop");
auto enter2 =
ops::internal::Enter(scope.WithOpName("while/Enter2"), source, "aloop");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_ =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_.output_false);
auto identity =
ops::Identity(scope.WithOpName("while/Identity"), switch_.output_true);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), add);
auto sink = ops::Identity(scope.WithOpName("sink"), exit);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
for (Node* n : graph.nodes()) {
if (n->name() == "while/Enter") {
graph.AddControlEdge(n, graph.sink_node());
}
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{source}, cond_fn, body_fn);
auto sink = ops::Identity(scope.WithOpName("sink"), while_op[0]);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(arg), 10);
auto less = ops::Less(scope.WithOpName("while/Less"), arg, ten);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(cond_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto identity = ops::Identity(scope.WithOpName("while/Identity"), arg);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
FunctionDef GetNoinlineFunctionDef() {
FunctionDef fdef = FunctionDefHelper::Create(
"increment_fn", {"x:int32"}, {"add:int32"}, {},
{
{{"add/y"}, "Const", {}, {{"dtype", DT_INT32}}},
{{"add_0"}, "Add", {"x", "add/y:output:0"}, {{"T", DT_INT32}}},
},
{{"add", "add_0:z:0"}});
(*fdef.mutable_attr())["_noinline"].set_b(true);
return fdef;
}
Status AddNoinlineFunctionToGraph(const string& node_name, Graph* graph) {
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = GetNoinlineFunctionDef();
TF_RETURN_IF_ERROR(graph->AddFunctionLibrary(fdef_lib));
NodeDef increment_fn;
increment_fn.set_name(node_name);
increment_fn.set_op("increment_fn");
*increment_fn.add_input() = "while/Identity";
*increment_fn.add_input() = "^while/Identity";
Status status;
graph->AddNode(increment_fn, &status);
return status;
}
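// Same single-variable loop, but the next-iteration value comes from a call
// to a "_noinline" function; verifies that the extracted body function still
// calls that function rather than inlining it.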
TEST(FunctionalizeControlFlow, NoinlineLoopBody) {
const string& noinline_node_name = "while/increment_fn";
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto enter = ops::internal::Enter(scope.WithOpName("while/Enter"), source,
"while/while_context");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_ =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_.output_false);
auto identity =
ops::Identity(scope.WithOpName("while/Identity"), switch_.output_true);
TF_ASSERT_OK(AddNoinlineFunctionToGraph(noinline_node_name, scope.graph()));
NodeDef next_iter;
next_iter.set_name("while/NextIteration");
next_iter.set_op("NextIteration");
*next_iter.add_input() = noinline_node_name;
(*next_iter.mutable_attr())["T"].set_type(DT_INT32);
Status status;
Node* n = scope.graph()->AddNode(next_iter, &status);
TF_ASSERT_OK(status);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(n, 0, merge.output.node(), 1);
TF_ASSERT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(graph.flib_def());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
*(optimized_graph_def.mutable_library()->add_function()) =
GetNoinlineFunctionDef();
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_ASSERT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{source}, cond_fn, body_fn);
GraphDef expected;
TF_ASSERT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
TF_ASSERT_OK(
AddNoinlineFunctionToGraph(noinline_node_name, scope.graph()));
auto identity = ops::Identity(scope.WithOpName("while/Identity"), arg);
NodeDef retval;
retval.set_name("retval0_RetVal");
retval.set_op(FunctionLibraryDefinition::kRetOp);
*retval.add_input() = noinline_node_name;
(*retval.mutable_attr())["T"].set_type(DT_INT32);
(*retval.mutable_attr())["index"].set_i(0);
Status status;
scope.graph()->AddNode(retval, &status);
TF_ASSERT_OK(status);
GraphDef expected;
TF_ASSERT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
expected.clear_library();
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
TEST(FunctionalizeControlFlow, MissingFunctionDefInLibrary) {
const string& noinline_node_name = "while/increment_fn";
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto identity = ops::Identity(scope.WithOpName("while/Identity"), source);
TF_ASSERT_OK(AddNoinlineFunctionToGraph(noinline_node_name, scope.graph()));
TF_ASSERT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(graph.flib_def());
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
graph_def.clear_library();
Status status = FunctionalizeControlFlowForGraphDef(&graph_def, &library);
EXPECT_EQ(tensorflow::error::NOT_FOUND, status.code());
}
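// Like OneLoopVar, but the loop has no Exit node (its result is unused);
// functionalization should still produce a While op with the same cond/body.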
TEST(FunctionalizeControlFlow, OneLoopVarWithoutExit) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), source, "aloop");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_ =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto identity =
ops::Identity(scope.WithOpName("while/Identity"), switch_.output_true);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), add);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{source}, cond_fn, body_fn);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(arg), 10);
auto less = ops::Less(scope.WithOpName("while/Less"), arg, ten);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(cond_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto identity = ops::Identity(scope.WithOpName("while/Identity"), arg);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
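// Functionalizes a loop carrying two variables; the condition only depends on
// x, while the body updates both x and y.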
TEST(FunctionalizeControlFlow, TwoLoopVars) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto x = ops::Placeholder(scope.WithOpName("Placeholder/x"), DT_INT32);
auto y = ops::Placeholder(scope.WithOpName("Placeholder/y"), DT_INT32);
auto enter_x =
ops::internal::Enter(scope.WithOpName("while/Enter/x"), x, "aloop");
auto enter_y =
ops::internal::Enter(scope.WithOpName("while/Enter/y"), y, "aloop");
auto merge_x = ops::Merge(scope.WithOpName("while/Merge/x"),
std::initializer_list<Input>{enter_x, dummy});
auto merge_y = ops::Merge(scope.WithOpName("while/Merge/y"),
std::initializer_list<Input>{enter_y, dummy});
auto three = ops::Const<int32>(scope.WithOpName("while/cond/three")
.WithControlDependencies(merge_x.output),
3);
auto cond_add =
ops::Add(scope.WithOpName("while/cond/Add"), merge_x.output, three);
auto ten = ops::Const<int32>(scope.WithOpName("while/cond/ten")
.WithControlDependencies(merge_x.output),
10);
auto less = ops::Less(scope.WithOpName("while/cond/Less"), cond_add, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_x = ops::Switch(scope.WithOpName("while/Switch/x"),
merge_x.output, loop_cond);
auto switch_y = ops::Switch(scope.WithOpName("while/Switch/y"),
merge_y.output, loop_cond);
auto exit_x = ops::internal::Exit(scope.WithOpName("while/Exit/x"),
switch_x.output_false);
auto exit_y = ops::internal::Exit(scope.WithOpName("while/Exit/y"),
switch_y.output_false);
auto identity_x = ops::Identity(scope.WithOpName("while/Identity/x"),
switch_x.output_true);
auto identity_y = ops::Identity(scope.WithOpName("while/Identity/y"),
switch_y.output_true);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/one").WithControlDependencies(identity_x),
1);
auto two = ops::Const<int32>(
scope.WithOpName("while/mul/two").WithControlDependencies(identity_x),
2);
auto add = ops::Add(scope.WithOpName("while/add"), identity_x, one);
auto mul = ops::Add(scope.WithOpName("while/mul"), identity_y, two);
auto next_iteration_x =
ops::NextIteration(scope.WithOpName("while/NextIteration/x"), add);
auto next_iteration_y =
ops::NextIteration(scope.WithOpName("while/NextIteration/y"), mul);
auto sink_x = ops::Identity(scope.WithOpName("sink_x"), exit_x);
auto sink_y = ops::Identity(scope.WithOpName("sink_y"), exit_y);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration_x.node(), 0, merge_x.output.node(),
1);
scope.graph()->AddEdge(next_iteration_y.node(), 0, merge_y.output.node(),
1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x = ops::Placeholder(scope.WithOpName("Placeholder/x"), DT_INT32);
auto y = ops::Placeholder(scope.WithOpName("Placeholder/y"), DT_INT32); |
1,103 | cpp | tensorflow/tensorflow | tf2xla_opset | tensorflow/compiler/tf2xla/tf2xla_opset.cc | tensorflow/compiler/tf2xla/tf2xla_opset_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_TF2XLA_OPSET_H_
#define TENSORFLOW_COMPILER_TF2XLA_TF2XLA_OPSET_H_
#include <string>
#include <vector>
#include "absl/status/statusor.h"
namespace tensorflow {
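// Returns the sorted names of all TensorFlow ops with an XLA kernel
// registered for `device_name` ("XLA_GPU_JIT" or "XLA_CPU_JIT").
// A minimal usage sketch (device name as used in the tests below):
//   absl::StatusOr<std::vector<std::string>> ops =
//       GetRegisteredXlaOpsForDevice("XLA_CPU_JIT");
//   if (ops.ok()) { /* ops->front() is the alphabetically first op name */ }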
absl::StatusOr<std::vector<std::string>> GetRegisteredXlaOpsForDevice(
absl::string_view device_name);
}
#endif
#include "tensorflow/compiler/tf2xla/tf2xla_opset.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
namespace tensorflow {
const int SUPPORTED_DEVICES_NUM = 2;
static const char* const SUPPORTED_DEVICES[SUPPORTED_DEVICES_NUM] = {
DEVICE_GPU_XLA_JIT, DEVICE_CPU_XLA_JIT};
bool IsSupportedBackend(absl::string_view device_name) {
for (int i = 0; i < SUPPORTED_DEVICES_NUM; i++) {
if (SUPPORTED_DEVICES[i] == device_name) return true;
}
return false;
}
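// Ensures the XLA backend for `device_name` is registered, registering the
// default GPU/CPU JIT backends on first use. Only XLA_GPU_JIT and XLA_CPU_JIT
// are accepted. The op filter used for registration extends the dtype
// constraints of Const ("dtype") and Assert ("T") kernels to include
// DT_STRING.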
absl::Status RegisterBackends(absl::string_view device_name) {
if (!IsSupportedBackend(device_name)) {
return absl::InvalidArgumentError(
absl::StrCat(device_name, " is not supported. Supported devices are ",
absl::StrJoin(SUPPORTED_DEVICES, ", ")));
}
auto op_filter = [](KernelDef* kdef) {
if (kdef->op() == "Const") {
AddDtypeToKernelDefConstraint("dtype", DT_STRING, kdef);
}
if (kdef->op() == "Assert") {
AddDtypeToKernelDefConstraint("T", DT_STRING, kdef);
}
return true;
};
if (!XlaOpRegistry::IsBackendRegistered(DEVICE_GPU_XLA_JIT)) {
static auto gpu_backend =
XlaBackendRegistrar(DEVICE_GPU_XLA_JIT, kGpuAllTypes, op_filter);
}
if (!XlaOpRegistry::IsBackendRegistered(DEVICE_CPU_XLA_JIT)) {
static auto cpu_backend =
XlaBackendRegistrar(DEVICE_CPU_XLA_JIT, kCpuAllTypes, op_filter);
}
if (!XlaOpRegistry::IsBackendRegistered(std::string(device_name))) {
return absl::InternalError(
absl::StrCat(device_name, " is not registered."));
}
return absl::OkStatus();
}
absl::StatusOr<std::vector<std::string>> GetRegisteredXlaOpsForDevice(
absl::string_view device_name) {
auto status = RegisterBackends(device_name);
if (!status.ok()) return status;
std::vector<const KernelDef*> kernel_defs =
XlaOpRegistry::DeviceKernels(std::string(device_name), true);
std::vector<std::string> op_names;
op_names.reserve(kernel_defs.size());
for (const auto& kernel_def : kernel_defs) {
op_names.push_back(kernel_def->op());
}
std::sort(op_names.begin(), op_names.end());
return op_names;
}
} | #include "tensorflow/compiler/tf2xla/tf2xla_opset.h"
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(GetXlaOpsForDeviceTest, InvalidDeviceToRegister) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("Invalid_Device");
EXPECT_FALSE(result.ok());
}
TEST(GetXlaOpsForDeviceTest, GetGpuNames) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("XLA_GPU_JIT");
EXPECT_GT(result.value().size(), 0);
auto matmul =
std::find(result.value().begin(), result.value().end(), "MatMul");
auto max = std::find(result.value().begin(), result.value().end(), "Max");
auto min = std::find(result.value().begin(), result.value().end(), "Min");
EXPECT_TRUE((matmul != result.value().end()));
EXPECT_TRUE((max != result.value().end()));
EXPECT_TRUE((min != result.value().end()));
EXPECT_LT(matmul, max);
EXPECT_LT(max, min);
}
TEST(GetXlaOpsForDeviceTest, GetCpuNames) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("XLA_CPU_JIT");
EXPECT_GT(result.value().size(), 0);
auto matmul =
std::find(result.value().begin(), result.value().end(), "MatMul");
auto max = std::find(result.value().begin(), result.value().end(), "Max");
auto min = std::find(result.value().begin(), result.value().end(), "Min");
EXPECT_TRUE((matmul != result.value().end()));
EXPECT_TRUE((max != result.value().end()));
EXPECT_TRUE((min != result.value().end()));
EXPECT_LT(matmul, max);
EXPECT_LT(max, min);
}
}
} |
1,104 | cpp | tensorflow/tensorflow | const_analysis | tensorflow/compiler/tf2xla/const_analysis.cc | tensorflow/compiler/tf2xla/const_analysis_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_CONST_ANALYSIS_H_
#define TENSORFLOW_COMPILER_TF2XLA_CONST_ANALYSIS_H_
#include <vector>
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
Status BackwardsConstAnalysis(
const Graph& g, std::vector<bool>* compile_time_const_arg_indices,
std::vector<bool>* compile_time_const_nodes,
FunctionLibraryRuntime* flib_runtime,
std::function<bool(const Edge&)> edge_filter_input = nullptr);
Status GetCompileTimeConstInputs(const OpKernel* op_kernel,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime);
}
#endif
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include <unordered_map>
#include <unordered_set>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
Status GetFunctionBody(FunctionLibraryRuntime* flib_runtime,
const NodeDef& node, StringPiece func_attr_name,
const FunctionBody** fbody) {
NameAttrList name_attr_list;
TF_RETURN_IF_ERROR(GetNodeAttr(node, func_attr_name, &name_attr_list));
FunctionLibraryRuntime::Handle func_handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
name_attr_list.name(), AttrSlice(&name_attr_list.attr()), &func_handle));
*fbody = flib_runtime->GetFunctionBody(func_handle);
return absl::OkStatus();
}
Status GetFunctionBodies(FunctionLibraryRuntime* flib_runtime,
const NodeDef& node, StringPiece func_list_attr_name,
std::vector<const FunctionBody*>* fbodies) {
std::vector<NameAttrList> name_attr_lists;
TF_RETURN_IF_ERROR(GetNodeAttr(node, func_list_attr_name, &name_attr_lists));
for (const NameAttrList& name_attr_list : name_attr_lists) {
FunctionLibraryRuntime::Handle func_handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
name_attr_list.name(), AttrSlice(&name_attr_list.attr()),
&func_handle));
fbodies->push_back(flib_runtime->GetFunctionBody(func_handle));
}
return absl::OkStatus();
}
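// Computes which inputs of an If/Case node must be compile-time constants by
// running backwards const analysis on every branch body. Branch functions do
// not take the predicate/branch index (input 0), so argument i of a branch
// corresponds to input i + 1 of the node.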
Status CondConstInputIndices(
absl::Span<const FunctionBody* const> branch_bodies,
std::vector<int>* const_input_idxs, FunctionLibraryRuntime* flib_runtime) {
TF_RET_CHECK(!branch_bodies.empty());
TF_RET_CHECK(branch_bodies[0] != nullptr);
int num_inputs =
branch_bodies[0]->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
for (auto fbody : branch_bodies) {
TF_RET_CHECK(fbody != nullptr);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
}
for (int i = 0, end = compile_time_const_arg_indices.size(); i < end; i++) {
if (compile_time_const_arg_indices[i]) {
const_input_idxs->push_back(i + 1);
}
}
return absl::OkStatus();
}
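// Returns the indices of `node`'s inputs that must be compile-time constants.
// Functional control flow ops (While/If/Case/PartitionedCall) are handled by
// analyzing their function bodies; for While loops an argument is only
// reported if it is loop invariant. All other ops defer to the XlaOpRegistry.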
Status GetCompileTimeConstInputs(const NodeDef& node, const OpKernel* op_kernel,
const OpDef* op_def,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
DCHECK(op_def != nullptr || op_kernel != nullptr);
if (node.op() == "While" || node.op() == "StatelessWhile") {
const FunctionBody* fcond = nullptr;
const FunctionBody* fbody = nullptr;
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "cond", &fcond));
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "body", &fbody));
TF_RET_CHECK(fcond);
TF_RET_CHECK(fbody);
int num_inputs = fbody->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fcond->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
for (int i = 0; i < num_inputs; i++) {
if (compile_time_const_arg_indices[i]) {
TF_ASSIGN_OR_RETURN(
bool is_loop_invariant,
IsLoopInvariant(fbody, i,
flib_runtime->GetFunctionLibraryDefinition()));
if (is_loop_invariant) {
const_input_idxs->push_back(i);
} else {
Node* arg_i = fbody->arg_nodes[i];
Node* ret_i = fbody->ret_nodes[i];
VLOG(1) << "Argument " << i << " to while-loop " << node.name()
<< " has to be constant, but it's not a loop invariant, "
"cluster compilation likely to fail at compile time: "
<< arg_i->DebugString() << " vs. " << ret_i->DebugString();
VLOG(1) << node.ShortDebugString();
}
}
}
return absl::OkStatus();
} else if (node.op() == "If" || node.op() == "StatelessIf") {
const FunctionBody* fthen = nullptr;
const FunctionBody* felse = nullptr;
TF_RETURN_IF_ERROR(
GetFunctionBody(flib_runtime, node, "then_branch", &fthen));
TF_RETURN_IF_ERROR(
GetFunctionBody(flib_runtime, node, "else_branch", &felse));
return CondConstInputIndices({fthen, felse}, const_input_idxs,
flib_runtime);
} else if (node.op() == "Case" || node.op() == "StatelessCase") {
std::vector<const FunctionBody*> branch_bodies;
TF_RETURN_IF_ERROR(
GetFunctionBodies(flib_runtime, node, "branches", &branch_bodies));
return CondConstInputIndices(branch_bodies, const_input_idxs, flib_runtime);
} else if (node.op() == "PartitionedCall" ||
node.op() == "StatefulPartitionedCall") {
const FunctionBody* fbody;
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "f", &fbody));
int num_inputs = fbody->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
for (int i = 0; i < num_inputs; i++) {
if (compile_time_const_arg_indices[i]) {
const_input_idxs->push_back(i);
}
}
return absl::OkStatus();
} else if (op_def != nullptr) {
return XlaOpRegistry::CompileTimeConstantInputs(node, *op_def,
const_input_idxs);
} else {
return XlaOpRegistry::CompileTimeConstantInputs(*op_kernel,
const_input_idxs);
}
}
Status GetCompileTimeConstInputs(const Node* node,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
return GetCompileTimeConstInputs(node->def(), nullptr,
&node->op_def(), const_input_idxs,
flib_runtime);
}
}
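// Backwards dataflow pass that marks nodes (and _Arg indices) whose values
// must be known at compile time. Starting from ops that declare
// compile-time-constant inputs, the requirement is propagated to the
// producing nodes, skipping over const-traversable forwarding ops. When no
// edge filter is supplied, the resulting argument indices are cached on the
// graph.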
Status BackwardsConstAnalysis(
const Graph& g, std::vector<bool>* compile_time_const_arg_indices,
std::vector<bool>* compile_time_const_nodes,
FunctionLibraryRuntime* flib_runtime,
std::function<bool(const Edge&)> edge_filter_input) {
if (!compile_time_const_nodes && g.GetConstArgIndicesCache().has_value() &&
!edge_filter_input) {
VLOG(5) << "Using cached argument indices on graph " << &g;
*compile_time_const_arg_indices = g.GetConstArgIndicesCache().value();
return absl::OkStatus();
}
auto edge_filter = [&](const Edge& e) {
return edge_filter_input ? edge_filter_input(e) : true;
};
std::vector<bool> compile_time_const_nodes_impl;
if (compile_time_const_nodes) {
CHECK_EQ(compile_time_const_nodes->size(), g.num_node_ids());
} else {
compile_time_const_nodes_impl.resize(g.num_node_ids());
compile_time_const_nodes = &compile_time_const_nodes_impl;
}
Status status;
auto visit = [&](Node* node) {
if (!status.ok()) return;
if (XlaOpRegistry::IsMetadataOp(node->type_string())) {
VLOG(3) << "must-be-const node is metadata op: " << node->name();
return;
}
if ((*compile_time_const_nodes)[node->id()]) {
VLOG(3) << "marking consts for must-be-const node " << node->name();
if (node->type_string() == "_Arg") {
int index;
status = GetNodeAttr(node->attrs(), "index", &index);
if (!status.ok()) return;
if (compile_time_const_arg_indices) {
(*compile_time_const_arg_indices)[index] = true;
}
VLOG(3) << " const _Arg " << index << ": " << node->name();
return;
}
for (const Edge* pred : node->in_edges()) {
if (!pred->IsControlEdge() && edge_filter(*pred)) {
while (edge_filter(*pred) && IsConstTraversableOpType(pred->src())) {
status = pred->src()->input_edge(pred->src_output(), &pred);
if (!status.ok()) return;
}
if (edge_filter(*pred)) {
VLOG(4) << " " << pred->src()->name() << " must be const (is "
<< pred->src()->type_string() << ")";
(*compile_time_const_nodes)[pred->src()->id()] = true;
}
}
}
return;
}
std::vector<int> const_input_idxs;
status = GetCompileTimeConstInputs(node, &const_input_idxs, flib_runtime);
if (!status.ok() || const_input_idxs.empty()) {
return;
}
VLOG(3) << "marking consts for must-be-const inputs of " << node->name();
for (Edge const* edge : node->in_edges()) {
if (!edge->IsControlEdge() &&
absl::c_binary_search(const_input_idxs, edge->dst_input()) &&
edge_filter(*edge)) {
while (edge_filter(*edge) && IsConstTraversableOpType(edge->src())) {
status = edge->src()->input_edge(edge->src_output(), &edge);
if (!status.ok()) return;
}
if (edge_filter(*edge)) {
VLOG(4) << " input " << edge->dst_input() << ": "
<< edge->src()->name() << " must be const (is "
<< edge->src()->type_string() << ")";
(*compile_time_const_nodes)[edge->src()->id()] = true;
}
}
}
};
DFS(g, {}, visit, NodeComparatorName{},
[](const Edge& edge) { return !edge.src()->IsNextIteration(); });
if (compile_time_const_arg_indices && !edge_filter_input) {
VLOG(5) << "Setting the cache on the graph: " << &g;
g.GetConstArgIndicesCache() = *compile_time_const_arg_indices;
}
return status;
}
Status GetCompileTimeConstInputs(const OpKernel* op_kernel,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
return GetCompileTimeConstInputs(op_kernel->def(), op_kernel,
nullptr, const_input_idxs,
flib_runtime);
}
} | #include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(ConstAnalysisTest, Basics) {
Scope root = Scope::NewRootScope();
auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(root.WithOpName("Arg3"), DT_INT32, 3);
auto a = ops::Shape(root, arg0);
auto b = ops::Add(root, a, arg1);
auto c = ops::Reshape(root, arg2, b);
auto d = ops::Mul(root, c, ops::Sum(root, arg3, arg3));
FixupSourceAndSinkEdges(root.graph());
std::vector<bool> const_args(4, false);
std::vector<bool> const_nodes(root.graph()->num_node_ids(), false);
TF_ASSERT_OK(BackwardsConstAnalysis(*root.graph(), &const_args, &const_nodes,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, true, false, true}));
EXPECT_FALSE(const_nodes[arg0.node()->id()]);
EXPECT_TRUE(const_nodes[arg1.node()->id()]);
EXPECT_FALSE(const_nodes[arg2.node()->id()]);
EXPECT_TRUE(const_nodes[arg3.node()->id()]);
}
TEST(ConstAnalysisTest, TopologicalOrder) {
for (bool order : {false, true}) {
Scope root = Scope::NewRootScope();
auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
auto a = ops::Reshape(root, arg0, arg1);
auto b = ops::Reshape(root, arg2, a);
if (order) {
std::swap(a, b);
}
auto c = ops::Add(root, a, b);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(3, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({true, true, false}));
}
}
void TestFunctionCall(bool is_stateful_partitioned_call) {
FunctionDef callee = FunctionDefHelper::Define(
"Callee", {"t:float", "shape:int32"}, {"result:float"}, {},
{{{"result"}, "Reshape", {"t", "shape"}, {{"T", DT_FLOAT}}}});
FunctionDefLibrary flib;
*flib.add_function() = callee;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Scope root = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(root.WithOpName("tensor"), DT_FLOAT, 0);
auto arg1 = ops::_Arg(root.WithOpName("shape"), DT_INT32, 1);
NameAttrList call_attrs;
call_attrs.set_name("Callee");
if (is_stateful_partitioned_call) {
ops::StatefulPartitionedCall b(root.WithOpName("Call"),
{Output(arg0), Output(arg1)}, {DT_FLOAT},
call_attrs);
} else {
ops::PartitionedCall b(root.WithOpName("Call"),
{Output(arg0), Output(arg1)}, {DT_FLOAT},
call_attrs);
}
Graph graph(&flib_def);
TF_ASSERT_OK(root.ToGraph(&graph));
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(nullptr, Env::Default(),
nullptr,
TF_GRAPH_DEF_VERSION, &flib_def, opts));
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
lib_runtime));
EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
TEST(ConstAnalysisTest, PartitionedCall) {
TestFunctionCall(false);
}
TEST(ConstAnalysisTest, StatefulPartitionedCall) {
TestFunctionCall(true);
}
TEST(ConstAnalysisTest, DontFollowControlDependencies) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg1, c1);
Output reshape = ops::Reshape(root, arg1, add);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
TEST(ConstAnalysisTest, RespectExplicitAttr_0) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg1, c1);
Output reshape = ops::Reshape(root, arg1, add);
reshape.node()->AddAttr(kXlaCompileTimeConstantInputsAttr,
std::vector<string>());
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, false}));
}
TEST(ConstAnalysisTest, RespectExplicitAttr_1) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg0, c1);
std::vector<string> add_constant_inputs;
add_constant_inputs.push_back("x");
add.node()->AddAttr(kXlaCompileTimeConstantInputsAttr, add_constant_inputs);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(1, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({true}));
}
static bool Initialized = [] {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
return true;
}();
}
} |
1,105 | cpp | tensorflow/tensorflow | shape_util | third_party/xla/xla/shape_util.cc | third_party/xla/xla/shape_util_test.cc | #ifndef XLA_SHAPE_UTIL_H_
#define XLA_SHAPE_UTIL_H_
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/overflow_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace xla {
using ShapeIndexView = absl::Span<const int64_t>;
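// An index for navigating into nested (tuple) shapes: each element selects a
// subshape of the current tuple, and the empty index denotes the shape
// itself.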
struct ShapeIndex : public absl::InlinedVector<int64_t, 2> {
using InlinedVector::InlinedVector;
TF_ATTRIBUTE_NOINLINE ShapeIndex() = default;
explicit ShapeIndex(ShapeIndexView view)
: ShapeIndex(view.begin(), view.end()) {}
void push_front(int64_t value) { insert(begin(), value); }
void pop_front() { erase(begin()); }
std::string ToString() const;
};
std::ostream& operator<<(std::ostream& out, const ShapeIndex& shape_index);
class ShapeUtil {
public:
using DynamicSizeType = int32_t;
struct IndexedShape {
IndexedShape() = default;
IndexedShape(ShapeIndex index, Shape shape)
: index(std::move(index)), shape(std::move(shape)) {}
ShapeIndex index;
Shape shape;
};
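  // Returns the product of the extents of `shape`'s dimensions together with
  // a flag indicating whether the multiplication overflowed. When
  // kBoundedDynamicOk is true, unbounded dynamic dimensions are skipped;
  // otherwise the shape must not contain any.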
template <bool kBoundedDynamicOk>
static inline std::pair<int64_t, bool> ExtentProduct(const Shape& shape) {
DCHECK(shape.IsArray()) << ShapeUtil::HumanString(shape);
DCHECK_EQ(shape.dimensions_size(), shape.rank());
int64_t product = 1;
bool any_overflows = false;
for (int dim = 0; dim < shape.dimensions_size(); ++dim) {
if constexpr (kBoundedDynamicOk) {
if (shape.is_unbounded_dynamic_dimension(dim)) {
continue;
}
} else {
DCHECK(!shape.is_unbounded_dynamic_dimension(dim));
}
bool overflow;
std::tie(product, overflow) =
OverflowSafeMultiply(product, shape.dimensions(dim));
any_overflows |= overflow;
}
return {product, any_overflows};
}
static inline int64_t StaticExtentProduct(const Shape& shape) {
auto [product, overflow] = ExtentProduct<true>(shape);
DCHECK(!overflow);
return product;
}
static inline int64_t ElementsIn(const Shape& shape) {
auto [product, overflow] =
ExtentProduct<false>(shape);
DCHECK(!overflow);
return product;
}
static int64_t ElementsInRecursive(const Shape& shape);
static bool HasPrimitiveType(const Shape& shape,
PrimitiveType primitive_type);
static bool IsZeroElementArray(const Shape& shape);
static int64_t ByteSizeOf(const Shape& shape, int64_t pointer_size = -1);
static int64_t ByteSizeOfPrimitiveType(PrimitiveType primitive_type);
static int64_t ByteSizeOfTupleIndexTable(const Shape& shape,
int64_t pointer_size);
static int64_t ByteSizeOfElements(const Shape& shape);
static absl::StatusOr<int64_t> SerializedSize(const Shape& shape);
static absl::StatusOr<int64_t> SerializedSizeWithProto(
const Shape& shape, const ShapeProto& proto);
static void PrintHumanString(xla::Printer* printer, const Shape& shape);
static void PrintHumanStringWithLayout(xla::Printer* printer,
const Shape& shape);
static void PrintHumanString(xla::Printer* printer,
const ProgramShape& program_shape);
static std::string HumanString(const Shape& shape);
static std::string HumanStringWithLayout(const Shape& shape);
static std::string HumanString(const ProgramShape& program_shape);
static bool SameDimensions(const Shape& lhs, const Shape& rhs);
static bool SameRank(const Shape& lhs, const Shape& rhs);
static bool SameElementType(const Shape& lhs, const Shape& rhs) {
return lhs.element_type() == rhs.element_type();
}
static bool SameElementTypeIgnoringFpPrecision(const Shape& a,
const Shape& b) {
if (ElementIsFloating(a) && ElementIsFloating(b)) {
return true;
}
return ShapeUtil::SameElementType(a, b);
}
static PrimitiveType HigherPrecisionElementType(const Shape& a,
const Shape& b) {
return primitive_util::HigherPrecisionType(a.element_type(),
b.element_type());
}
static bool Compatible(const Shape& lhs, const Shape& rhs);
static bool CompatibleIgnoringElementType(const Shape& lhs, const Shape& rhs);
static bool CompatibleKind(const Shape& lhs, const Shape& rhs);
static bool CompatibleIgnoringFpPrecision(const Shape& lhs, const Shape& rhs);
static bool Equal(const Shape& lhs, const Shape& rhs);
static bool EqualIgnoringElementType(const Shape& lhs, const Shape& rhs);
static bool EqualIgnoringFpPrecision(const Shape& lhs, const Shape& rhs);
static bool EqualStructure(const Shape& lhs, const Shape& rhs);
static int64_t TrueRank(const Shape& shape);
static ProgramShape MakeProgramShape(std::initializer_list<Shape> parameters,
Shape result);
static bool IsScalar(const Shape& shape) {
return shape.IsArray() && shape.rank() == 0;
}
static bool IsEffectiveScalar(const Shape& shape) {
return shape.IsArray() && TrueRank(shape) == 0;
}
static bool IsScalarWithElementType(const Shape& shape,
PrimitiveType element_type);
static DimensionVector CreateDimensionVectorFromShape(const Shape& shape);
static int64_t GetDimension(const Shape& shape, int64_t dimension_number);
static int64_t GetDimensionNumber(const Shape& shape,
int64_t dimension_number);
static Shape ChangeElementType(const Shape& original, PrimitiveType type);
static Shape MakeStaticShape(const Shape& original);
static Shape MakeTupleShape(absl::Span<const Shape> shapes);
static Shape MakeTupleShapeWithPtrs(absl::Span<const Shape* const> shapes);
static Shape MakeMaybeTupleShape(absl::Span<const Shape> shapes);
static Shape MakeOpaqueShape();
static Shape MakeTokenShape();
static void AppendShapeToTuple(const Shape& shape, Shape* tuple_shape);
static void UpdateTupleShape(const Shape& shape, int64_t index,
Shape* tuple_shape);
static void UpdateDynamicDimension(Shape* shape, ShapeIndexView index,
int64_t dim, bool is_dynamic);
static void AppendMajorDimension(int bound, Shape* shape);
static Shape PrependMajorDimension(int64_t bound, Shape shape);
static void AppendMinorDimension(int bound, Shape* shape);
static void CopyDynamicDimensions(Shape* to, const Shape& from);
static bool IsEffectivelyMostMajorDimension(const Shape& shape,
int64_t dimension);
static Shape MakeNil() { return MakeTupleShape({}); }
static bool IsInitialized(const Shape& shape) {
return shape.element_type() != PRIMITIVE_TYPE_INVALID;
}
static Shape MakeShape(PrimitiveType element_type,
absl::Span<const int64_t> dimensions);
static Shape MakeScalarShape(PrimitiveType element_type);
static Shape MakeShape(PrimitiveType element_type,
absl::Span<const int64_t> dimensions,
const std::vector<bool>& dynamic_dimensions);
static absl::StatusOr<Shape> MakeValidatedShape(
PrimitiveType element_type, absl::Span<const int64_t> dimensions);
static absl::StatusOr<Shape> MakeValidatedShape(
PrimitiveType element_type, absl::Span<const int64_t> dimensions,
const std::vector<bool>& dynamic_dimensions);
template <typename T>
static Shape MakeShapeWithType(absl::Span<const int64_t> dimensions) {
return ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<T>(),
dimensions);
}
static Shape MakeShapeWithDenseLayout(
PrimitiveType element_type, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major,
absl::Span<const Tile> tiles = {},
int64_t tail_padding_alignment_in_elements = 1,
int64_t element_size_in_bits = 0, int64_t memory_space = 0,
absl::Span<const SplitConfig> split_configs = {});
static Shape MakeShapeWithSparseLayout(
PrimitiveType element_type, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major,
absl::Span<const DimLevelType> dim_level_types,
absl::Span<const bool> dim_unique = {},
absl::Span<const bool> dim_ordered = {},
PrimitiveType index_primitive_type = PRIMITIVE_TYPE_INVALID,
PrimitiveType pointer_primitive_type = PRIMITIVE_TYPE_INVALID,
int64_t tail_padding_alignment_in_elements = 1,
int64_t element_size_in_bits = 0, int64_t memory_space = 0,
std::optional<Shape> physical_shape = std::nullopt);
static Shape MoveDimToMajor(const Shape& shape, int64_t dim);
static Shape MakeShapeWithStaticDimensions(const Shape& shape);
static Shape MakeShapeWithDescendingLayout(
PrimitiveType element_type, absl::Span<const int64_t> dimensions);
static Shape MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
const Shape& shape);
static absl::Status PopulateShape(PrimitiveType element_type,
absl::Span<const int64_t> dimensions,
Shape* shape);
static absl::Status ValidateShape(const Shape& shape);
static absl::Status ValidateShapeWithOptionalLayout(const Shape& shape);
static bool ElementIsIntegral(const Shape& shape);
static bool ElementIsFloating(const Shape& shape);
static bool ElementIsComplex(const Shape& shape);
static bool ElementHasBitWidth(const Shape& shape, int bits);
static bool ElementIsIntegralWithBits(const Shape& shape, int bits);
static bool ElementIsSigned(const Shape& shape);
static bool IsArrayPrimitiveType(PrimitiveType primitive_type);
static bool IsNestedTuple(const Shape& shape);
static bool IsEmptyTuple(const Shape& shape);
static int64_t TupleElementCount(const Shape& shape);
static const Shape& GetTupleElementShape(const Shape& shape, int64_t index);
static int64_t SubshapeCount(const Shape& shape);
static Shape SliceTuple(const Shape& tuple, int64_t start, int64_t limit);
static Shape ComplexComponentShape(const Shape& complex_shape);
static bool IndexIsValid(const Shape& shape, ShapeIndexView index);
static const Shape& GetSubshape(const Shape& shape, ShapeIndexView index);
static const Shape& GetSubshapeOneIndex(const Shape& shape, int64_t index);
static absl::StatusOr<const Shape*> TryGetSubshape(const Shape& shape,
ShapeIndexView index);
static Shape* GetMutableSubshape(Shape* shape, ShapeIndexView index);
static bool IsLeafIndex(const Shape& shape, const ShapeIndex& index);
static int64_t GetLeafCount(const Shape& shape);
static int64_t GetLeafCountTuple(const Shape& shape);
static std::vector<IndexedShape> GetLeafShapes(const Shape& shape);
template <typename Fn>
static void ForEachSubshape(const Shape& shape, Fn&& fn) {
ForEachSubshapeWithStatus(shape, [&](const Shape& subshape,
const ShapeIndex& index) {
fn(subshape, index);
return absl::OkStatus();
}).IgnoreError();
}
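  // Illustrative usage (editor-added sketch): ForEachSubshape visits the root
  // shape first and then each nested subshape together with its ShapeIndex,
  // matching the traversal exercised by the nested-tuple test in this document.
  //   Shape t = ShapeUtil::MakeTupleShape(
  //       {ShapeUtil::MakeShape(F32, {2}), ShapeUtil::MakeShape(S32, {})});
  //   ShapeUtil::ForEachSubshape(t, [](const Shape& sub, const ShapeIndex& i) {
  //     // Visits (t, {}), (f32[2], {0}), (s32[], {1}).
  //   });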
template <typename Fn>
static void ForEachMutableSubshape(Shape* shape, Fn&& fn) {
ForEachMutableSubshapeWithStatus(shape, [&](Shape* subshape,
const ShapeIndex& index) {
fn(subshape, index);
return absl::OkStatus();
}).IgnoreError();
}
template <typename Fn>
static absl::Status ForEachLeafShapeWithStatus(const Shape& shape, Fn&& fn) {
return ForEachSubshapeWithStatus(
shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (IsLeafIndex(shape, index)) {
TF_RETURN_IF_ERROR(fn(subshape, index));
}
return absl::OkStatus();
});
}
template <typename Fn>
static absl::Status ForEachMutableLeafShapeWithStatus(Shape* shape, Fn&& fn) {
return ForEachMutableSubshapeWithStatus(
shape, [&](Shape* subshape, const ShapeIndex& index) {
if (IsLeafIndex(*shape, index)) {
TF_RETURN_IF_ERROR(fn(subshape, index));
}
return absl::OkStatus();
});
}
template <typename Fn>
static void ForEachLeafShape(const Shape& shape, Fn&& fn) {
ForEachLeafShapeWithStatus(shape, [&](const Shape& subshape,
const ShapeIndex& index) {
fn(subshape, index);
return absl::OkStatus();
}).IgnoreError();
}
template <typename Fn>
static void ForEachMutableLeafShape(const Shape& shape, Fn&& fn) {
ForEachMutableLeafShapeWithStatus(shape, [&](Shape* subshape,
const ShapeIndex& index) {
fn(subshape, index);
return absl::OkStatus();
}).IgnoreError();
}
template <typename Fn>
static absl::Status ForEachSubshapeWithStatus(const Shape& shape, Fn&& fn) {
return ForEachMutableSubshapeWithStatus(
const_cast<Shape*>(&shape),
[&](Shape* subshape, const ShapeIndex& index) -> absl::Status {
return fn(*const_cast<const Shape*>(subshape), index);
});
}
template <typename Fn>
static absl::Status ForEachMutableSubshapeWithStatus(Shape* shape, Fn&& fn) {
ShapeIndex index;
return ForEachMutableSubshapeWithStatusHelper(shape, fn, &index);
}
template <typename Fn>
static void ForEachSubshapePostOrder(const Shape& shape, Fn&& fn) {
ForEachSubshapePostOrderWithStatus(shape, [&](const Shape& subshape,
const ShapeIndex& index) {
fn(subshape, index);
return absl::OkStatus();
}).IgnoreError();
}
template <typename Fn>
static void ForEachMutableSubshapePostOrder(Shape* shape, Fn&& fn) {
ForEachMutableSubshapePostOrderWithStatus(
shape,
[&](Shape* subshape, const ShapeIndex& index) {
fn(subshape, index);
return absl::OkStatus();
})
.IgnoreError();
}
template <typename Fn>
static absl::Status ForEachSubshapePostOrderWithStatus(const Shape& shape,
Fn&& fn) {
return ForEachMutableSubshapePostOrderWithStatus(
const_cast<Shape*>(&shape),
[&](Shape* subshape, const ShapeIndex& index) -> absl::Status {
return fn(*const_cast<const Shape*>(subshape), index);
});
}
template <typename Fn>
static absl::Status ForEachMutableSubshapePostOrderWithStatus(Shape* shape,
Fn&& fn) {
ShapeIndex index;
return ForEachMutableSubshapePostOrderWithStatusHelper(shape, fn, &index);
}
static bool HasDegenerateDimensions(const Shape& shape);
static Shape DropDegenerateDimensions(const Shape& shape);
static Shape PermuteDimensions(absl::Span<const int64_t> permutation,
const Shape& shape);
struct ShapeEqualityDescriptor {
std::vector<int64_t> deleted_dimensions;
std::vector<int64_t> inserted_dimensions;
}; | #include "xla/shape_util.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
EXPECT_EQ(2, ShapeUtil::GetDimension(matrix, -2));
}
TEST(ShapeUtilTest, GetDimensionHelperExampleInDocumentationTest) {
auto shape = ShapeUtil::MakeShape(F32, {1, 2, 3, 4});
ASSERT_EQ(4, ShapeUtil::GetDimension(shape, -1));
}
TEST(ShapeUtilTest, NegativeIndexOobFails) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
ASSERT_DEATH(ShapeUtil::GetDimension(matrix, -3), "dimension_number >= 0");
}
TEST(ShapeUtilTest, CreateRank3DimensionVectorFromShape) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7});
DimensionVector dimensions = ShapeUtil::CreateDimensionVectorFromShape(shape);
EXPECT_THAT(dimensions, ElementsAre(3, 2, 7));
}
TEST(ShapeUtilTest, Rank1DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3});
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank2DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank3DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7});
ASSERT_EQ(7, shape.dimensions(2));
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank4DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7, 8});
ASSERT_EQ(8, shape.dimensions(3));
ASSERT_EQ(7, shape.dimensions(2));
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, CompatibleIdenticalShapes) {
Shape shape1 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_TRUE(ShapeUtil::Compatible(shape1, shape2));
}
TEST(ShapeUtilTest, TokenCompatibility) {
EXPECT_TRUE(ShapeUtil::Compatible(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTokenShape()));
EXPECT_FALSE(ShapeUtil::Compatible(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShape(F32, {})));
EXPECT_FALSE(ShapeUtil::Compatible(ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTokenShape()));
EXPECT_TRUE(ShapeUtil::Compatible(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape()}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape()})));
}
TEST(ShapeUtilTest, TokensEqualShapes) {
EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTokenShape()));
EXPECT_FALSE(ShapeUtil::Equal(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShape(F32, {})));
EXPECT_FALSE(ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTokenShape()));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})})));
EXPECT_FALSE(ShapeUtil::Equal(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {1, 0})})));
}
TEST(ShapeUtilTest, CompatibleNotIdenticalShapes) {
Shape shape_1 = ShapeUtil::MakeShape(F32, {3, 2});
auto layout_1 = shape_1.mutable_layout();
layout_1->clear_minor_to_major();
layout_1->add_minor_to_major(0);
layout_1->add_minor_to_major(1);
Shape shape_2 = ShapeUtil::MakeShape(F32, {3, 2});
auto layout_2 = shape_2.mutable_layout();
layout_2->clear_minor_to_major();
layout_2->add_minor_to_major(1);
layout_2->add_minor_to_major(0);
EXPECT_FALSE(ShapeUtil::Equal(shape_1, shape_2));
EXPECT_TRUE(ShapeUtil::Compatible(shape_1, shape_2));
}
TEST(ShapeUtilTest, CompatibleIgnoringFpPrecision) {
Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
}
TEST(ShapeUtilTest, IncompatibleIgnoringFpPrecision) {
Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {2, 2});
ASSERT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
}
TEST(ShapeUtilTest, IncompatibleDifferentElementShapes) {
Shape shape_1 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape_2 = ShapeUtil::MakeShape(PRED, {3, 2});
EXPECT_FALSE(ShapeUtil::Compatible(shape_1, shape_2));
}
TEST(ShapeUtilTest, EqualIgnoringFpPrecision) {
EXPECT_TRUE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, UnequalIgnoringFpPrecision) {
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {0, 1})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 4}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {1, 0})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(PRED, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, EqualIgnoringElementType) {
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(S32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(PRED, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, UnequalIgnoringElementType) {
EXPECT_FALSE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {0, 1})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 4}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {1, 0})));
}
TEST(ShapeUtilTest, EqualDynamicShapes) {
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 3}, {true, false}),
ShapeUtil::MakeShape(F32, {4, 3}, {true, false})));
EXPECT_FALSE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 3}, {true, false}),
ShapeUtil::MakeShape(F32, {4, 3}, {false, false})));
EXPECT_FALSE(ShapeUtil::Equal(
ShapeUtil::MakeShape(F32, {Shape::kUnboundedSize}, {true}),
ShapeUtil::MakeShape(F32, {2}, {true})));
}
TEST(ShapeUtilTest, CompatibleDynamicShapes) {
Shape shape_a = ShapeUtil::MakeShape(F32, {4, 3}, {true, false});
*shape_a.mutable_layout() = Layout({1, 0});
Shape shape_b = ShapeUtil::MakeShape(F32, {4, 3}, {true, false});
*shape_b.mutable_layout() = Layout({0, 1});
Shape shape_c = ShapeUtil::MakeShape(F32, {4, 3}, {false, true});
*shape_c.mutable_layout() = Layout({0, 1});
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_a));
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_b));
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_c));
}
TEST(ShapeUtilTest, CompatibleTuples) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
EXPECT_TRUE(ShapeUtil::Compatible(tuple1, tuple2));
}
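// Editor-added illustrative test: a minimal sketch of the tuple accessors
// TupleElementCount and GetTupleElementShape declared in shape_util.h; not
// part of the original test suite.
TEST(ShapeUtilTest, TupleElementAccessExample) {
  Shape f32_shape = ShapeUtil::MakeShape(F32, {3, 2});
  Shape pred_shape = ShapeUtil::MakeShape(PRED, {4, 5});
  Shape tuple = ShapeUtil::MakeTupleShape({f32_shape, pred_shape});
  EXPECT_EQ(2, ShapeUtil::TupleElementCount(tuple));
  EXPECT_TRUE(
      ShapeUtil::Equal(f32_shape, ShapeUtil::GetTupleElementShape(tuple, 0)));
  EXPECT_TRUE(
      ShapeUtil::Equal(pred_shape, ShapeUtil::GetTupleElementShape(tuple, 1)));
}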
TEST(ShapeUtilTest, MakeMaybeTupleShape) {
Shape s1 =
ShapeUtil::MakeMaybeTupleShape({ShapeUtil::MakeShape(F32, {3, 2})});
EXPECT_TRUE(ShapeUtil::Compatible(s1, ShapeUtil::MakeShape(F32, {3, 2})));
}
TEST(ShapeUtilTest, CompatibleTuplesIgnoringFpPrecision) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(BF16, {3, 2}), ShapeUtil::MakeShape(F32, {4, 5})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F64, {3, 2}), ShapeUtil::MakeShape(BF16, {4, 5})});
EXPECT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithSwappedElements) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesIgnoringFpPrecision) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(BF16, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(BF16, {4, 5})});
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithDifferentPrimitiveType) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(S32, {3, 2})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
EXPECT_TRUE(ShapeUtil::CompatibleIgnoringElementType(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithDifferentDimensions) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {4, 2})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleScalarVsTuple) {
Shape shape1 = ShapeUtil::MakeShape(F32, {});
Shape shape2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(U32, {})});
EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
}
TEST(ShapeUtilTest, OpaqueVsArray) {
Shape shape1 = ShapeUtil::MakeShape(F32, {5, 7});
Shape shape2 = ShapeUtil::MakeOpaqueShape();
EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
}
TEST(ShapeUtilTest, ScalarDefaultLayoutEqualsScalarEmptyMin2Maj) {
Shape scalar_default_layout = ShapeUtil::MakeShape(F32, {});
ASSERT_TRUE(scalar_default_layout.has_layout())
<< ShapeUtil::HumanStringWithLayout(scalar_default_layout);
const Shape scalar_empty_min2maj =
ShapeUtil::MakeShapeWithDenseLayout(F32, {}, {});
ASSERT_TRUE(scalar_empty_min2maj.has_layout())
<< ShapeUtil::HumanStringWithLayout(scalar_empty_min2maj);
EXPECT_TRUE(ShapeUtil::Equal(scalar_default_layout, scalar_empty_min2maj));
}
TEST(ShapeUtilTest, ByteSizeOfWithoutPadding) {
EXPECT_EQ(4, ShapeUtil::ByteSizeOfPrimitiveType(F32));
EXPECT_EQ(4, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {})));
EXPECT_EQ(800, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {10, 20})));
EXPECT_EQ(8, ShapeUtil::ByteSizeOfPrimitiveType(F64));
EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {})));
EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {10, 20})));
EXPECT_EQ(8, ShapeUtil::ByteSizeOfPrimitiveType(C64));
EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {})));
EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {10, 20})));
}
TEST(ShapeUtilTest, ByteStrides) {
Shape shape1 = ShapeUtil::MakeShape(F32, {3, 5, 7});
Shape shape2 = ShapeUtil::MakeShape(F16, {5, 7, 9});
EXPECT_THAT(*ShapeUtil::ByteStrides(shape1), ElementsAre(140, 28, 4));
EXPECT_THAT(*ShapeUtil::ByteStrides(shape2), ElementsAre(126, 18, 2));
}
TEST(ShapeUtilTest, NilShape) {
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeNil()));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeShape(F32, {1, 2, 3})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeShape(F32, {0, 1})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {})})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {0})})));
}
TEST(ShapeUtilTest, NestedTuple) {
EXPECT_FALSE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape({})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({})})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeTupleShape({})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({}), ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({}), ShapeUtil::MakeTupleShape({})})));
}
TEST(ShapeUtilTest, NestedTupleWithPtrs) {
const Shape nil = ShapeUtil::MakeNil();
const Shape s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_FALSE(ShapeUtil::IsNestedTuple(nil));
EXPECT_FALSE(
ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShapeWithPtrs({&s32})));
EXPECT_TRUE(
ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShapeWithPtrs({&nil})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&s32, &s32})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&s32, &nil})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&nil, &s32})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&nil, &nil})));
}
TEST(ShapeUtilTest, ElementsIn) {
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {0})));
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1})));
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1, 1})));
EXPECT_EQ(2, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {2})));
EXPECT_EQ(2, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {2, 1})));
EXPECT_EQ(15, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {3, 5})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {3, 0, 5})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {0, 3, 0})));
EXPECT_EQ(15, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1, 3, 5})));
EXPECT_EQ(221, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {13, 17})));
}
TEST(ShapeUtilTest, HasPrimitiveType) {
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {}), S32));
EXPECT_FALSE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {}), S16));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {0}), S32));
EXPECT_FALSE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeTupleShape({}), S32));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(S32, {})}),
S32));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S16, {})})}),
S16));
}
TEST(ShapeUtilTest, IsZeroElementArray) {
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {})));
EXPECT_TRUE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {0})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1, 1})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {2})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {2, 1})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {3, 5})));
EXPECT_TRUE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {3, 0, 5})));
EXPECT_TRUE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {0, 3, 0})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1, 3, 5})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {13, 17})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeNil()));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeTupleShape({})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {0, 3, 0})})));
}
TEST(ShapeUtilTest, SameDimensions) {
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(S32, {})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F32, {})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(S32, {1})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {0}),
ShapeUtil::MakeShape(S32, {0})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::MakeShape(S32, {2})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {2})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {0, 0}),
ShapeUtil::MakeShape(F32, {0})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F32, {1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 0})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1, 1}),
ShapeUtil::MakeShape(F32, {1, 2})));
}
TEST(ShapeUtilTest, GetSubshape) {
Shape array_shape = ShapeUtil::MakeShape(F32, {42, 42, 123});
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(array_shape, {})));
EXPECT_TRUE(ShapeUtil::Equal(
array_shape, *ShapeUtil::GetMutableSubshape(&array_shape, {})));
Shape tuple_shape =
ShapeUtil::MakeTupleShape({array_shape, array_shape, array_shape});
EXPECT_TRUE(
ShapeUtil::Equal(tuple_shape, ShapeUtil::GetSubshape(tuple_shape, {})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {0})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {1})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {2})));
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape(
{array_shape, ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape, array_shape}),
array_shape})});
EXPECT_TRUE(ShapeUtil::Equal(nested_tuple_shape,
ShapeUtil::GetSubshape(nested_tuple_shape, {})));
EXPECT_TRUE(ShapeUtil::Equal(
array_shape, ShapeUtil::GetSubshape(nested_tuple_shape, {0})));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::GetSubshape(nested_tuple_shape, {1})));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::GetSubshape(nested_tuple_shape, {2, 0})));
}
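// Editor-added illustrative test: a minimal sketch of IndexIsValid and
// TryGetSubshape (both declared in shape_util.h); not part of the original
// test suite.
TEST(ShapeUtilTest, TryGetSubshapeExample) {
  Shape array_shape = ShapeUtil::MakeShape(F32, {2, 3});
  Shape tuple_shape = ShapeUtil::MakeTupleShape({array_shape});
  EXPECT_TRUE(ShapeUtil::IndexIsValid(tuple_shape, {0}));
  EXPECT_FALSE(ShapeUtil::IndexIsValid(tuple_shape, {1}));
  auto subshape_or = ShapeUtil::TryGetSubshape(tuple_shape, {0});
  ASSERT_TRUE(subshape_or.ok());
  EXPECT_TRUE(ShapeUtil::Equal(array_shape, **subshape_or));
}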
TEST(ShapeUtilTest, IsLeafIndex) {
Shape array_shape = ShapeUtil::MakeShape(F32, {42, 42, 123});
EXPECT_TRUE(ShapeUtil::IsLeafIndex(array_shape, {}));
Shape tuple_shape = ShapeUtil::MakeTupleShape({array_shape, array_shape});
EXPECT_FALSE(ShapeUtil::IsLeafIndex(tuple_shape, {}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(tuple_shape, {0}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(tuple_shape, {1}));
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape(
{array_shape, ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape, array_shape}),
array_shape})});
EXPECT_FALSE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {0}));
EXPECT_FALSE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1, 0}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1, 1}));
}
TEST(ShapeUtilTest, ForEachSubshapeArray) {
const Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
int calls = 0;
ShapeUtil::ForEachSubshape(
shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {
EXPECT_EQ(&shape, &subshape);
EXPECT_TRUE(index.empty());
++calls;
});
EXPECT_EQ(1, calls);
}
TEST(ShapeUtilTest, ForEachSubshapeNestedTuple) {
const Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachSubshape(
shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {
EXPECT_TRUE(
ShapeUtil::Equal(subshape, ShapeUtil::GetSubshape(shape, index)));
if (calls == 0) {
EXPECT_TRUE(index.empty());
} else if (calls == 4) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(subshape));
}
++calls;
});
EXPECT_EQ(5, calls);
}
TEST(ShapeUtilTest, ForEachMutableSubshapeNestedTuple) {
Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachMutableSubshape(
&shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {
EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));
if (calls == 0) {
EXPECT_TRUE(index.empty());
} else if (calls == 4) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));
}
++calls;
});
EXPECT_EQ(5, calls);
}
TEST(ShapeUtilTest, InsertedOrDeleted1SizedDimensions) {
Shape shape0 = ShapeUtil::MakeShape(S32, {9, 1, 4});
Shape shape1 = ShapeUtil::MakeShape(S32, {1, 9, 4, 1});
Shape shape2 = ShapeUtil::MakeShape(S32, {3, 1, 12});
EXPECT_TRUE(
ShapeUtil::InsertedOrDeleted1SizedDimensions(shape0, shape1).has_value());
EXPECT_FALSE(
ShapeUtil::InsertedOrDeleted1SizedDimensions(shape0, shape2).has_value());
}
TEST(ShapeUtilTest, ForEachIndex) {
struct ShapeDimensionAndNumberInvocations {
std::vector<int64_t> dimensions;
int invocations;
} test_data[] = {
{{}, 1}, {{0}, 0}, {{16}, 16}, {{3, 0}, 0},
{{0, 2}, 0}, {{4, 16}, 64}, {{6, 11, 17}, 1122}, {{6, 11, 5, 17}, 5610},
};
for (const auto& data : test_data) {
Shape shape = ShapeUtil::MakeShape(F32, data.dimensions);
int invocations = 0;
auto increment_func = [&invocations](absl::Span<const int64_t> indexes) {
invocations++;
return true;
};
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
ShapeUtil::ForEachIndex(shape, zero_base, data.dimensions, step,
increment_func);
EXPECT_EQ(invocations, data.invocations);
}
}
TEST(ShapeUtilTest, ForEachIndexWithStatus) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int invocations = 0;
auto increment_func =
[&invocations](
absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
if (++invocations == 5) {
return Unimplemented("Cannot increment beyond 5.");
}
return true;
};
absl::Status error_status = ShapeUtil::ForEachIndexWithStatus(
shape, {0, 0}, {10, 10}, {0, 1},
increment_func);
EXPECT_FALSE(error_status.ok());
EXPECT_THAT(error_status.message(),
::testing::HasSubstr("Cannot increment beyond 5."));
EXPECT_EQ(invocations, 5);
}
TEST(ShapeUtilTest, GetForEachIndexParallelThreadCount) {
const int kThreadCount = ShapeUtil::GetForEachIndexParallelThreadCount();
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
  auto check_func = [kThreadCount](absl::Span<const int64_t> /*indexes*/,
int thread_id) -> absl::StatusOr<bool> {
EXPECT_GE(thread_id, -1);
EXPECT_LT(thread_id, kThreadCount);
return true;
};
for (int i = 0; i < 10; ++i) {
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 100},
{1, 1}, check_func);
}
}
TEST(ShapeUtilTest, ForEachIndexParallel) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10];
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
EXPECT_EQ(output[i][j], init + i + j);
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_Rank0) {
Shape shape = ShapeUtil::MakeShape(F32, {});
int64_t output = -1;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output = indexes.size();
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {}, {},
{}, set_func);
EXPECT_EQ(output, 0);
}
TEST(ShapeUtilTest, ForEachIndexParallel_Empty) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 0});
bool called = false;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
called = true;
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {2, 0},
{1, 1}, set_func);
EXPECT_FALSE(called);
}
TEST(ShapeUtilTest, ForEachIndexParallel_DimensionPinnedWithZeros) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
int64_t output[2][2] = {};
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {1, 0}, {0, 2},
{0, 1}, set_func);
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
if (i == 1) {
EXPECT_EQ(output[i][j], init + i + j);
} else {
EXPECT_EQ(output[i][j], 0);
}
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_WithSkips) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10] = {};
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {2, 3}, {3, 1},
{2, 1}, set_func);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
if ((i == 2 || i == 4) && j == 3) {
EXPECT_EQ(output[i][j], init + i + j);
} else {
EXPECT_EQ(output[i][j], 0);
}
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_CalledTwice) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10];
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
int init2 = 15;
auto set_func2 = [&](absl::Span<const int64_t> indexes,
                       int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init2 + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func);
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func2);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
EXPECT_EQ(output[i][j], init2 + i + j);
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_ |
1,106 | cpp | tensorflow/tensorflow | literal_util | third_party/xla/xla/literal_util.cc | tensorflow/compiler/tf2xla/literal_util_test.cc | #ifndef XLA_LITERAL_UTIL_H_
#define XLA_LITERAL_UTIL_H_
#include <array>
#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <optional>
#include <ostream>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/bitmap.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
class LiteralUtil {
public:
LiteralUtil() = delete;
static Literal GetFirstScalarLiteral(const LiteralSlice& literal);
static Literal GetScalarLiteral(const LiteralBase& literal,
absl::Span<const int64_t> multi_index);
static void SetScalarLiteral(MutableLiteralBase& literal,
absl::Span<const int64_t> multi_index,
const LiteralBase& scalar);
template <typename NativeT>
static Literal CreateR0(NativeT value);
template <typename T>
static Literal CreateR0(PrimitiveType primitive_type, T value);
template <typename NativeT>
static Literal CreateR1(absl::Span<const NativeT> values);
static Literal CreateR1(const tsl::core::Bitmap& values);
template <typename NativeT>
static Literal CreateR2(
std::initializer_list<std::initializer_list<NativeT>> values);
template <typename NativeT>
static Literal CreateR2WithLayout(
std::initializer_list<std::initializer_list<NativeT>> values,
const Layout& layout);
template <typename NativeT>
static Literal CreateR3(std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>
values);
template <typename NativeT>
static Literal CreateR3WithLayout(
std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>
values,
const Layout& layout);
template <typename NativeT>
static Literal CreateR4(
std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>>
values);
template <typename NativeT>
static Literal CreateR4WithLayout(
std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>>
values,
const Layout& layout);
static Literal Zero(PrimitiveType primitive_type);
static Literal One(PrimitiveType primitive_type);
static Literal MinValue(PrimitiveType primitive_type);
static Literal MaxValue(PrimitiveType primitive_type);
static absl::StatusOr<Literal> NanValue(PrimitiveType primitive_type);
template <typename NativeT>
static Literal CreateFullWithDescendingLayout(
absl::Span<const int64_t> dimensions, NativeT value);
template <typename NativeT>
static Literal CreateFromArray(const Array<NativeT>& values);
template <typename NativeT>
static Literal CreateFromArrayWithLayout(const Array<NativeT>& values,
const Layout& layout);
template <typename NativeT>
static Literal CreateR2FromArray2D(const Array2D<NativeT>& values);
template <typename NativeT>
static Literal CreateR2FromArray2DWithLayout(const Array2D<NativeT>& values,
const Layout& layout);
template <typename NativeT>
static Literal CreateR3FromArray3D(const Array3D<NativeT>& values);
template <typename NativeT>
static Literal CreateR3FromArray3DWithLayout(const Array3D<NativeT>& values,
const Layout& layout);
template <typename NativeT>
static Literal CreateR4FromArray4D(const Array4D<NativeT>& values);
template <typename NativeT>
static Literal CreateR4FromArray4DWithLayout(const Array4D<NativeT>& values,
const Layout& layout);
static Literal CreateR1U8(absl::string_view value);
static Literal CreateR2F32Linspace(float from, float to, int64_t rows,
int64_t cols);
template <typename NativeT>
static Literal CreateR3Projected(
std::initializer_list<std::initializer_list<NativeT>> values,
int64_t projection);
template <typename NativeT>
static Literal CreateR4Projected(
std::initializer_list<std::initializer_list<NativeT>> values,
int64_t projection_p, int64_t projection_z);
template <typename NativeT>
static Literal MakeIdentityR2(int64_t size);
static Literal MakeTuple(absl::Span<const Literal* const> elements);
static Literal MakeTupleFromSlices(absl::Span<const LiteralSlice> elements);
static Literal MakeTupleOwned(std::vector<Literal> elements);
template <typename... Ts>
static Literal MakeTupleOwned(Ts... elements) {
std::array<Literal, sizeof...(Ts)> arr{std::move(elements)...};
std::vector<Literal> v;
v.insert(v.begin(), std::make_move_iterator(arr.begin()),
std::make_move_iterator(arr.end()));
return MakeTupleOwned(std::move(v));
}
static Literal CreateToken();
static Literal CreateFromDimensions(PrimitiveType primitive_type,
absl::Span<const int64_t> dimensions);
static Literal ConvertBF16ToF32(const LiteralSlice& bf16_literal);
static Literal ConvertBF16ToF64(const LiteralSlice& bf16_literal);
static Literal ConvertF32ToF8E4M3FNUZ(const LiteralSlice& f32_literal);
static Literal ConvertF32ToF8E5M2FNUZ(const LiteralSlice& f32_literal);
static Literal ConvertF32ToBF16(const LiteralSlice& f32_literal);
static Literal ConvertF32ToS8(const LiteralSlice& f32_literal);
static Literal ConvertF32ToF64(const LiteralSlice& f32_literal);
static Literal ConvertF64ToBF16(const LiteralSlice& f64_literal);
static Literal ConvertF64ToF32(const LiteralSlice& f64_literal);
static Literal ConvertS32ToF32(const LiteralSlice& s32_literal);
static Literal MaxElement(const LiteralSlice& literal);
static Literal ReshapeSlice(absl::Span<const int64_t> new_dimensions,
absl::Span<const int64_t> minor_to_major,
const LiteralSlice& literal);
template <PrimitiveType type, typename T = primitive_util::NativeTypeOf<type>>
static absl::StatusOr<Literal> CreateLiteralWithGenerator(
const Shape& shape,
absl::FunctionRef<T(absl::Span<const int64_t>)> generator);
template <PrimitiveType type, typename E,
typename T = primitive_util::NativeTypeOf<type>>
static absl::StatusOr<Literal> CreateRandomLiteral(const Shape& shape,
E* engine, T mean,
T stddev);
template <PrimitiveType type, typename T = primitive_util::NativeTypeOf<type>>
static absl::StatusOr<Literal> CreateRandomLiteral(const Shape& shape, T mean,
T stddev);
static std::string MultiIndexAsString(absl::Span<const int64_t> multi_index);
static std::optional<int64_t> LiteralAsScalarInt64(const Literal& l);
};
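// Illustrative usage (editor-added sketch; element access via Literal::Get is
// the same accessor already used further down in this file):
//   Literal scalar = LiteralUtil::CreateR0<float>(2.5f);
//   Literal matrix = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}});
//   // scalar.Get<float>({}) == 2.5f, matrix.Get<int32_t>({1, 0}) == 3.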
std::ostream& operator<<(std::ostream& out, const Literal& literal);
template <typename NativeT>
Literal LiteralUtil::CreateR0(NativeT value) {
Literal literal(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<NativeT>(), {}));
literal.Set({}, value);
return literal;
}
template <typename T>
Literal LiteralUtil::CreateR0(PrimitiveType primitive_type,
T value) {
return primitive_util::ArrayTypeSwitch<Literal>(
[&value](auto type) {
using NativeT = primitive_util::NativeTypeOf<type>;
return CreateR0(static_cast<NativeT>(value));
},
primitive_type);
}
template <typename NativeT>
Literal LiteralUtil::CreateR1(absl::Span<const NativeT> values) {
Literal literal(
ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<NativeT>(),
{static_cast<int64_t>(values.size())}));
literal.PopulateR1(values);
return literal;
}
template <typename NativeT>
Literal LiteralUtil::CreateR2WithLayout(
std::initializer_list<std::initializer_list<NativeT>> values,
const Layout& layout) {
Literal literal(ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<NativeT>(),
{static_cast<int64_t>(values.size()),
static_cast<int64_t>(values.begin()->size())},
layout.minor_to_major()));
literal.PopulateR2(values);
return literal;
}
template <typename NativeT>
Literal LiteralUtil::CreateR2(
std::initializer_list<std::initializer_list<NativeT>> values) {
return CreateR2WithLayout(values, LayoutUtil::GetDefaultLayoutForR2());
}
template <typename NativeT>
Literal LiteralUtil::CreateR3WithLayout(
std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>>
values,
const Layout& layout) {
const int64_t d0 = values.size();
const int64_t d1 = values.begin()->size();
const int64_t d2 = values.begin()->begin()->size();
Array3D<NativeT> tmp(d0, d1, d2);
int64_t i0 = 0;
for (auto d1_values : values) {
int64_t i1 = 0;
for (auto d2_values : d1_values) {
int64_t i2 = 0;
for (auto value : d2_values) {
tmp(i0, i1, i2) = value;
++i2;
}
++i1;
}
++i0;
}
return CreateR3FromArray3DWithLayout(tmp, layout);
}
template <typename NativeT>
Literal LiteralUtil::CreateR3(
std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>>
values) {
return CreateR3WithLayout(values, LayoutUtil::GetDefaultLayoutForR3());
}
template <typename NativeT>
Literal LiteralUtil::CreateR4WithLayout(
std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>>
values,
const Layout& layout) {
const int64_t d0 = values.size();
const int64_t d1 = values.begin()->size();
const int64_t d2 = values.begin()->begin()->size();
const int64_t d3 = values.begin()->begin()->begin()->size();
Array4D<NativeT> tmp(d0, d1, d2, d3);
int64_t i0 = 0;
for (auto d1_values : values) {
int64_t i1 = 0;
for (auto d2_values : d1_values) {
int64_t i2 = 0;
for (auto d3_values : d2_values) {
int64_t i3 = 0;
for (auto value : d3_values) {
tmp(i0, i1, i2, i3) = value;
++i3;
}
++i2;
}
++i1;
}
++i0;
}
return CreateR4FromArray4DWithLayout(tmp, layout);
}
template <typename NativeT>
Literal LiteralUtil::CreateR4(
std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>>
values) {
return CreateR4WithLayout(values, LayoutUtil::GetDefaultLayoutForR4());
}
template <typename NativeT>
Literal LiteralUtil::CreateFromArrayWithLayout(
const Array<NativeT>& values, const Layout& layout) {
Literal literal(ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), values.dimensions(),
layout.minor_to_major()));
literal.PopulateFromArray(values);
return literal;
}
template <typename NativeT>
Literal LiteralUtil::CreateFromArray(
const Array<NativeT>& values) {
return CreateFromArrayWithLayout(
values, LayoutUtil::GetDefaultLayoutForRank(values.num_dimensions()));
}
template <typename NativeT>
Literal LiteralUtil::CreateR2FromArray2DWithLayout(
const Array2D<NativeT>& values, const Layout& layout) {
return CreateFromArrayWithLayout(values, layout);
}
template <typename NativeT>
Literal LiteralUtil::CreateR2FromArray2D(
const Array2D<NativeT>& values) {
return CreateFromArray(values);
}
template <typename NativeT>
Literal LiteralUtil::CreateR3FromArray3DWithLayout(
const Array3D<NativeT>& values, const Layout& layout) {
return CreateFromArrayWithLayout(values, layout);
}
template <typename NativeT>
Literal LiteralUtil::CreateR3FromArray3D(
const Array3D<NativeT>& values) {
return CreateFromArray(values);
}
template <typename NativeT>
Literal LiteralUtil::CreateR3Projected(
std::initializer_list<std::initializer_list<NativeT>> values,
int64_t projection) {
int64_t dim0_size = projection;
int64_t dim1_size = values.size();
int64_t dim2_size = values.begin()->size();
Array3D<NativeT> array(dim0_size, dim1_size, dim2_size);
for (int64_t dim0 = 0; dim0 < dim0_size; ++dim0) {
int64_t dim1 = 0;
for (auto inner_list : values) {
int64_t dim2 = 0;
for (auto value : inner_list) {
array(dim0, dim1, dim2) = value;
++dim2;
}
CHECK_EQ(dim2_size, dim2);
++dim1;
}
CHECK_EQ(dim1_size, dim1);
}
return CreateR3FromArray3D(array);
}
template <typename NativeT>
Literal LiteralUtil::CreateR4Projected(
std::initializer_list<std::initializer_list<NativeT>> values,
int64_t projection_p, int64_t projection_z) {
int64_t dim0_size = projection_p;
int64_t dim1_size = projection_z;
int64_t dim2_size = values.size();
int64_t dim3_size = values.begin()->size();
Array4D<NativeT> array(dim0_size, dim1_size, dim2_size, dim3_size);
for (int64_t dim0 = 0; dim0 < dim0_size; ++dim0) {
for (int64_t dim1 = 0; dim1 < dim1_size; ++dim1) {
int64_t dim2 = 0;
for (auto inner_list : values) {
int64_t dim3 = 0;
for (auto value : inner_list) {
array(dim0, dim1, dim2, dim3) = value;
++dim3;
}
CHECK_EQ(dim3_size, dim3);
++dim2;
}
CHECK_EQ(dim2_size, dim2);
}
}
return CreateR4FromArray4D(array);
}
template <typename NativeT>
Literal LiteralUtil::CreateR4FromArray4D(
const Array4D<NativeT>& values) {
return CreateFromArray(values);
}
template <typename NativeT>
Literal LiteralUtil::CreateR4FromArray4DWithLayout(
const Array4D<NativeT>& values, const Layout& layout) {
return CreateFromArrayWithLayout(values, layout);
}
template <typename NativeT>
Literal LiteralUtil::MakeIdentityR2(int64_t size) {
Array2D<NativeT> array(size, size, 0);
for (int64_t i = 0; i < size; ++i) {
array(i, i) = 1;
}
return CreateR2FromArray2D(array);
}
template <typename NativeT>
Literal LiteralUtil::CreateFullWithDescendingLayout(
absl::Span<const int64_t> dimensions, NativeT value) {
Literal literal(ShapeUtil::MakeShapeWithDescendingLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions));
literal.PopulateWithValue(value);
return literal;
}
template <PrimitiveType type, typename T>
absl::StatusOr<Literal> LiteralUtil::CreateLiteralWithGenerator(
const Shape& shape,
absl::FunctionRef<T(absl::Span<const int64_t>)> generator) {
using NativeT = primitive_util::NativeTypeOf<type>;
TF_RET_CHECK(shape.element_type() == type);
Literal literal(shape);
TF_RETURN_IF_ERROR(literal.Populate<NativeT>(
[=](absl::Span<const int64_t> indexes) { return generator(indexes); }));
return std::move(literal);
}
template <PrimitiveType type, typename E, typename T>
absl::StatusOr<Literal> LiteralUtil::CreateRandomLiteral(
const Shape& shape, E* engine, T mean, T stddev) {
using NativeT = primitive_util::NativeTypeOf<type>;
std::normal_distribution<double> generator(mean, stddev);
return CreateLiteralWithGenerator<type, NativeT>(
      shape, [&](absl::Span<const int64_t> /*indexes*/) {
return static_cast<NativeT>(generator(*engine));
});
}
template <PrimitiveType type, typename T>
absl::StatusOr<Literal> LiteralUtil::CreateRandomLiteral(
const Shape& shape, T mean, T stddev) {
std::minstd_rand0 engine;
return CreateRandomLiteral<type>(shape, &engine, mean, stddev);
}
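// Illustrative usage (editor-added sketch): drawing an F32 literal from a
// normal distribution via the default-engine overload above.
//   absl::StatusOr<Literal> literal = LiteralUtil::CreateRandomLiteral<F32>(
//       ShapeUtil::MakeShape(F32, {16, 16}), /*mean=*/0.0f, /*stddev=*/1.0f);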
}
#endif
#include "xla/literal_util.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/index_util.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/bitmap.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
using absl::StrCat;
template <typename FromNativeT, typename ToNativeT>
Literal ConvertType(LiteralSlice literal) {
Shape result_shape(literal.shape());
ShapeUtil::ForEachMutableSubshape(
&result_shape, [](Shape* subshape, const ShapeIndex&) {
if (subshape->element_type() ==
primitive_util::NativeToPrimitiveType<FromNativeT>()) {
subshape->set_element_type(
primitive_util::NativeToPrimitiveType<ToNativeT>());
}
});
Literal result(result_shape);
ShapeUtil::ForEachSubshape(
literal.shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (subshape.IsArray()) {
if (subshape.element_type() ==
primitive_util::NativeToPrimitiveType<FromNativeT>()) {
auto src = literal.data<FromNativeT>(shape_index);
auto dest = result.data<ToNativeT>(shape_index);
for (int64_t i = 0, end = src.size(); i < end; ++i) {
dest[i] = static_cast<ToNativeT>(src[i]);
}
} else {
TF_CHECK_OK(result.CopyFrom(literal,
shape_index,
shape_index));
}
}
});
return result;
}
template <PrimitiveType kType>
using NativeT = typename primitive_util::PrimitiveTypeToNative<kType>::type;
template <PrimitiveType kType, typename F, typename... Args>
Literal CreateScalarImpl(F&& value_provider, Args... args) {
return LiteralUtil::CreateR0<NativeT<kType>>(
value_provider(std::forward<Args>(args)...));
}
template <template <PrimitiveType> class F, typename... Args>
Literal CreateScalar(PrimitiveType primitive_type, Args... args) {
return primitive_util::PrimitiveTypeSwitch<Literal>(
[&](auto primitive_type_constant) -> Literal {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return CreateScalarImpl<primitive_type_constant>(
F<primitive_type_constant>{}, std::forward<Args>(args)...);
}
LOG(FATAL) << "Unhandled primitive type " << primitive_type;
},
primitive_type);
}
template <PrimitiveType kType>
struct ZeroProvider {
NativeT<kType> operator()() const { return static_cast<NativeT<kType>>(0); }
};
template <PrimitiveType kType>
struct OneProvider {
NativeT<kType> operator()() const { return static_cast<NativeT<kType>>(1); }
};
template <typename T>
struct IsReal {
static constexpr bool value = std::numeric_limits<T>::is_specialized;
};
template <typename T>
struct IsValidScalarType {
static constexpr bool value = IsReal<T>::value || is_complex_v<T>;
};
template <typename NativeT>
NativeT GetMaxImpl() {
if constexpr (IsReal<NativeT>::value) {
if constexpr (std::numeric_limits<NativeT>::has_infinity) {
return std::numeric_limits<NativeT>::infinity();
}
return std::numeric_limits<NativeT>::max();
}
LOG(FATAL) << "No max value for given type.";
}
template <typename NativeT>
NativeT GetMinImpl() {
if constexpr (IsReal<NativeT>::value) {
if constexpr (std::numeric_limits<NativeT>::has_infinity) {
return -std::numeric_limits<NativeT>::infinity();
}
return std::numeric_limits<NativeT>::lowest();
}
LOG(FATAL) << "No min value for given type.";
}
template <PrimitiveType kType>
struct MaxProvider {
NativeT<kType> operator()() const { return GetMaxImpl<NativeT<kType>>(); }
};
template <PrimitiveType kType>
struct MinProvider {
NativeT<kType> operator()() const { return GetMinImpl<NativeT<kType>>(); }
};
template <PrimitiveType kType>
struct FirstElementProvider {
NativeT<kType> operator()(const LiteralBase& literal) const {
return literal.GetFirstElement<NativeT<kType>>();
}
};
template <typename NativeT>
std::enable_if_t<IsReal<NativeT>::value, NativeT> GetMaxElementImpl(
const LiteralBase& literal) {
auto view = literal.data<NativeT>();
return *absl::c_max_element(view);
}
template <typename NativeT>
std::enable_if_t<!IsReal<NativeT>::value, NativeT> GetMaxElementImpl(
const LiteralBase& literal) {
LOG(FATAL) << "Unsupported type.";
}
template <PrimitiveType kType>
struct MaxElementProvider {
NativeT<kType> operator()(const LiteralBase& literal) const {
return GetMaxElementImpl<NativeT<kType>>(literal);
}
};
template <typename NativeT>
std::enable_if_t<IsValidScalarType<NativeT>::value, NativeT>
GetElementAtIndexImpl(const LiteralBase* literal,
absl::Span<const int64_t> multi_index) {
return literal->Get<NativeT>(multi_index);
}
template <typename NativeT>
std::enable_if_t<!IsValidScalarType<NativeT>::value, NativeT>
GetElementAtIndexImpl(const LiteralBase* literal,
absl::Span<const int64_t> multi_index) {
LOG(FATAL) << "Not a valid scalar element type.";
}
template <PrimitiveType kType>
struct GetElementAtIndexProvider {
NativeT<kType> operator()(const LiteralBase* literal,
absl::Span<const int64_t> multi_index) const {
DCHECK_EQ(literal->shape().element_type(), kType);
return GetElementAtIndexImpl<NativeT<kType>>(literal, multi_index);
}
};
template <PrimitiveType kType>
void SetScalarAtIndexImpl(MutableLiteralBase& literal,
absl::Span<const int64_t> multi_index,
const LiteralBase& scalar) {
DCHECK_EQ(literal.shape().element_type(), kType);
using NativeT = typename primitive_util::PrimitiveTypeToNative<kType>::type;
literal.Set<NativeT>(multi_index, scalar.Get<NativeT>({}));
}
}
Literal LiteralUtil::CreateFromDimensions(
PrimitiveType primitive_type, absl::Span<const int64_t> dimensions) {
return Literal::CreateFromShape(
ShapeUtil::MakeShape(primitive_type, dimensions));
}
Literal LiteralUtil::ConvertBF16ToF32(
const LiteralSlice& bf16_literal) {
return ConvertType<bfloat16, float>(bf16_literal);
}
Literal LiteralUtil::ConvertBF16ToF64(
const LiteralSlice& bf16_literal) {
return ConvertType<bfloat16, double>(bf16_literal);
}
Literal LiteralUtil::ConvertF32ToF8E4M3FNUZ(
const LiteralSlice& f32_literal) {
return ConvertType<float, tsl::float8_e4m3fnuz>(f32_literal);
}
Literal LiteralUtil::ConvertF32ToF8E5M2FNUZ(
const LiteralSlice& f32_literal) {
return ConvertType<float, tsl::float8_e5m2fnuz>(f32_literal);
}
Literal LiteralUtil::ConvertF32ToBF16(
const LiteralSlice& f32_literal) {
return ConvertType<float, bfloat16>(f32_literal);
}
Literal LiteralUtil::ConvertF32ToS8(
const LiteralSlice& f32_literal) {
return ConvertType<float, int8_t>(f32_literal);
}
Literal LiteralUtil::ConvertF32ToF64(
const LiteralSlice& f32_literal) {
return ConvertType<float, double>(f32_literal);
}
Literal LiteralUtil::ConvertF64ToBF16(
const LiteralSlice& f64_literal) {
return ConvertType<double, bfloat16>(f64_literal);
}
Literal LiteralUtil::ConvertF64ToF32(
const LiteralSlice& f64_literal) {
return ConvertType<double, float>(f64_literal);
}
Literal LiteralUtil::ConvertS32ToF32(
const LiteralSlice& s32_literal) {
return ConvertType<int32_t, float>(s32_literal);
}
Literal LiteralUtil::CreateToken() {
return Literal(ShapeUtil::MakeTokenShape());
}
Literal LiteralUtil::Zero(PrimitiveType primitive_type) {
return CreateScalar<ZeroProvider>(primitive_type);
}
Literal LiteralUtil::One(PrimitiveType primitive_type) {
return CreateScalar<OneProvider>(primitive_type);
}
Literal LiteralUtil::MinValue(PrimitiveType primitive_type) {
return CreateScalar<MinProvider>(primitive_type);
}
Literal LiteralUtil::MaxValue(PrimitiveType primitive_type) {
return CreateScalar<MaxProvider>(primitive_type);
}
absl::StatusOr<Literal> LiteralUtil::NanValue(
PrimitiveType primitive_type) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return LiteralUtil::CreateR0<NativeT>(
std::numeric_limits<NativeT>::quiet_NaN());
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
auto nan =
std::numeric_limits<typename NativeT::value_type>::quiet_NaN();
return LiteralUtil::CreateR0<NativeT>(NativeT(nan, nan));
}
return InvalidArgum | #include "tensorflow/compiler/tf2xla/literal_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(LiteralUtil, LiteralToHostTensor) {
std::vector<int64_t> int64_values = {1, 2, 3};
xla::Literal int64_values_literal =
xla::LiteralUtil::CreateR1(absl::Span<const int64_t>(int64_values));
Tensor host_tensor;
EXPECT_EQ("Cannot convert literal of type S64 to tensor of type int32",
LiteralToHostTensor(int64_values_literal, DT_INT32, &host_tensor)
.message());
EXPECT_EQ("Cannot convert literal of type S64 to tensor of type qint32",
LiteralToHostTensor(int64_values_literal, DT_QINT32, &host_tensor)
.message());
EXPECT_TRUE(
LiteralToHostTensor(int64_values_literal, DT_INT64, &host_tensor).ok());
test::ExpectTensorEqual<int64_t>(host_tensor,
test::AsTensor<int64_t>(int64_values));
}
template <class T>
using LiteralUtilTest = ::testing::Test;
using Types =
::testing::Types<std::pair<int8, qint8>, std::pair<uint8, quint8>,
std::pair<int16, qint16>, std::pair<uint16, quint16>,
std::pair<int32, qint32>>;
TYPED_TEST_SUITE(LiteralUtilTest, Types);
TYPED_TEST(LiteralUtilTest, LiteralToQuantizedHostTensor) {
using int_type = typename TypeParam::first_type;
using qint_type = typename TypeParam::second_type;
Tensor host_tensor;
std::vector<int_type> int_values = {10, 11};
xla::Literal int_values_literal =
xla::LiteralUtil::CreateR1(absl::Span<const int_type>(int_values));
EXPECT_TRUE(LiteralToHostTensor(int_values_literal,
DataTypeToEnum<int_type>::value, &host_tensor)
.ok());
test::ExpectTensorEqual<int_type>(host_tensor,
test::AsTensor<int_type>(int_values));
EXPECT_TRUE(LiteralToHostTensor(int_values_literal,
DataTypeToEnum<qint_type>::value,
&host_tensor)
.ok());
std::vector<qint_type> qint_values = {10, 11};
test::ExpectTensorEqual<qint_type>(host_tensor,
test::AsTensor<qint_type>(qint_values));
EXPECT_EQ(
error::INVALID_ARGUMENT,
LiteralToHostTensor(int_values_literal, DT_INT64, &host_tensor).code());
}
}
} |
1,107 | cpp | tensorflow/tensorflow | xla_jit_compiled_cpu_function | tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.cc | tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_XLA_JIT_COMPILED_CPU_FUNCTION_H_
#define TENSORFLOW_COMPILER_TF2XLA_XLA_JIT_COMPILED_CPU_FUNCTION_H_
#include <memory>
#include <vector>
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "xla/client/local_client.h"
#include "xla/cpu_function_runtime.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class XlaJitCompiledCpuFunction {
public:
static absl::StatusOr<std::unique_ptr<XlaJitCompiledCpuFunction>> Compile(
const GraphDef& graph_def, const tf2xla::Config& config,
const xla::ExecutableBuildOptions& build_options);
XlaJitCompiledCpuFunction(const XlaJitCompiledCpuFunction&) = delete;
XlaJitCompiledCpuFunction& operator=(const XlaJitCompiledCpuFunction&) =
delete;
const XlaCompiledCpuFunction::StaticData& StaticData() const {
return static_data_;
}
private:
XlaJitCompiledCpuFunction() {}
std::unique_ptr<xla::LocalExecutable> executable_;
XlaCompiledCpuFunction::StaticData static_data_;
std::vector<xla::cpu_function_runtime::BufferInfo> buffer_infos_;
std::vector<int32> arg_index_table_;
std::vector<string> nonempty_arg_names_;
std::vector<string> nonempty_variable_names_;
std::vector<string> nonempty_result_names_;
std::vector<const char*> arg_names_;
std::vector<const char*> variable_names_;
std::vector<const char*> result_names_;
std::unique_ptr<const xla::ProgramShapeProto> program_shape_;
};
}
#endif
#include "tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.h"
#include <memory>
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/tf2xla.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_computation.h"
#include "xla/cpu_function_runtime.h"
#include "xla/service/cpu/buffer_info_util.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
constexpr char kHostPlatform[] = "Host";
absl::StatusOr<size_t> ComputeResultIndex(
const xla::BufferAssignment& buffer_assignment) {
TF_ASSIGN_OR_RETURN(const xla::BufferAllocation::Slice result_slice,
buffer_assignment.GetUniqueTopLevelOutputSlice());
return result_slice.index();
}
int CountResults(
absl::Span<const xla::cpu_function_runtime::BufferInfo> buffer_infos) {
int num_results = 0;
for (const auto& info : buffer_infos) {
if (info.is_result_parameter()) {
++num_results;
}
}
return num_results;
}
template <typename T>
void CollectNames(const T& entries, std::vector<string>* nonempty_names,
std::vector<const char*>* name_ptrs) {
for (const auto& entry : entries) {
const string& name = entry.name();
if (!name.empty()) {
nonempty_names->push_back(name);
}
}
name_ptrs->reserve(entries.size() + 1);
size_t nonempty_index = 0;
for (const auto& entry : entries) {
const string& name = entry.name();
if (!name.empty()) {
name_ptrs->push_back(nonempty_names->at(nonempty_index).c_str());
++nonempty_index;
} else {
name_ptrs->push_back("");
}
}
name_ptrs->push_back(nullptr);
}
}
absl::StatusOr<std::unique_ptr<XlaJitCompiledCpuFunction>>
XlaJitCompiledCpuFunction::Compile(
const GraphDef& graph_def, const tf2xla::Config& config,
const xla::ExecutableBuildOptions& build_options) {
TF_ASSIGN_OR_RETURN(se::Platform * platform,
xla::PlatformUtil::GetPlatform(kHostPlatform));
TF_ASSIGN_OR_RETURN(xla::LocalClient * client,
xla::ClientLibrary::GetOrCreateLocalClient(platform));
xla::XlaComputation computation;
TF_RETURN_IF_ERROR(tensorflow::ConvertGraphDefToXla(graph_def, config, client,
&computation));
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::ProgramShape> program_shape,
client->GetComputationShape(computation));
if (program_shape->result().element_type() != xla::TUPLE) {
return errors::Internal(
"XlaJitCompiledCpuFunction requires the XLA result to be a tuple");
}
program_shape->clear_parameter_names();
std::vector<const xla::Shape*> arg_shapes;
arg_shapes.reserve(program_shape->parameters_size());
for (int i = 0; i < program_shape->parameters_size(); ++i) {
arg_shapes.push_back(&program_shape->parameters(i));
}
TF_ASSIGN_OR_RETURN(auto executables,
client->Compile(computation, arg_shapes, build_options));
TF_RET_CHECK(executables.size() == 1);
std::unique_ptr<xla::LocalExecutable> executable = std::move(executables[0]);
const xla::cpu::CpuExecutable* cpu_executable =
static_cast<xla::cpu::CpuExecutable*>(executable->executable());
XlaCompiledCpuFunction::RawFunction raw_function =
cpu_executable->compute_function();
const xla::BufferAssignment& buffer_assignment =
cpu_executable->buffer_assignment();
std::vector<xla::cpu_function_runtime::BufferInfo> buffer_infos =
xla::cpu::CreateBufferInfosFromBufferAssignment(cpu_executable->module(),
buffer_assignment);
std::vector<int32> arg_index_table =
xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
TF_ASSIGN_OR_RETURN(size_t result_index,
ComputeResultIndex(buffer_assignment));
const int num_results = CountResults(buffer_infos);
std::unique_ptr<XlaJitCompiledCpuFunction> jit_unique_ptr(
new XlaJitCompiledCpuFunction);
XlaJitCompiledCpuFunction* jit = jit_unique_ptr.get();
jit->executable_ = std::move(executable);
jit->buffer_infos_ = std::move(buffer_infos);
jit->arg_index_table_ = std::move(arg_index_table);
jit->program_shape_ =
std::make_unique<xla::ProgramShapeProto>(program_shape->ToProto());
XlaCompiledCpuFunction::set_static_data_raw_function(&jit->static_data_,
raw_function);
XlaCompiledCpuFunction::set_static_data_buffer_infos(
&jit->static_data_, jit->buffer_infos_.data());
XlaCompiledCpuFunction::set_static_data_num_buffers(
&jit->static_data_, jit->buffer_infos_.size());
XlaCompiledCpuFunction::set_static_data_arg_index_table(
&jit->static_data_, jit->arg_index_table_.data());
XlaCompiledCpuFunction::set_static_data_num_args(
&jit->static_data_, jit->arg_index_table_.size());
XlaCompiledCpuFunction::set_static_data_num_variables(&jit->static_data_,
config.variable_size());
XlaCompiledCpuFunction::set_static_data_num_results(&jit->static_data_,
num_results);
XlaCompiledCpuFunction::set_static_data_result_index(&jit->static_data_,
result_index);
CollectNames(config.feed(), &jit->nonempty_arg_names_, &jit->arg_names_);
auto variable_copy = config.variable();
for (auto& var : variable_copy) {
if (var.name().empty()) {
var.set_name(var.node_name());
}
}
CollectNames(variable_copy, &jit->nonempty_variable_names_,
&jit->variable_names_);
CollectNames(config.fetch(), &jit->nonempty_result_names_,
&jit->result_names_);
XlaCompiledCpuFunction::set_static_data_arg_names(&jit->static_data_,
jit->arg_names_.data());
XlaCompiledCpuFunction::set_static_data_variable_names(
&jit->static_data_, jit->variable_names_.data());
XlaCompiledCpuFunction::set_static_data_result_names(
&jit->static_data_, jit->result_names_.data());
XlaCompiledCpuFunction::set_static_data_program_shape(
&jit->static_data_, jit->program_shape_.get());
if (cpu_executable->hlo_profiling_enabled()) {
XlaCompiledCpuFunction::set_static_data_hlo_profile_printer_data(
&jit->static_data_, &cpu_executable->hlo_profile_printer_data());
XlaCompiledCpuFunction::set_static_data_profile_counters_size(
&jit->static_data_,
cpu_executable->hlo_profile_printer_data().profile_counters_size());
}
return std::move(jit_unique_ptr);
}
} | #include "tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "xla/client/local_client.h"
#include "xla/service/compiler.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
PLATFORM_DEFINE_ID(kFakePlatformId);
AttrValue TypeAttrValue(DataType type) {
AttrValue attr_value;
SetAttrValue(type, &attr_value);
return attr_value;
}
GraphDef SumGraph() {
GraphDef graph_def;
NodeDef* x = graph_def.add_node();
x->set_name("x");
x->set_op("Placeholder");
(*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* y = graph_def.add_node();
y->set_name("y");
y->set_op("Placeholder");
(*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* sum = graph_def.add_node();
sum->set_name("sum");
sum->set_op("Add");
sum->add_input("x");
sum->add_input("y");
(*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32);
return graph_def;
}
tf2xla::Config SumConfig() {
tf2xla::Config config;
tf2xla::Feed* x = config.add_feed();
x->mutable_id()->set_node_name("x");
x->set_name("x_name");
tf2xla::Feed* y = config.add_feed();
y->mutable_id()->set_node_name("y");
y->set_name("y_name");
tf2xla::Fetch* sum = config.add_fetch();
sum->mutable_id()->set_node_name("sum");
sum->set_name("sum_name");
return config;
}
GraphDef SumGraphVariable() {
constexpr char text_proto[] = R"pb(
node {
name: "x"
op: "VarHandleOp"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "shared_name"
value { s: "myvar" }
}
attr {
key: "shape"
value { shape { dim { size: 1 } } }
}
}
node {
name: "read"
op: "ReadVariableOp"
input: "x"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
node {
name: "y"
op: "Placeholder"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
node {
name: "sum"
op: "Add"
input: "read"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node {
name: "assign"
op: "AssignVariableOp"
input: "x"
input: "sum"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
# We use this identity op to make sure assign doesn't get pruned away.
node {
name: "out"
op: "Identity"
input: "y"
input: "^assign"
attr {
key: "T"
value { type: DT_INT32 }
}
})pb";
GraphDef graph;
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &graph));
return graph;
}
tf2xla::Config SumConfigVariable() {
constexpr char text_proto[] = R"pb(feed { id { node_name: "y" } }
variable {
node_name: "myvar"
shape { dim { size: 1 } }
type: DT_INT32
}
fetch { id { node_name: "out" } })pb";
tf2xla::Config config;
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &config));
return config;
}
TEST(XlaJitCompiledCpuFunction, Sum) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
XlaCompiledCpuFunction function(jit->StaticData());
ASSERT_EQ(function.num_args(), 2);
ASSERT_EQ(function.num_results(), 1);
*static_cast<int32*>(function.arg_data(0)) = 10;
*static_cast<int32*>(function.arg_data(1)) = 32;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 42);
*static_cast<int32*>(function.arg_data(0)) = 100;
*static_cast<int32*>(function.arg_data(1)) = 320;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 420);
EXPECT_TRUE(function.HasNameIndices());
EXPECT_EQ(function.LookupArgIndex("x_name"), 0);
EXPECT_EQ(function.LookupArgIndex("y_name"), 1);
EXPECT_EQ(function.LookupArgIndex(""), -1);
EXPECT_EQ(function.LookupArgIndex("x"), -1);
EXPECT_EQ(function.LookupArgIndex("y"), -1);
EXPECT_EQ(function.LookupArgIndex("sum"), -1);
EXPECT_EQ(function.LookupArgIndex("sum_name"), -1);
EXPECT_EQ(function.LookupResultIndex("sum_name"), 0);
EXPECT_EQ(function.LookupResultIndex(""), -1);
EXPECT_EQ(function.LookupResultIndex("x"), -1);
EXPECT_EQ(function.LookupResultIndex("y"), -1);
EXPECT_EQ(function.LookupResultIndex("sum"), -1);
EXPECT_EQ(function.LookupResultIndex("x_name"), -1);
EXPECT_EQ(function.LookupResultIndex("y_name"), -1);
EXPECT_EQ(0, function.num_variables());
EXPECT_EQ(function.LookupVariableIndex("x"), -1);
for (int i = 0; i < function.num_args(); ++i) {
const char* name = function.GetArgName(i);
ASSERT_NE(name, nullptr);
const int roundtrip_i = function.LookupArgIndex(name);
EXPECT_EQ(roundtrip_i, i) << " name= " << name;
}
for (int i = 0; i < function.num_results(); ++i) {
const char* name = function.GetResultName(i);
ASSERT_NE(name, nullptr);
const int roundtrip_i = function.LookupResultIndex(name);
EXPECT_EQ(roundtrip_i, i) << " name= " << name;
}
EXPECT_EQ(function.GetArgName(-1), nullptr);
EXPECT_EQ(function.GetArgName(function.num_args()), nullptr);
EXPECT_EQ(function.GetResultName(-1), nullptr);
EXPECT_EQ(function.GetResultName(function.num_results()), nullptr);
EXPECT_EQ(function.GetVariableName(0), nullptr);
using xla::ShapeUtil;
const xla::Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
ASSERT_TRUE(function.ProgramShape() != nullptr);
const xla::ProgramShape program_shape(*function.ProgramShape());
ASSERT_EQ(program_shape.parameters_size(), 2);
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(0), s32));
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(1), s32));
const xla::Shape& result = program_shape.result();
ASSERT_EQ(result.element_type(), xla::TUPLE);
ASSERT_EQ(ShapeUtil::TupleElementCount(result), 1);
const xla::Shape& result0 = ShapeUtil::GetTupleElementShape(result, 0);
EXPECT_TRUE(ShapeUtil::Compatible(result0, s32));
}
TEST(XlaJitCompiledCpuFunction, SumVariable) {
GraphDef graph_def = SumGraphVariable();
tf2xla::Config config = SumConfigVariable();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
XlaCompiledCpuFunction function(jit->StaticData());
ASSERT_EQ(function.num_args(), 2);
ASSERT_EQ(function.num_results(), 2);
*static_cast<int32*>(function.arg_data(0)) = 10;
*static_cast<int32*>(function.arg_data(1)) = 32;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 10);
EXPECT_EQ(*static_cast<int32*>(function.result_data(1)), 42);
*static_cast<int32*>(function.arg_data(0)) = 100;
*static_cast<int32*>(function.arg_data(1)) = 320;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 100);
EXPECT_EQ(*static_cast<int32*>(function.result_data(1)), 420);
EXPECT_TRUE(function.HasNameIndices());
EXPECT_EQ(2, function.num_args());
EXPECT_EQ(1, function.num_variables());
EXPECT_EQ(function.LookupVariableIndex("myvar"), 1);
const char* name = function.GetVariableName(0);
EXPECT_EQ(std::string(name), "myvar");
EXPECT_EQ(function.GetVariableName(1), nullptr);
EXPECT_EQ(function.GetVariableName(-1), nullptr);
using xla::ShapeUtil;
const xla::Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
const xla::Shape s32_1 = ShapeUtil::MakeShape(xla::S32, {1});
ASSERT_TRUE(function.ProgramShape() != nullptr);
const xla::ProgramShape program_shape(*function.ProgramShape());
ASSERT_EQ(program_shape.parameters_size(), 2);
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(0), s32));
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(1), s32_1));
const xla::Shape& result = program_shape.result();
ASSERT_EQ(result.element_type(), xla::TUPLE);
ASSERT_EQ(ShapeUtil::TupleElementCount(result), 2);
const xla::Shape& result0 = ShapeUtil::GetTupleElementShape(result, 0);
EXPECT_TRUE(ShapeUtil::Compatible(result0, s32));
}
TEST(XlaJitCompiledCpuFunction, CanCompileWithAdditionalPlatform) {
class FakePlatform : public se::Platform {
public:
FakePlatform() : name_("FakePlatform") {}
~FakePlatform() override {}
se::Platform::Id id() const override { return kFakePlatformId; }
int VisibleDeviceCount() const override { return 0; }
const string& Name() const override { return name_; }
absl::StatusOr<std::unique_ptr<se::DeviceDescription>> DescriptionForDevice(
int ordinal) const override {
return std::unique_ptr<se::DeviceDescription>(nullptr);
}
absl::StatusOr<se::StreamExecutor*> ExecutorForDevice(
int ordinal) override {
return nullptr;
}
absl::StatusOr<se::StreamExecutor*> GetExecutor(
const se::StreamExecutorConfig& config) override {
return nullptr;
}
absl::StatusOr<std::unique_ptr<se::StreamExecutor>> GetUncachedExecutor(
const se::StreamExecutorConfig& config) override {
return std::unique_ptr<se::StreamExecutor>(nullptr);
}
private:
string name_;
};
TF_EXPECT_OK(
se::PlatformManager::RegisterPlatform(std::make_unique<FakePlatform>()));
xla::Compiler::RegisterCompilerFactory(kFakePlatformId, []() {
return std::unique_ptr<xla::Compiler>(nullptr);
});
EXPECT_THAT(xla::PlatformUtil::GetDefaultPlatform().status().message(),
HasSubstr("FakePlatform"));
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
}
}
} |
1,108 | cpp | tensorflow/tensorflow | layout_util | third_party/xla/xla/translate/mhlo_to_hlo/layout_util.cc | third_party/xla/xla/layout_util_test.cc | #ifndef XLA_TRANSLATE_MHLO_TO_HLO_LAYOUT_UTIL_H_
#define XLA_TRANSLATE_MHLO_TO_HLO_LAYOUT_UTIL_H_
#include <functional>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace mlir {
enum class XlaLayoutPreference {
kNoPreference = 0,
kTpuPreferCompactChunkPaddedLayout = 1,
kTpuPreferLinearLayout = 2
};
typedef std::function<absl::StatusOr<XlaLayoutPreference>(
const xla::Shape& shape)>
LayoutPreferenceFn;
typedef std::function<absl::StatusOr<xla::Shape>(
const xla::Shape& shape, bool fast_mem,
XlaLayoutPreference layout_preference)>
ShapeRepresentationFn;
LayoutPreferenceFn UseNoPreferenceLayoutFn();
absl::Status RewriteLayoutWithShardedShape(
const std::optional<xla::HloSharding>& sharding, bool use_fast_memory,
const LayoutPreferenceFn& layout_preference_fn,
const ShapeRepresentationFn& shape_representation_fn,
xla::Shape* xla_shape);
absl::StatusOr<xla::XlaOp> ReshapeWithCorrectRepresentationAndSharding(
xla::XlaBuilder* builder, xla::XlaOp original, xla::Shape original_shape,
const LayoutPreferenceFn& layout_preference_fn,
const ShapeRepresentationFn& shape_representation_fn,
std::optional<xla::OpSharding> sharding, bool fast_mem);
}
#endif
#include "xla/translate/mhlo_to_hlo/layout_util.h"
#include "absl/status/status.h"
namespace mlir {
absl::Status RewriteLayoutWithShardedShape(
const std::optional<xla::HloSharding>& sharding, bool use_fast_memory,
const LayoutPreferenceFn& layout_preference_fn,
const ShapeRepresentationFn& shape_representation_fn,
xla::Shape* xla_shape) {
if (sharding && !sharding->IsTileMaximal() && !sharding->IsManual()) {
int64_t device = sharding->tile_assignment().first();
std::vector<int64_t> offset =
sharding->TileOffsetForDevice(*xla_shape, device);
std::vector<int64_t> limit =
sharding->TileLimitForDevice(*xla_shape, device);
std::vector<int64_t> dimensions(xla_shape->rank());
for (int64_t i = 0; i < xla_shape->rank(); ++i) {
dimensions[i] = limit[i] - offset[i];
}
xla::Shape per_device_xla_shape =
xla::ShapeUtil::MakeShape(xla_shape->element_type(), dimensions);
TF_ASSIGN_OR_RETURN(auto layout_preference,
layout_preference_fn
? layout_preference_fn(per_device_xla_shape)
: XlaLayoutPreference::kNoPreference);
TF_ASSIGN_OR_RETURN(
per_device_xla_shape,
shape_representation_fn
? shape_representation_fn(per_device_xla_shape, use_fast_memory,
layout_preference)
: per_device_xla_shape);
*xla_shape->mutable_layout() = per_device_xla_shape.layout();
}
return absl::OkStatus();
}
absl::StatusOr<xla::XlaOp> ReshapeWithCorrectRepresentationAndSharding(
xla::XlaBuilder* builder, xla::XlaOp original, xla::Shape original_shape,
const LayoutPreferenceFn& layout_preference_fn,
const ShapeRepresentationFn& shape_representation_fn,
std::optional<xla::OpSharding> sharding, bool fast_mem) {
if (original_shape.IsTuple()) {
std::vector<xla::XlaOp> elements;
for (int i = 0; i < original_shape.tuple_shapes_size(); ++i) {
auto subsharding = sharding ? sharding->tuple_shardings(i) : sharding;
TF_ASSIGN_OR_RETURN(
auto element,
ReshapeWithCorrectRepresentationAndSharding(
builder, xla::GetTupleElement(original, i),
original_shape.tuple_shapes(i), layout_preference_fn,
shape_representation_fn, subsharding, fast_mem));
elements.push_back(element);
}
return xla::Tuple(builder, elements);
}
if (!original_shape.IsArray()) return original;
TF_ASSIGN_OR_RETURN(auto layout_preference,
layout_preference_fn
? layout_preference_fn(original_shape)
: XlaLayoutPreference::kNoPreference);
TF_ASSIGN_OR_RETURN(
auto to_shape,
shape_representation_fn
? shape_representation_fn(original_shape, fast_mem, layout_preference)
: original_shape);
if (sharding) {
TF_ASSIGN_OR_RETURN(auto hlo_sharding,
xla::HloSharding::FromProto(*sharding));
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
hlo_sharding, fast_mem, layout_preference_fn, shape_representation_fn,
&to_shape));
}
if (xla::ShapeUtil::Compatible(original_shape, to_shape)) {
for (int64_t i = 0; i < original_shape.rank(); ++i) {
to_shape.set_dynamic_dimension(i, original_shape.is_dynamic_dimension(i));
}
}
xla::XlaScopedShardingAssignment scoped_sharding(builder, sharding);
return xla::Reshape(to_shape, original);
}
} | #include "xla/layout_util.h"
#include <cstdint>
#include <vector>
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {
class LayoutUtilTest : public ::testing::Test {
protected:
Shape MakeShapeWithLayout(
PrimitiveType element_type, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major,
absl::Span<const DimLevelType> dim_level_types = {}) {
Shape shape = ShapeUtil::MakeShape(element_type, dimensions);
*shape.mutable_layout() =
LayoutUtil::MakeLayout(minor_to_major, dim_level_types);
return shape;
}
};
TEST_F(LayoutUtilTest, TupleLayoutComparison) {
Shape shape =
ShapeUtil::MakeTupleShape({MakeShapeWithLayout(F32, {2, 3}, {0, 1})});
Shape other_shape =
ShapeUtil::MakeTupleShape({MakeShapeWithLayout(F32, {2, 2}, {0, 1})});
Shape tuple0 = ShapeUtil::MakeTupleShape({});
Shape tuple1 = ShapeUtil::MakeTupleShape({shape});
Shape tuple2 = ShapeUtil::MakeTupleShape({shape, shape});
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(tuple0, tuple0));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(tuple0, tuple1));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(tuple0, tuple2));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(tuple1, tuple0));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(tuple2, tuple0));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(tuple1, tuple1));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(tuple1, tuple2));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(tuple2, tuple1));
Shape other_tuple2 = ShapeUtil::MakeTupleShape({shape, other_shape});
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(tuple2, tuple2));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(tuple2, other_tuple2));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(other_tuple2, tuple2));
}
TEST_F(LayoutUtilTest, CopyLayoutDenseArray) {
Shape src = MakeShapeWithLayout(F32, {2, 3}, {0, 1});
Shape dst = MakeShapeWithLayout(F32, {2, 3}, {1, 0});
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
dst.clear_layout();
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
src.clear_layout();
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_TRUE(dst.has_layout());
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_FALSE(dst.has_layout());
}
TEST_F(LayoutUtilTest, CopyLayoutCSRArray) {
Shape src =
MakeShapeWithLayout(F32, {2, 3}, {1, 0}, {DIM_DENSE, DIM_COMPRESSED});
Shape dst = MakeShapeWithLayout(F32, {2, 3}, {0, 1});
EXPECT_TRUE(LayoutUtil::IsSparseArray(src));
EXPECT_FALSE(LayoutUtil::IsSparseArray(dst));
EXPECT_TRUE(LayoutUtil::IsCSRArray(src));
EXPECT_FALSE(LayoutUtil::IsCSRArray(dst));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_TRUE(LayoutUtil::IsCSRArray(dst));
dst.clear_layout();
EXPECT_FALSE(LayoutUtil::IsCSRArray(dst));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_TRUE(LayoutUtil::IsCSRArray(dst));
*dst.mutable_layout()->mutable_minor_to_major() = {0, 1};
EXPECT_TRUE(LayoutUtil::IsCSCArray(dst));
EXPECT_FALSE(LayoutUtil::IsCSRArray(dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
*src.mutable_layout()->mutable_physical_shape() = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShapeWithDenseLayout(U32, {2}, {0}, {Tile({100})}),
ShapeUtil::MakeShapeWithDenseLayout(U32, {4}, {0}, {Tile({100})}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {4}, {0}, {Tile({100})}),
});
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
dst.clear_layout();
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
src.clear_layout();
EXPECT_FALSE(LayoutUtil::IsCSRArray(src));
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_TRUE(dst.has_layout());
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_FALSE(dst.has_layout());
EXPECT_FALSE(LayoutUtil::IsCSRArray(dst));
}
TEST_F(LayoutUtilTest, CopyLayoutTuple) {
Shape src = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3}, {0, 1}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}),
ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3}, {0, 2, 1})})});
Shape dst = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3}, {1, 0}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}),
ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3}, {1, 2, 0})})});
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
}
TEST_F(LayoutUtilTest, CopyLayoutNotCompatibleSameRank) {
Shape src = MakeShapeWithLayout(F32, {123, 42, 7}, {2, 0, 1});
Shape dst = MakeShapeWithLayout(F32, {2, 3, 5}, {1, 0});
ASSERT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
}
TEST_F(LayoutUtilTest, CopyLayoutNotCompatibleDifferentRank) {
Shape src = MakeShapeWithLayout(F32, {123, 42, 7}, {2, 0, 1});
Shape dst = MakeShapeWithLayout(F32, {2, 3}, {1, 0});
auto status = LayoutUtil::CopyLayoutBetweenShapes(src, &dst);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::ContainsRegex("cannot copy layout from shape"));
}
TEST_F(LayoutUtilTest, CopyLayoutNotCompatibleTuple) {
Shape src =
ShapeUtil::MakeTupleShape({MakeShapeWithLayout(F32, {2, 3}, {0, 1}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}),
ShapeUtil::MakeTupleShape({MakeShapeWithLayout(
F32, {1, 2, 3}, {0, 2, 1})})});
Shape dst = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3}, {1, 0}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}),
ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3}, {1, 2, 0})})});
auto status = LayoutUtil::CopyLayoutBetweenShapes(src, &dst);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::ContainsRegex("cannot copy layout from shape"));
}
TEST_F(LayoutUtilTest, CopyLayoutBogusLayout) {
Shape src = ShapeUtil::MakeShape(F32, {2, 3});
Shape dst = ShapeUtil::MakeShape(F32, {2, 3});
*src.mutable_layout() = LayoutUtil::MakeLayout({1, 2, 3, 4});
auto status = LayoutUtil::CopyLayoutBetweenShapes(src, &dst);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(), ::testing::ContainsRegex(
"layout minor_to_major field contains .* "
"elements, but shape is rank"));
}
TEST_F(LayoutUtilTest, CopyTokenLayout) {
Shape src = ShapeUtil::MakeTokenShape();
Shape dst = ShapeUtil::MakeTokenShape();
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
}
TEST_F(LayoutUtilTest, CopyOpaqueLayout) {
Shape src = ShapeUtil::MakeOpaqueShape();
Shape dst = ShapeUtil::MakeOpaqueShape();
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
}
TEST_F(LayoutUtilTest, CopyTupleLayoutWithTokenAndOpaque) {
Shape src = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3}, {0, 1}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}), ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeOpaqueShape(), MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3}, {0, 2, 1})})});
Shape dst = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3}, {1, 0}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}), ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeOpaqueShape(), MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3}, {1, 2, 0})})});
EXPECT_FALSE(LayoutUtil::LayoutsInShapesEqual(src, dst));
EXPECT_IS_OK(LayoutUtil::CopyLayoutBetweenShapes(src, &dst));
EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(src, dst));
}
TEST_F(LayoutUtilTest, ClearLayoutTuple) {
Shape shape = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3}, {1, 0}),
MakeShapeWithLayout(F32, {42, 123}, {1, 0}),
ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3}, {1, 2, 0})})});
EXPECT_TRUE(LayoutUtil::HasLayout(shape));
EXPECT_TRUE(shape.tuple_shapes(0).has_layout());
EXPECT_TRUE(shape.tuple_shapes(2).tuple_shapes(1).has_layout());
LayoutUtil::ClearLayout(&shape);
EXPECT_FALSE(LayoutUtil::HasLayout(shape));
EXPECT_FALSE(shape.tuple_shapes(0).has_layout());
EXPECT_FALSE(shape.tuple_shapes(2).tuple_shapes(1).has_layout());
}
TEST_F(LayoutUtilTest, ClearLayoutOpaqueAndToken) {
for (Shape shape :
{ShapeUtil::MakeOpaqueShape(), ShapeUtil::MakeTokenShape()}) {
EXPECT_TRUE(LayoutUtil::HasLayout(shape));
LayoutUtil::ClearLayout(&shape);
EXPECT_TRUE(LayoutUtil::HasLayout(shape));
}
}
TEST_F(LayoutUtilTest, SetToDefaultLayoutTuple) {
Shape shape = ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {2, 3, 4}, {1, 0, 2}),
MakeShapeWithLayout(F32, {42, 123, 7}, {1, 2, 0}),
ShapeUtil::MakeTupleShape(
{MakeShapeWithLayout(F32, {}, {}),
MakeShapeWithLayout(F32, {1, 2, 3, 4}, {3, 1, 2, 0})})});
EXPECT_FALSE(LayoutUtil::Equal(shape.tuple_shapes(0).layout(),
shape.tuple_shapes(1).layout()));
LayoutUtil::SetToDefaultLayout(&shape);
EXPECT_TRUE(LayoutUtil::Equal(shape.tuple_shapes(0).layout(),
shape.tuple_shapes(1).layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::GetDefaultLayoutForShape(shape.tuple_shapes(0)),
shape.tuple_shapes(1).layout()));
}
TEST_F(LayoutUtilTest, DefaultLayoutGettersMajorToMinor) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
LayoutUtil::GetDefaultLayoutForR2()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({2, 1, 0}),
LayoutUtil::GetDefaultLayoutForR3()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({3, 2, 1, 0}),
LayoutUtil::GetDefaultLayoutForR4()));
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({4, 3, 2, 1, 0}),
LayoutUtil::GetDefaultLayoutForShape(
ShapeUtil::MakeShape(F32, {10, 20, 30, 15, 25}))));
}
TEST_F(LayoutUtilTest, MakeDescending) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeDescendingLayout(5),
LayoutUtil::MakeLayout({4, 3, 2, 1, 0})));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeDescendingLayout(1),
LayoutUtil::MakeLayout({0})));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeDescendingLayout(0),
LayoutUtil::MakeLayout({})));
}
TEST_F(LayoutUtilTest, MakeAscending) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeAscendingLayout(5),
LayoutUtil::MakeLayout({0, 1, 2, 3, 4})));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeAscendingLayout(1),
LayoutUtil::MakeLayout({0})));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeAscendingLayout(0),
LayoutUtil::MakeLayout({})));
}
TEST_F(LayoutUtilTest, HumanStringWithTiling) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 3, 4}, {0, 1, 2});
Tile* tile;
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape), "f32[2,3,4]{0,1,2}");
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(512);
tile->add_dimensions(1024);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"f32[2,3,4]{0,1,2:T(512,1024)}");
shape.mutable_layout()->clear_tiles();
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(512);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"f32[2,3,4]{0,1,2:T(512)}");
shape = ShapeUtil::MakeShapeWithDenseLayout(BF16, {2, 3, 4}, {1, 2, 0});
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(16);
tile->add_dimensions(256);
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(2);
tile->add_dimensions(1);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"bf16[2,3,4]{1,2,0:T(16,256)(2,1)}");
shape = ShapeUtil::MakeShapeWithDenseLayout(PRED, {8, 8, 8}, {0, 2, 1});
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(8);
tile->add_dimensions(128);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"pred[8,8,8]{0,2,1:T(8,128)}");
shape.mutable_layout()->clear_tiles();
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(8);
tile->add_dimensions(128);
shape.mutable_layout()->set_element_size_in_bits(32);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"pred[8,8,8]{0,2,1:T(8,128)E(32)}");
shape.mutable_layout()->clear_tiles();
shape.mutable_layout()->set_element_size_in_bits(32);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"pred[8,8,8]{0,2,1:E(32)}");
shape = ShapeUtil::MakeShapeWithDenseLayout(BF16, {2, 3, 1004}, {2, 1, 0});
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(2);
tile->add_dimensions(Tile::kCombineDimension);
tile->add_dimensions(128);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"bf16[2,3,1004]{2,1,0:T(2,*,128)}");
shape =
ShapeUtil::MakeShapeWithDenseLayout(BF16, {8, 2, 3, 1004}, {3, 2, 1, 0});
tile = shape.mutable_layout()->add_tiles();
tile->add_dimensions(2);
tile->add_dimensions(Tile::kCombineDimension);
tile->add_dimensions(Tile::kCombineDimension);
tile->add_dimensions(128);
EXPECT_EQ(ShapeUtil::HumanStringWithLayout(shape),
"bf16[8,2,3,1004]{3,2,1,0:T(2,*,*,128)}");
}
TEST_F(LayoutUtilTest, ValidateLayout_ValidArrayLayout) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 3}, {0, 1});
auto status =
LayoutUtil::ValidateLayoutInShape(shape, false);
EXPECT_TRUE(status.ok());
status =
LayoutUtil::ValidateLayoutInShape(shape, true);
EXPECT_TRUE(status.ok());
}
TEST_F(LayoutUtilTest, ValidateLayout_InvalidArrayLayout) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
*shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1, 2});
auto status =
LayoutUtil::ValidateLayoutInShape(shape, false);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("layout minor_to_major field "
"contains 3 elements, but shape is rank 2"));
status =
LayoutUtil::ValidateLayoutInShape(shape, true);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("layout minor_to_major field "
"contains 3 elements, but shape is rank 2"));
}
TEST_F(LayoutUtilTest, ValidateLayout_InvalidDimLevelTypes) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
*shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
shape.mutable_layout()->add_dim_level_type(DIM_DENSE);
shape.mutable_layout()->add_dim_level_type(DIM_DENSE);
shape.mutable_layout()->add_dim_level_type(DIM_DENSE);
auto status =
LayoutUtil::ValidateLayoutInShape(shape, false);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("layout dim_level_types field "
"contains 3 elements, but shape is rank 2"));
status =
LayoutUtil::ValidateLayoutInShape(shape, true);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("layout dim_level_types field "
"contains 3 elements, but shape is rank 2"));
}
TEST_F(LayoutUtilTest, ValidateLayout_MissingArrayLayout) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
LayoutUtil::ClearLayout(&shape);
auto status =
LayoutUtil::ValidateLayoutInShape(shape, false);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("shape f32[2,3] does not have a layout"));
status =
LayoutUtil::ValidateLayoutInShape(shape, true);
EXPECT_TRUE(status.ok());
}
TEST_F(LayoutUtilTest, ValidateLayout_Sparse) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
*shape.mutable_layout() = LayoutUtil::MakeLayout(
{1, 0}, {DIM_DENSE, DIM_COMPRESSED}, {}, {}, {Tile({10, 10})});
EXPECT_THAT(LayoutUtil::ValidateLayoutInShape(shape),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
::testing::HasSubstr(
"layout has tiles, but the shape is a sparse array")));
shape.mutable_layout()->clear_tiles();
EXPECT_THAT(LayoutUtil::ValidateLayoutInShape(shape), tsl::testing::IsOk());
*shape.mutable_layout()->mutable_physical_shape() =
ShapeUtil::MakeShape(F32, {6});
EXPECT_THAT(LayoutUtil::ValidateLayoutInShape(shape), tsl::testing::IsOk());
*shape.mutable_layout()
->mutable_physical_shape()
->mutable_layout()
->mutable_physical_shape() = ShapeUtil::MakeShape(S32, {10});
EXPECT_THAT(
LayoutUtil::ValidateLayoutInShape(shape),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
::testing::HasSubstr(
"layout has a physical_shape, but is not a sparse array")));
shape.mutable_layout()->mutable_physical_shape()->clear_layout();
shape.mutable_layout()->clear_dim_level_types();
EXPECT_THAT(
LayoutUtil::ValidateLayoutInShape(shape),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
::testing::HasSubstr(
"layout has a physical_shape, but is not a sparse array")));
*shape.mutable_layout() =
LayoutUtil::MakeLayout({1, 0}, {DIM_DENSE, DIM_DENSE}, {true, false});
EXPECT_THAT(LayoutUtil::ValidateLayoutInShape(shape),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
::testing::HasSubstr("layout dimension 1 has invalid level "
"encoding DIM_DENSE, non-unique")));
}
TEST_F(LayoutUtilTest, ValidateLayout_TupleSubshapesWithMissingLayouts) {
Shape sub_1_1_1 = ShapeUtil::MakeShape(F32, {1, 2});
Shape sub_1_1 = ShapeUtil::MakeTupleShape({sub_1_1_1});
Shape sub_1_2 = ShapeUtil::MakeShape(F32, {1, 2});
LayoutUtil::ClearLayout(&sub_1_2);
Shape sub_1 = ShapeUtil::MakeTupleShape({sub_1_1, sub_1_2});
Shape sub_2_1 = ShapeUtil::MakeShape(F32, {9});
LayoutUtil::ClearLayout(&sub_2_1);
Shape sub_2 = ShapeUtil::MakeTupleShape({sub_2_1});
Shape shape = ShapeUtil::MakeTupleShape({sub_1, sub_2});
auto status =
LayoutUtil::ValidateLayoutInShape(shape, false);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("shape f32[1,2] does not have a layout"));
status =
LayoutUtil::ValidateLayoutInShape(shape, true);
EXPECT_TRUE(status.ok());
*shape.mutable_tuple_shapes(1)->mutable_tuple_shapes(0)->mutable_layout() =
LayoutUtil::MakeLayout({0, 2, 3});
status =
LayoutUtil::ValidateLayoutInShape(shape, true);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("layout minor_to_major field "
"contains 3 elements, but shape is rank 1"));
}
TEST_F(LayoutUtilTest, MoveDimToMajor) {
const Layout layout = LayoutUtil::MakeLayout({2, 1, 0});
Layout new_layout = LayoutUtil::MoveDimToMajor(layout, 0);
EXPECT_EQ(new_layout, layout);
new_layout = LayoutUtil::MoveDimToMajor(layout, 1);
EXPECT_EQ(new_layout, LayoutUtil::MakeLayout({2, 0, 1}));
}
TEST_F(LayoutUtilTest, StridesIsMajorToMinor) {
std::vector<int64_t> byte_strides = {3960, 440, 44, 4};
EXPECT_TRUE(LayoutUtil::ByteStridesIsMajorToMinor(
byte_strides, {8, 9, 10, 11}, PrimitiveType::F32));
}
TEST_F(LayoutUtilTest, StridesNotMajorToMinorInnerMostStrideIncorrect) {
std::vector<int64_t> byte_strides = {1880, 220, 22, 2};
EXPECT_FALSE(LayoutUtil::ByteStridesIsMajorToMinor(
byte_strides, {8, 9, 10, 11}, PrimitiveType::F32));
}
TEST_F(LayoutUtilTest, StridesNotMajorToMinor) {
std::vector<int64_t> byte_strides = {1880, 440, 44, 4};
EXPECT_FALSE(LayoutUtil::ByteStridesIsMajorToMinor(
byte_strides, {8, 9, 10, 11}, PrimitiveType::F32));
}
TEST_F(LayoutUtilTest, HasCustomElementSizeInBits) {
Shape shape = ShapeUtil::MakeShape(F32, {1, 2});
EXPECT_FALSE(LayoutUtil::HasCustomElementSizeInBits(shape));
shape = ShapeUtil::MakeShape(F32, {1, 2});
shape.mutable_layout()->set_element_size_in_bits(0);
EXPECT_FALSE(LayoutUtil::HasCustomElementSizeInBits(shape));
shape = ShapeUtil::MakeShape(F32, {1, 2});
shape.mutable_layout()->set_element_size_in_bits(32);
EXPECT_TRUE(LayoutUtil::HasCustomElementSizeInBits(shape));
shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {1, 2}),
ShapeUtil::MakeShape(F32, {1, 2})}),
ShapeUtil::MakeShape(F32, {1, 2})});
EXPECT_FALSE(LayoutUtil::HasCustomElementSizeInBits(shape));
shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {1, 2}),
ShapeUtil::MakeShape(F32, {1, 2})}),
ShapeUtil::MakeShape(F32, {1, 2})});
ShapeUtil::GetMutableSubshape(&shape, {0, 1})
->mutable_layout()
->set_element_size_in_bits(32);
EXPECT_TRUE(LayoutUtil::HasCustomElementSizeInBits(shape));
}
TEST_F(LayoutUtilTest, MaxSplitSize) {
Shape shape = ShapeUtil::MakeShape(F32, {150, 200, 100});
*shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1, 2})
.add_split_configs(SplitConfig(0, {30}))
.add_split_configs(SplitConfig(1, {40, 130}));
EXPECT_EQ(LayoutUtil::MaxSplitSize(shape, 0), 150);
EXPECT_EQ(LayoutUtil::MaxSplitSize(shape, 1), 90);
EXPECT_EQ(LayoutUtil::MaxSplitSize(shape, 2), 70);
}
TEST_F(LayoutUtilTest, MaxElementsInPerSplit) {
Shape shape = ShapeUtil::MakeShape(F32, {150, 200, 100});
*shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1, 2});
EXPECT_EQ(LayoutUtil::MaxElementsInPerSplit(shape), 150 * 200 * 100);
*shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1, 2})
.add_split_configs(SplitConfig(0, {30}))
.add_split_configs(SplitConfig(1, {40, 130}));
EXPECT_EQ(LayoutUtil::MaxElementsInPerSplit(shape), 150 * 90 * 70);
}
TEST_F(LayoutUtilTest, GetPhysicalShapeFromLogicalShapeNoLayout) {
Shape shape = ShapeUtil::MakeShape(F32, {150, 200, 100});
EXPECT_EQ(LayoutUtil::GetPhysicalShapeFromLogicalShape(shape), shape);
}
TEST_F(LayoutUtilTest, GetPhysicalShapeFromLogicalShapeLayout) {
Shape shape = ShapeUtil::MakeShape(F32, {150, 200, 100});
*shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1, 2});
Shape expected_shape = ShapeUtil::MakeShape(F32, {100, 200, 150});
*expected_shape.mutable_layout() = LayoutUtil::MakeLayout({2, 1, 0});
EXPECT_EQ(LayoutUtil::GetPhysicalShapeFromLogicalShape(shape),
expected_shape);
}
}
} |
1,109 | cpp | tensorflow/tensorflow | resource_operation_table | tensorflow/compiler/tf2xla/resource_operation_table.cc | tensorflow/compiler/tf2xla/resource_operation_table_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_RESOURCE_OPERATION_TABLE_H_
#define TENSORFLOW_COMPILER_TF2XLA_RESOURCE_OPERATION_TABLE_H_
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
enum class XlaResourceOpKind {
kRead,
kWrite,
kReadWrite
};
enum class XlaResourceKind {
kVariable,
kStack,
kTensorArray
};
class XlaResourceOpInfo {
public:
explicit XlaResourceOpInfo(XlaResourceOpKind op_kind,
XlaResourceKind resource_kind)
: op_kind_(op_kind), resource_kind_(resource_kind) {}
XlaResourceOpKind kind() const { return op_kind_; }
XlaResourceKind resource_kind() const { return resource_kind_; }
static absl::string_view XlaResourceOpKindToString(XlaResourceOpKind op_kind);
private:
XlaResourceOpKind op_kind_;
XlaResourceKind resource_kind_;
};
const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op);
namespace resource_op_table_internal {
std::vector<absl::string_view> GetKnownResourceOps();
}
}
#endif
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
namespace tensorflow {
absl::string_view XlaResourceOpInfo::XlaResourceOpKindToString(
XlaResourceOpKind op_kind) {
switch (op_kind) {
case XlaResourceOpKind::kRead:
return "Read";
case XlaResourceOpKind::kWrite:
return "Write";
case XlaResourceOpKind::kReadWrite:
return "Modify";
}
}
static absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>*
CreateResourceOpInfoMap() {
auto* result = new absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>;
auto add = [&](absl::string_view op, XlaResourceOpKind op_kind,
XlaResourceKind resource_kind) {
auto insert_result =
result->insert({op, XlaResourceOpInfo(op_kind, resource_kind)});
CHECK(insert_result.second);
};
auto kRead = XlaResourceOpKind::kRead;
auto kWrite = XlaResourceOpKind::kWrite;
auto kReadWrite = XlaResourceOpKind::kReadWrite;
auto kVariable = XlaResourceKind::kVariable;
auto kStack = XlaResourceKind::kStack;
auto kTensorArray = XlaResourceKind::kTensorArray;
add("AssignAddVariableOp" , kReadWrite, kVariable);
add("AssignSubVariableOp" , kReadWrite, kVariable);
add("AssignVariableOp" , kWrite, kVariable);
add("AssignVariableXlaConcatND" , kWrite, kVariable);
add("CollectiveReduceV2" , kRead, kVariable);
add("ReadVariableOp" , kRead, kVariable);
add("ReadVariableXlaSplitND" , kRead, kVariable);
add("ResourceApplyAdaMax" , kReadWrite, kVariable);
add("ResourceApplyAdadelta" , kReadWrite, kVariable);
add("ResourceApplyAdagrad" , kReadWrite, kVariable);
add("ResourceApplyAdagradV2" , kReadWrite, kVariable),
add("ResourceApplyAdagradDA" , kReadWrite, kVariable);
add("ResourceApplyAdam" , kReadWrite, kVariable);
add("ResourceApplyAddSign" , kReadWrite, kVariable);
add("ResourceApplyCenteredRMSProp" , kReadWrite, kVariable);
add("ResourceApplyFtrl" , kReadWrite, kVariable);
add("ResourceApplyFtrlV2" , kReadWrite, kVariable);
add("ResourceApplyGradientDescent" , kReadWrite, kVariable);
add("ResourceApplyMomentum" , kReadWrite, kVariable);
add("ResourceApplyKerasMomentum" , kReadWrite, kVariable);
add("ResourceApplyPowerSign" , kReadWrite, kVariable);
add("ResourceApplyProximalAdagrad" , kReadWrite, kVariable);
add("ResourceApplyProximalGradientDescent" , kReadWrite, kVariable);
add("ResourceApplyRMSProp" , kReadWrite, kVariable);
add("ResourceGather" , kRead, kVariable);
add("ResourceScatterAdd" , kReadWrite, kVariable);
add("ResourceScatterDiv" , kReadWrite, kVariable);
add("ResourceScatterMax" , kReadWrite, kVariable);
add("ResourceScatterMin" , kReadWrite, kVariable);
add("ResourceScatterMul" , kReadWrite, kVariable);
add("ResourceScatterNdAdd" , kReadWrite, kVariable);
add("ResourceScatterNdSub" , kReadWrite, kVariable);
add("ResourceScatterNdUpdate" , kReadWrite, kVariable);
add("ResourceScatterSub" , kReadWrite, kVariable);
add("ResourceScatterUpdate" , kReadWrite, kVariable);
add("ResourceStridedSliceAssign" , kReadWrite, kVariable);
add("RngReadAndSkip" , kReadWrite, kVariable);
add("RngSkip" , kReadWrite, kVariable);
add("StatefulStandardNormalV2" , kReadWrite, kVariable);
add("StatefulTruncatedNormal" , kReadWrite, kVariable);
add("StatefulUniform" , kReadWrite, kVariable);
add("StatefulUniformFullInt" , kReadWrite, kVariable);
add("StatefulUniformInt" , kReadWrite, kVariable);
add("VarIsInitializedOp" , kRead, kVariable);
add("VariableShape" , kRead, kVariable);
add("StackV2" , kWrite, kStack);
add("StackCloseV2" , kRead, kStack);
add("StackPopV2" , kReadWrite, kStack);
add("StackPushV2" , kReadWrite, kStack);
add("TensorArrayV3" , kWrite, kTensorArray);
add("TensorArrayConcatV3" , kRead, kTensorArray);
add("TensorArrayGatherV3" , kRead, kTensorArray);
add("TensorArrayScatterV3" , kWrite, kTensorArray);
add("TensorArrayGradV3" , kRead, kTensorArray);
add("TensorArrayCloseV3" , kRead, kTensorArray);
add("TensorArrayReadV3" , kRead, kTensorArray);
add("TensorArraySizeV3" , kRead, kTensorArray);
add("TensorArraySplitV3" , kWrite, kTensorArray);
add("TensorArrayWriteV3" , kWrite, kTensorArray);
return result;
}
static const absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>&
GetStaticResourceOpInfoMap() {
static absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>*
op_info_map = CreateResourceOpInfoMap();
return *op_info_map;
}
const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op) {
const absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>& op_infos =
GetStaticResourceOpInfoMap();
auto it = op_infos.find(op);
return it == op_infos.end() ? nullptr : &it->second;
}
namespace resource_op_table_internal {
std::vector<absl::string_view> GetKnownResourceOps() {
std::vector<absl::string_view> result;
for (const auto& p : GetStaticResourceOpInfoMap()) {
result.push_back(p.first);
}
absl::c_sort(result);
return result;
}
}
} | #include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
bool IsResourceArgDef(const OpDef::ArgDef& arg_def) {
return arg_def.type() == DT_RESOURCE;
}
bool HasResourceInputOrOutput(const OpDef& op_def) {
return absl::c_any_of(op_def.input_arg(), IsResourceArgDef) ||
absl::c_any_of(op_def.output_arg(), IsResourceArgDef);
}
TEST(ResourceOperationTableTest, HaveAllResourceOps) {
absl::flat_hash_map<string, bool> known_resource_ops;
for (absl::string_view known_resource_op :
resource_op_table_internal::GetKnownResourceOps()) {
ASSERT_TRUE(
known_resource_ops.insert({string(known_resource_op), false}).second);
}
std::vector<string> xla_op_names = XlaOpRegistry::GetAllRegisteredOps();
for (const string& xla_op_name : xla_op_names) {
const OpDef* op_def;
TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef(xla_op_name, &op_def));
if (HasResourceInputOrOutput(*op_def)) {
EXPECT_EQ(known_resource_ops.count(xla_op_name), 1)
<< "Unknown resource op " << xla_op_name;
known_resource_ops[xla_op_name] = true;
}
}
std::vector<string> unnecessary_resource_ops;
for (const auto& pair : known_resource_ops) {
if (!pair.second) {
unnecessary_resource_ops.push_back(pair.first);
}
}
EXPECT_TRUE(unnecessary_resource_ops.empty())
<< "Stale resource ops:\n"
<< absl::StrJoin(unnecessary_resource_ops, "\n");
}
}
} |
1,110 | cpp | tensorflow/tensorflow | sharding_util | tensorflow/compiler/tf2xla/sharding_util.cc | tensorflow/compiler/tf2xla/sharding_util_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_SHARDING_UTIL_H_
#define TENSORFLOW_COMPILER_TF2XLA_SHARDING_UTIL_H_
#include <string>
#include "xla/client/sharding_builder.h"
#include "xla/status_macros.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const string& device_name, int num_cores_per_replica,
std::optional<xla::OpSharding> explicit_sharding = std::nullopt,
std::optional<xla::OpMetadata> metadata = std::nullopt);
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const Node& node, int num_cores_per_replica, bool add_metadata);
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const NodeDef& node_def, int num_cores_per_replica, bool add_metadata);
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromEdgeSource(
const Edge& edge, int num_cores_per_replica, bool add_metadata);
void SetShardingDeviceAssignmentFromNode(const Node& src, Node* dst);
absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDef(
const NodeDef& node_def, bool add_metadata);
}
#endif
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "absl/strings/match.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
const char kDeviceSuffixReplicatedCore[] = "REPLICATED_CORE";
const char kShardingAttribute[] = "_XlaSharding";
const char kShardingOpAttribute[] = "sharding";
}
namespace {
xla::OpMetadata CreateOpMetadata(const std::string& op_type,
const std::string& op_name) {
xla::OpMetadata metadata;
metadata.set_op_type(op_type);
metadata.set_op_name(op_name);
return metadata;
}
void AssignOpMetadataToSharding(xla::OpSharding& sharding,
const string& op_type, const string& op_name) {
auto metadata = CreateOpMetadata(op_type, op_name);
if (sharding.type() == xla::OpSharding::TUPLE) {
for (auto& sharding_element : *sharding.mutable_tuple_shardings()) {
*sharding_element.add_metadata() = metadata;
}
} else {
*sharding.add_metadata() = metadata;
}
}
Status CoreOutOfRangeError(int core, int num_cores_per_replica) {
return errors::InvalidArgument(
"Invalid replicated core id: ", core,
"; num_cores_per_replica=", num_cores_per_replica);
}
}
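// Derives a sharding from a device name. An explicit sharding always wins;
// otherwise a device whose type contains REPLICATED_CORE with id N produces a
// maximal (single-core) sharding on core N, and any other device yields no
// sharding.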
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const string& device_name, int num_cores_per_replica,
std::optional<xla::OpSharding> explicit_sharding,
std::optional<xla::OpMetadata> metadata) {
if (device_name.empty()) {
return explicit_sharding;
}
DeviceNameUtils::ParsedName parsed_device;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_device)) {
return errors::InvalidArgument("Malformed assigned device '", device_name,
"'");
}
if (explicit_sharding.has_value()) {
return explicit_sharding;
} else if (!parsed_device.has_type || !parsed_device.has_id ||
!absl::StrContains(parsed_device.type,
kDeviceSuffixReplicatedCore)) {
return std::optional<xla::OpSharding>();
} else {
const int core = parsed_device.id;
if (core < 0 || core >= num_cores_per_replica) {
return CoreOutOfRangeError(core, num_cores_per_replica);
}
auto sharding = xla::sharding_builder::AssignDevice(core);
if (metadata.has_value()) {
*sharding.add_metadata() = metadata.value();
}
return std::optional<xla::OpSharding>(sharding);
}
}
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const NodeDef& node_def, int num_cores_per_replica, bool add_metadata) {
const string& device_name = node_def.device();
TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding,
GetShardingFromNodeDef(node_def, add_metadata));
return ParseShardingFromDevice(
device_name, num_cores_per_replica, sharding,
add_metadata ? std::optional<xla::OpMetadata>(
CreateOpMetadata(node_def.op(), node_def.name()))
: std::nullopt);
}
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const Node& node, int num_cores_per_replica, bool add_metadata) {
string device_name = node.assigned_device_name();
if (device_name.empty()) {
device_name = node.requested_device();
}
TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding,
GetShardingFromNodeDef(node.def(), add_metadata));
return ParseShardingFromDevice(
device_name, num_cores_per_replica, sharding,
add_metadata ? std::optional<xla::OpMetadata>(
CreateOpMetadata(node.type_string(), node.name()))
: std::nullopt);
}
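// Reads the sharding of the edge's source node; for tuple shardings the element
// corresponding to the edge's source output index is returned.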
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromEdgeSource(
const Edge& edge, int num_cores_per_replica, bool add_metadata) {
if (edge.src() == nullptr) {
return tensorflow::errors::InvalidArgument(
"Null src for ParseShardingFromEdgeSource edge=", edge.DebugString());
}
TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding,
ParseShardingFromDevice(
*edge.src(), num_cores_per_replica, add_metadata));
if (sharding.has_value() &&
sharding.value().type() == xla::OpSharding::TUPLE) {
if (edge.src_output() < 0 ||
edge.src_output() >= sharding.value().tuple_shardings_size()) {
return tensorflow::errors::InvalidArgument(
"Tuple index out of bound: edge=", edge.DebugString(),
" sharding=", sharding->DebugString());
}
std::optional<xla::OpSharding> subsharding =
sharding.value().tuple_shardings(edge.src_output());
return subsharding;
}
return sharding;
}
void SetShardingDeviceAssignmentFromNode(const Node& src, Node* dst) {
string device_name = src.assigned_device_name();
if (device_name.empty()) {
device_name = src.requested_device();
}
dst->set_assigned_device_name(device_name);
if (const AttrValue* attr = src.attrs().Find(kShardingAttribute)) {
dst->AddAttr(kShardingAttribute, *attr);
}
}
namespace {
absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDefInternal(
const NodeDef& node_def, bool add_metadata, const char* attribute) {
if (!HasNodeAttr(node_def, attribute)) {
return std::optional<xla::OpSharding>();
}
string value;
xla::OpSharding sharding;
TF_RETURN_IF_ERROR(GetNodeAttr(node_def, attribute, &value));
if (tensorflow::DecodeShardingAttribute(value, sharding).failed()) {
return xla::InvalidArgument(
"Experimental %s attribute was not a valid encoded xla::OpSharding "
"proto.",
attribute);
}
if (add_metadata) {
AssignOpMetadataToSharding(sharding, node_def.op(), node_def.name());
}
return std::optional<xla::OpSharding>(sharding);
}
}
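// Prefers the "sharding" attribute on XlaSharding ops and falls back to the
// _XlaSharding attribute used on other nodes.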
absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDef(
const NodeDef& node_def, bool add_metadata) {
if (node_def.op() == "XlaSharding") {
TF_ASSIGN_OR_RETURN(auto sharding,
GetShardingFromNodeDefInternal(node_def, add_metadata,
kShardingOpAttribute));
if (sharding.has_value()) {
return sharding;
}
}
return GetShardingFromNodeDefInternal(node_def, add_metadata,
kShardingAttribute);
}
} | #include "tensorflow/compiler/tf2xla/sharding_util.h"
#include <functional>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CoreUtilTest, ParseShardingFromDevice) {
Graph graph(OpRegistry::Global());
auto core_from_sharding =
[](std::optional<xla::OpSharding> sharding) -> int64 {
if (sharding.has_value() &&
sharding.value().type() == xla::OpSharding::MAXIMAL) {
return sharding.value().tile_assignment_devices(0);
} else {
return -1;
}
};
auto parse_status = ParseShardingFromDevice("", 1);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(-1, core_from_sharding(parse_status.value()));
parse_status = ParseShardingFromDevice("", 100);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(-1, core_from_sharding(parse_status.value()));
parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:-1", 100);
EXPECT_FALSE(parse_status.ok());
parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:55", 100);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(55, core_from_sharding(parse_status.value()));
parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:100", 100);
EXPECT_FALSE(parse_status.ok());
parse_status = ParseShardingFromDevice("/cpu:0", 100);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(-1, core_from_sharding(parse_status.value()));
}
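// Parameterized over a replicated sharding and a tuple sharding; verifies that op
// type and name metadata is attached to the sharding (or to every tuple element).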
class ShardingWithMetadataTest
: public ::testing::TestWithParam<xla::OpSharding> {};
TEST_P(ShardingWithMetadataTest, GetShardingFromNode) {
NodeDef node_def;
{
node_def.set_op("_Arg");
node_def.set_name("arg");
AttrValue xla_sharding;
xla_sharding.set_s("");
AttrValue index;
index.set_i(0);
AttrValue type;
type.set_type(DataType::DT_FLOAT);
node_def.mutable_attr()->insert(
{{"_XlaSharding", xla_sharding}, {"index", index}, {"T", type}});
}
auto check_metadata = [](const xla::OpSharding& sharding) {
ASSERT_EQ(sharding.metadata_size(), 1);
const auto& metadata = sharding.metadata(0);
EXPECT_EQ(metadata.op_type(), "_Arg");
EXPECT_EQ(metadata.op_name(), "arg");
};
auto test_sharding_metadata =
[&check_metadata](
const std::function<absl::StatusOr<std::optional<xla::OpSharding>>()>&
fn) {
auto status_or_sharding = fn();
TF_ASSERT_OK(status_or_sharding.status());
ASSERT_TRUE(status_or_sharding.value().has_value());
auto& sharding = status_or_sharding.value();
ASSERT_TRUE(sharding.has_value());
if (sharding->type() == xla::OpSharding::TUPLE) {
EXPECT_TRUE(sharding->metadata().empty());
for (const auto& sharding_element : sharding->tuple_shardings()) {
check_metadata(sharding_element);
}
} else {
check_metadata(sharding.value());
}
};
{
test_sharding_metadata([&node_def]() {
      return GetShardingFromNodeDef(node_def, /*add_metadata=*/true);
});
}
{
test_sharding_metadata([&node_def]() {
      return ParseShardingFromDevice(node_def, /*num_cores_per_replica=*/1,
                                     /*add_metadata=*/true);
});
}
{
Graph graph(OpRegistry::Global());
Status status;
Node* node = graph.AddNode(node_def, &status);
TF_ASSERT_OK(status);
test_sharding_metadata([node]() {
      return ParseShardingFromDevice(*node, /*num_cores_per_replica=*/1,
                                     /*add_metadata=*/true);
});
}
}
xla::OpSharding CreateTupleSharding() {
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::TUPLE);
sharding.add_tuple_shardings()->set_type(xla::OpSharding::REPLICATED);
sharding.add_tuple_shardings()->set_type(xla::OpSharding::REPLICATED);
return sharding;
}
INSTANTIATE_TEST_SUITE_P(GetShardingFromNode, ShardingWithMetadataTest,
::testing::Values(xla::sharding_builder::Replicate(),
CreateTupleSharding()));
} |
1,111 | cpp | tensorflow/tensorflow | identity_op | tensorflow/core/kernels/identity_op.cc | tensorflow/core/kernels/identity_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IDENTITY_OP_H_
#define TENSORFLOW_CORE_KERNELS_IDENTITY_OP_H_
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
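// Pass-through kernel: forwards the input tensor (or ref) to the output without
// copying, and is therefore marked as inexpensive.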
class IdentityOp : public OpKernel {
public:
explicit IdentityOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
if (IsRefType(context->input_dtype(0))) {
context->forward_ref_input_to_ref_output(0, 0);
} else {
context->set_output(0, context->input(0));
}
}
bool IsExpensive() override { return false; }
};
}
#endif
#include "tensorflow/core/kernels/identity_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
REGISTER_KERNEL_BUILDER(Name("Identity").Device(DEVICE_CPU), IdentityOp);
REGISTER_KERNEL_BUILDER(Name("Identity").Device(DEVICE_TPU_SYSTEM), IdentityOp);
REGISTER_KERNEL_BUILDER(Name("StopGradient").Device(DEVICE_CPU), IdentityOp);
REGISTER_KERNEL_BUILDER(Name("PreventGradient").Device(DEVICE_CPU), IdentityOp);
REGISTER_KERNEL_BUILDER(Name("PlaceholderWithDefault").Device(DEVICE_CPU),
IdentityOp);
REGISTER_KERNEL_BUILDER(Name("_EagerConst").Device(DEVICE_CPU), IdentityOp);
REGISTER_KERNEL_BUILDER(Name("RefIdentity").Device(DEVICE_CPU), IdentityOp);
REGISTER_KERNEL_BUILDER(Name("DebugGradientIdentity").Device(DEVICE_CPU),
IdentityOp);
REGISTER_KERNEL_BUILDER(Name("DebugGradientRefIdentity").Device(DEVICE_CPU),
IdentityOp);
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("Identity").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER( \
Name("PreventGradient").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER( \
Name("RefIdentity").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER( \
Name("StopGradient").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("DebugGradientIdentity") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("PlaceholderWithDefault") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("dtype"), \
IdentityOp) \
REGISTER_KERNEL_BUILDER( \
Name("_EagerConst").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
IdentityOp)
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
REGISTER_GPU_KERNEL(Variant);
REGISTER_GPU_KERNEL(bool);
#undef REGISTER_GPU_KERNEL
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("Identity").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("PreventGradient") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER( \
Name("RefIdentity").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER( \
Name("StopGradient").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("DebugGradientIdentity") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("PlaceholderWithDefault") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("dtype"), \
IdentityOp) \
REGISTER_KERNEL_BUILDER( \
Name("_EagerConst").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \
IdentityOp)
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
REGISTER_DEFAULT_KERNEL(Variant);
REGISTER_DEFAULT_KERNEL(bool);
#undef REGISTER_DEFAULT_KERNEL
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define REGISTER_GPU_HOST_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Identity") \
.Device(DEVICE_GPU) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("RefIdentity") \
.Device(DEVICE_GPU) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("StopGradient") \
.Device(DEVICE_GPU) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("PlaceholderWithDefault") \
.Device(DEVICE_GPU) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("dtype"), \
IdentityOp) \
REGISTER_KERNEL_BUILDER(Name("_EagerConst") \
.Device(DEVICE_GPU) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp);
REGISTER_GPU_HOST_KERNEL(int32);
REGISTER_GPU_HOST_KERNEL(tstring);
REGISTER_GPU_HOST_KERNEL(ResourceHandle);
#undef REGISTER_GPU_HOST_KERNEL
#endif
#define REGISTER_DEFAULT_HOST_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Identity") \
.Device(DEVICE_DEFAULT) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("RefIdentity") \
.Device(DEVICE_DEFAULT) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("StopGradient") \
.Device(DEVICE_DEFAULT) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp); \
REGISTER_KERNEL_BUILDER(Name("PlaceholderWithDefault") \
.Device(DEVICE_DEFAULT) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("dtype"), \
IdentityOp) \
REGISTER_KERNEL_BUILDER(Name("_EagerConst") \
.Device(DEVICE_DEFAULT) \
.HostMemory("input") \
.HostMemory("output") \
.TypeConstraint<type>("T"), \
IdentityOp)
REGISTER_DEFAULT_HOST_KERNEL(int32);
REGISTER_DEFAULT_HOST_KERNEL(tstring);
REGISTER_DEFAULT_HOST_KERNEL(ResourceHandle);
#undef REGISTER_DEFAULT_HOST_KERNEL
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class IdentityOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "Identity")
.Input(FakeInput(input_type))
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IdentityOpTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, RefInputError) { TF_ASSERT_OK(Init(DT_INT32_REF)); }
}
} |
1,112 | cpp | tensorflow/tensorflow | reverse_op | tensorflow/core/kernels/reverse_op.cc | tensorflow/core/kernels/reverse_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_REVERSE_OP_H_
#define TENSORFLOW_CORE_KERNELS_REVERSE_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T, int Dims>
struct Reverse {
void operator()(const Device& d, typename TTypes<T, Dims>::ConstTensor input,
const Eigen::array<bool, Dims>& reverse_dims,
typename TTypes<T, Dims>::Tensor output) {
output.device(d) = input.reverse(reverse_dims);
}
};
template <typename Device, typename T>
struct Reverse<Device, T, 0> {
void operator()(const Device& d, typename TTypes<T, 0>::ConstTensor input,
const Eigen::array<bool, 0>& reverse_dims,
typename TTypes<T, 0>::Tensor output) {
output.device(d) = input;
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#if !defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS) && defined(__APPLE__) && \
!defined(ANDROID) && !defined(__ANDROID__) && \
(!defined(TARGET_OS_IOS) || !TARGET_OS_IOS)
#define PLUGGABLE_DEVICE_SUPPORTED_MACOS 1
#endif
#include "tensorflow/core/kernels/reverse_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
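// Reverses dimension 1 of a [outer, middle, inner] tensor by copying inner-sized
// rows back to front with memcpy; the outer dimension is sharded across the CPU
// worker threads.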
template <typename T, int NUM_CHANNELS>
void ReverseRows(OpKernelContext* context, const Tensor& input,
Tensor* result) {
auto work = [&input, result](int64_t start, int64_t end) {
const int64_t inner_size =
NUM_CHANNELS > 0 ? NUM_CHANNELS : input.dim_size(2);
const int64_t middle_size = input.dim_size(1);
const int64_t row_size = inner_size * middle_size;
DCHECK_EQ(input.dim_size(2), inner_size);
const T* in_ptr = input.bit_casted_tensor<T, 3>().data();
T* out_ptr = result->bit_casted_tensor<T, 3>().data();
in_ptr += start * row_size;
out_ptr += start * row_size;
for (int outer_dim = start; outer_dim < end; ++outer_dim) {
out_ptr += row_size;
int remaining = middle_size;
while (remaining > 0) {
out_ptr -= inner_size;
memcpy(out_ptr, in_ptr, inner_size * sizeof(T));
in_ptr += inner_size;
--remaining;
}
out_ptr += row_size;
}
};
const int64_t N = input.dim_size(0);
const int64_t cost_per_unit = input.NumElements() / N;
auto worker_threads = context->device()->tensorflow_cpu_worker_threads();
Shard(worker_threads->num_threads, worker_threads->workers, N, cost_per_unit,
std::move(work));
}
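// Element types whose reversal can be implemented as a raw byte copy through an
// equally sized unsigned (or complex) type.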
template <typename T>
struct data_type_can_memcpy {
static constexpr bool value =
std::is_same<T, uint8>::value || std::is_same<T, int8>::value ||
std::is_same<T, bool>::value || std::is_same<T, uint16>::value ||
std::is_same<T, int16>::value || std::is_same<T, Eigen::half>::value ||
std::is_same<T, Eigen::bfloat16>::value ||
std::is_same<T, int32>::value || std::is_same<T, float>::value ||
std::is_same<T, int64_t>::value || std::is_same<T, double>::value ||
std::is_same<T, complex64>::value || std::is_same<T, complex128>::value;
};
template <typename T, int NUM_CHANNELS>
typename std::enable_if<data_type_can_memcpy<T>::value>::type
DoHandleReverseCase(OpKernelContext* context, const Tensor& input,
Tensor* result) {
if (sizeof(T) == 1) {
static_assert(sizeof(uint8) == 1, "uint8 must be 1 byte.");
ReverseRows<uint8, NUM_CHANNELS>(context, input, result);
} else if (sizeof(T) == 2) {
static_assert(sizeof(uint16) == 2, "uint16 must be 2 bytes");
ReverseRows<uint16, NUM_CHANNELS>(context, input, result);
} else if (sizeof(T) == 4) {
static_assert(sizeof(uint32) == 4, "uint32 must be 4 bytes");
ReverseRows<uint32, NUM_CHANNELS>(context, input, result);
} else if (sizeof(T) == 8) {
static_assert(sizeof(uint64) == 8, "uint64 must be 8 bytes");
ReverseRows<uint64, NUM_CHANNELS>(context, input, result);
} else if (sizeof(T) == 16) {
static_assert(sizeof(complex128) == 16, "complex128 must be 16 bytes");
ReverseRows<complex128, NUM_CHANNELS>(context, input, result);
} else {
context->CtxFailure(errors::InvalidArgument(DataTypeString(input.dtype()),
" has unexpected size of ",
sizeof(T), " bytes"));
}
}
template <typename T, int NUM_CHANNELS>
typename std::enable_if<!data_type_can_memcpy<T>::value>::type
DoHandleReverseCase(OpKernelContext* context, const Tensor& input,
Tensor* result) {}
}
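// Takes the memcpy fast path when only the row dimension (dim 1) of a rank-3 CPU
// tensor with a memcpy-able element type is reversed; otherwise falls back to the
// generic Eigen reverse functor.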
template <typename Device, typename T, int NDIMS>
void HandleReverseCase(OpKernelContext* context,
typename TTypes<bool, 1>::ConstTensor dims,
Tensor* result) {
const Tensor& input = context->input(0);
if (NDIMS == 3 && std::is_same<Device, CPUDevice>::value &&
data_type_can_memcpy<T>::value && (!dims(0) && dims(1) && !dims(2))) {
if (input.dim_size(2) == 3) {
DoHandleReverseCase<T, 3>(context, input, result);
} else {
DoHandleReverseCase<T, -1>(context, input, result);
}
return;
}
typename Eigen::array<bool, NDIMS> axes_di;
for (int i = 0; i < NDIMS; i++) {
axes_di[i] = dims(i);
}
functor::Reverse<Device, T, NDIMS>()(context->eigen_device<Device>(),
input.tensor<T, NDIMS>(), axes_di,
result->tensor<T, NDIMS>());
}
template <typename Device, typename T>
class ReverseOp : public OpKernel {
public:
explicit ReverseOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
if (input.dims() > 0) {
OP_REQUIRES(
context, input.dim_size(0) != 0,
errors::InvalidArgument("Invalid input first dimension. Found 0."));
}
const Tensor& dims = context->input(1);
if (TensorShapeUtils::IsScalar(input.shape())) {
context->set_output(0, input);
} else {
const int input_dims = input.dims();
OP_REQUIRES(context, TensorShapeUtils::IsVector(dims.shape()),
errors::InvalidArgument("'dims' must be 1-dimension, not ",
dims.dims()));
OP_REQUIRES(
context, input_dims == dims.dim_size(0),
errors::InvalidArgument(
"'dims' must have the same number of values as 'input' has "
"dimensions. 'input' has ",
input_dims, "'dims' has ", dims.dim_size(0), " values"));
OP_REQUIRES(context, input_dims <= 8,
errors::Unimplemented(
"reverse is not implemented for tensors of rank > 8."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
#define HANDLE_REVERSE(NDIMS) \
case NDIMS: \
HandleReverseCase<Device, T, NDIMS>(context, dims.vec<bool>(), output); \
return;
switch (input_dims) {
HANDLE_REVERSE(0);
HANDLE_REVERSE(1);
HANDLE_REVERSE(2);
HANDLE_REVERSE(3);
HANDLE_REVERSE(4);
HANDLE_REVERSE(5);
HANDLE_REVERSE(6);
HANDLE_REVERSE(7);
HANDLE_REVERSE(8);
}
#undef HANDLE_REVERSE
}
}
};
template <typename Device, typename T, int NDIMS>
void HandleReverseV2Case(OpKernelContext* context,
const absl::Span<const bool> axes, Tensor* result) {
const Tensor& input = context->input(0);
if (NDIMS == 3 && std::is_same<Device, CPUDevice>::value &&
data_type_can_memcpy<T>::value && (!axes[0] && axes[1] && !axes[2])) {
if (input.dim_size(2) == 3) {
DoHandleReverseCase<T, 3>(context, input, result);
} else {
DoHandleReverseCase<T, -1>(context, input, result);
}
return;
}
typename Eigen::array<bool, NDIMS> axes_di;
for (int i = 0; i < NDIMS; i++) {
axes_di[i] = axes[i];
}
functor::Reverse<Device, T, NDIMS>()(context->eigen_device<Device>(),
input.tensor<T, NDIMS>(), axes_di,
result->tensor<T, NDIMS>());
}
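// ReverseV2 takes a 1-D list of axis indices (negative values count from the end)
// instead of a boolean mask; out-of-range and repeated axes are rejected.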
template <typename Device, typename T, typename Tidx>
class ReverseV2Op : public OpKernel {
public:
explicit ReverseV2Op(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& sparse_dims = context->input(1);
if (TensorShapeUtils::IsScalar(input.shape()) || input.NumElements() == 0) {
context->set_output(0, input);
} else {
const int input_dims = input.dims();
const TensorShape& sparse_dims_shape = sparse_dims.shape();
const auto& axes_sparse_flat = sparse_dims.flat<Tidx>();
OP_REQUIRES(context, TensorShapeUtils::IsVector(sparse_dims_shape),
errors::InvalidArgument("'dims' must be 1-dimension, not ",
sparse_dims.dims()));
absl::InlinedVector<bool, 8> axes_dense(input_dims, false);
for (int dummy = 0; dummy < axes_sparse_flat.size(); dummy++) {
Tidx axis = internal::SubtleMustCopy<Tidx>(axes_sparse_flat(dummy));
Tidx canonical_axis = axis < 0 ? input_dims + axis : axis;
OP_REQUIRES(context, canonical_axis >= 0 && canonical_axis < input_dims,
errors::InvalidArgument("'axis'[", dummy, "] = ", axis,
" is out of valid range [", 0, ", ",
                                    input_dims - 1, "]"));
OP_REQUIRES(context, !axes_dense[canonical_axis],
errors::InvalidArgument("axis ", canonical_axis,
" specified more than once."));
axes_dense[canonical_axis] = true;
}
OP_REQUIRES(context, input_dims <= 8,
errors::Unimplemented(
"reverse is not implemented for tensors of rank > 8."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
#define HANDLE_REVERSE(NDIMS) \
case NDIMS: \
HandleReverseV2Case<Device, T, NDIMS>(context, axes_dense, output); \
return;
switch (input_dims) {
HANDLE_REVERSE(0);
HANDLE_REVERSE(1);
HANDLE_REVERSE(2);
HANDLE_REVERSE(3);
HANDLE_REVERSE(4);
HANDLE_REVERSE(5);
HANDLE_REVERSE(6);
HANDLE_REVERSE(7);
HANDLE_REVERSE(8);
}
#undef HANDLE_REVERSE
}
}
};
#define REGISTER_KERNELS(T) \
REGISTER_KERNEL_BUILDER(Name("Reverse") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("dims"), \
ReverseOp<CPUDevice, T>) \
REGISTER_KERNEL_BUILDER(Name("ReverseV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tidx") \
.HostMemory("axis"), \
ReverseV2Op<CPUDevice, T, int32>) \
REGISTER_KERNEL_BUILDER(Name("ReverseV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tidx") \
.HostMemory("axis"), \
ReverseV2Op<CPUDevice, T, int64>)
TF_CALL_POD_TYPES(REGISTER_KERNELS);
TF_CALL_tstring(REGISTER_KERNELS);
#undef REGISTER_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC_DIM(T, DIM) \
template <> \
void Reverse<GPUDevice, T, DIM>::operator()( \
const GPUDevice& d, typename TTypes<T, DIM>::ConstTensor input, \
const Eigen::array<bool, DIM>& reverse_dims, \
typename TTypes<T, DIM>::Tensor output); \
extern template struct Reverse<GPUDevice, T, DIM>;
#define DECLARE_GPU_SPEC(T) \
DECLARE_GPU_SPEC_DIM(T, 0) \
DECLARE_GPU_SPEC_DIM(T, 1) \
DECLARE_GPU_SPEC_DIM(T, 2) \
DECLARE_GPU_SPEC_DIM(T, 3) \
DECLARE_GPU_SPEC_DIM(T, 4) \
DECLARE_GPU_SPEC_DIM(T, 5) \
DECLARE_GPU_SPEC_DIM(T, 6) \
DECLARE_GPU_SPEC_DIM(T, 7) \
DECLARE_GPU_SPEC_DIM(T, 8)
TF_CALL_uint8(DECLARE_GPU_SPEC);
TF_CALL_int8(DECLARE_GPU_SPEC);
TF_CALL_GPU_ALL_TYPES(DECLARE_GPU_SPEC);
#undef DECLARE_GPU_SPEC
#undef DECLARE_GPU_SPEC_DIM
}
#define REGISTER_GPU_KERNELS(T) \
REGISTER_KERNEL_BUILDER(Name("Reverse") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("dims"), \
ReverseOp<GPUDevice, T>) \
REGISTER_KERNEL_BUILDER(Name("ReverseV2") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tidx") \
.HostMemory("axis"), \
ReverseV2Op<GPUDevice, T, int32>) \
REGISTER_KERNEL_BUILDER(Name("ReverseV2") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tidx") \
.HostMemory("axis"), \
ReverseV2Op<GPUDevice, T, int64>)
TF_CALL_uint8(REGISTER_GPU_KERNELS);
TF_CALL_int8(REGISTER_GPU_KERNELS);
TF_CALL_GPU_ALL_TYPES(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS
REGISTER_KERNEL_BUILDER(Name("Reverse")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.HostMemory("tensor")
.HostMemory("dims")
.HostMemory("output"),
ReverseOp<CPUDevice, int32>);
REGISTER_KERNEL_BUILDER(Name("ReverseV2")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("Tidx")
.HostMemory("tensor")
.HostMemory("axis")
.HostMemory("output"),
ReverseV2Op<CPUDevice, int32, int32>);
REGISTER_KERNEL_BUILDER(Name("ReverseV2")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("Tidx")
.HostMemory("tensor")
.HostMemory("axis")
.HostMemory("output"),
ReverseV2Op<CPUDevice, int32, int64>);
#endif
#if defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS)
#define REGISTER_DEFAULT_KERNELS(T) \
REGISTER_KERNEL_BUILDER(Name("Reverse") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<T>("T") \
.HostMemory("tensor") \
.HostMemory("dims") \
.HostMemory("output"), \
ReverseOp<CPUDevice, T>) \
REGISTER_KERNEL_BUILDER(Name("ReverseV2") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tidx") \
.HostMemory("tensor") \
.HostMemory("axis") \
.HostMemory("output"), \
ReverseV2Op<CPUDevice, T, int32>) \
REGISTER_KERNEL_BUILDER(Name("ReverseV2") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64>("Tidx") \
.HostMemory("tensor") \
.HostMemory("axis") \
.HostMemory("output"), \
ReverseV2Op<CPUDevice, T, int64>)
TF_CALL_uint8(REGISTER_DEFAULT_KERNELS);
TF_CALL_int8(REGISTER_DEFAULT_KERNELS);
TF_CALL_int16(REGISTER_DEFAULT_KERNELS);
TF_CALL_uint32(REGISTER_DEFAULT_KERNELS);
TF_CALL_int32(REGISTER_DEFAULT_KERNELS);
TF_CALL_GPU_ALL_TYPES(REGISTER_DEFAULT_KERNELS);
#undef REGISTER_DEFAULT_KERNELS
#endif
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class ReverseOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "Reverse")
.Input(FakeInput(data_type))
.Input(FakeInput())
.Attr("T", data_type)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
template <typename T>
void Reverse_0() {
MakeOp(DataTypeToEnum<T>::value);
AddInputFromArray<T>(TensorShape({}), {3});
AddInputFromArray<bool>(TensorShape({}), {true});
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
Tensor expected(allocator(), DataTypeToEnum<T>::value, TensorShape({}));
expected.scalar<T>() = expected.scalar<T>().constant(3);
test::ExpectTensorEqual<T>(expected, *output);
}
template <typename T>
void Reverse_234() {
MakeOp(DataTypeToEnum<T>::value);
AddInputFromArray<T>(TensorShape({2, 3, 4}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
AddInputFromArray<bool>(TensorShape({3}), {true, false, true});
TF_ASSERT_OK(RunOpKernel());
Tensor* params_tensor = GetOutput(0);
Tensor expected(allocator(), DataTypeToEnum<T>::value,
TensorShape({2, 3, 4}));
test::FillValues<T>(&expected,
{15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20,
3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8});
test::ExpectTensorEqual<T>(expected, *params_tensor);
}
template <typename T>
void Reverse_1234() {
MakeOp(DataTypeToEnum<T>::value);
AddInputFromArray<T>(TensorShape({1, 2, 3, 4}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
AddInputFromArray<bool>(TensorShape({4}), {true, true, false, true});
TF_ASSERT_OK(RunOpKernel());
Tensor* params_tensor = GetOutput(0);
Tensor expected(allocator(), DataTypeToEnum<T>::value,
TensorShape({1, 2, 3, 4}));
test::FillValues<T>(&expected,
{15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20,
3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8});
test::ExpectTensorEqual<T>(expected, *params_tensor);
}
};
TEST_F(ReverseOpTest, Reverse_0_uint8) { Reverse_0<uint8>(); }
TEST_F(ReverseOpTest, Reverse_0_int8) { Reverse_0<int8>(); }
TEST_F(ReverseOpTest, Reverse_0_uint16) { Reverse_0<uint16>(); }
TEST_F(ReverseOpTest, Reverse_0_int16) { Reverse_0<int16>(); }
TEST_F(ReverseOpTest, Reverse_0_float) { Reverse_0<float>(); }
TEST_F(ReverseOpTest, Reverse_0_int32) { Reverse_0<int32>(); }
TEST_F(ReverseOpTest, Reverse_0_int64) { Reverse_0<int64_t>(); }
TEST_F(ReverseOpTest, Reverse_0_double) { Reverse_0<double>(); }
TEST_F(ReverseOpTest, Reverse_0_complex64) { Reverse_0<complex64>(); }
TEST_F(ReverseOpTest, Reverse_0_complex128) { Reverse_0<complex128>(); }
TEST_F(ReverseOpTest, Reverse_234_uint8) { Reverse_234<uint8>(); }
TEST_F(ReverseOpTest, Reverse_234_int8) { Reverse_234<int8>(); }
TEST_F(ReverseOpTest, Reverse_234_uint16) { Reverse_234<uint16>(); }
TEST_F(ReverseOpTest, Reverse_234_int16) { Reverse_234<int16>(); }
TEST_F(ReverseOpTest, Reverse_234_float) { Reverse_234<float>(); }
TEST_F(ReverseOpTest, Reverse_234_int32) { Reverse_234<int32>(); }
TEST_F(ReverseOpTest, Reverse_234_int64) { Reverse_234<int64_t>(); }
TEST_F(ReverseOpTest, Reverse_234_double) { Reverse_234<double>(); }
TEST_F(ReverseOpTest, Reverse_234_complex64) { Reverse_234<complex64>(); }
TEST_F(ReverseOpTest, Reverse_234_complex128) { Reverse_234<complex128>(); }
TEST_F(ReverseOpTest, Reverse_1234_uint8) { Reverse_1234<uint8>(); }
TEST_F(ReverseOpTest, Reverse_1234_int8) { Reverse_1234<int8>(); }
TEST_F(ReverseOpTest, Reverse_1234_uint16) { Reverse_1234<uint16>(); }
TEST_F(ReverseOpTest, Reverse_1234_int16) { Reverse_1234<int16>(); }
TEST_F(ReverseOpTest, Reverse_1234_float) { Reverse_1234<float>(); }
TEST_F(ReverseOpTest, Reverse_1234_int32) { Reverse_1234<int32>(); }
TEST_F(ReverseOpTest, Reverse_1234_int64) { Reverse_1234<int64_t>(); }
TEST_F(ReverseOpTest, Reverse_1234_double) { Reverse_1234<double>(); }
TEST_F(ReverseOpTest, Reverse_1234_complex64) { Reverse_1234<complex64>(); }
TEST_F(ReverseOpTest, Reverse_1234_complex128) { Reverse_1234<complex128>(); }
static SessionOptions GetOptions(int intra_threads) {
SessionOptions opts;
opts.config.set_intra_op_parallelism_threads(intra_threads);
opts.config.set_inter_op_parallelism_threads(1);
return opts;
}
template <typename T>
static Graph* Reverse(const TensorShape& shape, int reverse_axis) {
Graph* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<T>::value, shape);
data.flat<T>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = reverse_axis;
test::graph::Reverse(g, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
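// Benchmarks reversal along axis 1 of a [outer, middle, channels] tensor and
// reports throughput in elements and bytes processed.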
template <typename T>
static void RunReverseRowsBenchmark(::testing::benchmark::State& state,
int outer_dim, int middle_dim,
int intra_threads, int channels) {
SessionOptions opts = GetOptions(intra_threads);
TensorShape shape{outer_dim, middle_dim, channels};
test::Benchmark("cpu", Reverse<T>(shape, 1), &opts, nullptr, nullptr, "",
false)
.Run(state);
const int64_t num_items =
static_cast<int64_t>(state.iterations()) * shape.num_elements();
state.SetItemsProcessed(num_items);
state.SetBytesProcessed(num_items * sizeof(T));
}
void BM_ReverseRowsOf1Channel_1T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_1T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf1Channel_1T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_1T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf1Channel_4T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_4T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf1Channel_4T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_4T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_1T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_1T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_1T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_1T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_4T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_4T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_4T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_4T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_1T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_1T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_1T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_1T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_4T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_4T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_4T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_4T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
}
} |
1,113 | cpp | tensorflow/tensorflow | image_ops | tensorflow/core/kernels/image/image_ops.cc | tensorflow/core/ops/image_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_IMAGE_OPS_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_IMAGE_OPS_H_
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
enum Interpolation { NEAREST, BILINEAR };
enum Mode { FILL_REFLECT, FILL_WRAP, FILL_CONSTANT, FILL_NEAREST };
using Eigen::array;
using Eigen::DenseIndex;
template <typename Device, Mode M>
struct MapCoordinate {
float operator()(const float out_coord, const DenseIndex len);
};
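// FILL_REFLECT: out-of-range coordinates are reflected back into [0, len - 1],
// i.e. [a b c d] extends as d c b a | a b c d | d c b a.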
template <typename Device>
struct MapCoordinate<Device, Mode::FILL_REFLECT> {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
const DenseIndex len) {
float in_coord = out_coord;
if (in_coord < 0) {
if (len <= 1) {
in_coord = 0;
} else {
const DenseIndex sz2 = 2 * len;
if (in_coord < sz2) {
in_coord = sz2 * static_cast<DenseIndex>(-in_coord / sz2) + in_coord;
}
in_coord = (in_coord < -len) ? in_coord + sz2 : -in_coord - 1;
}
} else if (in_coord > len - 1) {
if (len <= 1) {
in_coord = 0;
} else {
const DenseIndex sz2 = 2 * len;
in_coord -= sz2 * static_cast<DenseIndex>(in_coord / sz2);
if (in_coord >= len) {
in_coord = sz2 - in_coord - 1;
}
}
}
return Eigen::internal::scalar_clamp_op<float>(0.0f, len - 1)(in_coord);
}
};
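// FILL_WRAP: out-of-range coordinates wrap around the valid range,
// i.e. [a b c d] extends as a b c d | a b c d | a b c d.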
template <typename Device>
struct MapCoordinate<Device, Mode::FILL_WRAP> {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
const DenseIndex len) {
float in_coord = out_coord;
if (in_coord < 0) {
if (len <= 1) {
in_coord = 0;
} else {
const DenseIndex sz = len - 1;
in_coord += len * (static_cast<DenseIndex>(-in_coord / sz) + 1);
}
} else if (in_coord > len - 1) {
if (len <= 1) {
in_coord = 0;
} else {
const DenseIndex sz = len - 1;
in_coord -= len * static_cast<DenseIndex>(in_coord / sz);
}
}
return Eigen::internal::scalar_clamp_op<float>(0.0f, len - 1)(in_coord);
}
};
template <typename Device>
struct MapCoordinate<Device, Mode::FILL_CONSTANT> {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
const DenseIndex len) {
return out_coord;
}
};
template <typename Device>
struct MapCoordinate<Device, Mode::FILL_NEAREST> {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
const DenseIndex len) {
return Eigen::internal::scalar_clamp_op<float>(0.0f, len - 1)(out_coord);
}
};
template <typename Device, typename T, Mode M>
class ProjectiveGenerator {
private:
typename TTypes<T, 4>::ConstTensor input_;
typename TTypes<float>::ConstMatrix transforms_;
const Interpolation interpolation_;
const T fill_value_;
public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
ProjectiveGenerator(typename TTypes<T, 4>::ConstTensor input,
typename TTypes<float>::ConstMatrix transforms,
const Interpolation interpolation, const T fill_value)
: input_(input),
transforms_(transforms),
interpolation_(interpolation),
fill_value_(fill_value) {}
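  // Maps each output pixel (batch, y, x, channel) through the 3x3 projective
  // transform (8 parameters per image, the ninth element implicitly 1) back to
  // input coordinates, then samples with the configured interpolation and fill
  // mode.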
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
operator()(const array<DenseIndex, 4>& coords) const {
const int64_t output_y = coords[1];
const int64_t output_x = coords[2];
const float* transform =
transforms_.dimension(0) == 1
? transforms_.data()
: &transforms_.data()[transforms_.dimension(1) * coords[0]];
float projection = transform[6] * output_x + transform[7] * output_y + 1.f;
if (projection == 0) {
return fill_value_;
}
const float input_x =
(transform[0] * output_x + transform[1] * output_y + transform[2]) /
projection;
const float input_y =
(transform[3] * output_x + transform[4] * output_y + transform[5]) /
projection;
auto map_functor = MapCoordinate<Device, M>();
const float x = map_functor(input_x, input_.dimension(2));
const float y = map_functor(input_y, input_.dimension(1));
const DenseIndex batch = coords[0];
const DenseIndex channels = coords[3];
switch (interpolation_) {
case NEAREST:
return nearest_interpolation(batch, y, x, channels, fill_value_);
case BILINEAR:
return bilinear_interpolation(batch, y, x, channels, fill_value_);
}
return fill_value_;
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
nearest_interpolation(const DenseIndex batch, const float y, const float x,
const DenseIndex channel, const T fill_value) const {
return read_with_fill_value(batch, DenseIndex(std::round(y)),
DenseIndex(std::round(x)), channel, fill_value);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
bilinear_interpolation(const DenseIndex batch, const float y, const float x,
const DenseIndex channel, const T fill_value) const {
const float y_floor = std::floor(y);
const float x_floor = std::floor(x);
const float y_ceil = y_floor + 1;
const float x_ceil = x_floor + 1;
const float value_yfloor =
(x_ceil - x) * static_cast<float>(read_with_fill_value(
batch, DenseIndex(y_floor), DenseIndex(x_floor),
channel, fill_value)) +
(x - x_floor) * static_cast<float>(read_with_fill_value(
batch, DenseIndex(y_floor), DenseIndex(x_ceil),
channel, fill_value));
const float value_yceil =
(x_ceil - x) * static_cast<float>(read_with_fill_value(
batch, DenseIndex(y_ceil), DenseIndex(x_floor),
channel, fill_value)) +
(x - x_floor) * static_cast<float>(read_with_fill_value(
batch, DenseIndex(y_ceil), DenseIndex(x_ceil),
channel, fill_value));
return T((y_ceil - y) * value_yfloor + (y - y_floor) * value_yceil);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T read_with_fill_value(
const DenseIndex batch, const DenseIndex y, const DenseIndex x,
const DenseIndex channel, const T fill_value) const {
return (0 <= y && y < input_.dimension(1) && 0 <= x &&
x < input_.dimension(2))
? input_(array<DenseIndex, 4>{batch, y, x, channel})
: fill_value;
}
};
}
namespace functor {
using generator::Interpolation;
using generator::Mode;
using generator::ProjectiveGenerator;
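// Selects at runtime among ProjectiveGenerator instantiations specialized on the
// fill mode, so the per-pixel boundary handling inside the generator is not
// branched on for every pixel.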
template <typename Device, typename T>
struct FillProjectiveTransform {
typedef typename TTypes<T, 4>::Tensor OutputType;
typedef typename TTypes<T, 4>::ConstTensor InputType;
typedef typename TTypes<float, 2>::ConstTensor TransformsType;
const Interpolation interpolation;
explicit FillProjectiveTransform(Interpolation interpolation)
: interpolation(interpolation) {}
EIGEN_ALWAYS_INLINE
void operator()(const Device& device, OutputType* output,
const InputType& images, const TransformsType& transform,
const Mode fill_mode, const T fill_value) const {
switch (fill_mode) {
case Mode::FILL_REFLECT:
output->device(device) =
output->generate(ProjectiveGenerator<Device, T, Mode::FILL_REFLECT>(
images, transform, interpolation, fill_value));
break;
case Mode::FILL_WRAP:
output->device(device) =
output->generate(ProjectiveGenerator<Device, T, Mode::FILL_WRAP>(
images, transform, interpolation, fill_value));
break;
case Mode::FILL_CONSTANT:
output->device(device) = output->generate(
ProjectiveGenerator<Device, T, Mode::FILL_CONSTANT>(
images, transform, interpolation, fill_value));
break;
case Mode::FILL_NEAREST:
output->device(device) =
output->generate(ProjectiveGenerator<Device, T, Mode::FILL_NEAREST>(
images, transform, interpolation, fill_value));
break;
}
}
};
}
}
#endif
#include <algorithm>
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
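// Sets output 0 to [batch, height, width, channels], reading height and width
// from the 2-element int32 size input when it is a constant and leaving them
// unknown otherwise.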
Status SetOutputToSizedImage(InferenceContext* c, DimensionHandle batch_dim,
int size_input_idx, DimensionHandle channel_dim) {
ShapeHandle size;
TF_RETURN_IF_ERROR(c->WithRank(c->input(size_input_idx), 1, &size));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 2, &unused));
const Tensor* size_tensor = c->input_tensor(size_input_idx);
DimensionHandle width;
DimensionHandle height;
if (size_tensor == nullptr) {
width = c->UnknownDim();
height = c->UnknownDim();
} else {
if (size_tensor->dtype() != DT_INT32) {
return errors::InvalidArgument(
"Bad size input type for SetOutputToSizedImage: Expected DT_INT32 "
"but got ",
DataTypeString(size_tensor->dtype()), " for input #", size_input_idx,
" in ", c->DebugString());
}
auto vec = size_tensor->vec<int32>();
height = c->MakeDim(vec(0));
width = c->MakeDim(vec(1));
}
c->set_output(0, c->MakeShape({batch_dim, height, width, channel_dim}));
return absl::OkStatus();
}
Status ResizeShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input));
  return SetOutputToSizedImage(c, c->Dim(input, 0), /*size_input_idx=*/1,
c->Dim(input, 3));
}
Status DecodeImageShapeFn(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
DimensionHandle channels_dim;
int32_t channels;
TF_RETURN_IF_ERROR(c->GetAttr("channels", &channels));
if (channels == 0) {
channels_dim = c->UnknownDim();
} else {
if (channels < 0) {
return errors::InvalidArgument("channels must be non-negative, got ",
channels);
}
channels_dim = c->MakeDim(channels);
}
c->set_output(0, c->MakeShape({InferenceContext::kUnknownDim,
InferenceContext::kUnknownDim, channels_dim}));
return absl::OkStatus();
}
Status DecodeImageV2ShapeFn(InferenceContext* c) {
ShapeHandle unused;
int32_t channels;
bool expand_animations;
DimensionHandle channels_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->GetAttr("channels", &channels));
TF_RETURN_IF_ERROR(c->GetAttr("expand_animations", &expand_animations));
if (channels == 0) {
channels_dim = c->UnknownDim();
} else {
if (channels < 0) {
return errors::InvalidArgument("channels must be non-negative, got ",
channels);
}
channels_dim = c->MakeDim(channels);
}
if (expand_animations) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
} else {
c->set_output(0,
c->MakeShape({InferenceContext::kUnknownDim,
InferenceContext::kUnknownDim, channels_dim}));
return absl::OkStatus();
}
}
Status EncodeImageShapeFn(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &unused));
c->set_output(0, c->Scalar());
return absl::OkStatus();
}
Status BatchedEncodeImageShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 3, &input));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -3, &s));
c->set_output(0, s);
return absl::OkStatus();
}
Status ColorspaceShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &input));
DimensionHandle last_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(input, -1), 3, &last_dim));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->ReplaceDim(input, -1, last_dim, &out));
c->set_output(0, out);
return absl::OkStatus();
}
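// Common shape function for non-max-suppression ops: boxes must be
// [num_boxes, 4] and scores [num_boxes] with matching first dimensions, the
// thresholds scalars; the vector of selected indices has unknown length.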
Status NMSShapeFn(InferenceContext* c) {
ShapeHandle boxes;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &boxes));
ShapeHandle scores;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &scores));
ShapeHandle max_output_size;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &max_output_size));
ShapeHandle iou_threshold;
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &iou_threshold));
ShapeHandle score_threshold;
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &score_threshold));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 1), 4, &unused));
c->set_output(0, c->Vector(c->UnknownDim()));
return absl::OkStatus();
}
Status SoftNMSShapeFn(InferenceContext* c) {
ShapeHandle boxes;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &boxes));
ShapeHandle scores;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &scores));
ShapeHandle max_output_size;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &max_output_size));
ShapeHandle iou_threshold;
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &iou_threshold));
ShapeHandle score_threshold;
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &score_threshold));
ShapeHandle soft_nms_sigma;
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &soft_nms_sigma));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 1), 4, &unused));
c->set_output(0, c->Vector(c->UnknownDim()));
c->set_output(1, c->Vector(c->UnknownDim()));
return absl::OkStatus();
}
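// CombinedNonMaxSuppression: boxes are [batch, num_boxes, q, 4] with q either 1
// or num_classes, scores are [batch, num_boxes, num_classes]; the padded output
// size is max_total_size, capped at max_size_per_class * num_classes when
// pad_per_class is set.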
Status CombinedNMSShapeFn(InferenceContext* c) {
ShapeHandle boxes;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &boxes));
ShapeHandle scores;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &scores));
ShapeHandle max_output_size_per_class;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &max_output_size_per_class));
ShapeHandle max_total_size;
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &max_total_size));
ShapeHandle unused_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused_shape));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused));
TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 1), c->Dim(scores, 1), &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 3), 4, &unused));
DimensionHandle d = c->Dim(boxes, 2);
DimensionHandle class_dim = c->Dim(scores, 2);
if (c->ValueKnown(d) && c->ValueKnown(class_dim)) {
if (c->Value(d) != 1 && c->Value(d) != c->Value(class_dim)) {
return errors::InvalidArgument(
"third dimension of boxes must be either "
"1 or equal to the third dimension of scores");
}
}
DimensionHandle output_dim;
DimensionHandle batch_dim = c->Dim(boxes, 0);
TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(3, &output_dim));
if (c->ValueKnown(output_dim) && c->Value(output_dim) <= 0) {
return errors::InvalidArgument("max_total_size should be > 0 ");
}
DimensionHandle size_per_class;
TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(2, &size_per_class));
int64_t output_size = -1;
bool pad_per_class;
TF_RETURN_IF_ERROR(c->GetAttr("pad_per_class", &pad_per_class));
if (!pad_per_class) {
output_size = c->Value(output_dim);
} else {
if (c->ValueKnown(size_per_class) && c->Value(size_per_class) <= 0) {
return errors::InvalidArgument(
"max_output_size_per_class must be > 0 "
"if pad_per_class is set to true ");
}
if (c->ValueKnown(size_per_class) && c->ValueKnown(class_dim)) {
output_size = std::min(
static_cast<int64_t>(c->Value(output_dim)),
static_cast<int64_t>(c->Value(size_per_class)) * c->Value(class_dim));
}
}
c->set_output(0, c->MakeShape({batch_dim, output_size, 4}));
c->set_output(1, c->MakeShape({batch_dim, output_size}));
c->set_output(2, c->MakeShape({batch_dim, output_size}));
c->set_output(3, c->Vector(batch_dim));
return absl::OkStatus();
}
}
REGISTER_OP("ResizeArea")
.Input("images: T")
.Input("size: int32")
.Output("resized_images: float")
.Attr(
"T: {int8, uint8, int16, uint16, int32, int64, half, float, double,"
"bfloat16}")
.Attr("align_corners: bool = false")
.SetShapeFn(ResizeShapeFn);
REGISTER_OP("ResizeBicubic")
.Input("images: T")
.Input("size: int32")
.Output("resized_images: float")
.Attr(
"T: {int8, uint8, int16, uint16, int32, int64, half, float, double,"
"bfloat16}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn(ResizeShapeFn);
REGISTER_OP("ResizeBicubicGrad")
.Input("grads: float")
.Input("original_image: T")
.Output("output: T")
.Attr("T: {float, double}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("ResizeBilinear")
.Input("images: T")
.Input("size: int32")
.Output("resized_images: float")
.Attr(
"T: {int8, uint8, int16, uint16, int32, int64, bfloat16, half, "
"float, double, bfloat16}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn(ResizeShapeFn);
REGISTER_OP("ScaleAndTranslate")
.Input("images: T")
.Input("size: int32")
.Input("scale: float")
.Input("translation: float")
.Output("resized_images: float")
.Attr(
"T: {int8, uint8, int16, uint16, int32, int64, bfloat16, half, "
"float, double}")
.Attr("kernel_type: string = 'lanczos3'")
.Attr("antialias: bool = true")
.SetShapeFn(ResizeShapeFn);
REGISTER_OP("QuantizedResizeBilinear")
.Input("images: T")
.Input("size: int32")
.Input("min: float")
.Input("max: float")
.Output("resized_images: T")
.Output("out_min: float")
.Output("out_max: float")
.Attr("T: {quint8, qint32, float}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn([](InferenceContext* c) {
TF_RETURN_IF_ERROR(ResizeShapeFn(c));
ShapeHandle min_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &min_shape));
ShapeHandle max_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &max_shape));
c->set_output(1, c->MakeShape({}));
c->set_output(2, c->MakeShape({}));
return absl::OkStatus();
});
REGISTER_OP("ResizeBilinearGrad")
.Input("grads: float")
.Input("original_image: T")
.Output("output: T")
.Attr("T: {float, bfloat16, half, double}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("ScaleAndTranslateGrad")
.Input("grads: T")
.Input("original_image: T")
.Input("scale: float")
.Input("translation: float")
.Output("output: T")
.Attr("T: {float}")
.Attr("kernel_type: string = 'lanczos3'")
.Attr("antialias: bool = true")
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("ResizeNearestNeighbor")
.Input("images: T")
.Input("size: int32")
.Output("resized_images: T")
.Attr(
"T: {int8, uint8, int16, uint16, int32, int64, half, float,"
"double, bfloat16}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn(ResizeShapeFn);
REGISTER_OP("ResizeNearestNeighborGrad")
.Input("grads: T")
.Input("size: int32")
.Output("output: T")
.Attr("T: {uint8, int8, int32, half, float, double, bfloat16}")
.Attr("align_corners: bool = false")
.Attr("half_pixel_centers: bool = false")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input));
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(unused, 0), 2, &unused_dim));
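      // The output height/width can only be inferred when `size` is a
      // compile-time constant; otherwise they remain unknown.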
const Tensor* size = c->input_tensor(1);
if (size == nullptr) {
TF_RETURN_IF_ERROR(c->ReplaceDim(input, 1, c->UnknownDim(), &input));
TF_RETURN_IF_ERROR(c->ReplaceDim(input, 2, c->UnknownDim(), &input));
} else {
auto size_vec = size->vec<int32>();
TF_RETURN_IF_ERROR(
c->ReplaceDim(input, 1, c->MakeDim(size_vec(0)), &input));
TF_RETURN_IF_ERROR(
c->ReplaceDim(input, 2, c->MakeDim(size_vec(1)), &input));
}
c->set_output(0, input);
return absl::OkStatus();
});
REGISTER_OP("RandomCrop")
.Input("image: T")
.Input("size: int64")
.Output("output: T")
.Attr("T: {uint8, int8, int16, int32, int64, float, double}")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetIsStateful()
.Deprecated(8, "Random crop is now pure Python")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle image;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &image));
DimensionHandle channels = c->Dim(image, -1);
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->input(1), c->Vector(2), &unused));
const Tensor* size = c->input_tensor(1);
DimensionHandle h;
DimensionHandle w;
if (size == nullptr) {
h = c->UnknownDim();
w = c->UnknownDim();
} else {
auto size_vec = size->vec<int64_t>();
h = c->MakeDim(size_vec(0));
w = c->MakeDim(size_vec(1));
}
c->set_output(0, c->MakeShape({h, w, channels}));
return absl::OkStatus();
});
REGISTER_OP("DecodeImage")
.Input("contents: string")
.Attr("channels: int = 0")
.Attr("dtype: {uint8, uint16, float32} = DT_UINT8")
.Output("image: dtype")
.Attr("expand_animations: bool = true")
.SetShapeFn(DecodeImageV2ShapeFn);
REGISTER_OP("DecodeJpeg")
.Input("contents: string")
.Attr("channels: int = 0")
.Attr("ratio: int = 1")
.Attr("fancy_upscaling: bool = true")
.Attr("try_recover_truncated: bool = false")
.Attr("acceptable_fraction: float = 1.0")
.Attr("dct_method: string = ''")
.Output("image: uint8")
.SetShapeFn(DecodeImageShapeFn);
REGISTER_OP("DecodeAndCropJpeg")
.Input("contents: string")
.Input("crop_window: int32")
.Attr("channels: int = 0")
.Attr("ratio: int = 1")
.Attr("fancy_upscaling: bool = true")
.Attr("try_recover_truncated: bool = false")
.Attr("acceptable_fraction: float = 1.0")
.Attr("dct_method: string = ''")
.Output("image: uint8")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
DimensionHandle channels_dim = c->UnknownDim();
DimensionHandle h = c->UnknownDim();
DimensionHandle w = c->UnknownDim();
int32_t channels;
TF_RETURN_IF_ERROR(c->GetAttr("channels", &channels));
if (channels != 0) {
if (channels < 0) {
return errors::InvalidArgument("channels must be non-negative, got ",
channels);
}
channels_dim = c->MakeDim(channels);
}
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(unused, 0), 4, &unused_dim));
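      // crop_window is [crop_y, crop_x, crop_height, crop_width], so entries
      // 2 and 3 give the output height and width when statically known.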
const Tensor* crop_window = c->input_tensor(1);
if (crop_window != nullptr) {
auto crop_window_vec = crop_window->vec<int32>();
h = c->MakeDim(crop_window_vec(2));
w = c->MakeDim(crop_window_vec(3));
}
c->set_output(0, c->MakeShape({h, w, channels_dim}));
return absl::OkStatus();
});
REGISTER_OP("EncodeJpeg")
.Input("image: uint8")
.Attr("format: {'', 'grayscale', 'rgb'} = ''")
.Attr("quality: int = 95")
.Attr("progressive: bool = false")
.Attr("optimize_size: bool = false")
.Attr("chroma_downsampling: bool = true")
.Attr("density_unit: {'in', 'cm'} = 'in'")
.Attr("x_density: int = 300")
.Attr("y_density: int = 300")
.Attr("xmp_metadata: string = ''")
.Output("contents: string")
.SetShapeFn(EncodeImageShapeFn);
REGISTER_OP("EncodeJpegVariableQuality")
.Input("images: uint8")
.Input("quality: int32")
.Output("contents: string")
.SetShapeFn(EncodeImageShapeFn);
REGISTER_OP("ExtractJpegShape")
.Input("contents: string")
.Output("image_shape: output_type")
.Attr("output_type: {int32, int64} = DT_INT32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
c->set_output(0, c->Vector(3));
return absl::OkStatus();
});
REGISTER_OP("AdjustContrast")
.Input("images: T")
.Input("contrast_factor: float")
.Input("min_value: float")
.Input("max_value: float")
.Output("output: float")
.Attr("T: {uint8, int8, int16, int32, int64, float, double}")
.Deprecated(2, "Use AdjustContrastv2 instead")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::UnchangedShapeWithRankAtLeast(c, 3);
});
REGISTER_OP("AdjustContrastv2")
.Input("images: T")
.Input("contrast_factor: float")
.Output("output: T")
.Attr("T: {half, float} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnchangedShapeWithRankAtLeast(c, 3);
});
REGISTER_OP("AdjustHue")
.Input("images: T")
.Input("delta: float")
.Output("output: T")
.Attr("T: {half, float} = DT_FLOAT")
.SetSh | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ImageOpsTest, SampleDistortedBoundingBox_ShapeFn) {
ShapeInferenceTestOp op("SampleDistortedBoundingBox");
INFER_OK(op, "?;?", "[3];[3];[1,1,4]");
}
TEST(ImageOpsTest, Resize_ShapeFn) {
for (const char* op_name : {"ResizeArea", "ResizeBicubic", "ResizeBilinear",
"ResizeNearestNeighbor"}) {
ShapeInferenceTestOp op(op_name);
op.input_tensors.resize(2);
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "?;[3]");
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,20,30,d0_3]");
}
}
TEST(ImageOpsTest, DecodeGif) {
ShapeInferenceTestOp op("DecodeGif");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
INFER_OK(op, "?", "[?,?,?,3]");
INFER_OK(op, "[]", "[?,?,?,3]");
}
TEST(ImageOpTest, DecodeImage) {
ShapeInferenceTestOp op("DecodeImage");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("expand_animations", false)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("expand_animations", true)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "?");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[]");
}
TEST(ImageOpsTest, DecodeImage_ShapeFn) {
for (const char* op_name : {"DecodeJpeg", "DecodePng"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Attr("channels", 4)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,4]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[]");
}
}
TEST(ImageOpsTest, DecodeAndCropJpeg_ShapeFn) {
const char* op_name = "DecodeAndCropJpeg";
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Wrong number of inputs passed: 1 while 2 expected", op, "[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Attr("channels", 4)
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,4]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[];[]");
}
TEST(ImageOpsTest, DecodeAndCropJpeg_InvalidCropWindow) {
const char* op_name = "DecodeAndCropJpeg";
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Wrong number of inputs passed: 1 while 2 expected", op, "[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,?]");
}
TEST(ImageOpsTest, EncodeImage_ShapeFn) {
for (const char* op_name : {"EncodeJpeg"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[1,?,3]", "[]");
}
}
TEST(ImageOpsTest, BatchedEncodeImage_ShapeFn) {
for (const char* op_name : {"EncodePng"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[1,?,3]", "[]");
INFER_OK(op, "[?,1,?,3]", "[d0_0]");
INFER_OK(op, "[4,5,1,?,3]", "[d0_0,d0_1]");
}
}
TEST(ImageOpsTest, ExtractJpegShape_ShapeFn) {
ShapeInferenceTestOp op("ExtractJpegShape");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
INFER_OK(op, "?", "[3]");
}
TEST(ImageOpsTest, Colorspace_ShapeFn) {
for (const char* op_name : {"HSVToRGB", "RGBToHSV"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[1,2,4]");
INFER_OK(op, "[1,2,3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[1,2,?]", "[d0_0,d0_1,3]");
INFER_OK(op, "?", "?");
}
}
TEST(ImageOpsTest, ExtractGlimpse_ShapeFn) {
ShapeInferenceTestOp op("ExtractGlimpse");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "ExtractGlimpse")
.Input({"input", 0, DT_FLOAT})
.Input({"size", 1, DT_INT32})
.Input({"offsets", 2, DT_FLOAT})
.Attr("uniform_noise", true)
.Attr("noise", "")
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[];?");
INFER_ERROR("Dimension must be 2 but is 3", op, "?;[3];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;[1,2,3]");
INFER_OK(op, "[1,?,3,?];[2];?", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2];?", "[d0_0,20,30,d0_3]");
INFER_OK(op, "[?,?,3,?];[2];[1,?]", "[d2_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[2];[1,?]", "[d0_0|d2_0,20,30,d_0|d0_3]");
INFER_ERROR("Dimensions must be equal, but are 10 and 1", op,
"[10,?,?,?];?;[1,2]");
}
TEST(ImageOpsTest, CropAndResize_ShapeFn) {
ShapeInferenceTestOp op("CropAndResize");
op.input_tensors.resize(4);
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?;?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;[1,2];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;?;[1,2]");
INFER_ERROR("Dimension must be 2 but is 1", op, "?;?;?;[1]");
INFER_OK(op, "[1,?,3,?];?;?;[2]", "[?,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[3] = &size_tensor;
INFER_OK(op, "[1,?,3,?];?;?;[2]", "[?,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[2,4];?;[2]", "[d1_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];?;[2];[2]", "[d2_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[?,4];[?];[2]", "[d1_0|d3_0,20,30,d0_3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 1", op, "?;[2,?];[1];?");
INFER_ERROR("Dimension must be 4 but is 3", op, "?;[?,3];?;?");
}
TEST(ImageOpsTest, ResizeNearestNeighborGrad_ShapeFn) {
ShapeInferenceTestOp op("ResizeNearestNeighborGrad");
op.input_tensors.resize(2);
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]")
INFER_ERROR("Dimension must be 2 but is 1", op, "?;[1]");
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,20,30,d0_3]");
}
TEST(ImageOpsTest, CropAndResizeGradImage_ShapeFn) {
ShapeInferenceTestOp op("CropAndResizeGradImage");
op.input_tensors.resize(4);
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;?;[1,2]");
INFER_OK(op, "?;?;?;?", "[?,?,?,?]");
Tensor image_size = test::AsTensor<int32>({10, 20, 30, 40});
op.input_tensors[3] = &image_size;
INFER_OK(op, "?;?;?;[1]", "[10, 20, 30, 40]");
}
TEST(ImageOpsTest, RandomCrop_ShapeFn) {
ShapeInferenceTestOp op("RandomCrop");
op.input_tensors.resize(2);
INFER_ERROR("must be rank 3", op, "[1,2];?");
INFER_ERROR("must be equal", op, "?;[3]");
INFER_ERROR("must be equal", op, "?;[1,2]");
INFER_OK(op, "[?,?,?];[2]", "[?,?,d0_2]");
Tensor size = test::AsTensor<int64_t>({10, 20});
op.input_tensors[1] = &size;
INFER_OK(op, "[?,?,?];[2]", "[10,20,d0_2]");
}
TEST(ImageOpsTest, QuantizedResizeBilinear_ShapeFn) {
ShapeInferenceTestOp op("QuantizedResizeBilinear");
op.input_tensors.resize(4);
NodeDefBuilder builder =
NodeDefBuilder("test", "QuantizedResizeBilinear")
.Input(NodeDefBuilder::NodeOut{"images", 0, DT_QINT32})
.Input(NodeDefBuilder::NodeOut{"size", 0, DT_INT32})
.Input(NodeDefBuilder::NodeOut{"min", 0, DT_FLOAT})
.Input(NodeDefBuilder::NodeOut{"max", 0, DT_FLOAT})
.Attr("T", DT_QINT32)
.Attr("Toutput", DT_QINT32);
TF_ASSERT_OK(builder.Finalize(&op.node_def));
INFER_OK(op, "[1,?,3,?];[2];[];[]",
"[d0_0,?,?,d0_3];[];[]");
INFER_ERROR("must be rank 0", op, "[1,?,3,?];[2];[?];[]");
INFER_ERROR("must be rank 0", op, "[1,?,3,?];[2];[];[?]");
const Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors.at(1) = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2];[];[]", "[d0_0,20,30,d0_3];[];[]");
}
TEST(ImageOpsTest, DrawBoundingBoxes_ShapeFn) {
ShapeInferenceTestOp op("DrawBoundingBoxes");
op.input_tensors.resize(2);
INFER_ERROR("must be rank 4", op, "[1,?,3];?");
INFER_ERROR("should be either 1 (GRY), 3 (RGB), or 4 (RGBA)", op,
"[1,?,?,5];?");
INFER_ERROR("must be rank 3", op, "[1,?,?,4];[1,4]");
INFER_ERROR("Dimension must be 4", op, "[1,?,?,4];[1,2,2]");
INFER_OK(op, "[4,?,?,4];?", "in0");
INFER_OK(op, "[?,?,?,?];[?,?,?]", "in0");
INFER_OK(op, "[4,?,?,4];[?,?,?]", "in0");
INFER_OK(op, "[4,?,?,4];[?,?,4]", "in0");
}
} |
1,114 | cpp | tensorflow/tensorflow | cross_op | tensorflow/core/kernels/cross_op.cc | tensorflow/core/kernels/cross_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CROSS_OP_H_
#define TENSORFLOW_CORE_KERNELS_CROSS_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename Type>
struct Cross {
void operator()(const Device &d,
typename TTypes<Type, 2>::ConstTensor in0_data,
typename TTypes<Type, 2>::ConstTensor in1_data,
typename TTypes<Type, 2>::Tensor output_data) {
auto s1 = output_data.template chip<1>(0);
auto s2 = output_data.template chip<1>(1);
auto s3 = output_data.template chip<1>(2);
auto u1 = in0_data.template chip<1>(0);
auto u2 = in0_data.template chip<1>(1);
auto u3 = in0_data.template chip<1>(2);
auto v1 = in1_data.template chip<1>(0);
auto v2 = in1_data.template chip<1>(1);
auto v3 = in1_data.template chip<1>(2);
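    // Component-wise 3-D cross product: s = u x v.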
s1.device(d) = u2 * v3 - u3 * v2;
s2.device(d) = u3 * v1 - u1 * v3;
s3.device(d) = u1 * v2 - u2 * v1;
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cmath>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/cross_op.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename Type>
class CrossOp : public OpKernel {
public:
explicit CrossOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& in0 = context->input(0);
const Tensor& in1 = context->input(1);
OP_REQUIRES(context, in0.shape() == in1.shape(),
errors::InvalidArgument("Both inputs must be of same shape: ",
in0.shape().DebugString(), " vs. ",
in1.shape().DebugString()));
OP_REQUIRES(context, in0.dims() >= 1,
errors::InvalidArgument("Input must be at least 1D",
in0.shape().DebugString()));
auto inner_dim = in0.dim_size(in0.dims() - 1);
OP_REQUIRES(context, inner_dim == 3,
errors::FailedPrecondition(
"Cross-products are only defined for 3-element vectors."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, in0.shape(), &output));
typename TTypes<Type, 2>::ConstTensor in0_data =
in0.flat_inner_dims<Type>();
typename TTypes<Type, 2>::ConstTensor in1_data =
in1.flat_inner_dims<Type>();
typename TTypes<Type, 2>::Tensor output_data =
output->flat_inner_dims<Type>();
functor::Cross<Device, Type>()(context->eigen_device<Device>(), in0_data,
in1_data, output_data);
}
};
#define REGISTER_CPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("Cross").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
CrossOp<CPUDevice, type>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_CPU_KERNEL);
#undef REGISTER_CPU_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_KERNEL(type) \
template <> \
void Cross<GPUDevice, type>::operator()( \
const GPUDevice& d, TTypes<type, 2>::ConstTensor in0_data, \
TTypes<type, 2>::ConstTensor in1_data, \
TTypes<type, 2>::Tensor output_data); \
extern template struct Cross<GPUDevice, type>;
TF_CALL_REAL_NUMBER_TYPES(DECLARE_GPU_KERNEL);
#undef DECLARE_GPU_KERNEL
}
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("Cross").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
CrossOp<GPUDevice, type>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
#endif
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class CrossOpTest : public OpsTestBase {
protected:
CrossOpTest() {
TF_EXPECT_OK(NodeDefBuilder("cross_op", "Cross")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(CrossOpTest, Zero) {
AddInputFromArray<float>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<float>(TensorShape({3}), {0, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0, 0, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CrossOpTest, RightHandRule) {
AddInputFromArray<float>(TensorShape({2, 3}), {1, 0, 0, 0, 1, 0});
AddInputFromArray<float>(TensorShape({2, 3}), {0, 1, 0, 1, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {{0, 0, 1, 0, 0, -1}});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CrossOpTest, ArbitraryNonintegral) {
const float u1 = -0.669, u2 = -0.509, u3 = 0.125;
const float v1 = -0.477, v2 = 0.592, v3 = -0.110;
const float s1 = u2 * v3 - u3 * v2;
const float s2 = u3 * v1 - u1 * v3;
const float s3 = u1 * v2 - u2 * v1;
AddInputFromArray<float>(TensorShape({3}), {u1, u2, u3});
AddInputFromArray<float>(TensorShape({3}), {v1, v2, v3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {s1, s2, s3});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-6);
}
class CrossOpIntTest : public OpsTestBase {
protected:
CrossOpIntTest() {
TF_EXPECT_OK(NodeDefBuilder("cross_int_op", "Cross")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(CrossOpIntTest, RightHandRule) {
AddInputFromArray<int>(TensorShape({2, 3}), {2, 0, 0, 0, 2, 0});
AddInputFromArray<int>(TensorShape({2, 3}), {0, 2, 0, 2, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int>(&expected, {{0, 0, 4, 0, 0, -4}});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
} |
1,115 | cpp | tensorflow/tensorflow | in_topk_op | tensorflow/core/kernels/in_topk_op.cc | tensorflow/core/kernels/in_topk_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IN_TOPK_OP_H_
#define TENSORFLOW_CORE_KERNELS_IN_TOPK_OP_H_
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
struct TopKArg {
int64_t k_value = -1;
const Tensor* k_tensor = nullptr;
};
template <typename Device, typename T, typename TargetT>
struct InTopKFunctor {
template <int ndims>
using Dims = Eigen::DSizes<Eigen::Index, ndims>;
void operator()(OpKernelContext* context,
typename TTypes<T, 2>::ConstTensor predictions,
typename TTypes<TargetT>::ConstVec targets, const TopKArg k,
typename TTypes<bool>::Vec output) {}
};
template <typename T, typename TargetT>
struct InTopKFunctor<CPUDevice, T, TargetT> {
void operator()(OpKernelContext* context,
typename TTypes<T, 2>::ConstTensor predictions,
typename TTypes<TargetT>::ConstVec targets, const TopKArg k,
typename TTypes<bool>::Vec output) {
const Eigen::Index num_targets = predictions.dimension(0);
const Eigen::Index num_classes = predictions.dimension(1);
int64_t k_val = k.k_value;
if (k.k_tensor != nullptr) {
if (k.k_tensor->dtype() == DT_INT32) {
k_val = k.k_tensor->scalar<int32>()();
} else {
k_val = k.k_tensor->scalar<int64_t>()();
}
}
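    // For each row: an out-of-range target or a non-finite prediction forces
    // `false`; otherwise the target is in the top k iff fewer than k classes
    // have a strictly larger prediction than the target's.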
for (int batch_idx = 0; batch_idx < num_targets; batch_idx++) {
auto target = internal::SubtleMustCopy(targets(batch_idx));
bool cannot_say = !FastBoundsCheck(target, num_classes) ||
!std::isfinite(predictions(batch_idx, target));
int more_probable_classes = 0;
if (!cannot_say) {
const T target_prediction = predictions(batch_idx, target);
for (int class_idx = 0; class_idx < num_classes; ++class_idx) {
T pred = predictions(batch_idx, class_idx);
if (!std::isfinite(pred)) {
cannot_say = true;
break;
} else if (pred > target_prediction) {
++more_probable_classes;
if (more_probable_classes > k_val) break;
}
}
}
output(batch_idx) = cannot_say ? false : (more_probable_classes < k_val);
}
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/in_topk_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T, typename TARGET_T>
class InTopK : public OpKernel {
public:
explicit InTopK(OpKernelConstruction* context) : OpKernel(context) {
if (context->num_inputs() == 2) {
OP_REQUIRES_OK(context, context->GetAttr("k", &k_));
}
}
void Compute(OpKernelContext* context) override {
const auto& predictions_in = context->input(0);
const auto& targets_in = context->input(1);
int64_t k_value = k_;
const Tensor* k_tensor = nullptr;
if (context->num_inputs() == 3) {
const auto& k_in = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(k_in.shape()),
errors::InvalidArgument("k must be 0-D, got shape ",
k_in.shape().DebugString()));
k_tensor = &k_in;
}
OP_REQUIRES(context, predictions_in.dims() == 2,
errors::InvalidArgument("predictions must be 2-dimensional"));
OP_REQUIRES(context, targets_in.dims() == 1,
errors::InvalidArgument("targets must be 1-dimensional"));
OP_REQUIRES(context, predictions_in.dim_size(0) == targets_in.dim_size(0),
errors::InvalidArgument("First dimension of predictions ",
predictions_in.dim_size(0),
" must match length of targets ",
targets_in.dim_size(0)));
const auto predictions = predictions_in.matrix<T>();
const auto targets = targets_in.vec<TARGET_T>();
Tensor* t_out = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(
0, TensorShape({targets_in.dim_size(0)}), &t_out));
auto out = t_out->vec<bool>();
functor::InTopKFunctor<Device, T, TARGET_T> f;
functor::TopKArg arg;
arg.k_value = k_value;
arg.k_tensor = k_tensor;
f(context, predictions, targets, arg, out);
}
private:
int k_;
};
REGISTER_KERNEL_BUILDER(Name("InTopK")
.Device(DEVICE_CPU)
.HostMemory("predictions")
.HostMemory("targets")
.HostMemory("precision")
.TypeConstraint<int32>("T"),
InTopK<CPUDevice, float, int32>);
REGISTER_KERNEL_BUILDER(Name("InTopK")
.Device(DEVICE_CPU)
.HostMemory("predictions")
.HostMemory("targets")
.HostMemory("precision")
.TypeConstraint<int64_t>("T"),
InTopK<CPUDevice, float, int64>);
REGISTER_KERNEL_BUILDER(Name("InTopKV2")
.Device(DEVICE_CPU)
.HostMemory("predictions")
.HostMemory("targets")
.HostMemory("k")
.HostMemory("precision")
.TypeConstraint<int32>("T"),
InTopK<CPUDevice, float, int32>);
REGISTER_KERNEL_BUILDER(Name("InTopKV2")
.Device(DEVICE_CPU)
.HostMemory("predictions")
.HostMemory("targets")
.HostMemory("k")
.HostMemory("precision")
.TypeConstraint<int64_t>("T"),
InTopK<CPUDevice, float, int64>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC(T, TARGET_T) \
template <> \
void InTopKFunctor<GPUDevice, T, TARGET_T>::operator()( \
OpKernelContext* context, \
typename TTypes<T, 2>::ConstTensor predictions, \
typename TTypes<TARGET_T>::ConstVec targets, const TopKArg k, \
typename TTypes<bool>::Vec output); \
extern template struct InTopKFunctor<GPUDevice, T, TARGET_T>;
DECLARE_GPU_SPEC(float, int32);
DECLARE_GPU_SPEC(float, int64_t);
#undef DECLARE_GPU_SPEC
}
REGISTER_KERNEL_BUILDER(
Name("InTopKV2").Device(DEVICE_GPU).TypeConstraint<int32>("T"),
InTopK<GPUDevice, float, int32>);
REGISTER_KERNEL_BUILDER(
Name("InTopKV2").Device(DEVICE_GPU).TypeConstraint<int64_t>("T"),
InTopK<GPUDevice, float, int64>);
#endif
} | #include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename T>
static Graph* InTopK(int num_targets, int num_classes, T top_k) {
Graph* g = new Graph(OpRegistry::Global());
DataType dtype = DataTypeToEnum<T>::value;
Tensor predictions_t(DT_FLOAT, TensorShape({num_targets, num_classes}));
predictions_t.flat<float>().setRandom();
Tensor targets_t(dtype, TensorShape({num_targets}));
targets_t.flat<T>().setRandom();
Tensor k_t(dtype, TensorShape({}));
k_t.scalar<T>() = k_t.scalar<T>().constant(top_k);
Node* predictions = test::graph::Constant(g, predictions_t, "predictions");
Node* targets = test::graph::Constant(g, targets_t, "targets");
Node* k = test::graph::Constant(g, k_t, "k");
Node* in_topk;
TF_CHECK_OK(NodeBuilder(g->NewName("in_topk"), "InTopKV2")
.Input(predictions)
.Input(targets)
.Input(k)
.Attr("T", dtype)
.Finalize(g, &in_topk));
return g;
}
#define BM_NAME(T, TARGETS, CLASSES, K, DEVICE) \
BM_InTopK##_##T##_##TARGETS##_##CLASSES##_##K##_##DEVICE
#define BM_InTopK(T, TARGETS, CLASSES, K, DEVICE) \
static void BM_NAME(T, TARGETS, CLASSES, K, \
DEVICE)(::testing::benchmark::State & state) { \
test::Benchmark(#DEVICE, InTopK<T>(TARGETS, CLASSES, K), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \
TARGETS * CLASSES); \
} \
BENCHMARK(BM_NAME(T, TARGETS, CLASSES, K, DEVICE))->UseRealTime();
BM_InTopK(int64_t, 64, 1000, 10, cpu);
BM_InTopK(int64_t, 64, 10000, 10, cpu);
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
BM_InTopK(int64_t, 64, 1000, 10, gpu);
BM_InTopK(int64_t, 64, 10000, 10, gpu);
#endif
} |
1,116 | cpp | tensorflow/tensorflow | broadcast_to_op | tensorflow/core/kernels/broadcast_to_op.cc | tensorflow/core/kernels/broadcast_to_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BROADCAST_TO_OP_H_
#define TENSORFLOW_CORE_KERNELS_BROADCAST_TO_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct BroadcastTo {
template <int NDIMS>
void DoBCast(
const Device &device, typename TTypes<T, NDIMS>::Tensor out,
typename TTypes<T, NDIMS>::ConstTensor in,
const typename Eigen::array<Eigen::DenseIndex, NDIMS> &bcast) const {
MaybeWith32BitIndexing<Device>(
[&](auto out32, auto in32, const auto &bcast32) {
out32.device(device) = in32.broadcast(bcast32);
},
out, in, bcast);
}
template <int NDIMS>
void ReshapeAndBCast(const Device &device, Tensor &output_tensor,
const Tensor &input_tensor, const BCast &bcast) const {
DoBCast<NDIMS>(
device, output_tensor.template shaped<T, NDIMS>(bcast.result_shape()),
input_tensor.template shaped<T, NDIMS>(bcast.x_reshape()),
BCast::ToIndexArrayType<Eigen::DenseIndex, NDIMS>(bcast.x_bcast()));
}
void operator()(const Device &device, OpKernelContext *ctx,
Tensor &output_tensor, const TensorShape &output_shape,
const Tensor &input_tensor, const TensorShape &input_shape,
const BCast &bcast) const {
const int ndims = bcast.y_reshape().size();
switch (ndims) {
case 1:
ReshapeAndBCast<1>(device, output_tensor, input_tensor, bcast);
break;
case 2:
ReshapeAndBCast<2>(device, output_tensor, input_tensor, bcast);
break;
case 3:
ReshapeAndBCast<3>(device, output_tensor, input_tensor, bcast);
break;
case 4:
ReshapeAndBCast<4>(device, output_tensor, input_tensor, bcast);
break;
case 5:
ReshapeAndBCast<5>(device, output_tensor, input_tensor, bcast);
break;
default:
ctx->SetStatus(errors::Unimplemented(
"Broadcast between ", input_shape.DebugString(), " and ",
output_shape.DebugString(), " is not supported yet."));
break;
}
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define EIGEN_USE_GPU
#endif
#if !defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS) && defined(__APPLE__) && \
!defined(ANDROID) && !defined(__ANDROID__) && \
(!defined(TARGET_OS_IOS) || !TARGET_OS_IOS)
#define PLUGGABLE_DEVICE_SUPPORTED_MACOS 1
#endif
#include "tensorflow/core/kernels/broadcast_to_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class BroadcastToOp : public OpKernel {
public:
explicit BroadcastToOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& input_tensor = ctx->input(0);
const TensorShape& input_shape = input_tensor.shape();
const Tensor& shape_tensor = ctx->input(1);
TensorShape output_shape;
OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_tensor, &output_shape));
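    // Fast path: an identical output shape lets us forward the input tensor
    // without allocating or copying.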
if (output_shape == input_shape) {
ctx->set_output(0, input_tensor);
return;
}
OP_REQUIRES(ctx, input_shape.dims() <= output_shape.dims(),
errors::InvalidArgument(
"Rank of input (", input_shape.dims(),
") must be no greater than rank of output shape (",
output_shape.dims(), ")."));
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &output_tensor));
const Device& device = ctx->eigen_device<Device>();
if (input_shape.dims() == 0) {
functor::FillFunctor<Device, T>()(device, output_tensor->flat<T>(),
input_tensor.scalar<T>());
return;
}
BCast bcast(BCast::FromShape(input_shape), BCast::FromShape(output_shape),
true);
OP_REQUIRES(ctx, bcast.IsValid(),
errors::InvalidArgument(
"Incompatible shapes: ", input_shape.DebugString(), " vs. ",
output_shape.DebugString()));
OP_REQUIRES(ctx, BCast::ToShape(bcast.output_shape()) == output_shape,
errors::InvalidArgument("Unable to broadcast tensor of shape ",
input_shape, " to tensor of shape ",
output_shape));
if (output_shape.num_elements() == 0) {
return;
}
functor::BroadcastTo<Device, T>()(device, ctx, *output_tensor, output_shape,
input_tensor, input_shape, bcast);
}
};
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BroadcastTo").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
BroadcastToOp<CPUDevice, type>);
TF_CALL_ALL_TYPES(REGISTER_KERNEL);
TF_CALL_float8_e5m2(REGISTER_KERNEL);
TF_CALL_float8_e4m3fn(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU_TEMPLATE(Type) \
template <> \
void BroadcastTo<GPUDevice, Type>::operator()( \
const GPUDevice& d, OpKernelContext* ctx, Tensor& output, \
const TensorShape& output_shape, const Tensor& input, \
const TensorShape& input_shape, const BCast& bcast) const; \
extern template struct BroadcastTo<GPUDevice, Type>;
TF_CALL_GPU_ALL_TYPES(DECLARE_GPU_TEMPLATE);
TF_CALL_int64(DECLARE_GPU_TEMPLATE);
TF_CALL_float8_e5m2(DECLARE_GPU_TEMPLATE);
TF_CALL_float8_e4m3fn(DECLARE_GPU_TEMPLATE);
#undef DECLARE_GPU_TEMPLATE
}
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("BroadcastTo") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.HostMemory("shape"), \
BroadcastToOp<GPUDevice, type>);
TF_CALL_GPU_ALL_TYPES(REGISTER_KERNEL);
TF_CALL_int64(REGISTER_KERNEL);
TF_CALL_float8_e5m2(REGISTER_KERNEL);
TF_CALL_float8_e4m3fn(REGISTER_KERNEL);
#undef REGISTER_KERNEL
REGISTER_KERNEL_BUILDER(Name("BroadcastTo")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("shape")
.HostMemory("output"),
BroadcastToOp<CPUDevice, int32>);
#endif
#if defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS)
REGISTER_KERNEL_BUILDER(Name("BroadcastTo")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("shape")
.HostMemory("output"),
BroadcastToOp<CPUDevice, int32>);
#endif
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename InputShape>
static Graph* BroadcastTo(int dim0, int dim1, InputShape input_shape) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_FLOAT, input_shape(dim0, dim1));
input.flat<float>() = input.flat<float>().setRandom();
Tensor shape(DT_INT32, TensorShape({2}));
shape.flat<int32>()(0) = dim0;
shape.flat<int32>()(1) = dim1;
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, shape))
.Attr("T", DT_FLOAT)
.Attr("Tidx", DT_INT32)
.Finalize(g, &node));
return g;
}
#define BM_BroadcastTo_InnerDim(DIM0, DIM1, type) \
static void BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1( \
::testing::benchmark::State& state) { \
test::Benchmark(#type, \
BroadcastTo(DIM0, DIM1, \
[](int dim0, int dim1) { \
return TensorShape({dim0, 1}); \
}), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \
DIM1); \
} \
BENCHMARK(BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1)->UseRealTime();
#define BM_BroadcastTo_OuterDim(DIM0, DIM1, type) \
static void BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1( \
::testing::benchmark::State& state) { \
test::Benchmark(#type, \
BroadcastTo(DIM0, DIM1, \
[](int dim0, int dim1) { \
return TensorShape({1, dim1}); \
}), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \
DIM1); \
} \
BENCHMARK(BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1)->UseRealTime();
BM_BroadcastTo_InnerDim(64, 64, cpu);
BM_BroadcastTo_InnerDim(128, 128, cpu);
BM_BroadcastTo_InnerDim(256, 256, cpu);
BM_BroadcastTo_InnerDim(512, 512, cpu);
BM_BroadcastTo_InnerDim(1024, 1024, cpu);
BM_BroadcastTo_InnerDim(500, 20000, cpu);
BM_BroadcastTo_OuterDim(64, 64, cpu);
BM_BroadcastTo_OuterDim(128, 128, cpu);
BM_BroadcastTo_OuterDim(256, 256, cpu);
BM_BroadcastTo_OuterDim(512, 512, cpu);
BM_BroadcastTo_OuterDim(1024, 1024, cpu);
BM_BroadcastTo_OuterDim(500, 20000, cpu);
} |
1,117 | cpp | tensorflow/tensorflow | cwise_ops | tensorflow/compiler/tf2xla/kernels/cwise_ops.cc | tensorflow/core/kernels/cwise_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CWISE_OPS_H_
#define TENSORFLOW_CORE_KERNELS_CWISE_OPS_H_
#define _USE_MATH_DEFINES
#include <cmath>
#include <functional>
#include <type_traits>
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace Eigen {
namespace internal {
#if GOOGLE_CUDA
template <>
struct scalar_arg_op<std::complex<float>> {
typedef typename Eigen::NumTraits<std::complex<float>>::Real result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator()(
const std::complex<float>& a) const {
return ::atan2f(a.imag(), a.real());
}
};
template <>
struct scalar_arg_op<std::complex<double>> {
typedef typename Eigen::NumTraits<std::complex<double>>::Real result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double operator()(
const std::complex<double>& a) const {
return ::atan2(a.imag(), a.real());
}
};
#endif
template <typename Scalar, typename Exponent>
struct safe_scalar_binary_pow_op {
static_assert(std::is_integral<Scalar>::value, "Integer type expected");
static_assert(std::is_integral<Exponent>::value &&
std::is_signed<Exponent>::value,
"Signed integer type expected");
bool* const error;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE safe_scalar_binary_pow_op(bool* error)
: error(error) {}
EIGEN_DEVICE_FUNC inline Scalar operator()(const Scalar& a,
const Exponent& b) const {
const Exponent safe_b = tensorflow::internal::SubtleMustCopy(b);
if (TF_PREDICT_TRUE(safe_b >= 0)) {
return numext::pow(a, safe_b);
} else {
*error = true;
return 0;
}
}
};
template <typename Scalar, typename Exponent>
struct functor_traits<safe_scalar_binary_pow_op<Scalar, Exponent>> {
enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
};
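// Integer division/modulo that flags `error` and returns 0 for a zero
// divisor; numeric_limits<T>::min() / -1 is handled separately to avoid
// signed-overflow undefined behavior in the division itself.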
template <typename T, typename DivOrMod>
struct safe_div_or_mod_op {
static_assert(std::is_integral<T>::value, "Integer type expected");
bool* const error;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE safe_div_or_mod_op(bool* error)
: error(error) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& a,
const T& b) const {
const T safe_b = tensorflow::internal::SubtleMustCopy(b);
if (TF_PREDICT_TRUE(safe_b != 0)) {
const T safe_a = tensorflow::internal::SubtleMustCopy(a);
if (TF_PREDICT_FALSE(std::is_signed<T>::value &&
safe_a == std::numeric_limits<T>::min() &&
safe_b == T(-1))) {
return DivOrMod()(-safe_a, 1);
}
return DivOrMod()(safe_a, safe_b);
} else {
*error = true;
return 0;
}
}
};
template <typename T, typename DivOrMod>
struct functor_traits<safe_div_or_mod_op<T, DivOrMod>> {
enum {
Cost = functor_traits<DivOrMod>::Cost + NumTraits<T>::AddCost,
PacketAccess = false,
};
};
template <typename T, typename Binary>
struct no_nan_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& a,
const T& b) const {
if (b != T(0)) {
return Binary()(a, b);
} else {
return T(0);
}
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a,
const Packet& b) const {
const Packet mask = pcmp_eq(b, pzero(b));
const Packet quotient = Binary().packetOp(a, b);
return pandnot(quotient, mask);
}
};
template <typename T, bool IsComplex = Eigen::NumTraits<T>::IsComplex>
struct div_no_nan_op;
template <typename T>
struct div_no_nan_op<T, false>
: public no_nan_op<T, scalar_quotient_op<T>> {
};
template <typename T>
struct functor_traits<div_no_nan_op<T, false>> {
enum {
Cost = functor_traits<scalar_quotient_op<T>>::Cost + NumTraits<T>::AddCost,
PacketAccess = true,
};
};
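// Complex specialization: returns 0 when the divisor is 0, and also when the
// numerator a * conj(b) evaluates to 0, instead of performing the division.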
template <typename T>
struct div_no_nan_op<T, true> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& a,
const T& b) const {
if (b == T(0)) {
return T(0);
} else {
const T numerator =
scalar_product_op<T>()(a, scalar_conjugate_op<T>()(b));
if (numerator == T(0)) {
return T(0);
}
}
return scalar_quotient_op<T>()(a, b);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a,
const Packet& b) const {
const Packet numerator = pmul(a, pconj(b));
const Packet mask = por(pcmp_eq(b, pzero(a)), pcmp_eq(numerator, pzero(a)));
const Packet quotient = pdiv(a, b);
return pandnot(quotient, mask);
}
};
template <typename T>
struct functor_traits<div_no_nan_op<T, true>> {
enum {
Cost = functor_traits<scalar_quotient_op<T>>::Cost + NumTraits<T>::MulCost,
PacketAccess = packet_traits<T>::HasMul && packet_traits<T>::HasDiv &&
packet_traits<T>::HasConj,
};
};
template <typename T>
struct mul_no_nan_op : public no_nan_op<T, scalar_product_op<T>> {
};
template <typename T>
struct functor_traits<mul_no_nan_op<T>> {
enum {
Cost = functor_traits<scalar_product_op<T>>::Cost + NumTraits<T>::AddCost,
PacketAccess = true,
};
};
template <typename Tout, typename Tin, typename Binary>
struct scalar_left : private Binary {
using result_type = Tout;
const Tin* left;
inline scalar_left(const scalar_left& other) = default;
template <typename... Args>
EIGEN_DEVICE_FUNC inline explicit scalar_left(const Tin* c, Args... args)
: Binary(args...), left(c) {}
EIGEN_DEVICE_FUNC inline Tout operator()(const Tin& right) const {
return Binary::operator()(*left, right);
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& right_packet) const {
return Binary::packetOp(Eigen::internal::pset1<Packet>(*left),
right_packet);
}
};
template <typename Tout, typename Tin, typename Binary>
struct functor_traits<scalar_left<Tout, Tin, Binary>> {
enum {
Cost = functor_traits<Binary>::Cost,
PacketAccess = functor_traits<Binary>::PacketAccess,
};
};
template <typename Tout, typename Tin, typename Binary>
struct scalar_right : private Binary {
using result_type = Tout;
const Tin* right;
inline scalar_right(const scalar_right& other) = default;
template <typename... Args>
EIGEN_DEVICE_FUNC inline explicit scalar_right(const Tin* c, Args... args)
: Binary(args...), right(c) {}
EIGEN_DEVICE_FUNC inline Tout operator()(const Tin& left) const {
return Binary::operator()(left, *right);
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& left_packet) const {
return Binary::packetOp(left_packet,
Eigen::internal::pset1<Packet>(*right));
}
};
template <typename Tout, typename Tin, typename Binary>
struct functor_traits<scalar_right<Tout, Tin, Binary>> {
enum {
Cost = functor_traits<Binary>::Cost,
PacketAccess = functor_traits<Binary>::PacketAccess,
};
};
template <class T>
struct equal_to : std::function<bool(T, T)> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const T& x,
const T& y) const {
return x == y;
}
};
template <class T>
struct not_equal_to : std::function<bool(T, T)> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const T& x,
const T& y) const {
return x != y;
}
};
template <class T>
struct greater : std::function<bool(T, T)> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const T& x,
const T& y) const {
return x > y;
}
};
template <class T>
struct less : std::function<bool(T, T)> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const T& x,
const T& y) const {
return x < y;
}
};
template <class T>
struct greater_equal : std::function<bool(T, T)> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const T& x,
const T& y) const {
return x >= y;
}
};
template <class T>
struct less_equal : std::function<bool(T, T)> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const T& x,
const T& y) const {
return x <= y;
}
};
template <typename Scalar>
struct scalar_squared_difference_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& a, const Scalar& b) const {
const Scalar v = scalar_difference_op<Scalar>()(a, b);
return scalar_product_op<Scalar>()(v, scalar_conjugate_op<Scalar>()(v));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a,
const Packet& b) const {
const Packet v = scalar_difference_op<Scalar>().packetOp(a, b);
return scalar_product_op<Scalar>().packetOp(
v, scalar_conjugate_op<Scalar>().packetOp(v));
}
};
template <typename Scalar>
struct functor_traits<scalar_squared_difference_op<Scalar>> {
enum {
Cost = functor_traits<scalar_difference_op<Scalar>>::Cost +
functor_traits<scalar_conjugate_op<Scalar>>::Cost +
functor_traits<scalar_product_op<Scalar>>::Cost,
PacketAccess = functor_traits<scalar_difference_op<Scalar>>::PacketAccess &&
functor_traits<scalar_conjugate_op<Scalar>>::PacketAccess &&
functor_traits<scalar_product_op<Scalar>>::PacketAccess
};
};
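// Integer floor division: the truncated quotient is decremented by one when
// the division is inexact and the operands have opposite signs, so the result
// rounds toward negative infinity.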
template <typename T, typename Enable = void>
struct google_floor_div {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x,
const T& y) const {
const T z = x / y;
return z * y != x && (x < T(0) != y < T(0)) ? z - T(1) : z;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
Packet zeros = pzero(x);
Packet x_mask = pcmp_lt(x, zeros);
Packet y_mask = pcmp_lt(y, zeros);
Packet x_div_y = pdiv(x, y);
Packet x_div_y_times_y = pmul(x_div_y, y);
    return pselect(por(pcmp_eq(x_div_y_times_y, x), pcmp_eq(x_mask, y_mask)),
                   x_div_y, psub(x_div_y, pones(x)));
}
};
template <typename T>
struct google_floor_div<
T, typename std::enable_if<std::is_unsigned<T>::value>::type> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x,
const T& y) const {
return x / y;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
return pdiv(x, y);
}
};
template <typename Scalar>
struct functor_traits<google_floor_div<Scalar>> {
enum {
Cost = 2 * Eigen::internal::scalar_div_cost<
Scalar, packet_traits<Scalar>::HasDiv>::value +
NumTraits<Scalar>::AddCost,
PacketAccess = packet_traits<Scalar>::HasDiv
};
};
template <typename T, typename Enable = void>
struct google_floor_div_real {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x,
const T& y) const {
return Eigen::numext::floor(x / y);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
return pfloor(pdiv(x, y));
}
};
template <typename Scalar>
struct functor_traits<google_floor_div_real<Scalar>> {
enum {
Cost = 2 * Eigen::internal::scalar_div_cost<
Scalar, packet_traits<Scalar>::HasDiv>::value +
2 * NumTraits<Scalar>::AddCost,
PacketAccess =
packet_traits<Scalar>::HasDiv && packet_traits<Scalar>::HasRound
};
};
template <typename T>
struct google_floor_fmod {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x,
const T& y) const {
T trunc_mod = scalar_fmod_op<T>()(x, y);
return trunc_mod != T(0) && (y < T(0) != trunc_mod < T(0)) ? trunc_mod + y
: trunc_mod;
}
};
template <typename Scalar>
struct functor_traits<google_floor_fmod<Scalar>> {
enum {
Cost = functor_traits<Eigen::internal::scalar_fmod_op<Scalar>>::Cost +
NumTraits<Scalar>::AddCost,
PacketAccess = false
};
};
template <typename T>
struct google_floor_mod {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x,
const T& y) const {
T trunc_mod = Eigen::internal::scalar_mod2_op<T>()(x, y);
return trunc_mod != T(0) && (y < T(0) != trunc_mod < T(0)) ? trunc_mod + y
: trunc_mod;
}
};
template <typename Scalar>
struct functor_traits<google_floor_mod<Scalar>> {
enum {
Cost = functor_traits<Eigen::internal::scalar_mod2_op<Scalar>>::Cost +
NumTraits<Scalar>::AddCost,
PacketAccess = false
};
};
template <typename T, typename Enable = void>
struct google_truncate_div_real {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x,
const T& y) const {
EIGEN_USING_STD(trunc)
return static_cast<T>(trunc(x / y));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
const Packet z = pdiv(x, y);
return pselect(pcmp_lt(z, pzero(z)), pceil(z), pfloor(z));
}
};
template <typename Scalar>
struct functor_traits<google_truncate_div_real<Scalar>> {
enum {
Cost = 2 * Eigen::internal::scalar_div_cost<
Scalar, packet_traits<Scalar>::HasDiv>::value +
3 * NumTraits<Scalar>::AddCost,
PacketAccess = packet_traits<Scalar>::HasDiv &&
packet_traits<Scalar>::HasRound &&
packet_traits<Scalar>::HasCmp
};
};
#if EIGEN_COMP_GNUC && __cplusplus > 199711L
#define DISABLE_FLOAT_EQUALITY_WARNING \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop")
#else
#define DISABLE_FLOAT_EQUALITY_WARNING
#define ENABLE_FLOAT_EQUALITY_WARNING
#endif
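// Round-half-to-even ("banker's rounding"): x + 0.5 is floored, and exact .5
// fractions are instead rounded to the nearest even value via
// 2 * floor(0.5 * x + 0.5).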
template <typename Scalar, bool IsInteger = Eigen::NumTraits<Scalar>::IsInteger,
bool HasRint = packet_traits<Scalar>::HasRound>
struct scalar_round_half_to_even_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x) const {
EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex),
NUMERIC_TYPE_MUST_BE_REAL)
const Scalar round_val = Eigen::numext::floor(x + Scalar(0.5));
const Scalar fraction = round_val - x;
if (TF_PREDICT_FALSE(fraction == Scalar(.5))) {
return Scalar(2) * Eigen::numext::floor(Scalar(.5) * x + Scalar(0.5));
} else {
return round_val;
}
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
Packet half = pset1<Packet>(Scalar(0.5));
Packet round_val = pfloor(padd(x, half));
Packet fraction = psub(round_val, x);
Packet half_mask = pcmp_eq(fraction, half);
bool any_halves = predux_any(half_mask);
if (TF_PREDICT_FALSE(any_halves)) {
Packet two = pset1<Packet>(Scalar(2));
Packet nearest_even = pmul(two, pfloor(pmadd(half, x, half)));
return pselect(half_mask, nearest_even, round_val);
} else {
return round_val;
}
}
};
template <typename Scalar>
struct scalar_round_half_to_even_op<Scalar, true, false> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x) const {
return x;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
return x;
}
};
template <typename Scalar>
struct scalar_round_half_to_even_op<Scalar, false, true> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x) const {
return Eigen::numext::rint(x);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
return print(x);
}
};
template <typename Scalar>
struct functor_traits<scalar_round_half_to_even_op<Scalar>> {
enum {
Cost = Eigen::NumTraits<Scalar>::IsInteger ? 0
: 4 * NumTraits<Scalar>::AddCost,
PacketAccess = packet_traits<Scalar>::HasRound &&
packet_traits<Scalar>::HasAdd &&
packet_traits<Scalar>::HasMul,
};
};
template <typename Scalar, bool IsInteger = Eigen::NumTraits<Scalar>::IsInteger>
struct scalar_round_up_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x) const {
EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex),
NUMERIC_TYPE_MUST_BE_REAL)
return Eigen::numext::floor(x + Scalar(0.5));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
return pfloor(padd(x, pset1<Packet>(0.5)));
}
};
template <typename Scalar>
struct scalar_round_up_op<Scalar, true> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x) const {
return x;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
return x;
}
};
template <typename Scalar, bool IsInteger>
struct functor_traits<scalar_round_up_op<Scalar, IsInteger>> {
enum {
Cost = IsInteger ? 0 : 4 * NumTraits<Scalar>::AddCost,
PacketAccess = IsInteger || packet_traits<Scalar>::HasRound
};
};
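// Editor's illustrative sketch, not part of the original header: contrasts the
// two rounding functors defined above on tie values (round-half-to-even sends
// ties to the nearest even integer, round-up computes floor(x + 0.5)).
inline void RoundingFunctorsExample() {
  scalar_round_half_to_even_op<float> round_even;
  scalar_round_up_op<float> round_up;
  (void)round_even(0.5f);  // == 0.0f, tie resolved to the even neighbor
  (void)round_even(2.5f);  // == 2.0f, tie resolved to the even neighbor
  (void)round_up(0.5f);    // == 1.0f
  (void)round_up(2.5f);    // == 3.0f
  (void)round_up(-0.5f);   // == 0.0f, since floor(-0.5f + 0.5f) == 0.0f
}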
#undef ENABLE_FLOAT_EQUALITY_WARNING
#undef DISABLE_FLOAT_EQUALITY_WARNING
template <typename Scalar>
struct bitwise_xor_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x, const Scalar& y) const {
return x ^ y;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a,
const Packet& b) const {
return Eigen::internal::pxor(a, b);
}
};
template <typename Scalar>
struct functor_traits<bitwise_xor_op<Scalar>> {
enum { Cost = Eigen::NumTraits<Scalar>::AddCost, PacketAccess = true };
};
template <typename Scalar>
struct xlogy_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x, const Scalar& y) const {
if (x == Scalar(0.)) {
return Scalar(0.);
}
return x * numext::log(y);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
Packet zeros = pzero(x);
Packet mask = pcmp_eq(x, zeros);
scalar_log_op<Scalar> log_op;
Packet log_y = log_op.packetOp(y);
Packet x_log_y = pmul(x, log_y);
return pselect(mask, x, x_log_y);
}
};
template <typename Scalar>
struct functor_traits<xlogy_op<Scalar>> {
enum {
Cost = functor_traits<scalar_log_op<Scalar>>::Cost +
Eigen::NumTraits<Scalar>::MulCost,
PacketAccess = functor_traits<scalar_log_op<Scalar>>::PacketAccess
};
};
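// Editor's illustrative sketch, not part of the original header: xlogy_op
// checks x == 0 before touching log(y), so 0 * log(0) is defined as 0 rather
// than nan, which is what entropy-style expressions rely on.
inline void XlogyExample() {
  xlogy_op<float> xlogy;
  (void)xlogy(0.0f, 0.0f);                      // == 0.0f even though log(0) is -inf
  (void)xlogy(2.0f, 1.0f);                      // == 0.0f, i.e. 2 * log(1)
  (void)xlogy(3.0f, Eigen::numext::exp(1.0f));  // ~= 3.0f, i.e. 3 * log(e)
}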
template <typename Scalar>
struct xlog1py_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x, const Scalar& y) const {
if (x == Scalar(0.)) {
return Scalar(0.);
}
return x * numext::log1p(y);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
Packet zeros = pzero(x);
Packet mask = pcmp_eq(x, zeros);
scalar_log1p_op<Scalar> log1p_op;
Packet log1p_y = log1p_op.packetOp(y);
Packet x_log1p_y = pmul(x, log1p_y);
return pselect(mask, x, x_log1p_y);
}
};
template <typename Scalar>
struct functor_traits<xlog1py_op<Scalar>> {
enum {
Cost = functor_traits<scalar_log1p_op<Scalar>>::Cost +
Eigen::NumTraits<Scalar>::MulCost,
#if TENSORFLOW_USE_ROCM
PacketAccess = false,
#else
PacketAccess = functor_traits<scalar_log1p_op<Scalar>>::PacketAccess
#endif
};
};
template <typename Scalar>
struct xdivy_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& x, const Scalar& y) const {
if (x == Scalar(0.)) {
return Scalar(0.);
}
return x / y;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x,
const Packet& y) const {
Packet zeros = pzero(x);
Packet mask = pcmp_eq(x, zeros);
Packet x_div_y = pdiv(x, y);
return pselect(mask, x, x_div_y);
}
};
template <typename Scalar>
struct functor_traits<xdivy_op<Scalar>> {
enum {
Cost =
Eigen::NumTraits<Scalar>::AddCost +
Eigen::internal::scalar_div_cost<Scalar,
packet_traits<Scalar>::HasDiv>::value,
PacketAccess = packet_traits<Scalar>::HasDiv
};
};
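// Editor's illustrative sketch, not part of the original header: xdivy_op
// short-circuits on x == 0 before the division, so 0 / 0 evaluates to 0
// instead of nan, while ordinary quotients are unchanged.
inline void XdivyExample() {
  xdivy_op<float> xdivy;
  (void)xdivy(0.0f, 0.0f);  // == 0.0f (a plain division would give nan)
  (void)xdivy(6.0f, 3.0f);  // == 2.0f
}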
template <typename T>
struct scalar_erfinv_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const {
constexpr T half = T(0.5);
T y = numext::ndtri(half * x + half);
constexpr T half_sqrt = T(M_SQRT1_2);
return y * half_sqrt;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
Packet half = pset1<Packet>(T(0.5));
Packet y = pndtri<Packet>(pmadd(half, x, half));
Packet half_sqrt = pset1<Packet>(T(M_SQRT1_2));
return pmul(y, half_sqrt);
}
};
template <typename T>
struct functor_traits<scalar_erfinv_op<T>> {
enum {
Cost = functor_traits<scalar_ndtri_op<T>>::Cost + NumTraits<T>::AddCost,
PacketAccess = packet_traits<T>::HasNdtri,
};
};
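// Editor's illustrative sketch, not part of the original header: the functor
// above relies on the identity erfinv(x) = ndtri((x + 1) / 2) / sqrt(2), so a
// quick sanity check is erf(erfinv(x)) ~= x for x in (-1, 1).
inline void ErfinvExample() {
  scalar_erfinv_op<float> erfinv;
  scalar_erf_op<float> erf;
  (void)erfinv(0.0f);       // == 0.0f
  (void)erf(erfinv(0.5f));  // ~= 0.5f
}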
}
}
namespace tensorflow {
namespace functor {
template <typename T, typename F, typename R = T>
struct base {
typedef F func;
static constexpr bool use_bcast_optimization = false;
typedef R out_type;
typedef T in_type;
typedef typename TTypes<out_type>::Flat tout_type;
typedef typename TTypes<in_type>::ConstFlat tin_type;
typedef typename TTypes<in_type>::ConstScalar tscalar_type;
static constexpr bool has_errors = false;
};
template <typename T>
struct use_bcast_optimization {
static constexpr bool value = false;
};
template <>
struct use_bcast_optimization<float> {
static constexpr bool value = true;
};
template <>
struct use_bcast_optimization<double> {
static constexpr bool value = true;
};
template <typename T>
struct abs : base<T, Eigen::internal::scalar_abs_op<T>,
typename Eigen::internal::scalar_abs_op<T>::result_type> {};
template <typename T>
struct neg : base<T, Eigen::internal::scalar_opposite_op<T>> {};
template <typename T>
struct inverse : base<T, Eigen::internal::scalar_inverse_op<T>> {};
template <typename T>
struct square : base<T, Eigen::internal::scalar_square_op<T>> {};
template <typename T>
struct sqrt : base<T, Eigen::internal::scalar_sqrt_op<T>> {};
template <typename T>
struct rsqrt : base<T, Eigen::internal::scalar_rsqrt_op<T>> {};
template <typename T>
struct exp : base<T, Eigen::internal::scalar_exp_op<T>> {};
template <typename T>
struct expm1 : base<T, Eigen::internal::scalar_expm1_op<T>> {};
template <typename T>
struct log : base<T, Eigen::internal::scalar_log_op<T>> {};
template <typename T>
struct log1p : base<T, Eigen::internal::scalar_log1p_op<T>> {};
template <typename T>
struct sign : base<T, Eigen::internal::scalar_sign_op<T>> {};
template <typename T>
struct sinh : base<T, Eigen::internal::scalar_sinh_op<T>> {};
template <typename T>
struct cosh : base<T, Eigen::internal::scalar_cosh_op<T>> {};
template <typename T>
struct tanh : base<T, Eigen::internal::scalar_tanh_op<T>> {};
template <typename T>
struct asinh : base<T, Eigen::internal::scalar_asinh_op<T>> {};
template <typename T>
struct acosh : base<T, Eigen::internal::scalar_acosh_op<T>> {};
template <typename T>
struct atanh : base<T, Eigen::internal::scalar_atanh_op<T>> {};
template <typename T>
struct lgamma : base<T, Eigen::internal::scalar_lgamma_op<T>> {};
template <typename T>
struct digamma : base<T, Eigen::internal::scalar_digamma_op<T>> {};
template <typename T>
struct erf : base<T, Eigen::internal::scalar_erf_op<T>> {};
template <typename T>
struct erfc : base<T, Eigen::internal::scalar_erfc_op<T>> {};
template <typename T>
struct ndtri : base<T, Eigen::internal::scalar_ndtri_op<T>> {};
template <typename T>
struct erfinv : base<T, Eigen::internal::scalar_erfinv_op<T>> {};
template <typename T>
struct sigmoid : base<T, Eigen::internal::scalar_logistic_op<T>> {};
template <typename T>
struct sin : base<T, Eigen::internal::scalar_sin_op<T>> {};
template <typename T>
struct cos : base<T, Eigen::internal::scalar_cos_op<T>> {};
template <typename T>
struct tan : base<T, Eigen::internal::scalar_tan_op<T>> {};
template <typename T>
struct asin : base<T, Eigen::internal::scalar_asin_op<T>> {};
template <typename T>
struct acos : base<T, Eigen::internal::scalar_acos_op<T>> {};
template <typename T>
struct atan : base<T, Eigen::internal::scalar_atan_op<T>> {};
struct logical_not : base<bool, Eigen::internal::scalar_boolean_not_op<bool>> {
};
template <typename T>
struct invert_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& a) const {
return ~a;
}
};
template <typename T>
struct invert : base<T, invert_op<T>> {};
template <typename T>
struct isinf : base<T, Eigen::internal::scalar_isinf_op<T>, bool> {};
template <typename T>
struct isnan : base<T, Eigen::internal::scalar_isnan_op<T>, bool> {};
template <typename T>
struct isfinite : base<T, Eigen::internal::scalar_isfinite_op<T>, bool> {};
template <typename T>
struct floor : base<T, Eigen::internal::scalar_floor_op<T>> {};
template <typename T>
struct round : base<T, Eigen::internal::scalar_round_half_to_even_op<T>> {};
template <typename T>
struct ceil : base<T, Eigen::internal::scalar_ceil_op<T>> {};
template <typename T>
struct rint : base<T, Eigen::internal::scalar_rint_op<T>> {};
template <typename T>
struct add : base<T, Eigen::internal::scalar_sum_op<T>> {
static constexpr bool use_bcast_optimization = true;
};
template <typename T>
struct sub : base<T, Eigen::internal::scalar_difference_op<T>> {
static constexpr bool use_bcast_optimization = true;
};
template <typename T>
struct mul : base<T, Eigen::internal::scalar_product_op<T>> {
static constexpr bool use_bcast_optimization = true;
};
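// Editor's illustrative sketch, not part of the original header: the wrapper
// structs above only publish typedefs and flags; element-wise kernels consume
// them roughly like this.
inline float FunctorBaseExample() {
  add<float>::func sum_op;  // Eigen::internal::scalar_sum_op<float>
  mul<float>::func mul_op;  // Eigen::internal::scalar_product_op<float>
  static_assert(add<float>::use_bcast_optimization,
                "add<float> opts into the broadcast fast path");
  return mul_op(sum_op(1.0f, 2.0f), 4.0f);  // (1 + 2) * 4 == 12
}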
template <typename T>
struct mul_no_nan : base<T, Eigen::internal::mul_no_nan_op<T>> {};
template <typename T>
struct div : base<T, Eigen::internal::scalar_quotient_op<T>> {};
template <typename T>
struct safe_div : base<T, Eigen::internal::safe_div_or_mod_op<T, Eigen::internal::scalar_quotient_op<T>>> {}; | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
template <typename T>
static Graph* Unary(const string& func, int num, DataType dtype) {
Graph* g = new Graph(OpRegistry::Global());
Tensor data(dtype, TensorShape({64, 64, num / (64 * 64)}));
CHECK_GT(data.NumElements(), 0);
data.flat<T>().setRandom();
test::graph::Unary(g, func, test::graph::Constant(g, data), 0);
return g;
}
const int kRows = 100000;
int RowsAndColsArg(int r, int c) { return r * kRows + c; }
int RowsFromArg(int arg) { return (arg / kRows); }
int ColsFromArg(int arg) { return (arg % kRows); }
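// Editor's note (illustrative, not part of the original benchmark): the arg
// helpers above pack rows and cols into a single benchmark argument, e.g.
//   RowsAndColsArg(512, 2048) == 512 * 100000 + 2048 == 51202048
//   RowsFromArg(51202048) == 512, ColsFromArg(51202048) == 2048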
#define BM_UNARY(DEVICE, FUNC, T, TYPE) \
void BM_##DEVICE##_##FUNC##_##TYPE(::testing::benchmark::State& state) { \
const int num = state.range(0); \
test::Benchmark(#DEVICE, Unary<T>(#FUNC, num, TYPE), \
false) \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(T)); \
} \
BENCHMARK(BM_##DEVICE##_##FUNC##_##TYPE) \
->UseRealTime() \
->Range(4 << 10, 1 << 20);
BM_UNARY(cpu, LeakyRelu, float, DT_FLOAT);
BM_UNARY(cpu, LeakyRelu, bfloat16, DT_BFLOAT16);
BM_UNARY(cpu, Floor, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Floor, float, DT_FLOAT);
#endif
BM_UNARY(cpu, Floor, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Floor, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Conj, std::complex<float>, DT_COMPLEX64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Conj, std::complex<float>, DT_COMPLEX64);
#endif
BM_UNARY(cpu, Conj, std::complex<double>, DT_COMPLEX128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Conj, std::complex<double>, DT_COMPLEX128);
#endif
BM_UNARY(cpu, Rint, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Rint, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Rint, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Rint, float, DT_FLOAT);
#endif
BM_UNARY(cpu, Round, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Round, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Round, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Round, float, DT_FLOAT);
#endif
Graph* BinaryScalar(int num, const string& func) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, TensorShape({}));
rhs.flat<float>().setRandom();
test::graph::Binary(g, func, test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BINARY_SCALAR(DEVICE, FUNC) \
void BM_##DEVICE##_##FUNC##_scalar(::testing::benchmark::State& state) { \
const int num = state.range(0); \
\
test::Benchmark(#DEVICE, BinaryScalar(num, #FUNC), \
false) \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_##FUNC##_scalar) \
->Arg(1 << 12) \
->Arg(1 << 13) \
->Arg(1 << 14) \
->Arg((1 << 15) - (1 << 13)) \
->Arg(1 << 15) \
->Arg((1 << 15) + (1 << 14)) \
->Arg(1 << 16) \
->Arg((1 << 17) - (1 << 15)) \
->Arg(1 << 17) \
->Arg((1 << 17) + (1 << 16)) \
->Arg(1 << 18) \
->Arg(1 << 19) \
->Arg(1 << 20);
BM_BINARY_SCALAR(cpu, Less);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, Less);
#endif
BM_BINARY_SCALAR(cpu, Add);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, Add);
#endif
BM_BINARY_SCALAR(cpu, DivNoNan);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, DivNoNan);
#endif
#undef BM_BINARY_SCALAR
Graph* CubeWithPow3(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, TensorShape({}));
rhs.flat<float>().setConstant(3);
test::graph::Binary(g, "Pow", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
Graph* CubeWithTwoMuls(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
auto* x = test::graph::Constant(g, lhs);
auto* inner = test::graph::Binary(g, "Mul", x, x);
test::graph::Binary(g, "Mul", x, inner);
return g;
}
Graph* CubeWithMulSquare(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
auto* x = test::graph::Constant(g, lhs);
auto* inner = test::graph::Unary(g, "Square", x);
test::graph::Binary(g, "Mul", test::graph::Constant(g, lhs), inner);
return g;
}
#define BM_CUBE(DEVICE, Impl) \
void BM_##DEVICE##_Cube_##Impl(::testing::benchmark::State& state) { \
const int num = state.range(0); \
\
test::Benchmark(#DEVICE, Impl(num), false) \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_Cube_##Impl) \
->UseRealTime() \
->Arg(1 << 12) \
->Arg(1 << 16) \
->Arg(1 << 20);
BM_CUBE(cpu, CubeWithPow3);
BM_CUBE(cpu, CubeWithTwoMuls);
BM_CUBE(cpu, CubeWithMulSquare);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_CUBE(gpu, CubeWithPow3);
BM_CUBE(gpu, CubeWithTwoMuls);
BM_CUBE(gpu, CubeWithMulSquare);
#endif
#undef BM_CUBE
template <class T>
Graph* BiasAdd(int rows, int cols, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(type, TensorShape({rows, cols}));
lhs.template flat<T>().setRandom();
TensorShape rhs_shape;
rhs_shape = TensorShape({cols});
Tensor rhs(type, rhs_shape);
rhs.template flat<T>().setRandom();
test::graph::Binary(g, "BiasAdd", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, R, C) \
void BM_##DEVICE##_##C_TYPE##_BiasAdd_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
test::Benchmark(#DEVICE, BiasAdd<C_TYPE>(rows, cols, TF_TYPE), \
false) \
.Run(state); \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_##DEVICE##_##C_TYPE##_BiasAdd_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BIAS_ADD_ALL(DEVICE, C_TYPE, TF_TYPE) \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 512, 2048); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 512, 4096); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 2048, 512); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 4096, 512);
using Eigen::half;
BM_BIAS_ADD_ALL(cpu, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_ALL(gpu, float, DT_FLOAT);
#endif
BM_BIAS_ADD_ALL(cpu, half, DT_HALF);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_ALL(gpu, half, DT_HALF);
#endif
#undef BM_BIAS_ADD_ALL
#undef BM_BIAS_ADD
template <class T>
Graph* BiasAddGrad(int rows, int cols, int channels, DataType type,
TensorFormat format) {
Graph* g = new Graph(OpRegistry::Global());
TensorShape lhs_shape;
if (format == FORMAT_NCHW) {
lhs_shape = TensorShape({channels, rows, cols});
} else {
lhs_shape = TensorShape({rows, cols, channels});
}
Tensor lhs(type, lhs_shape);
lhs.template flat<T>().setRandom();
Node* n;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BiasAddGrad")
.Attr("data_format", ToString(format))
.Input(test::graph::Constant(g, lhs), 0)
.Finalize(g, &n));
return g;
}
#define BM_BIAS_ADD_GRAD(DEVICE, FMT, C_TYPE, TF_TYPE, R, C, CH) \
void BM_##DEVICE##_##FMT##_##C_TYPE##_BiasAddGrad_R##R##_C##C##_CH##CH( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
const int channels = state.range(1); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark( \
#DEVICE, \
BiasAddGrad<C_TYPE>(rows, cols, channels, TF_TYPE, FORMAT_##FMT), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols * channels; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_##DEVICE##_##FMT##_##C_TYPE##_BiasAddGrad_R##R##_C##C##_CH##CH) \
->ArgPair(RowsAndColsArg(R, C), CH);
#define BM_BIAS_ADD_GRAD_ALL(DEVICE, FORMAT, C_TYPE, TF_TYPE) \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 64, 64, 64); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 512, 512, 4); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 512, 512, 1); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 4096, 4096, 4); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 4096, 4096, 1);
using Eigen::half;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NCHW, float, DT_FLOAT);
BM_BIAS_ADD_GRAD_ALL(gpu, NCHW, half, DT_HALF);
#endif
BM_BIAS_ADD_GRAD_ALL(cpu, NHWC, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NHWC, float, DT_FLOAT);
#endif
BM_BIAS_ADD_GRAD_ALL(cpu, NHWC, half, DT_HALF);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NHWC, half, DT_HALF);
#endif
#undef BM_BIAS_ADD_GRAD_ALL
#undef BM_BIAS_ADD_GRAD
Graph* BcastAdd(int rows, int cols, int dim) {
Graph* g = new Graph(OpRegistry::Global());
TensorShape lhs_shape, rhs_shape;
if (dim == 0) {
lhs_shape = TensorShape({rows, cols});
rhs_shape = TensorShape({rows, 1});
} else if (dim == 1) {
lhs_shape = TensorShape({rows, cols});
rhs_shape = TensorShape({cols});
} else if (dim == 2) {
lhs_shape = TensorShape({rows, 1});
rhs_shape = TensorShape({1, cols});
} else {
lhs_shape = TensorShape({1, cols});
rhs_shape = TensorShape({rows, 1});
}
Tensor lhs(DT_FLOAT, lhs_shape);
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, rhs_shape);
rhs.flat<float>().setRandom();
test::graph::Binary(g, "Add", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BCAST_ADD_ROW(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddRow_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 0), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddRow_R##R##_C##C)->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_ROW_ALL(DEVICE) \
BM_BCAST_ADD_ROW(DEVICE, 512, 2048); \
BM_BCAST_ADD_ROW(DEVICE, 512, 4096); \
BM_BCAST_ADD_ROW(DEVICE, 2048, 512); \
BM_BCAST_ADD_ROW(DEVICE, 4096, 512);
BM_BCAST_ADD_ROW_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_ROW_ALL(gpu);
#endif
#undef BM_BCAST_ADD_ROW_ALL
#undef BM_BCAST_ADD_ROW
#define BM_BCAST_ADD_COL(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCol_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 1), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
\
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCol_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_COL_ALL(DEVICE) \
BM_BCAST_ADD_COL(DEVICE, 512, 2048); \
BM_BCAST_ADD_COL(DEVICE, 512, 4096); \
BM_BCAST_ADD_COL(DEVICE, 2048, 512); \
BM_BCAST_ADD_COL(DEVICE, 4096, 512);
BM_BCAST_ADD_COL_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_COL_ALL(gpu);
#endif
#undef BM_BCAST_ADD_COL_ALL
#undef BM_BCAST_ADD_COL
#define BM_BCAST_ADD_CROSS_RC(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCrossRC_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 2), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
\
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCrossRC_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_CROSS_RC_ALL(DEVICE) \
BM_BCAST_ADD_CROSS_RC(DEVICE, 512, 2048); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 512, 4096); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 2048, 512); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 4096, 512);
BM_BCAST_ADD_CROSS_RC_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_CROSS_RC_ALL(gpu);
#endif
#undef BM_BCAST_ADD_CROSS_RC_ALL
#undef BM_BCAST_ADD_CROSS_RC
#define BM_BCAST_ADD_CROSS_CR(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCrossCR_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 3), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCrossCR_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_CROSS_CR_ALL(DEVICE) \
BM_BCAST_ADD_CROSS_CR(DEVICE, 512, 2048); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 512, 4096); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 2048, 512); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 4096, 512);
BM_BCAST_ADD_CROSS_CR_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_CROSS_CR_ALL(gpu);
#endif
#undef BM_BCAST_ADD_CROSS_CR_ALL
#undef BM_BCAST_ADD_CROSS_CR
}
} |
1,118 | cpp | tensorflow/tensorflow | variable_ops | tensorflow/core/kernels/variable_ops.cc | tensorflow/lite/kernels/variable_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_VARIABLE_OPS_H_
#define TENSORFLOW_CORE_KERNELS_VARIABLE_OPS_H_
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class VariableOp : public OpKernel {
public:
explicit VariableOp(OpKernelConstruction* context);
void Compute(OpKernelContext* ctx) override;
private:
DataType dtype_;
TensorShape shape_;
ContainerInfo cinfo_;
VariableOp(const VariableOp&) = delete;
void operator=(const VariableOp&) = delete;
};
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
string TemporaryVariableName(const string& var_name,
const FrameAndIter& control_frame) {
if (control_frame.frame_id != kIllegalFrameId &&
control_frame.iter_id != kIllegalIterId) {
return strings::StrCat(var_name, "/frame:", control_frame.frame_id,
"/iter:", control_frame.iter_id);
}
return var_name;
}
}
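// Editor's illustrative sketch, not part of the original kernel: the helper
// above makes temporary-variable names unique per control-flow frame and
// iteration. The ids below are made up for the example.
//   TemporaryVariableName("tmp", FrameAndIter(/*frame=*/3, /*iter=*/7))
//       == "tmp/frame:3/iter:7"
//   TemporaryVariableName("tmp", FrameAndIter())  // illegal frame/iter ids
//       == "tmp"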
class LegacyVar : public ResourceBase {
public:
explicit LegacyVar(DataType dtype) : tensor_(dtype) {}
LegacyVar(const LegacyVar&) = delete;
LegacyVar& operator=(const LegacyVar&) = delete;
mutex* mu() { return &mu_; }
Tensor* tensor() { return &tensor_; }
string DebugString() const override {
return strings::StrCat(DataTypeString(tensor_.dtype()), "/",
tensor_.shape().DebugString());
}
private:
mutex mu_;
Tensor tensor_;
~LegacyVar() override {}
};
VariableOp::VariableOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("shape", &shape_));
dtype_ = RemoveRefType(context->output_type(0));
OP_REQUIRES_OK(context, cinfo_.Init(context->resource_manager(), def(),
true ));
}
void VariableOp::Compute(OpKernelContext* ctx) {
auto creator = [this](LegacyVar** var) {
*var = new LegacyVar(dtype_);
(*var)->tensor()->set_shape(shape_);
return absl::OkStatus();
};
LegacyVar* var;
OP_REQUIRES_OK(ctx, cinfo_.resource_manager()->LookupOrCreate<LegacyVar>(
cinfo_.container(), cinfo_.name(), &var, creator));
ctx->set_output_ref(0, var->mu(), var->tensor());
if (ctx->track_allocations() && var->tensor()->IsInitialized()) {
ctx->record_persistent_memory_allocation(var->tensor()->AllocatedBytes());
}
var->Unref();
}
class TemporaryVariableOp : public OpKernel {
public:
explicit TemporaryVariableOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("shape", &shape_));
OP_REQUIRES_OK(context, context->GetAttr("dtype", &dtype_));
OP_REQUIRES_OK(context, context->GetAttr("var_name", &var_name_));
if (var_name_.empty()) var_name_ = name();
}
void Compute(OpKernelContext* context) override {
Status s;
ResourceMgr* rm = context->resource_manager();
OP_REQUIRES(context, rm, errors::Internal("No per-step resource manager."));
auto unique_name = TemporaryVariableName(var_name_, context->frame_iter());
auto* tmp_var = new TmpVar;
OP_REQUIRES(context, tmp_var,
errors::ResourceExhausted("Could not allocate TmpVar."));
tmp_var->name = unique_name;
s = context->allocate_temp(dtype_, shape_, &tmp_var->val);
if (!s.ok()) tmp_var->Unref();
OP_REQUIRES_OK(context, s);
OP_REQUIRES_OK(context,
context->step_container()->Create(rm, unique_name, tmp_var));
context->set_output_ref(0, &tmp_var->mu, &tmp_var->val);
if (context->track_allocations()) {
context->record_persistent_memory_allocation(
tmp_var->val.AllocatedBytes());
}
}
private:
friend class DestroyTemporaryVariableOp;
struct TmpVar : public ResourceBase {
mutex mu;
Tensor val;
string name;
string DebugString() const override { return name; }
~TmpVar() override { VLOG(3) << "TmpVar " << name << " deleted"; }
};
TensorShape shape_;
DataType dtype_;
string var_name_;
};
class DestroyTemporaryVariableOp : public OpKernel {
public:
explicit DestroyTemporaryVariableOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES(context, IsRefType(context->input_type(0)),
errors::InvalidArgument("lhs input needs to be a ref type"));
OP_REQUIRES_OK(context, context->GetAttr("var_name", &var_name_));
OP_REQUIRES(context, !var_name_.empty(),
errors::InvalidArgument("Missing var_name attribute"));
}
void Compute(OpKernelContext* context) override {
CHECK(IsRefType(context->input_dtype(0)));
Tensor tmpvar = context->mutable_input(0, false);
context->set_output(0, tmpvar);
ResourceMgr* rm = context->resource_manager();
OP_REQUIRES(context, rm, errors::Internal("No per-step resource manager."));
auto unique_name = TemporaryVariableName(var_name_, context->frame_iter());
OP_REQUIRES_OK(
context, context->step_container()->Delete<TemporaryVariableOp::TmpVar>(
rm, unique_name));
if (context->track_allocations()) {
context->record_persistent_memory_allocation(
-static_cast<int64_t>(tmpvar.AllocatedBytes()));
}
}
private:
string var_name_;
};
class IsVariableInitializedOp : public OpKernel {
public:
explicit IsVariableInitializedOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input_tensor = context->mutable_input(0, false);
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &output));
auto output_tensor = output->tensor<bool, 0>();
bool result = input_tensor.IsInitialized();
output_tensor() = result;
}
};
REGISTER_KERNEL_BUILDER(Name("Variable").Device(DEVICE_CPU), VariableOp);
REGISTER_KERNEL_BUILDER(Name("VariableV2").Device(DEVICE_CPU), VariableOp);
REGISTER_KERNEL_BUILDER(Name("TemporaryVariable").Device(DEVICE_CPU),
TemporaryVariableOp);
REGISTER_KERNEL_BUILDER(Name("DestroyTemporaryVariable").Device(DEVICE_CPU),
DestroyTemporaryVariableOp);
REGISTER_KERNEL_BUILDER(Name("IsVariableInitialized").Device(DEVICE_CPU),
IsVariableInitializedOp);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Variable").Device(DEVICE_GPU).TypeConstraint<type>("dtype"), \
VariableOp); \
REGISTER_KERNEL_BUILDER( \
Name("VariableV2").Device(DEVICE_GPU).TypeConstraint<type>("dtype"), \
VariableOp); \
REGISTER_KERNEL_BUILDER(Name("TemporaryVariable") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("dtype"), \
TemporaryVariableOp); \
REGISTER_KERNEL_BUILDER(Name("DestroyTemporaryVariable") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T"), \
DestroyTemporaryVariableOp); \
REGISTER_KERNEL_BUILDER(Name("IsVariableInitialized") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("dtype") \
.HostMemory("is_initialized"), \
IsVariableInitializedOp);
TF_CALL_int64(REGISTER_GPU_KERNELS);
TF_CALL_uint32(REGISTER_GPU_KERNELS);
TF_CALL_GPU_ALL_TYPES(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS
#endif
#define REGISTER_DEFAULT_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Variable").Device(DEVICE_DEFAULT).TypeConstraint<type>("dtype"), \
VariableOp); \
REGISTER_KERNEL_BUILDER( \
Name("VariableV2").Device(DEVICE_DEFAULT).TypeConstraint<type>("dtype"), \
VariableOp); \
REGISTER_KERNEL_BUILDER(Name("TemporaryVariable") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("dtype"), \
TemporaryVariableOp); \
REGISTER_KERNEL_BUILDER(Name("DestroyTemporaryVariable") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T"), \
DestroyTemporaryVariableOp); \
REGISTER_KERNEL_BUILDER(Name("IsVariableInitialized") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("dtype") \
.HostMemory("is_initialized"), \
IsVariableInitializedOp);
TF_CALL_int64(REGISTER_DEFAULT_KERNELS);
TF_CALL_uint32(REGISTER_DEFAULT_KERNELS);
TF_CALL_GPU_ALL_TYPES(REGISTER_DEFAULT_KERNELS);
#undef REGISTER_DEFAULT_KERNELS
} | #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace {
void ManyManyVariablesHelper(int threads, int variables,
::testing::benchmark::State& state) {
Graph g(OpRegistry::Global());
std::vector<string> targets;
for (int i = 0; i < variables; ++i) {
Node* v;
TF_CHECK_OK(
NodeBuilder(
g.NewName("VeryVeryLongRealistSoundingVariableName/weights"),
"VariableV2")
.Attr("shape", TensorShape())
.Attr("dtype", DT_FLOAT)
.Finalize(&g, &v));
targets.push_back(v->name());
}
GraphDef gd;
g.ToGraphDef(&gd);
SessionOptions opts;
opts.config.set_inter_op_parallelism_threads(threads);
Session* sess = NewSession(opts);
TF_CHECK_OK(sess->Create(gd));
TF_CHECK_OK(sess->Run({}, {}, targets, nullptr));
for (auto s : state) {
TF_CHECK_OK(sess->Run({}, {}, targets, nullptr));
}
delete sess;
}
void BM_ManyManyVariablesManyThreads(::testing::benchmark::State& state) {
const int threads = state.range(0);
ManyManyVariablesHelper(threads, 1000, state);
}
BENCHMARK(BM_ManyManyVariablesManyThreads)->Arg(50);
}
} |
1,119 | cpp | tensorflow/tensorflow | batch_norm_op | tensorflow/core/kernels/batch_norm_op.cc | tensorflow/core/kernels/batch_norm_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCH_NORM_OP_H_
#define TENSORFLOW_CORE_KERNELS_BATCH_NORM_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct BatchNorm {
void operator()(const Device& d, typename TTypes<T, 4>::ConstTensor input,
typename TTypes<T>::ConstVec mean,
typename TTypes<T>::ConstVec var,
typename TTypes<T>::ConstVec beta,
typename TTypes<T>::ConstVec gamma, T variance_epsilon,
bool scale_after_normalization,
typename TTypes<T, 4>::Tensor output) {
const int depth = mean.dimension(0);
const int rest_size = input.size() / depth;
Eigen::DSizes<int, 2> rest_by_depth(rest_size, depth);
Eigen::IndexList<int, Eigen::type2index<1> > rest_by_one;
rest_by_one.set(0, rest_size);
Eigen::IndexList<Eigen::type2index<1>, int> one_by_depth;
one_by_depth.set(1, depth);
Eigen::IndexList<int, Eigen::type2index<1> > depth_by_one;
depth_by_one.set(0, depth);
if (scale_after_normalization) {
output.reshape(rest_by_depth).device(d) =
(input.reshape(rest_by_depth) -
mean.reshape(one_by_depth).broadcast(rest_by_one)) *
((var + var.constant(variance_epsilon)).rsqrt() * gamma)
.eval()
.reshape(one_by_depth)
.broadcast(rest_by_one) +
beta.reshape(one_by_depth).broadcast(rest_by_one);
} else {
output.reshape(rest_by_depth).device(d) =
(input.reshape(rest_by_depth) -
mean.reshape(one_by_depth).broadcast(rest_by_one)) *
((var + var.constant(variance_epsilon)).rsqrt())
.eval()
.reshape(one_by_depth)
.broadcast(rest_by_one) +
beta.reshape(one_by_depth).broadcast(rest_by_one);
}
}
};
template <typename Device, typename T>
struct BatchNormGrad {
void operator()(const Device& d, typename TTypes<T, 4>::ConstTensor input,
typename TTypes<T>::ConstVec mean,
typename TTypes<T>::ConstVec var,
typename TTypes<T>::ConstVec gamma,
typename TTypes<T, 4>::ConstTensor out_backprop,
T variance_epsilon, bool scale_after_normalization,
typename TTypes<T, 4>::Tensor dx, typename TTypes<T>::Vec dm,
typename TTypes<T>::Vec dv, typename TTypes<T>::Vec db,
typename TTypes<T>::Vec dg, typename TTypes<T>::Vec scratch1,
typename TTypes<T>::Vec scratch2) {
const int depth = mean.dimension(0);
const int rest_size = input.size() / depth;
typedef typename TTypes<T>::ConstVec::Index Index;
Eigen::DSizes<Index, 2> rest_by_depth(rest_size, depth);
Eigen::IndexList<Index, Eigen::type2index<1> > rest_by_one;
rest_by_one.set(0, rest_size);
Eigen::IndexList<Eigen::type2index<1>, Index> one_by_depth;
one_by_depth.set(1, depth);
Eigen::IndexList<Eigen::type2index<0> > reduction_axis;
db.device(d) = out_backprop.reshape(rest_by_depth).sum(reduction_axis);
scratch1.device(d) = (var + var.constant(variance_epsilon)).rsqrt();
scratch2.device(d) = (out_backprop.reshape(rest_by_depth) *
(input.reshape(rest_by_depth) -
mean.reshape(one_by_depth).broadcast(rest_by_one)))
.sum(reduction_axis);
if (scale_after_normalization) {
dx.reshape(rest_by_depth).device(d) =
out_backprop.reshape(rest_by_depth) * ((scratch1 * gamma)
.eval()
.reshape(one_by_depth)
.broadcast(rest_by_one));
dm.device(d) = -db * (scratch1 * gamma).eval();
dg.device(d) = scratch2 * scratch1;
} else {
dx.reshape(rest_by_depth).device(d) =
out_backprop.reshape(rest_by_depth) *
scratch1.reshape(one_by_depth).broadcast(rest_by_one);
dm.device(d) = -db * scratch1;
dg.device(d) = dg.constant(static_cast<T>(0.0));
}
scratch1.device(d) = scratch1 * scratch1.constant(static_cast<T>(-0.5f)) /
(var + var.constant(variance_epsilon));
if (scale_after_normalization) {
dv.device(d) = scratch2 * (scratch1 * gamma).eval();
} else {
dv.device(d) = scratch2 * scratch1;
}
}
};
}
}
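// Editor's illustrative sketch, not part of the original files: a scalar
// reference for the transform the BatchNorm functor above applies to each
// element x of channel c,
//   y = (x - mean[c]) * rsqrt(var[c] + epsilon) * gamma[c] + beta[c]
// (gamma is dropped when scale_after_normalization is false).
#include <cmath>
inline float BatchNormReference(float x, float mean, float var, float beta,
                                float gamma, float epsilon,
                                bool scale_after_normalization) {
  const float inv_std = 1.0f / std::sqrt(var + epsilon);
  const float scale = scale_after_normalization ? inv_std * gamma : inv_std;
  return (x - mean) * scale + beta;
}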
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/batch_norm_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class BatchNormOp : public OpKernel {
public:
explicit BatchNormOp(OpKernelConstruction* context) : OpKernel(context) {
float variance_epsilon;
OP_REQUIRES_OK(context,
context->GetAttr("variance_epsilon", &variance_epsilon));
variance_epsilon_ = T(variance_epsilon);
OP_REQUIRES_OK(context, context->GetAttr("scale_after_normalization",
&scale_after_normalization_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& mean = context->input(1);
const Tensor& var = context->input(2);
const Tensor& beta = context->input(3);
const Tensor& gamma = context->input(4);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
OP_REQUIRES(context, mean.dims() == 1,
errors::InvalidArgument("mean must be 1-dimensional",
mean.shape().DebugString()));
OP_REQUIRES(context, var.dims() == 1,
errors::InvalidArgument("var must be 1-dimensional",
var.shape().DebugString()));
OP_REQUIRES(context, beta.dims() == 1,
errors::InvalidArgument("beta must be 1-dimensional",
beta.shape().DebugString()));
OP_REQUIRES(context, gamma.dims() == 1,
errors::InvalidArgument("gamma must be 1-dimensional",
gamma.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
functor::BatchNorm<Device, T>()(
context->eigen_device<Device>(), input.tensor<T, 4>(), mean.vec<T>(),
var.vec<T>(), beta.vec<T>(), gamma.vec<T>(), variance_epsilon_,
scale_after_normalization_, output->tensor<T, 4>());
}
private:
T variance_epsilon_;
bool scale_after_normalization_;
};
template <typename Device, typename T>
class BatchNormGradOp : public OpKernel {
public:
explicit BatchNormGradOp(OpKernelConstruction* context) : OpKernel(context) {
float variance_epsilon;
OP_REQUIRES_OK(context,
context->GetAttr("variance_epsilon", &variance_epsilon));
variance_epsilon_ = T(variance_epsilon);
OP_REQUIRES_OK(context, context->GetAttr("scale_after_normalization",
&scale_after_normalization_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& mean = context->input(1);
const Tensor& var = context->input(2);
const Tensor& gamma = context->input(3);
const Tensor& out_backprop = context->input(4);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
OP_REQUIRES(context, mean.dims() == 1,
errors::InvalidArgument("mean must be 1-dimensional",
mean.shape().DebugString()));
OP_REQUIRES(context, var.dims() == 1,
errors::InvalidArgument("var must be 1-dimensional",
var.shape().DebugString()));
OP_REQUIRES(context, gamma.dims() == 1,
errors::InvalidArgument("gamma must be 1-dimensional",
gamma.shape().DebugString()));
OP_REQUIRES(context, out_backprop.dims() == 4,
errors::InvalidArgument("out_backprop must be 4-dimensional",
out_backprop.shape().DebugString()));
Tensor* dx = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0, 4}, 0, input.shape(), &dx));
Tensor* dm = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{1}, 1, mean.shape(), &dm));
Tensor* dv = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{2}, 2, var.shape(), &dv));
Tensor* db = nullptr;
if (scale_after_normalization_) {
OP_REQUIRES_OK(context, context->allocate_output(3, mean.shape(), &db));
} else {
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{3}, 3, mean.shape(), &db));
}
Tensor* dg = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(4, gamma.shape(), &dg));
Tensor scratch1;
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<T>::value,
TensorShape({input.dim_size(3)}), &scratch1));
Tensor scratch2;
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<T>::value,
TensorShape({input.dim_size(3)}), &scratch2));
functor::BatchNormGrad<Device, T>()(
context->eigen_device<Device>(), input.tensor<T, 4>(), mean.vec<T>(),
var.vec<T>(), gamma.vec<T>(), out_backprop.tensor<T, 4>(),
variance_epsilon_, scale_after_normalization_, dx->tensor<T, 4>(),
dm->vec<T>(), dv->vec<T>(), db->vec<T>(), dg->vec<T>(),
scratch1.vec<T>(), scratch2.vec<T>());
}
private:
T variance_epsilon_;
bool scale_after_normalization_;
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("BatchNormWithGlobalNormalization") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
BatchNormOp<CPUDevice, T>);
TF_CALL_half(REGISTER_KERNEL);
TF_CALL_float(REGISTER_KERNEL);
TF_CALL_double(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC(T) \
template <> \
void BatchNorm<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, \
typename TTypes<T>::ConstVec mean, typename TTypes<T>::ConstVec var, \
typename TTypes<T>::ConstVec beta, typename TTypes<T>::ConstVec gamma, \
T variance_epsilon, bool scale_after_normalization, \
typename TTypes<T, 4>::Tensor output); \
extern template struct BatchNorm<GPUDevice, T>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPEC(T);
TF_CALL_half(DECLARE_GPU_SPECS);
TF_CALL_float(DECLARE_GPU_SPECS);
#undef DECLARE_GPU_SPEC
}
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("BatchNormWithGlobalNormalization") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T"), \
BatchNormOp<GPUDevice, T>);
TF_CALL_half(REGISTER_GPU_KERNEL);
TF_CALL_float(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
#endif
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("BatchNormWithGlobalNormalizationGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
BatchNormGradOp<CPUDevice, T>);
TF_CALL_half(REGISTER_KERNEL);
TF_CALL_float(REGISTER_KERNEL);
TF_CALL_double(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC(T) \
template <> \
void BatchNormGrad<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, \
typename TTypes<T>::ConstVec mean, typename TTypes<T>::ConstVec var, \
typename TTypes<T>::ConstVec gamma, \
typename TTypes<T, 4>::ConstTensor out_backprop, T variance_epsilon, \
bool scale_after_normalization, typename TTypes<T, 4>::Tensor dx, \
typename TTypes<T>::Vec dm, typename TTypes<T>::Vec dv, \
typename TTypes<T>::Vec db, typename TTypes<T>::Vec dg, \
typename TTypes<T>::Vec scratch1, typename TTypes<T>::Vec scratch2); \
extern template struct BatchNormGrad<GPUDevice, T>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPEC(T);
TF_CALL_half(DECLARE_GPU_SPECS);
TF_CALL_float(DECLARE_GPU_SPECS);
#undef DECLARE_GPU_SPEC
}
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("BatchNormWithGlobalNormalizationGrad") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T"), \
BatchNormGradOp<GPUDevice, T>);
TF_CALL_half(REGISTER_GPU_KERNEL);
TF_CALL_float(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
#endif
} | #include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
template <typename T>
struct BatchNormOpTest : public OpsTestBase {
static constexpr auto TValueType = DataTypeToEnum<T>::value;
void run_me() {
TF_EXPECT_OK(
NodeDefBuilder("batch_norm_op", "BatchNormWithGlobalNormalization")
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Input(FakeInput(TValueType))
.Attr("scale_after_normalization", false)
.Attr("variance_epsilon", 0.001)
.Finalize(node_def()));
TF_EXPECT_OK(InitOpWithGraphVersion(8));
AddInputFromList<T>(TensorShape({1, 1, 6, 2}),
{1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6});
AddInputFromList<T>(TensorShape({2}), {10, 20});
AddInputFromList<T>(TensorShape({2}), {0.25, 0.5});
AddInputFromList<T>(TensorShape({2}), {0.1, 0.6});
AddInputFromList<T>(TensorShape({2}), {0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
double atol = TValueType == DT_FLOAT ? 0.01 : 0.1;
Tensor expected(allocator(), TValueType, TensorShape({1, 1, 6, 2}));
test::FillValues<T>(&expected,
{-17.86f, -22.00f, -15.87f, -20.59f, -13.87f, -19.18f,
-21.86f, -33.31f, -23.85f, -34.72f, -25.85f, -36.13f});
test::ExpectTensorNear<T>(expected, *GetOutput(0), atol);
}
};
TYPED_TEST_SUITE_P(BatchNormOpTest);
TYPED_TEST_P(BatchNormOpTest, Simple) { this->run_me(); }
REGISTER_TYPED_TEST_SUITE_P(BatchNormOpTest, Simple);
using DataTypes = ::testing::Types<float, Eigen::half>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, BatchNormOpTest, DataTypes);
} |
1,120 | cpp | tensorflow/tensorflow | strided_slice_op | tensorflow/core/kernels/strided_slice_op.cc | tensorflow/core/kernels/strided_slice_op_test.cc | #ifndef TENSORFLOW_CORE_UTIL_STRIDED_SLICE_OP_H_
#define TENSORFLOW_CORE_UTIL_STRIDED_SLICE_OP_H_
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
namespace tensorflow {
struct StridedSliceShapeSpec {
int32_t begin_dense_mask;
int32_t end_dense_mask;
int32_t shrink_axis_dense_mask;
absl::InlinedVector<int64_t, 4UL> output_to_sparse_mapping;
absl::InlinedVector<int64_t, 4UL> output_to_processing_mapping;
absl::InlinedVector<int64_t, 4UL> processing_to_sparse_mapping;
};
Status ValidateStridedSliceOp(
const Tensor* begin_tensor, const Tensor* end_tensor,
const Tensor& strides_tensor, const PartialTensorShape& input_shape,
int32_t begin_mask_spec, int32_t end_mask_spec, int32_t ellipsis_mask,
int32_t new_axis_mask, int32_t shrink_axis_mask,
PartialTensorShape* processing_shape, PartialTensorShape* final_shape,
bool* is_identity, bool* is_simple_slice, bool* slice_dim0,
absl::InlinedVector<int64_t, 4UL>* begin,
absl::InlinedVector<int64_t, 4UL>* end,
absl::InlinedVector<int64_t, 4UL>* strides,
StridedSliceShapeSpec* shape_spec = nullptr);
Status ValidateStridedSliceOp(
const Tensor* begin_tensor, const Tensor* end_tensor,
const Tensor& strides_tensor, const PartialTensorShape& input_shape,
int32_t begin_mask_spec, int32_t end_mask_spec, int32_t ellipsis_mask,
int32_t new_axis_mask, int32_t shrink_axis_mask,
TensorShape* processing_shape, TensorShape* final_shape, bool* is_identity,
bool* is_simple_slice, bool* slice_dim0,
absl::InlinedVector<int64_t, 4UL>* begin,
absl::InlinedVector<int64_t, 4UL>* end,
absl::InlinedVector<int64_t, 4UL>* strides,
StridedSliceShapeSpec* shape_spec = nullptr);
class StridedSliceAssignBCast {
public:
using Vec = absl::InlinedVector<int64_t, 4UL>;
StridedSliceAssignBCast(const Vec& input_shape, const Vec& output_shape);
bool RemapDimensions(int64_t num_dims, const Vec& dimension_map);
bool IsValid() const { return valid_; }
bool IsBroadcastingRequired() const { return broadcasting_required_; }
const Vec& reshape() const { return reshape_; }
const Vec& bcast() const { return bcast_; }
const Vec& result_shape() const { return result_shape_; }
private:
bool valid_ = true;
bool broadcasting_required_ = false;
Vec reshape_;
Vec bcast_;
Vec result_shape_;
};
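// Editor's illustrative sketch, not part of the original header: a minimal use
// of StridedSliceAssignBCast, checking whether a value of shape [1, 4] can be
// broadcast into a sliced destination of shape [3, 4].
inline bool StridedSliceAssignBCastExample() {
  StridedSliceAssignBCast::Vec value_shape = {1, 4};
  StridedSliceAssignBCast::Vec dest_shape = {3, 4};
  StridedSliceAssignBCast bcast(value_shape, dest_shape);
  // Expected: IsValid() and IsBroadcastingRequired() both report true, since
  // the leading dimension has to be broadcast from 1 to 3.
  return bcast.IsValid() && bcast.IsBroadcastingRequired();
}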
}
#endif
#include "tensorflow/core/util/strided_slice_op.h"
#include <algorithm>
#include <array>
#include <iterator>
#include <utility>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace {
constexpr int32_t kShrinkAxis = -1, kNewAxis = -2;
struct StridedSliceSparseSpec {
int64_t dims;
int32 num_add_axis_after_ellipsis;
const Tensor* begin_tensor;
const Tensor* end_tensor;
const Tensor& strides_tensor;
const int32 begin_mask, end_mask;
int32 ellipsis_mask;
const int32 new_axis_mask, shrink_axis_mask;
};
struct StridedSliceDenseSpec {
const int64_t dims;
int32 begin_mask;
int32 end_mask;
bool begin_valid;
bool end_valid;
absl::InlinedVector<int64_t, 4UL>& begin;
absl::InlinedVector<int64_t, 4UL>& end;
absl::InlinedVector<int64_t, 4UL>& strides;
absl::InlinedVector<int32, 4UL> final_shape_gather_indices;
absl::InlinedVector<int32, 4UL> final_shape_gather_indices_sparse;
absl::InlinedVector<int32, 4UL> input_shape_gather_indices_sparse;
int32 shrink_axis_mask;
};
}
template <class T>
static Status TF_MUST_USE_RESULT BuildDenseSpec(
const StridedSliceSparseSpec& sparse, StridedSliceDenseSpec* dense) {
if (dense->dims < 0) {
return errors::InvalidArgument("Unexpected negative dense.dims: %d",
dense->dims);
}
if (dense->dims >= 1024) {
return errors::InvalidArgument("Unexpected large dense.dims: %d",
dense->dims);
}
dense->begin.resize(dense->dims);
dense->end.resize(dense->dims);
dense->strides.resize(dense->dims);
dense->input_shape_gather_indices_sparse.resize(dense->dims);
dense->begin_mask = 0;
dense->end_mask = 0;
dense->shrink_axis_mask = 0;
{
int full_index = 0;
const T* const strides_flat = sparse.strides_tensor.vec<T>().data();
dense->begin_valid = sparse.begin_tensor != nullptr;
dense->end_valid = sparse.end_tensor != nullptr;
const T* const begin_flat = sparse.begin_tensor != nullptr
? sparse.begin_tensor->vec<T>().data()
: nullptr;
const T* const end_flat = sparse.end_tensor != nullptr
? sparse.end_tensor->vec<T>().data()
: nullptr;
for (int i = 0; i < sparse.dims; i++) {
if ((1 << i) & sparse.ellipsis_mask) {
int32_t next_index = std::min(dense->dims - (sparse.dims - i) + 1 +
sparse.num_add_axis_after_ellipsis,
dense->dims);
for (; full_index < next_index; full_index++) {
dense->begin[full_index] = dense->end[full_index] = 0;
dense->strides[full_index] = 1;
dense->begin_mask |= (1 << full_index);
dense->end_mask |= (1 << full_index);
dense->final_shape_gather_indices.push_back(full_index);
dense->final_shape_gather_indices_sparse.push_back(-1);
dense->input_shape_gather_indices_sparse[full_index] = i;
}
} else if ((1 << i) & sparse.new_axis_mask) {
dense->final_shape_gather_indices.push_back(kNewAxis);
dense->final_shape_gather_indices_sparse.push_back(-1);
} else {
if (full_index == dense->begin.size()) {
if (dense->dims == 0) {
return errors::InvalidArgument("Attempting to slice scalar input.");
}
return errors::InvalidArgument("Index out of range using input dim ",
full_index, "; input has only ",
dense->dims, " dims");
}
if (begin_flat != nullptr) {
dense->begin[full_index] = internal::SubtleMustCopy<T>(begin_flat[i]);
}
if (end_flat != nullptr) {
dense->end[full_index] = internal::SubtleMustCopy<T>(end_flat[i]);
}
dense->strides[full_index] =
internal::SubtleMustCopy<T>(strides_flat[i]);
if (sparse.begin_mask & (1 << i)) {
dense->begin_mask |= (1 << full_index);
}
if (sparse.end_mask & (1 << i)) {
dense->end_mask |= (1 << full_index);
}
if (sparse.shrink_axis_mask & (1 << i)) {
dense->final_shape_gather_indices.push_back(kShrinkAxis);
dense->final_shape_gather_indices_sparse.push_back(-1);
dense->shrink_axis_mask |= (1 << full_index);
} else {
dense->final_shape_gather_indices.push_back(full_index);
dense->final_shape_gather_indices_sparse.push_back(i);
}
dense->input_shape_gather_indices_sparse[full_index] = i;
full_index++;
}
}
}
return absl::OkStatus();
}
Status ValidateStridedSliceOp(
const Tensor* begin_tensor, const Tensor* end_tensor,
const Tensor& strides_tensor, const PartialTensorShape& input_shape,
int32_t begin_mask_spec, int32_t end_mask_spec, const int32_t ellipsis_mask,
int32_t new_axis_mask, int32_t shrink_axis_mask,
PartialTensorShape* processing_shape, PartialTensorShape* final_shape,
bool* is_identity, bool* is_simple_slice, bool* slice_dim0,
absl::InlinedVector<int64_t, 4UL>* begin,
absl::InlinedVector<int64_t, 4UL>* end,
absl::InlinedVector<int64_t, 4UL>* strides,
StridedSliceShapeSpec* shape_spec) {
if (input_shape.unknown_rank()) {
return errors::InvalidArgument("Unexpected input_shape with unknown rank");
}
const bool begin_is_wrong =
begin_tensor != nullptr &&
!(TensorShapeUtils::IsVector(begin_tensor->shape()) &&
begin_tensor->NumElements() == strides_tensor.NumElements() &&
begin_tensor->NumElements() < 32 );
const bool end_is_wrong =
end_tensor != nullptr &&
!(TensorShapeUtils::IsVector(end_tensor->shape()) &&
end_tensor->NumElements() == strides_tensor.NumElements());
if (begin_is_wrong || end_is_wrong ||
!TensorShapeUtils::IsVector(strides_tensor.shape())) {
if (begin_tensor != nullptr && end_tensor != nullptr) {
return errors::InvalidArgument(
"Expected begin, end, and strides to be 1D equal size tensors, ",
"but got shapes ", begin_tensor->shape().DebugString(), ", ",
end_tensor->shape().DebugString(), ", and ",
strides_tensor.shape().DebugString(), " instead.");
} else {
return errors::InvalidArgument(
"Expected begin, end, and strides to be 1D equal size tensors, ",
"but got shape ", strides_tensor.shape().DebugString(),
" for strides.");
}
}
if (ellipsis_mask && ((ellipsis_mask & (ellipsis_mask - 1)) != 0)) {
return errors::InvalidArgument(
"Multiple ellipses in slice spec not allowed");
}
bool ellipsis_seen = false;
StridedSliceSparseSpec sparse_spec = {strides_tensor.NumElements(),
0,
begin_tensor,
end_tensor,
strides_tensor,
begin_mask_spec,
end_mask_spec,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask};
for (int32_t i = 0; i < sparse_spec.dims; i++) {
if (ellipsis_seen && ((1 << i) & new_axis_mask) != 0) {
sparse_spec.num_add_axis_after_ellipsis++;
}
if ((1 << i) & ellipsis_mask) {
ellipsis_seen = true;
}
}
if (!ellipsis_seen) {
sparse_spec.ellipsis_mask |= (1 << sparse_spec.dims);
sparse_spec.dims++;
}
StridedSliceDenseSpec dense_spec = {input_shape.dims(),
0 ,
0 ,
false ,
false ,
*begin,
*end,
*strides};
if (strides_tensor.dtype() == DT_INT32) {
TF_RETURN_IF_ERROR(BuildDenseSpec<int32>(sparse_spec, &dense_spec));
} else if (strides_tensor.dtype() == DT_INT64) {
TF_RETURN_IF_ERROR(BuildDenseSpec<int64_t>(sparse_spec, &dense_spec));
} else if (strides_tensor.dtype() == DT_INT16) {
TF_RETURN_IF_ERROR(BuildDenseSpec<int16_t>(sparse_spec, &dense_spec));
} else {
LOG(FATAL) << "begin must be either int16, int32 or int64";
}
*is_identity = true;
*slice_dim0 = true;
*is_simple_slice = true;
processing_shape->Clear();
for (int i = 0; i < input_shape.dims(); ++i) {
int64_t& begin_i = (*begin)[i];
int64_t& end_i = (*end)[i];
int64_t& stride_i = (*strides)[i];
int64_t dim_i = input_shape.dim_size(i);
if (stride_i == 0) {
return errors::InvalidArgument("strides[", i, "] must be non-zero");
}
bool shrink_i = (dense_spec.shrink_axis_mask & (1 << i));
if (dim_i == -1) {
processing_shape->AddDim(shrink_i ? 1 : -1);
continue;
}
const std::array<int64_t, 2> masks = {
{dense_spec.begin_mask & (1 << i), dense_spec.end_mask & (1 << i)}};
const std::array<int64_t, 2> valid_range = {
{stride_i > 0 ? 0 : -1, stride_i > 0 ? dim_i : dim_i - 1}};
auto canonical = [stride_i, dim_i, masks, valid_range](int64_t x, int c) {
if (masks[c]) {
return stride_i > 0 ? valid_range[c] : valid_range[(c + 1) & 1];
} else {
int64_t x_fwd =
x < 0 ? dim_i + x : x;
return x_fwd < valid_range[0]
? valid_range[0]
: x_fwd > valid_range[1] ? valid_range[1] : x_fwd;
}
};
if (shrink_i && stride_i <= 0) {
return errors::InvalidArgument(
"only stride 1 allowed on non-range indexing.");
}
(*is_simple_slice) &= stride_i == 1;
const bool begin_and_end_masked =
(dense_spec.begin_mask & (1 << i)) && (dense_spec.end_mask & (1 << i));
if (dense_spec.begin_valid && dense_spec.end_valid) {
if (shrink_i) {
int64_t x_fwd = begin_i < 0 ? dim_i + begin_i : begin_i;
begin_i = x_fwd;
end_i = begin_i + 1;
if (x_fwd < 0 || x_fwd >= dim_i) {
return errors::InvalidArgument(
"slice index ", begin_i, " of dimension ", i, " out of bounds.");
}
} else {
begin_i = canonical(begin_i, 0);
end_i = canonical(end_i, 1);
}
bool take_all_in_dimension =
stride_i == 1 && begin_i == 0 && end_i == dim_i;
(*is_identity) &= take_all_in_dimension;
(*slice_dim0) &= (i == 0 && stride_i == 1) || take_all_in_dimension;
} else {
(*is_identity) &= stride_i == 1 && begin_and_end_masked;
(*slice_dim0) &= (i == 0 && stride_i == 1) || begin_and_end_masked;
}
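    // Determine how many elements this dimension contributes when the
    // interval is statically known.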
int64_t interval_length;
bool known_interval = false;
if (dense_spec.begin_valid && dense_spec.end_valid) {
interval_length = end_i - begin_i;
known_interval = true;
} else if (shrink_i) {
interval_length = 1;
known_interval = true;
} else if (begin_and_end_masked) {
if (dim_i >= 0) {
if (stride_i < 0) {
interval_length = -dim_i;
} else {
interval_length = dim_i;
}
known_interval = true;
}
}
if (known_interval) {
int64_t size_i;
if (interval_length == 0 || ((interval_length < 0) != (stride_i < 0))) {
size_i = 0;
} else {
size_i = interval_length / stride_i +
(interval_length % stride_i != 0 ? 1 : 0);
}
processing_shape->AddDim(size_i);
} else {
processing_shape->AddDim(-1);
}
}
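  // Assemble the final output shape from the processed dimensions, inserting
  // size-1 dimensions for new axes and recording output<->input mappings.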
final_shape->Clear();
if (shape_spec != nullptr) {
shape_spec->output_to_sparse_mapping.clear();
shape_spec->output_to_processing_mapping.clear();
shape_spec->processing_to_sparse_mapping.assign(
dense_spec.input_shape_gather_indices_sparse.begin(),
dense_spec.input_shape_gather_indices_sparse.end());
shape_spec->begin_dense_mask = dense_spec.begin_mask;
shape_spec->end_dense_mask = dense_spec.end_mask;
shape_spec->shrink_axis_dense_mask = dense_spec.shrink_axis_mask;
}
for (int64_t dense_dim = 0;
dense_dim < dense_spec.final_shape_gather_indices.size(); ++dense_dim) {
int64_t gather_index = dense_spec.final_shape_gather_indices[dense_dim];
int64_t sparse_index =
dense_spec.final_shape_gather_indices_sparse[dense_dim];
if (gather_index >= 0) {
final_shape->AddDim(processing_shape->dim_size(gather_index));
if (shape_spec != nullptr) {
shape_spec->output_to_sparse_mapping.push_back(sparse_index);
shape_spec->output_to_processing_mapping.push_back(gather_index);
}
} else if (gather_index == kNewAxis) {
final_shape->AddDim(1);
if (shape_spec != nullptr) {
shape_spec->output_to_sparse_mapping.push_back(-1);
shape_spec->output_to_processing_mapping.push_back(-1);
}
}
}
return absl::OkStatus();
}
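// Fully-defined-shape overload: delegates to the PartialTensorShape version
// above and fails if the resulting shapes are not fully known.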
Status ValidateStridedSliceOp(
const Tensor* begin_tensor, const Tensor* end_tensor,
const Tensor& strides_tensor, const PartialTensorShape& input_shape,
int32_t begin_mask_spec, int32_t end_mask_spec, const int32_t ellipsis_mask,
int32_t new_axis_mask, int32_t shrink_axis_mask,
TensorShape* processing_shape, TensorShape* final_shape, bool* is_identity,
bool* is_simple_slice, bool* slice_dim0,
absl::InlinedVector<int64_t, 4UL>* begin,
absl::InlinedVector<int64_t, 4UL>* end,
absl::InlinedVector<int64_t, 4UL>* strides,
StridedSliceShapeSpec* shape_spec) {
PartialTensorShape partial_processing_shape, partial_final_shape;
TF_RETURN_IF_ERROR(ValidateStridedSliceOp(
begin_tensor, end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&partial_processing_shape, &partial_final_shape, is_identity,
is_simple_slice, slice_dim0, begin, end, strides, shape_spec));
if (!partial_processing_shape.AsTensorShape(processing_shape) ||
!partial_final_shape.AsTensorShape(final_shape)) {
return errors::Internal("ValidateStridedSliceOp returned partial shapes ",
partial_processing_shape.DebugString(), " and ",
partial_final_shape.DebugString());
}
return absl::OkStatus();
}
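// Determines how `input_shape` can be reshaped and broadcast to
// `output_shape`; the broadcast is invalid if extra leading input dimensions
// are not 1 or if any remaining dimension cannot be broadcast from 1.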
StridedSliceAssignBCast::StridedSliceAssignBCast(
const StridedSliceAssignBCast::Vec& input_shape,
const StridedSliceAssignBCast::Vec& output_shape)
: valid_(true),
broadcasting_required_(false),
reshape_(output_shape.size()),
bcast_(output_shape.size()),
result_shape_(output_shape) {
size_t input_start = 0;
size_t prepend_size = 0;
if (output_shape.size() < input_shape.size()) {
input_start = input_shape.size() - output_shape.size();
for (size_t i = 0; i < input_start; ++i) {
if (input_shape[i] != 1) {
valid_ = false;
return;
}
}
} else {
prepend_size = output_shape.size() - input_shape.size();
}
std::fill_n(reshape_.begin(), prepend_size, 1);
std::copy(input_shape.begin() + input_start, input_shape.end(),
reshape_.begin() + prepend_size);
for (size_t i = 0; i < output_shape.size(); ++i) {
if (reshape_[i] == output_shape[i]) {
bcast_[i] = 1;
} else if (reshape_[i] == 1) {
bcast_[i] = output_shape[i];
broadcasting_required_ = true;
} else {
valid_ = false;
return;
}
}
}
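// Scatters the reshape/broadcast/result dimensions into a layout with
// `num_dims` dimensions as described by `dimension_map`; entries of -1 drop
// the corresponding dimension. Fails if the map size does not match the
// current result shape or a target index is out of range.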
bool StridedSliceAssignBCast::RemapDimensions(
int64_t num_dims, const StridedSliceAssignBCast::Vec& dimension_map) {
if (dimension_map.size() != result_shape_.size()) {
return false;
}
for (size_t i = 0; i < dimension_map.size(); ++i) {
int64_t dim = dimension_map[i];
if (dim >= num_dims) {
return false;
}
}
Vec old_reshape = std::move(reshape_);
Vec old_bcast = std::move(bcast_);
Vec old_result_shape = std::move(result_shape_);
reshape_ = Vec(num_dims);
bcast_ = Vec(num_dims);
result_shape_ = Vec(num_dims);
std::fill_n(reshape_.begin(), num_dims, 1);
std::fill_n(bcast_.begin(), num_dims, 1);
std::fill_n(result_shape_.begin(), num_dims, 1);
for (size_t i = 0; i < dimension_map.size(); ++i) {
int64_t dim = dimension_map[i];
if (dim >= 0) {
reshape_[dim] = old_reshape[i];
bcast_[dim] = old_bcast[i];
result_shape_[dim] = old_result_shape[i];
}
}
return true;
}
} | #include "tensorflow/core/util/strided_slice_op.h"
#include <algorithm>
#include <ostream>
#include <tuple>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::testing::PrintToString;
using Vec = typename StridedSliceAssignBCast::Vec;
struct BroadcastPair {
Vec from;
Vec to;
friend std::ostream& operator<<(std::ostream& os, const BroadcastPair& pair) {
return os << strings::StrCat("BroadcastPair{", PrintToString(pair.from),
"->", PrintToString(pair.to), "}");
}
};
struct BroadcastRemap {
int64_t dims;
Vec map;
friend std::ostream& operator<<(std::ostream& os,
const BroadcastRemap& remap) {
return os << strings::StrCat("BroadcastRemap{", remap.dims, ", ",
PrintToString(remap.map), "}");
}
};
int64_t NumberOfElements(const Vec& shape) {
int64_t number_of_elements = 1;
for (int64_t elem : shape) {
number_of_elements *= elem;
}
return number_of_elements;
}
MATCHER_P2(Broadcasts, input_shape, output_shape,
strings::StrCat("broadcasts ", PrintToString(input_shape), " to ",
PrintToString(output_shape))) {
const size_t size = input_shape.size();
for (size_t i = 0; i < size; ++i) {
if (!((arg[i] == 1 && input_shape[i] == output_shape[i]) ||
(arg[i] == output_shape[i] && input_shape[i] == 1))) {
return false;
}
}
return true;
}
MATCHER_P(HasSuffix, suffix, "") {
const size_t offset = arg.size() - suffix.size();
for (size_t i = 0; i < suffix.size(); ++i) {
if (suffix[i] != arg[i + offset]) {
return false;
}
}
return true;
}
MATCHER_P(HasSameNumberOfElementsAs, other, "") {
return NumberOfElements(arg) == NumberOfElements(other);
}
TEST(StridedSliceAssignBCastTest, BroadcastingToSameRankWorks) {
const BroadcastPair test_pairs[] = {
{Vec{1}, Vec{5}},
{Vec{1, 1}, Vec{4, 5}},
{Vec{1, 5}, Vec{4, 5}},
{Vec{4, 1}, Vec{4, 5}},
{Vec{1, 1, 1}, Vec{2, 4, 5}},
{Vec{1, 1, 5}, Vec{2, 4, 5}},
{Vec{1, 4, 5}, Vec{2, 4, 5}},
{Vec{2, 1, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 1}, Vec{2, 4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_TRUE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_EQ(bcast.reshape(), test_pair.from);
EXPECT_THAT(bcast.bcast(), Broadcasts(test_pair.from, test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, BroadcastingToLargerRankWorks) {
const BroadcastPair test_pairs[] = {
{Vec{}, Vec{2, 4, 5}},
{Vec{1}, Vec{2, 4, 5}},
{Vec{5}, Vec{2, 4, 5}},
{Vec{1, 1}, Vec{2, 4, 5}},
{Vec{1, 5}, Vec{2, 4, 5}},
{Vec{4, 1}, Vec{2, 4, 5}},
{Vec{4, 5}, Vec{2, 4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_TRUE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(bcast.reshape(), HasSuffix(test_pair.from));
EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from));
EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, BroadcastingToSmallerRankWorks) {
const BroadcastPair test_pairs[] = {
{Vec{1, 1}, Vec{5}},
{Vec{1, 1, 5}, Vec{4, 5}},
{Vec{1, 4, 1}, Vec{4, 5}},
{Vec{1, 1, 1, 5}, Vec{4, 5}},
{Vec{1, 1, 4, 1}, Vec{4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_TRUE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(test_pair.from, HasSuffix(bcast.reshape()));
EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from));
EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, ReshapeOnlyWorks) {
const BroadcastPair test_pairs[] = {
{Vec{}, Vec{1, 1}},
{Vec{5}, Vec{5}},
{Vec{5}, Vec{1, 5}},
{Vec{1, 1}, Vec{}},
{Vec{1, 5}, Vec{5}},
{Vec{2, 4, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 5}, Vec{1, 1, 1, 2, 4, 5}},
{Vec{1, 1, 1, 2, 4, 5}, Vec{2, 4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_FALSE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from));
EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, InvalidBroadcastFails) {
const BroadcastPair test_pairs[] = {
{Vec{5}, Vec{1}},
{Vec{3}, Vec{4, 5}},
{Vec{4}, Vec{4, 5}},
{Vec{5}, Vec{}},
{Vec{3, 5}, Vec{4, 5}},
{Vec{4, 3}, Vec{4, 5}},
{Vec{5, 5}, Vec{1, 5}},
{Vec{2, 4}, Vec{2, 4, 5}},
{Vec{4, 3}, Vec{2, 4, 5}},
{Vec{3, 5}, Vec{2, 4, 5}},
{Vec{3, 5}, Vec{5}},
{Vec{3, 5}, Vec{}},
{Vec{3, 4, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 5}, Vec{1, 4, 5}},
{Vec{2, 3, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 5}, Vec{2, 4, 5, 2}},
{Vec{2, 4, 5}, Vec{2, 4, 5, 1}},
{Vec{2, 4, 5}, Vec{2, 4, 1, 5}},
{Vec{2, 4, 5}, Vec{4, 5}},
{Vec{2, 4, 5}, Vec{2, 4}},
{Vec{1, 4, 5}, Vec{4, 1}},
{Vec{1, 4, 5}, Vec{5}},
{Vec{1, 4, 5}, Vec{}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_FALSE(bcast.IsValid()) << test_pair;
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsToItselfWorks) {
const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = {
{BroadcastPair{Vec{}, Vec{}},
BroadcastRemap{0, Vec{}}},
{BroadcastPair{Vec{4, 5}, Vec{4, 5}},
BroadcastRemap{2, Vec{0, 1}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 1, 2}}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = test_input.first;
const BroadcastRemap& test_remap = test_input.second;
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsRemovingAxesWorks) {
const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = {
{BroadcastPair{Vec{2, 1, 4, 1, 5}, Vec{2, 1, 4, 1, 5}},
BroadcastRemap{3, Vec{0, -1, 1, -1, 2}}, Vec{2, 4, 5}},
{BroadcastPair{Vec{1, 4, 1}, Vec{1, 4, 1}},
BroadcastRemap{1, Vec{-1, 0, -1}}, Vec{4}},
{BroadcastPair{Vec{1, 1, 1}, Vec{1, 1, 1}},
BroadcastRemap{0, Vec{-1, -1, -1}}, Vec{}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = std::get<0>(test_input);
const BroadcastRemap& test_remap = std::get<1>(test_input);
const Vec& expected_result_shape = std::get<2>(test_input);
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), expected_result_shape);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsAddingAxesWorks) {
const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = {
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{5, Vec{0, 2, 4}}, Vec{2, 1, 4, 1, 5}},
{BroadcastPair{Vec{4, 5}, Vec{4, 5}},
BroadcastRemap{4, Vec{1, 2}}, Vec{1, 4, 5, 1}},
{BroadcastPair{Vec{}, Vec{}},
BroadcastRemap{3, Vec{}}, Vec{1, 1, 1}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = std::get<0>(test_input);
const BroadcastRemap& test_remap = std::get<1>(test_input);
const Vec& expected_result_shape = std::get<2>(test_input);
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), expected_result_shape);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsAddingAndRemovingAxesWorks) {
const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = {
{BroadcastPair{Vec{1}, Vec{1}},
BroadcastRemap{1, Vec{-1}}, Vec{1}},
{BroadcastPair{Vec{1}, Vec{1}},
BroadcastRemap{3, Vec{-1}}, Vec{1, 1, 1}},
{BroadcastPair{Vec{1, 5}, Vec{1, 5}},
BroadcastRemap{3, Vec{-1, 1}}, Vec{1, 5, 1}},
{BroadcastPair{Vec{1, 5}, Vec{2, 1, 4, 1, 5}},
BroadcastRemap{4, Vec{0, -1, 1, -1, 3}},
Vec{2, 4, 1, 5}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = std::get<0>(test_input);
const BroadcastRemap& test_remap = std::get<1>(test_input);
const Vec& expected_result_shape = std::get<2>(test_input);
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), expected_result_shape);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsInvalidSizeFails) {
const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = {
{BroadcastPair{Vec{}, Vec{}},
BroadcastRemap{0, Vec{-1}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 1, -1, 2}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 2}}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = test_input.first;
const BroadcastRemap& test_remap = test_input.second;
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_FALSE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsOutOfBoundsFails) {
const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = {
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 1, 3}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{2, Vec{0, 1, 2}}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = test_input.first;
const BroadcastRemap& test_remap = test_input.second;
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_FALSE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
}
}
using IntVector = absl::InlinedVector<int64_t, 4UL>;
TensorShape AsTensorShape(absl::Span<const int64_t> dim_sizes) {
TensorShape out;
TF_CHECK_OK(TensorShape::BuildTensorShape(dim_sizes, &out));
return out;
}
TEST(ValidateStridedSliceOpTest, BasicStride) {
Tensor begin_tensor = test::AsTensor<int32_t>({1, 1});
Tensor end_tensor = test::AsTensor<int32_t>({7, 7});
Tensor strides_tensor = test::AsTensor<int32_t>({2, 2});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x1;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({5, 4}));
EXPECT_EQ(final_shape, AsTensorShape({5, 4}));
EXPECT_FALSE(is_identity);
EXPECT_FALSE(is_simple_slice);
EXPECT_FALSE(slice_dim0);
EXPECT_EQ(begin, (IntVector{1, 0}));
EXPECT_EQ(end, (IntVector{10, 7}));
EXPECT_EQ(strides, (IntVector{2, 2}));
}
TEST(ValidateStridedSliceOpTest, NegativeBeginEnd) {
Tensor begin_tensor = test::AsTensor<int32_t>({-9, -20});
Tensor end_tensor = test::AsTensor<int32_t>({-3, -3});
Tensor strides_tensor = test::AsTensor<int32_t>({2, 2});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({3, 4}));
EXPECT_EQ(final_shape, AsTensorShape({3, 4}));
EXPECT_EQ(begin, (IntVector{1, 0}));
EXPECT_EQ(end, (IntVector{7, 7}));
}
TEST(ValidateStridedSliceOpTest, EmptyOutputDim) {
Tensor begin_tensor = test::AsTensor<int32_t>({1, 1});
Tensor end_tensor = test::AsTensor<int32_t>({7, 1});
Tensor strides_tensor = test::AsTensor<int32_t>({2, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({3, 0}));
EXPECT_EQ(final_shape, AsTensorShape({3, 0}));
}
TEST(ValidateStridedSliceOpTest, ZeroStrideFails) {
Tensor begin_tensor = test::AsTensor<int32_t>({1, 1});
Tensor end_tensor = test::AsTensor<int32_t>({7, 7});
Tensor strides_tensor = test::AsTensor<int32_t>({0, 2});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x1;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("strides.* must be non-zero")));
}
TEST(ValidateStridedSliceOpTest, ShrinkAxis) {
Tensor begin_tensor = test::AsTensor<int16_t>({0, 1, 0});
Tensor end_tensor = test::AsTensor<int16_t>({3, 1, 5});
Tensor strides_tensor = test::AsTensor<int16_t>({1, 1, 1});
TensorShape input_shape = AsTensorShape({3, 4, 5});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x2;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x2;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(final_shape, AsTensorShape({3, 5}));
}
TEST(ValidateStridedSliceOpTest, ShrinkSliceOutOfBoundsFails) {
Tensor begin_tensor = test::AsTensor<int16_t>({0, 7, 0});
Tensor end_tensor = test::AsTensor<int16_t>({3, 7, 5});
Tensor strides_tensor = test::AsTensor<int16_t>({1, 1, 1});
TensorShape input_shape = AsTensorShape({3, 4, 5});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x2;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x2;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("slice index .* out of bounds")));
}
TEST(ValidateStridedSliceOpTest, ShrinkAxisNegativeStrideFails) {
Tensor begin_tensor = test::AsTensor<int16_t>({0, 1, 0});
Tensor end_tensor = test::AsTensor<int16_t>({3, 2, 5});
Tensor strides_tensor = test::AsTensor<int16_t>({1, -1, 1});
TensorShape input_shape = AsTensorShape({3, 4, 5});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x2;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x2;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("only stride 1 allowed")));
}
TEST(ValidateStridedSliceOpTest, NewAxis) {
Tensor begin_tensor = test::AsTensor<int64_t>({0, 0});
Tensor end_tensor = test::AsTensor<int64_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int64_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({10, 10}));
EXPECT_EQ(final_shape, AsTensorShape({10, 1, 10}));
}
TEST(ValidateStridedSliceOpTest, Ellipsis) {
Tensor begin_tensor = test::AsTensor<int32_t>({0, 0});
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({10, 10}));
EXPECT_EQ(final_shape, AsTensorShape({10, 10, 1}));
}
TEST(ValidateStridedSliceOpTest, MultipleEllipsisFails) {
Tensor begin_tensor = test::AsTensor<int32_t>({0, 0});
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x3;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
"Multiple ellipses in slice spec not allowed"));
}
TEST(ValidateStridedSliceOpTest, WrongBeginTensorFails) {
Tensor begin_tensor = test::AsTensor<int32_t>({0});
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected .* equal size tensors")));
}
TEST(ValidateStridedSliceOpTest, WrongStridesTensorWithNullBeginFails) {
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
nullptr, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected .* equal size tensors")));
}
TEST(ValidateStridedSliceOpTest, NullBeginEndWithShrinkAxis) {
Tensor strides_tensor = test::AsTensor<int32_t>({2, -2, 1});
TensorShape input_shape = AsTensorShape({10, 10, 1});
int32_t begin_mask_spec = 0x3;
int32_t end_mask_spec = 0x3;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x4;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
nullptr, nullptr, strides_tensor,
input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({5, 5, 1}));
EXPECT_EQ(final_shape, AsTensorShape({5, 5}));
EXPECT_EQ(strides, (IntVector{2, -2, 1}));
}
TEST(ValidateStridedSliceOpTest, UnknownInputRankFails) {
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
PartialTensorShape input_shape;
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
nullptr, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("unknown rank")));
}
TEST(ValidateStridedSliceOpTest, PartialInputShape) {
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
PartialTensorShape input_shape;
TF_CHECK_OK(
PartialTensorShape::BuildPartialTensorShape({10, -1}, &input_shape));
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
PartialTensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
nullptr, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec));
}
}
} |
1,121 | cpp | tensorflow/tensorflow | matmul_op | tensorflow/compiler/tf2xla/kernels/matmul_op.cc | tensorflow/core/kernels/matmul_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MATMUL_OP_H_
#define TENSORFLOW_CORE_KERNELS_MATMUL_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/hash/hash.h"
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
#include "xla/tsl/framework/contraction/eigen_contraction_kernel.h"
#endif
namespace tensorflow {
namespace functor {
template <typename T>
struct MatMulTypes {
typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Aligned>
out_type;
typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>,
Eigen::Aligned>
in_type;
};
template <typename Device, typename In0, typename In1, typename Out,
typename DimPair>
void MatMul(const Device& d, Out out, In0 in0, In1 in1,
const DimPair& dim_pair) {
out.device(d) = in0.contract(in1, dim_pair);
}
template <typename Device, typename T>
struct MatMulFunctor {
void operator()(
const Device& d, typename MatMulTypes<T>::out_type out,
typename MatMulTypes<T>::in_type in0,
typename MatMulTypes<T>::in_type in1,
const Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1>& dim_pair);
};
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
typedef Eigen::GpuDevice GPUDevice;
#endif
}
#endif
#include <array>
#include <optional>
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/lib/matrix.h"
#include "xla/client/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
constexpr std::array<DataType, 10> kMatmulTypes = {
{DT_HALF, DT_BFLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128,
DT_INT32, DT_INT64, DT_INT16, DT_INT8}};
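// Lowers MatMul (and SparseMatMul, ignoring sparsity hints) to xla::BatchDot,
// honoring the transpose attributes and using HIGHEST precision when TF32
// execution is disabled.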
class MatMulOp : public XlaOpKernel {
public:
explicit MatMulOp(OpKernelConstruction* ctx, bool is_sparse = false)
: XlaOpKernel(ctx),
is_sparse_(is_sparse),
grad_a_(false),
grad_b_(false) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_a", &transpose_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_b", &transpose_b_));
if (!is_sparse) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad_a", &grad_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad_b", &grad_b_));
}
if (is_sparse) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("Ta", &a_type_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Tb", &b_type_));
bool dummy_is_sparse;
OP_REQUIRES_OK(ctx, ctx->GetAttr("a_is_sparse", &dummy_is_sparse));
OP_REQUIRES_OK(ctx, ctx->GetAttr("b_is_sparse", &dummy_is_sparse));
}
}
~MatMulOp() override = default;
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape a_shape = ctx->InputShape(0);
const TensorShape b_shape = ctx->InputShape(1);
OP_REQUIRES(ctx, a_shape.dims() == b_shape.dims(),
errors::InvalidArgument("In[0] and In[1] has different ndims: ",
a_shape.DebugString(), " vs. ",
b_shape.DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsMatrix(a_shape),
errors::InvalidArgument("In[0] is not a matrix. Instead it has shape ",
a_shape.DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsMatrix(b_shape),
errors::InvalidArgument("In[1] is not a matrix. Instead it has shape ",
b_shape.DebugString()));
int first_index = transpose_a_ ? 0 : 1;
int second_index = transpose_b_ ? 1 : 0;
OP_REQUIRES(ctx,
a_shape.dim_size(first_index) == b_shape.dim_size(second_index),
errors::InvalidArgument(
"Matrix size-incompatible: In[0]: ", a_shape.DebugString(),
", In[1]: ", b_shape.DebugString()));
xla::XlaOp a = ctx->Input(0);
xla::XlaOp b = ctx->Input(1);
if (is_sparse_) {
if (a_type_ == DT_BFLOAT16) {
a = xla::ConvertElementType(a, xla::F32);
}
if (b_type_ == DT_BFLOAT16) {
b = xla::ConvertElementType(b, xla::F32);
}
}
xla::PrecisionConfig::Precision precision =
tsl::tensor_float_32_execution_enabled()
? xla::PrecisionConfig::DEFAULT
: xla::PrecisionConfig::HIGHEST;
ctx->SetOutput(0, xla::BatchDot(a, transpose_a_, b, transpose_b_, precision,
std::nullopt, grad_a_, grad_b_));
}
private:
bool is_sparse_;
bool transpose_a_;
bool transpose_b_;
bool grad_a_;
bool grad_b_;
DataType a_type_;
DataType b_type_;
};
REGISTER_XLA_OP(Name("MatMul").TypeConstraint("T", kMatmulTypes), MatMulOp);
class SparseMatMulOp : public MatMulOp {
public:
explicit SparseMatMulOp(OpKernelConstruction* ctx) : MatMulOp(ctx, true) {}
~SparseMatMulOp() override = default;
};
REGISTER_XLA_OP(Name("SparseMatMul"), SparseMatMulOp);
}
} | #include <functional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/platform/status.h"
#if TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace tensorflow {
namespace {
template <typename T>
class FusedMatMulOpTest : public OpsTestBase {
protected:
static constexpr auto kTValueType = DataTypeToEnum<T>::value;
using BiasAddGraphRunner =
std::function<bool(const Tensor& lhs_data, const Tensor& rhs_data,
const Tensor& bias_data, Tensor* out)>;
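  // Runs the graph rooted at `root` (plus an optional extra fetch node) in a
  // fresh session with grappler rewrites disabled, placing all nodes on GPU
  // when allowed and available; optionally reports the Run status instead of
  // asserting on it.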
void RunAndFetch(const tensorflow::Scope& root, const string& fetch,
Tensor* output, bool allow_gpu_device,
const NodeDef* fetch_node = nullptr,
absl::Status* last_status = nullptr) {
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
if (fetch_node) {
*graph.add_node() = *fetch_node;
}
tensorflow::SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
tensorflow::RewriterConfig* cfg =
session_options.config.mutable_graph_options()
->mutable_rewrite_options();
cfg->set_constant_folding(tensorflow::RewriterConfig::OFF);
cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF);
cfg->set_remapping(tensorflow::RewriterConfig::OFF);
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
std::vector<DeviceAttributes> available_devices;
TF_ASSERT_OK(session->ListDevices(&available_devices))
<< "Failed to get available session devices";
const bool has_gpu_device =
absl::c_any_of(available_devices, [](const DeviceAttributes& device) {
return device.device_type() == DEVICE_GPU;
});
const bool place_all_on_gpu = allow_gpu_device && has_gpu_device;
const string device = place_all_on_gpu ? "/device:GPU:0" : "/device:CPU:0";
for (NodeDef& mutable_node : *graph.mutable_node()) {
mutable_node.set_device(device);
}
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
auto res = session->Run({}, {fetch}, {}, &unfused_tensors);
if (last_status != nullptr) {
*last_status = res;
} else {
TF_ASSERT_OK(res);
}
if (!unfused_tensors.empty()) {
*output = unfused_tensors[0];
}
}
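  // Reference graph: MatMul followed by BiasAdd.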
void RunMatMulWithBias(const Tensor& lhs_data, const Tensor& rhs_data,
const Tensor& bias_data, bool transpose_a,
bool transpose_b, Tensor* output,
bool allow_gpu_device = false) {
Scope root = tensorflow::Scope::NewRootScope();
ops::MatMul matmul = ops::MatMul(
root.WithOpName("matmul"),
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)),
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data)),
ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), matmul,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
RunAndFetch(root, "with_bias", output, allow_gpu_device);
}
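  // Reference graph: MatMul + BiasAdd followed by the requested activation
  // (falls back to Identity for unsupported activations).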
void RunMatMulWithBiasAndActivation(
const Tensor& lhs_data, const Tensor& rhs_data, const Tensor& bias_data,
bool transpose_a, bool transpose_b, const string& activation_type,
Tensor* output, bool allow_gpu_device = false) {
Scope root = tensorflow::Scope::NewRootScope();
ops::MatMul matmul = ops::MatMul(
root.WithOpName("matmul"),
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)),
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data)),
ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), matmul,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
if (activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "GeluExact") {
VLOG(0) << "ERROR: GeluExact is yet not available!!";
ops::Identity(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Sigmoid") {
ops::Sigmoid(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Tanh") {
ops::Tanh(root.WithOpName("with_activation"), with_bias);
} else {
ops::Identity(root.WithOpName("with_activation"), with_bias);
}
RunAndFetch(root, "with_activation", output, allow_gpu_device);
}
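  // Builds and runs a _FusedMatMul node directly; the test is skipped when
  // the kernel reports "No algorithm worked!".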
void RunFusedMatMulOp(const Tensor& lhs_data, const Tensor& rhs_data,
const std::vector<Tensor>& args_data,
const std::vector<string>& fused_ops, bool transpose_a,
bool transpose_b, Tensor* output,
bool allow_gpu_device = false,
bool* test_skipped = nullptr) {
Scope root = tensorflow::Scope::NewRootScope();
DataType dtype = DataTypeToEnum<T>::v();
int num_args = static_cast<int>(args_data.size());
Output lhs =
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data));
Output rhs =
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data));
std::vector<NodeDefBuilder::NodeOut> args;
for (int i = 0; i < num_args; ++i) {
Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)),
Input::Initializer(args_data[i]));
args.emplace_back(arg.name(), 0, dtype);
}
NodeDef fused_matmul;
TF_EXPECT_OK(NodeDefBuilder("fused_matmul", "_FusedMatMul")
.Input({lhs.name(), 0, dtype})
.Input({rhs.name(), 0, dtype})
.Input(args)
.Attr("num_args", num_args)
.Attr("T", dtype)
.Attr("fused_ops", fused_ops)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Finalize(&fused_matmul));
absl::Status last_status;
RunAndFetch(root, fused_matmul.name(), output, allow_gpu_device,
&fused_matmul, &last_status);
std::string what = "No algorithm worked!";
bool skip = absl::StrContains(last_status.message(), what);
if (test_skipped != nullptr) {
*test_skipped = skip;
}
if (skip) {
GTEST_SKIP() << what;
} else {
TF_ASSERT_OK(last_status);
}
}
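  // Generates random LHS/RHS/bias inputs, runs the reference and fused
  // graphs, and checks that their outputs match within a tolerance (looser
  // for fp16).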
void VerifyBiasAddTensorsNear(int m, int k, int n, bool transpose_a,
bool transpose_b,
const BiasAddGraphRunner& run_default,
const BiasAddGraphRunner& run_fused) {
DataType dtype = DataTypeToEnum<T>::v();
Tensor lhs(dtype, {transpose_a ? k : m, transpose_a ? m : k});
lhs.flat<T>() = lhs.flat<T>().setRandom();
Tensor rhs(dtype, {transpose_b ? n : k, transpose_b ? k : n});
rhs.flat<T>() = rhs.flat<T>().setRandom();
rhs.flat<T>() -= rhs.flat<T>().constant(static_cast<T>(0.5f));
const int bias_size = n;
Tensor bias(dtype, {bias_size});
bias.flat<T>() = bias.flat<T>().setRandom();
bias.flat<T>() += bias.flat<T>().constant(static_cast<T>(0.5f));
Tensor matmul;
Tensor fused_matmul;
run_default(lhs, rhs, bias, &matmul);
bool skipped = run_fused(lhs, rhs, bias, &fused_matmul);
if (!skipped) {
ASSERT_EQ(matmul.dtype(), fused_matmul.dtype());
ASSERT_EQ(matmul.shape(), fused_matmul.shape());
double atol = this->kTValueType == DT_HALF ? 1e-3 : 1e-5;
double rtol = this->kTValueType == DT_HALF ? 1e-3 : -1.0;
test::ExpectClose(matmul, fused_matmul, atol, rtol);
}
}
void VerifyMatMulWithBias(int m, int k, int n, bool transpose_a,
bool transpose_b) {
VLOG(2) << "=== VerifyMatMulWithBias (" << m << ", " << k << ", " << n
<< ", " << (int)transpose_a << ", " << (int)transpose_b << ") ===";
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunMatMulWithBias(input_data, filter_data, bias_data, transpose_a,
transpose_b, out, true);
return false;
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
bool skipped = false;
RunFusedMatMulOp(input_data, filter_data, {bias_data}, {"BiasAdd"},
transpose_a, transpose_b, out,
true, &skipped);
return skipped;
};
VerifyBiasAddTensorsNear(m, k, n, transpose_a, transpose_b, run_default,
run_fused);
}
void VerifyConv2DWithBiasAndActivation(int m, int k, int n, bool transpose_a,
bool transpose_b,
const string& activation) {
bool use_gpu_device =
activation == "Relu" || (this->kTValueType == DT_HALF);
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunMatMulWithBiasAndActivation(input_data, filter_data, bias_data,
transpose_a, transpose_b, activation,
out, use_gpu_device);
return false;
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
bool skipped = false;
RunFusedMatMulOp(input_data, filter_data, {bias_data},
{"BiasAdd", activation}, transpose_a, transpose_b,
out, use_gpu_device, &skipped);
return skipped;
};
VerifyBiasAddTensorsNear(m, k, n, transpose_a, transpose_b, run_default,
run_fused);
}
};
template <typename T>
class FusedMatMulWithBiasOpTest : public FusedMatMulOpTest<T> {};
TYPED_TEST_SUITE_P(FusedMatMulWithBiasOpTest);
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x128x64) {
this->VerifyMatMulWithBias(256, 128, 64, false, false);
this->VerifyMatMulWithBias(256, 128, 64, true, false);
this->VerifyMatMulWithBias(256, 128, 64, false, true);
this->VerifyMatMulWithBias(256, 128, 64, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x256) {
this->VerifyMatMulWithBias(1, 256, 256, false, false);
this->VerifyMatMulWithBias(1, 256, 256, true, false);
this->VerifyMatMulWithBias(1, 256, 256, false, true);
this->VerifyMatMulWithBias(1, 256, 256, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x256x1) {
this->VerifyMatMulWithBias(256, 256, 1, false, false);
this->VerifyMatMulWithBias(256, 256, 1, true, false);
this->VerifyMatMulWithBias(256, 256, 1, false, true);
this->VerifyMatMulWithBias(256, 256, 1, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x1) {
this->VerifyMatMulWithBias(1, 256, 1, false, false);
}
static auto GetActivations(DataType dtype) {
switch (dtype) {
case DT_HALF:
return std::vector{ "Tanh", "Sigmoid"};
default:
return std::vector{"Relu", "Relu6", "Elu", "LeakyRelu"};
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x128x64WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, false, false,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, true, false,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, false, true,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, true, true,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x256WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(1, 256, 256, false, false,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x256x1WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(256, 256, 1, false, false,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x1WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(1, 256, 1, false, false,
activation);
}
}
REGISTER_TYPED_TEST_SUITE_P(FusedMatMulWithBiasOpTest,
MatMul256x128x64,
MatMul1x256x256,
MatMul256x256x1,
MatMul1x256x1,
MatMul256x128x64WithActivation,
MatMul1x256x256WithActivation,
MatMul256x256x1WithActivation,
MatMul1x256x1WithActivation);
using FusedBiasAddDataTypes = ::testing::Types<float, Eigen::half>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedMatMulWithBiasOpTest,
FusedBiasAddDataTypes);
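// Benchmark helpers: build a small graph containing a single MatMul over
// random constant inputs.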
template <typename T>
static Graph* Matmul(int m, int k, int n, bool transpose_a, bool transpose_b,
DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, transpose_a ? TensorShape({k, m}) : TensorShape({m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, transpose_b ? TensorShape({n, k}) : TensorShape({k, n}));
in1.flat<T>().setRandom();
test::graph::Matmul(g, test::graph::Constant(g, in0),
test::graph::Constant(g, in1), transpose_a, transpose_b);
return g;
}
#define BM_MatmulDev(M, K, N, TA, TB, T, TFTYPE, DEVICE) \
static void BM_Matmul##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Matmul<T>(M, K, N, TA, TB, TFTYPE)).Run(state); \
state.SetItemsProcessed(state.iterations() * M * K * N * 2); \
} \
BENCHMARK(BM_Matmul##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE) \
->MeasureProcessCPUTime();
#ifdef GOOGLE_CUDA
#define BM_Matmul(M, K, N, TA, TB) \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, cpu); \
BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, cpu); \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, gpu); \
  BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, gpu);
#else
#define BM_Matmul(M, K, N, TA, TB) \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, cpu); \
BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, cpu);
#endif
BM_Matmul(1, 512, 512, false, false);
BM_Matmul(8, 512, 512, false, false);
BM_Matmul(16, 512, 512, false, false);
BM_Matmul(128, 512, 512, false, false);
BM_Matmul(1, 1024, 1024, false, false);
BM_Matmul(8, 1024, 1024, false, false);
BM_Matmul(16, 1024, 1024, false, false);
BM_Matmul(128, 1024, 1024, false, false);
BM_Matmul(4096, 4096, 4096, false, false);
BM_Matmul(1, 1024, 1024, false, true);
BM_Matmul(8, 1024, 1024, false, true);
BM_Matmul(16, 1024, 1024, false, true);
BM_Matmul(128, 1024, 1024, false, true);
BM_Matmul(1, 200, 10000, false, false);
BM_Matmul(8, 200, 10000, false, false);
BM_Matmul(20, 200, 10000, false, false);
BM_Matmul(20, 200, 20000, false, false);
BM_Matmul(1, 10000, 200, false, true);
BM_Matmul(1, 10000, 200, false, false);
BM_Matmul(8, 10000, 200, false, true);
BM_Matmul(20, 10000, 200, false, true);
BM_Matmul(20, 20000, 200, false, true);
BM_Matmul(50, 50, 1, false, false);
BM_Matmul(50, 50, 1, true, false);
BM_Matmul(50, 50, 1, false, true);
BM_Matmul(50, 50, 1, true, true);
BM_Matmul(500, 500, 1, false, false);
BM_Matmul(500, 500, 1, true, false);
BM_Matmul(500, 500, 1, false, true);
BM_Matmul(500, 500, 1, true, true);
BM_Matmul(2000, 2000, 1, false, false);
BM_Matmul(2000, 2000, 1, true, false);
BM_Matmul(2000, 2000, 1, false, true);
BM_Matmul(2000, 2000, 1, true, true);
BM_Matmul(1, 50, 50, false, false);
BM_Matmul(1, 50, 50, true, false);
BM_Matmul(1, 50, 50, false, true);
BM_Matmul(1, 50, 50, true, true);
BM_Matmul(1, 500, 500, false, false);
BM_Matmul(1, 500, 500, true, false);
BM_Matmul(1, 500, 500, false, true);
BM_Matmul(1, 500, 500, true, true);
BM_Matmul(1, 2000, 2000, false, false);
BM_Matmul(1, 2000, 2000, true, false);
BM_Matmul(1, 2000, 2000, false, true);
BM_Matmul(1, 2000, 2000, true, true);
BM_Matmul(50, 1, 50, false, false);
BM_Matmul(50, 1, 50, true, false);
BM_Matmul(50, 1, 50, false, true);
BM_Matmul(50, 1, 50, true, true);
BM_Matmul(500, 1, 500, false, false);
BM_Matmul(500, 1, 500, true, false);
BM_Matmul(500, 1, 500, false, true);
BM_Matmul(500, 1, 500, true, true);
BM_Matmul(2000, 1, 2000, false, false);
BM_Matmul(2000, 1, 2000, true, false);
BM_Matmul(2000, 1, 2000, false, true);
BM_Matmul(2000, 1, 2000, true, true);
Node* BroadcastTo(Graph* g, Node* input, Node* shape) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
.Input(input)
.Input(shape)
.Finalize(g, &ret));
return ret;
}
Node* BatchMatmulV2(Graph* g, Node* in0, Node* in1, bool adj_x, bool adj_y) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BatchMatMulV2")
.Input(in0)
.Input(in1)
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(g, &ret));
return ret;
}
template <typename T>
static Graph* BatchMatmul(int b, int m, int k, int n, bool adjoint_a,
bool adjoint_b, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, adjoint_a ? TensorShape({b, k, m}) : TensorShape({b, m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, adjoint_b ? TensorShape({b, n, k}) : TensorShape({b, k, n}));
in1.flat<T>().setRandom();
test::graph::BatchMatmul(g, test::graph::Constant(g, in0),
test::graph::Constant(g, in1), adjoint_a, adjoint_b);
return g;
}
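// Compares implicit batch broadcasting inside BatchMatMulV2 against an
// explicit BroadcastTo of both operands followed by the matmul.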
template <typename T>
static Graph* BatchMatmulWithBroadcast(int b0, int b1, int m, int k, int n,
bool manual_broadcast, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, TensorShape({b0, m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, TensorShape({b1, k, n}));
in1.flat<T>().setRandom();
Tensor broadcasted_in0_shape(DT_INT64, TensorShape({3}));
Tensor broadcasted_in1_shape(DT_INT64, TensorShape({3}));
Node* in0_node = nullptr;
Node* in1_node = nullptr;
if (manual_broadcast) {
for (int i = 0; i < 3; ++i) {
auto vec0 = broadcasted_in0_shape.vec<int64_t>();
auto vec1 = broadcasted_in1_shape.vec<int64_t>();
vec0(i) = (i == 0 ? std::max(b0, b1) : in0.shape().dim_size(i));
vec1(i) = (i == 0 ? std::max(b0, b1) : in1.shape().dim_size(i));
}
in0_node = BroadcastTo(g, test::graph::Constant(g, in0),
test::graph::Constant(g, broadcasted_in0_shape));
in1_node = BroadcastTo(g, test::graph::Constant(g, in1),
test::graph::Constant(g, broadcasted_in1_shape));
} else {
in0_node = test::graph::Constant(g, in0);
in1_node = test::graph::Constant(g, in1);
}
BatchMatmulV2(g, in0_node, in1_node, false, false);
return g;
}
#define BM_BatchMatmulDev(B, M, K, N, TA, TB, T, TFTYPE, DEVICE) \
static void \
BM_BatchMatmul##_##B##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, BatchMatmul<T>(B, M, K, N, TA, TB, TFTYPE), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * M * K * N * 2); \
} \
BENCHMARK( \
BM_BatchMatmul##_##B##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE) \
->MeasureProcessCPUTime();
#define BM_BatchMatmul(B, M, K, N, TA, TB) \
BM_BatchMatmulDev(B, M, K, N, TA, TB, float, DT_FLOAT, cpu);
#define BM_BatchMatmulBCastDev(B1, B2, M, K, N, MB, T, TT, D) \
static void \
BM_BatchMatmulBCast##_##B1##_##B2##_##M##_##K##_##N##_##MB##_##TT##_##D( \
::testing::benchmark::State& state) { \
test::Benchmark(#D, BatchMatmulWithBroadcast<T>(B1, B2, M, K, N, MB, TT), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * std::max(B1, B2) * M * K * \
N * 2); \
} \
BENCHMARK( \
BM_BatchMatmulBCast##_##B1##_##B2##_##M##_##K##_##N##_##MB##_##TT##_##D) \
->MeasureProcessCPUTime();
#define BM_BatchMatmulBCast(B1, B2, M, K, N, MB) \
BM_BatchMatmulBCastDev(B1, B2, M, K, N, MB, float, DT_FLOAT, cpu);
BM_BatchMatmulBCast(1, 128, 1, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 1, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 1, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 1, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 128, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 128, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 128, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 128, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 512, 512, 512, true);
BM_BatchMatmulBCast(1, 128, 512, 512, 512, false);
BM_BatchMatmulBCast(128, 1, 512, 512, 512, true);
BM_BatchMatmulBCast(128, 1, 512, 512, 512, false);
BM_BatchMatmulBCast(1, 128, 1024, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 1024, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 1024, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 1024, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 10000, 200, 1, true);
BM_BatchMatmulBCast(1, 128, 10000, 200, 1, false);
BM_BatchMatmulBCast(128, 1, 10000, 200, 1, true);
BM_BatchMatmulBCast(128, 1, 10000, 200, 1, false);
BM_BatchMatmulBCast(1, 128, 1, 200, 10000, true);
BM_BatchMatmulBCast(1, 128, 1, 200, 10000, false);
BM_BatchMatmulBCast(128, 1, 1, 200, 10000, true);
BM_BatchMatmulBCast(128, 1, 1, 200, 10000, false);
BM_BatchMatmul(1, 1, 1024, 1024, false, false);
BM_BatchMatmul(1, 8, 1024, 1024, false, false);
BM_BatchMatmul(1, 16, 1024, 1024, false, false);
BM_BatchMatmul(1, 128, 1024, 1024, false, false);
BM_BatchMatmul(2, 1, 1024, 1024, false, false);
BM_BatchMatmul(2, 8, 1024, 1024, false, false);
BM_BatchMatmul(2, 16, 1024, 1024, false, false);
BM_BatchMatmul(2, 128, 1024, 1024, false, false);
BM_BatchMatmul(8, 1, 1024, 1024, false, false);
BM_BatchMatmul(8, 8, 1024, 1024, false, false);
BM_BatchMatmul(8, 16, 1024, 1024, false, false);
BM_BatchMatmul(8, 128, 1024, 1024, false, false);
BM_BatchMatmul(32, 1, 1024, 1024, false, false);
BM_BatchMatmul(32, 8, 1024, 1024, false, false);
BM_BatchMatmul(32, 16, 1024, 1024, false, false);
BM_BatchMatmul(32, 128, 1024, 1024, false, false);
BM_BatchMatmul(1, 32, 32, 32, false, false);
BM_BatchMatmul(1, 128, 128, 128, false, false);
BM_BatchMatmul(1, 256, 256, 256, false, false);
BM_BatchMatmul(1, 1024, 1024, 1024, false, false);
BM_BatchMatmul(1, 2048, 2048, 2048, false, false);
BM_BatchMatmul(2, 32, 32, 32, false, false);
BM_BatchMatmul(2, 128, 128, 128, false, false);
BM_BatchMatmul(2, 256, 256, 256, false, false);
BM_BatchMatmul(2, 1024, 1024, 1024, false, false);
BM_BatchMatmul(2, 2048, 2048, 2048, false, false);
BM_BatchMatmul(4, 32, 32, 32, false, false);
BM_BatchMatmul(4, 128, 128, 128, false, false);
BM_BatchMatmul(4, 256, 256, 256, false, false);
BM_BatchMatmul(4, 1024, 1024, 1024, false, false);
BM_BatchMatmul(4, 2048, 2048, 2048, false, false);
BM_BatchMatmul(8, 32, 32, 32, false, false);
BM_BatchMatmul(8, 128, 128, 128, false, false);
BM_BatchMatmul(8, 256, 256, 256, false, false);
BM_BatchMatmul(8, 1024, 1024, 1024, false, false);
BM_BatchMatmul(8, 2048, 2048, 2048, false, false);
BM_BatchMatmul(32, 32, 32, 32, false, false);
BM_BatchMatmul(32, 128, 128, 128, false, false);
BM_BatchMatmul(32, 256, 256, 256, false, false);
BM_BatchMatmul(32, 1024, 1024, 1024, false, false);
BM_BatchMatmul(32, 2048, 2048, 2048, false, false);
BM_BatchMatmul(1, 10000, 200, 1, false, false);
BM_BatchMatmul(8, 10000, 200, 1, false, false);
BM_BatchMatmul(32, 10000, 200, 1, false, false);
BM_BatchMatmul(1, 10000, 200, 1, true, false);
BM_BatchMatmul(8, 10000, 200, 1, true, false);
BM_BatchMatmul(32, 10000, 200, 1, true, false);
BM_BatchMatmul(1, 10000, 200, 1, false, true);
BM_BatchMatmul(8, 10000, 200, 1, false, true);
BM_BatchMatmul(32, 10000, 200, 1, false, true);
BM_BatchMatmul(1, 10000, 200, 1, true, true);
BM_BatchMatmul(8, 10000, 200, 1, true, true);
BM_BatchMatmul(32, 10000, 200, 1, true, true);
BM_BatchMatmul(1, 1, 200, 10000, false, false);
BM_BatchMatmul(8, 1, 200, 10000, false, false);
BM_BatchMatmul(32, 1, 200, 10000, false, false);
BM_BatchMatmul(1, 1, 200, 10000, true, false);
BM_BatchMatmul(8, 1, 200, 10000, true, false);
BM_BatchMatmul(32, 1, 200, 10000, true, false);
BM_BatchMatmul(1, 1, 200, 10000, false, true);
BM_BatchMatmul(8, 1, 200, 10000, false, true);
BM_BatchMatmul(32, 1, 200, 10000, false, true);
BM_BatchMatmul(1, 1, 200, 10000, true, true);
BM_BatchMatmul(8, 1, 200, 10000, true, true);
BM_BatchMatmul(32, 1, 200, 10000, true, true);
} |
1,122 | cpp | tensorflow/tensorflow | rng_converter_utils | tensorflow/compiler/tf2xla/kernels/rng_converter_utils.cc | tensorflow/compiler/tf2xla/kernels/rng_converter_utils_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_KERNELS_RNG_CONVERTER_UTILS_H_
#define TENSORFLOW_COMPILER_TF2XLA_KERNELS_RNG_CONVERTER_UTILS_H_
#include "absl/strings/string_view.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
Algorithm ToTensorflowAlgorithm(xla::RandomAlgorithm alg);
xla::RandomAlgorithm DefaultRngAlgForDeviceType(
absl::string_view device_type_string);
}
#endif
#include "tensorflow/compiler/tf2xla/kernels/rng_converter_utils.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
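// Maps an XLA RandomAlgorithm onto the corresponding TensorFlow RNG Algorithm.
// RNG_DEFAULT (and any unrecognized value) falls back to auto-selection.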
Algorithm ToTensorflowAlgorithm(xla::RandomAlgorithm alg) {
switch (alg) {
case xla::RandomAlgorithm::RNG_PHILOX:
return RNG_ALG_PHILOX;
case xla::RandomAlgorithm::RNG_THREE_FRY:
return RNG_ALG_THREEFRY;
case xla::RandomAlgorithm::RNG_DEFAULT:
default:
return RNG_ALG_AUTO_SELECT;
}
}
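// CPU and GPU XLA devices default to Philox; other backends are left on
// RNG_DEFAULT, letting XLA choose the algorithm itself.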
xla::RandomAlgorithm DefaultRngAlgForDeviceType(
absl::string_view device_type_string) {
if (device_type_string == DEVICE_GPU_XLA_JIT ||
device_type_string == DEVICE_CPU_XLA_JIT) {
return xla::RandomAlgorithm::RNG_PHILOX;
} else {
return xla::RandomAlgorithm::RNG_DEFAULT;
}
}
} | #include "tensorflow/compiler/tf2xla/kernels/rng_converter_utils.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
namespace {
TEST(RngConverterUtilsTest, DefaultRngForCPUEqualsGPU) {
EXPECT_EQ(DefaultRngAlgForDeviceType(DEVICE_CPU_XLA_JIT),
DefaultRngAlgForDeviceType(DEVICE_GPU_XLA_JIT));
}
TEST(RngConverterUtilsTest, UnknownDeviceIsDefault) {
EXPECT_EQ(DefaultRngAlgForDeviceType("UNKNOWN DEVICE"),
xla::RandomAlgorithm::RNG_DEFAULT);
}
TEST(RngConverterUtilsTest, TensorflowAutoSelects) {
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_DEFAULT),
tensorflow::RNG_ALG_AUTO_SELECT);
}
TEST(RngConverterUtilsTest, ToTensorflow) {
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_PHILOX),
tensorflow::RNG_ALG_PHILOX);
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_THREE_FRY),
tensorflow::RNG_ALG_THREEFRY);
}
}
} |
1,123 | cpp | tensorflow/tensorflow | reduction_ops | tensorflow/compiler/tf2xla/kernels/reduction_ops.cc | tensorflow/core/kernels/reduction_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_REDUCTION_OPS_H_
#define TENSORFLOW_CORE_KERNELS_REDUCTION_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Reducer>
struct ReducerTraits {
enum { IsScalarIdentity = true };
};
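// MeanReducer and EuclideanNormReducer are not real Eigen reducers (they only
// provide initialize()); they act as tags that select the specialized
// ReduceEigenImpl implementations below.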
template <typename Scalar>
struct MeanReducer {
Scalar initialize() const { return Scalar(0); }
};
template <typename Scalar>
struct EuclideanNormReducer {
Scalar initialize() const { return Scalar(0); }
};
template <typename Scalar>
struct ReducerTraits<EuclideanNormReducer<Scalar>> {
enum { IsScalarIdentity = false };
};
template <typename Device, typename OUT_T, typename IN_T,
typename ReductionAxes, typename Reducer>
struct ReduceEigenImpl {
void operator()(const Device& d, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes, const Reducer& reducer) {
out.device(d) = in.reduce(reduction_axes, reducer);
}
};
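// Specializations that accumulate in a wider intermediate type to reduce
// rounding error (and, for narrow integer types, avoid overflow), then cast
// the result back to the output type.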
#define CASTING_SPECIALIZATION(Reducer, ScalarType, IntermediateType) \
template <typename Device, typename OUT_T, typename IN_T, \
typename ReductionAxes> \
struct ReduceEigenImpl<Device, OUT_T, IN_T, ReductionAxes, \
Reducer<ScalarType>> { \
void operator()(const Device& d, OUT_T out, IN_T in, \
const ReductionAxes& reduction_axes, \
const Reducer<ScalarType>& reducer) { \
static_assert(std::is_same<ScalarType, typename OUT_T::Scalar>::value, \
""); \
Reducer<IntermediateType> intermediate_reducer; \
auto in_as_intermediate = in.template cast<IntermediateType>(); \
out.device(d) = \
in_as_intermediate.reduce(reduction_axes, intermediate_reducer) \
.template cast<ScalarType>(); \
} \
};
CASTING_SPECIALIZATION(Eigen::internal::SumReducer, bfloat16, float);
#undef CASTING_SPECIALIZATION
template <typename Device, typename OUT_T, typename IN_T,
typename ReductionAxes, typename Scalar>
struct ReduceEigenImpl<Device, OUT_T, IN_T, ReductionAxes,
functor::MeanReducer<Scalar>> {
void operator()(const Device& d, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes,
const functor::MeanReducer<Scalar>& reducer) {
static_assert(std::is_same<Scalar, typename OUT_T::Scalar>::value, "");
Eigen::internal::SumReducer<Scalar> sum_reducer;
out.device(d) = in.reduce(reduction_axes, sum_reducer) /
static_cast<Scalar>(in.size() / out.size());
}
};
#define CASTING_SPECIALIZATION(ScalarType, IntermediateType) \
template <typename Device, typename OUT_T, typename IN_T, \
typename ReductionAxes> \
struct ReduceEigenImpl<Device, OUT_T, IN_T, ReductionAxes, \
functor::MeanReducer<ScalarType>> { \
void operator()(const Device& d, OUT_T out, IN_T in, \
const ReductionAxes& reduction_axes, \
const functor::MeanReducer<ScalarType>& reducer) { \
static_assert(std::is_same<ScalarType, typename OUT_T::Scalar>::value, \
""); \
Eigen::internal::SumReducer<IntermediateType> sum_reducer; \
out.device(d) = (in.template cast<IntermediateType>().reduce( \
reduction_axes, sum_reducer) / \
static_cast<IntermediateType>(in.size() / out.size())) \
.template cast<ScalarType>(); \
} \
}
CASTING_SPECIALIZATION(uint8, uint64);
CASTING_SPECIALIZATION(uint16, uint64);
CASTING_SPECIALIZATION(uint32, uint64);
CASTING_SPECIALIZATION(int8, int64_t);
CASTING_SPECIALIZATION(int16, int64_t);
CASTING_SPECIALIZATION(int32, int64_t);
CASTING_SPECIALIZATION(bfloat16, float);
#undef CASTING_SPECIALIZATION
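// Euclidean norm: reduce the sum of x * conj(x), then take the square root.
// The bfloat16 variant below accumulates in float for accuracy.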
template <typename Device, typename OUT_T, typename IN_T,
typename ReductionAxes, typename Scalar>
struct ReduceEigenImpl<Device, OUT_T, IN_T, ReductionAxes,
functor::EuclideanNormReducer<Scalar>> {
void operator()(const Device& d, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes,
const functor::EuclideanNormReducer<Scalar>& reducer) {
static_assert(std::is_same<Scalar, typename OUT_T::Scalar>::value, "");
Eigen::internal::SumReducer<Scalar> sum_reducer;
out.device(d) =
(in * in.conjugate()).reduce(reduction_axes, sum_reducer).sqrt();
}
};
template <typename Device, typename OUT_T, typename IN_T,
typename ReductionAxes>
struct ReduceEigenImpl<Device, OUT_T, IN_T, ReductionAxes,
functor::EuclideanNormReducer<bfloat16>> {
void operator()(const Device& d, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes,
const functor::EuclideanNormReducer<bfloat16>& reducer) {
static_assert(std::is_same<bfloat16, typename OUT_T::Scalar>::value, "");
Eigen::internal::SumReducer<float> sum_reducer;
auto in_as_float = in.template cast<float>();
out.device(d) = (in_as_float * in_as_float.conjugate())
.reduce(reduction_axes, sum_reducer)
.sqrt()
.template cast<bfloat16>();
}
};
template <typename Reducer>
struct Identity {
static auto identity(const Reducer& reducer)
-> decltype(reducer.initialize()) {
return reducer.initialize();
}
};
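// Identity<Reducer> supplies the value used to fill outputs when reducing over
// zero elements; for floating-point means that value is overridden to NaN
// below, since the mean of an empty set is undefined.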
#define FIX_MEAN_IDENTITY(T) \
template <> \
struct Identity<functor::MeanReducer<T>> { \
static T identity(const functor::MeanReducer<T>&) { \
return Eigen::NumTraits<T>::quiet_NaN(); \
} \
};
FIX_MEAN_IDENTITY(Eigen::half)
FIX_MEAN_IDENTITY(Eigen::bfloat16)
FIX_MEAN_IDENTITY(float)
FIX_MEAN_IDENTITY(double)
#undef FIX_MEAN_IDENTITY
template <typename Device, typename OUT_T, typename Reducer>
void FillIdentityEigenImpl(const Device& d, OUT_T out, const Reducer& reducer) {
MaybeWith32BitIndexing<Device>(
[&](auto out32) {
out32.device(d) = out32.constant(Identity<Reducer>::identity(reducer));
},
out);
}
template <typename Device, typename Reducer>
struct ReduceFunctor {
template <typename OUT_T, typename IN_T, typename ReductionAxes>
static void Reduce(OpKernelContext* ctx, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes,
const Reducer& reducer);
template <typename OUT_T>
static void FillIdentity(const Device& d, OUT_T out, const Reducer& reducer);
};
}
}
#endif
#include "tensorflow/compiler/tf2xla/kernels/reduction_ops.h"
#include <cstdint>
#include <limits>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace {
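// Each XLA reduction kernel supplies its initial value and a scalar combiner
// that XlaReductionOp emits into the reduction computation.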
class SumOp : public XlaReductionOp {
public:
explicit SumOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Add(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Sum").CompileTimeConstantInput("reduction_indices"),
SumOp);
class ProdOp : public XlaReductionOp {
public:
explicit ProdOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::One(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Mul(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Prod").CompileTimeConstantInput("reduction_indices"),
ProdOp);
class MinOp : public XlaReductionOp {
public:
explicit MinOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MaxValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Min(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Min").CompileTimeConstantInput("reduction_indices"),
MinOp);
class MaxOp : public XlaReductionOp {
public:
explicit MaxOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {
OP_REQUIRES_OK(ctx, PrimitiveTypeCheck(xla_reduction_type_));
}
static Status PrimitiveTypeCheck(xla::PrimitiveType xla_reduction_type) {
if (xla_reduction_type == xla::C64 || xla_reduction_type == xla::C128 ||
xla_reduction_type == xla::TUPLE ||
xla_reduction_type == xla::OPAQUE_TYPE) {
return errors::InvalidArgument(
"Unsupported PrimitiveType in MaxOp: '",
xla::PrimitiveType_Name(xla_reduction_type), "'");
} else {
return absl::OkStatus();
}
}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MinValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Max(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Max").CompileTimeConstantInput("reduction_indices"),
MaxOp);
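// Mean is lowered as a sum followed by division by the product of the reduced
// dimension sizes. GetDimensionSize handles dynamic dimensions, and the
// running divisor is applied early and reset whenever its product with the
// next dimension size would overflow int32.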
class MeanOp : public XlaReductionOp {
public:
explicit MeanOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Add(scalar_lhs, scalar_rhs);
}
xla::XlaOp BuildFinalizer(
xla::XlaBuilder* builder, const xla::XlaOp& input,
const xla::XlaOp& reduce_output,
const std::vector<int64_t>& dimensions_to_reduce) override {
if (dimensions_to_reduce.empty()) {
return reduce_output;
}
xla::XlaOp result = reduce_output;
xla::Shape bounded_shape = builder->GetShape(input).value();
int64_t divisor_value = bounded_shape.dimensions(dimensions_to_reduce[0]);
auto divisor = xla::GetDimensionSize(input, dimensions_to_reduce[0]);
for (int i = 1; i < dimensions_to_reduce.size(); i++) {
int64_t size_value = bounded_shape.dimensions(dimensions_to_reduce[i]);
auto size = xla::GetDimensionSize(input, dimensions_to_reduce[i]);
if (size_value * divisor_value > std::numeric_limits<int32_t>::max()) {
result = result / xla::ConvertElementType(divisor, xla_reduction_type_);
divisor_value = size_value;
divisor = size;
} else {
divisor = xla::Mul(divisor, size);
divisor_value = size_value * divisor_value;
}
}
divisor = xla::ConvertElementType(divisor, xla_reduction_type_);
return XlaHelpers::ConvertElementType(result / divisor, input_type(0));
}
};
REGISTER_XLA_OP(Name("Mean").CompileTimeConstantInput("reduction_indices"),
MeanOp);
class AllOp : public XlaReductionOp {
public:
explicit AllOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::ConstantR0<bool>(builder, true);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::And(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("All").CompileTimeConstantInput("reduction_indices"),
AllOp);
class AnyOp : public XlaReductionOp {
public:
explicit AnyOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::ConstantR0<bool>(builder, false);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Or(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Any").CompileTimeConstantInput("reduction_indices"),
AnyOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename T>
static Graph* ToScalar(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<T>::value, TensorShape({num_x, num_y}));
data.flat<T>().setRandom();
Tensor axes(DT_INT32, TensorShape({2}));
axes.flat<int32>()(0) = 0;
axes.flat<int32>()(1) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ColReduce(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 0;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* RowReduce(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDYReduce(const string& reduce, int num_y, int num_z) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({4, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDXZReduce(const string& reduce, int num_y, int num_z) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({4, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({2}));
axes.flat<int32>()(0) = 0;
axes.flat<int32>()(1) = 2;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
template <typename T>
static void ReduceToScalar(::testing::benchmark::State& state,
const string& device, const string& reduce,
int num_x, int num_y) {
test::Benchmark(device, ToScalar<T>(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(T));
}
static void DoRowReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, RowReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void DoColReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, ColReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DYReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, ThreeDYReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DXZReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, ThreeDXZReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void BM_Sum2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DToScalarGPUComplex(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<std::complex<float>>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPUComplex)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DToScalarGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<Eigen::half>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPUHalf)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DRowReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoRowReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DRowReduceGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DColumnReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoColReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DColumnReduceGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum3DYReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DYReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum3DYReduceGPU)->RangePair(64, 4096, 64, 4096);
static void BM_Sum3DXZReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DXZReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum3DXZReduceGPU)->RangePair(64, 4096, 64, 4096);
static void BM_Mean2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Mean", num_x, num_y);
}
BENCHMARK(BM_Mean2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_EuclideanNorm2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "EuclideanNorm", num_x, num_y);
}
BENCHMARK(BM_EuclideanNorm2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Max2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Max", num_x, num_y);
}
BENCHMARK(BM_Max2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Min2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Min", num_x, num_y);
}
BENCHMARK(BM_Min2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Min2DToScalarGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<Eigen::half>(state, "gpu", "Min", num_x, num_y);
}
BENCHMARK(BM_Min2DToScalarGPUHalf)->RangePair(2048, 8192, 2048, 8192);
static void BM_Bool2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<bool>(state, "gpu", "All", num_x, num_y);
}
BENCHMARK(BM_Bool2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Mean2DToScalarCPUBF16(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<bfloat16>(state, "cpu", "Mean", num_x, num_y);
}
BENCHMARK(BM_Mean2DToScalarCPUBF16)->RangePair(2048, 8192, 2048, 8192);
} |
1,124 | cpp | tensorflow/tensorflow | stochastic_cast_op | tensorflow/core/kernels/stochastic_cast_op.cc | tensorflow/core/kernels/stochastic_cast_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_STOCHASTIC_CAST_OP_H_
#define TENSORFLOW_CORE_KERNELS_STOCHASTIC_CAST_OP_H_
#include <limits>
#include <type_traits>
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/rng_alg.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
namespace internal {
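// Base class for stochastic cast kernels; subclasses implement RoundOff() to
// perform the type-specific stochastic rounding given the RNG key/counter.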
class StochasticCastOpBase : public OpKernel {
public:
explicit StochasticCastOpBase(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override;
protected:
virtual void RoundOff(OpKernelContext* ctx, Algorithm alg, const Tensor& key,
const Tensor& counter, Tensor* output) = 0;
};
}
}
namespace Eigen {
namespace internal {
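// Stochastically rounds a floating-point scalar to a value representable in
// IntResultType: NaN maps to 0, out-of-range values saturate, exact integers
// pass through, and a fractional value rounds up to the next larger integer
// with probability equal to s - floor(s), so the rounding is unbiased in
// expectation.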
template <typename Scalar, typename IntResultType, typename Generator>
struct StochasticRoundToIntOp {
static_assert(std::is_integral<IntResultType>::value,
"Integer type expected");
typedef tensorflow::random::UniformDistribution<Generator, Scalar>
Distribution;
const Scalar max =
static_cast<Scalar>(std::numeric_limits<IntResultType>::max());
const Scalar min =
static_cast<Scalar>(std::numeric_limits<IntResultType>::min());
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC explicit StochasticRoundToIntOp(
Generator* g)
: gen(g) {}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar
operator()(const Scalar& s) const {
if (TF_PREDICT_FALSE(Eigen::numext::isnan(s))) {
return Scalar{0};
}
if (s >= max) {
return max;
}
if (s <= min) {
return min;
}
if (Eigen::numext::floor(s) == s) {
return s;
}
Distribution dist;
Scalar random = dist(gen)[0];
if (s < 0) {
return Eigen::numext::floor(s + random);
} else {
return Eigen::numext::floor(s + Scalar{1} - random);
}
}
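  // Vectorized path: draws one batch of uniform randoms, tiles it across the
  // packet, and applies the same saturation/rounding logic with packet
  // primitives; NaN lanes are zeroed by the final pselect.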
template <typename Packet>
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Packet packetOp(const Packet& p) const {
constexpr size_t kPacketSize =
Eigen::internal::unpacket_traits<Packet>::size;
Scalar unpacked_random[kPacketSize];
Distribution dist;
auto const sample = dist(gen);
for (int i = 0; i < kPacketSize; i += Distribution::kResultElementCount) {
int granularity = std::min(Distribution::kResultElementCount,
static_cast<int>(kPacketSize - i));
std::copy(&sample[0], &sample[0] + granularity, &unpacked_random[i]);
}
Packet random = pload<Packet>(unpacked_random);
Packet rounded =
pselect(pcmp_eq(pfloor(p), p), p,
pselect(pcmp_lt(p, pzero(p)), pfloor(padd(p, random)),
pfloor(padd(p, psub(pset1<Packet>(1), random)))));
Packet result =
pselect(pcmp_le(pset1<Packet>(max), p), pset1<Packet>(max), rounded);
result =
pselect(pcmp_le(p, pset1<Packet>(min)), pset1<Packet>(min), result);
return pselect(pcmp_eq(p, p), result, pset1<Packet>(0));
}
Generator* gen;
};
template <typename Scalar, typename IntResultType, typename Generator>
struct functor_traits<
StochasticRoundToIntOp<Scalar, IntResultType, Generator>> {
enum {
Cost = 3 * NumTraits<Scalar>::AddCost,
PacketAccess =
packet_traits<Scalar>::HasCmp && packet_traits<Scalar>::HasRound,
};
};
}
}
#endif
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/rng_alg.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/status.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
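// Shape function: key and counter must be vectors (key with RNG_KEY_SIZE
// elements), alg must be a scalar, and the output shape equals the input
// shape.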
REGISTER_OP("StochasticCastToInt")
.Input("input: Tin")
.Input("key: uint64")
.Input("counter: uint64")
.Input("alg: int32")
.Output("output: Tout")
.Attr("Tin: {half, bfloat16, float32, float64}")
.Attr("Tout: {int8, int16, int32}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle key;
ShapeHandle shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &key));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &shape));
DimensionHandle dim;
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(key, 0), RNG_KEY_SIZE, &dim));
c->set_output(0, c->input(0));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(StochasticCastOpTest, StochasticCastToIntShapeInference) {
ShapeInferenceTestOp op("StochasticCastToInt");
INFER_OK(op, "[4,2];[1];[1];[]", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[4,2];[1,2];[1];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[4,2];[1];[1,2];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[4,2];[1];[1];[1]");
}
} |
1,125 | cpp | tensorflow/tensorflow | roll_op | tensorflow/core/kernels/roll_op.cc | tensorflow/core/kernels/roll_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_ROLL_OP_H_
#define TENSORFLOW_CORE_KERNELS_ROLL_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct Roll {
void operator()(const OpKernelContext* context, const int64_t num_elements,
const int num_dims, const absl::Span<const int32> dim_size,
const T* input, T* output,
const absl::Span<const int32> threshold,
const absl::Span<const int64_t> dim_range, const int64_t isd);
};
}
}
#endif
#include "tensorflow/core/kernels/roll_op.h"
#include <algorithm>
#include <cstdint>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
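// Rolls the elements of a tensor along one or more axes. Shifts on the same
// axis are folded together modulo the dimension size, then per-dimension
// thresholds and strides are precomputed before dispatching to the Roll
// functor. "isd" is the innermost dimension with a non-zero effective shift.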
template <typename Device, typename T, typename Tshift, typename Taxis>
class RollOp : public OpKernel {
public:
explicit RollOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& shift = context->input(1);
const Tensor& axis = context->input(2);
auto shift_flat = shift.flat<Tshift>();
auto axis_flat = axis.flat<Taxis>();
OP_REQUIRES(context, TensorShapeUtils::IsVectorOrHigher(input.shape()),
errors::InvalidArgument("input must be 1-D or higher"));
OP_REQUIRES(context, shift.shape().dims() <= 1,
errors::InvalidArgument(
"shift must be a scalar or a 1-D vector. Found: ",
shift.shape().DebugString()));
OP_REQUIRES(context, axis.shape().dims() <= 1,
errors::InvalidArgument(
"axis must be a scalar or a 1-D vector. Found: ",
axis.shape().DebugString()));
OP_REQUIRES(
context, shift.shape() == axis.shape(),
errors::InvalidArgument("shift and axis must have the same size"));
const int64_t num_elements = input.NumElements();
const int num_shifts = static_cast<int>(shift_flat.size());
const int num_dims = input.dims();
absl::InlinedVector<int32, 4> shift_mod_sum(num_dims, 0);
for (int i = 0; i < num_shifts; i++) {
int axis = axis_flat(i);
if (axis < 0) {
axis += num_dims;
}
OP_REQUIRES(context, FastBoundsCheck(axis, num_dims),
errors::InvalidArgument("axis ", axis, " is out of range"));
const int ds = std::max<int>(static_cast<int>(input.dim_size(axis)), 1);
const int sum = shift_mod_sum[axis] + static_cast<int>(shift_flat(i));
shift_mod_sum[axis] = (sum % ds + ds) % ds;
}
absl::InlinedVector<int32, 4> dim_size(num_dims);
absl::InlinedVector<int32, 4> threshold(num_dims);
absl::InlinedVector<int64_t, 4> dim_range(num_dims);
int64_t dim_size_prod = 1;
int64_t isd = 0;
for (int i = num_dims - 1; i >= 0; i--) {
if (isd == 0 && shift_mod_sum[i] != 0) isd = i;
const int ds = std::max<int>(static_cast<int>(input.dim_size(i)), 1);
dim_size[i] = ds;
threshold[i] = (ds - shift_mod_sum[i]) % ds;
dim_size_prod *= static_cast<int64_t>(input.dim_size(i));
dim_range[i] = dim_size_prod;
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
auto input_flat = input.flat<T>().data();
auto output_flat = output->flat<T>().data();
functor::Roll<Device, T>()(context, num_elements, num_dims, dim_size,
input_flat, output_flat, threshold, dim_range,
isd);
}
};
namespace functor {
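// Element-by-element roll used for types that cannot be moved with memcpy
// (e.g. strings). Each shard tracks the multi-dimensional index and the output
// offset incrementally instead of recomputing them per element.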
template <typename T>
void DoRoll(const OpKernelContext* context, const int64_t num_elements,
const int num_dims, const absl::Span<const int32> dim_size,
const T* input, T* output, const absl::Span<const int32> threshold,
const absl::Span<const int64_t> dim_range) {
auto work = [input, output, num_dims, &dim_size, &threshold, &dim_range](
int64_t start, int64_t end) {
absl::InlinedVector<int, 4> indices(num_dims);
int offset = 0;
for (int i = 0; i < num_dims; i++) {
const int64_t stride = dim_range[i] / dim_size[i];
const int shift = dim_size[i] - threshold[i];
const int indx = (start / stride) % dim_size[i];
indices[i] = indx;
const int shifted_indx = (indx + shift) % dim_size[i];
offset += (shifted_indx - indx) * stride;
}
for (int64_t i = start; i < end; i++) {
output[i + offset] = input[i];
for (int j = num_dims - 1; j >= 0; j--) {
const int indx = (indices[j] + 1) % dim_size[j];
indices[j] = indx;
if (indx != 0) {
if (indx == threshold[j]) {
offset -= dim_range[j];
}
break;
} else if (threshold[j] != 0) {
offset += dim_range[j];
}
}
}
};
auto worker_threads = context->device()->tensorflow_cpu_worker_threads();
const int cost_per_element = 15 * sizeof(T);
Shard(worker_threads->num_threads, worker_threads->workers, num_elements,
cost_per_element, std::move(work));
}
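// memcpy-based roll for types that can be moved with memcpy: elements are
// copied in contiguous groups along the innermost shifted dimension (isd)
// instead of one at a time.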
template <typename T>
void DoRollWithMemcpy(const OpKernelContext* context,
const int64_t num_elements, const int num_dims,
const absl::Span<const int32> dim_size, const T* input,
T* output, const absl::Span<const int32> threshold,
const absl::Span<const int64_t> dim_range,
const int64_t isd) {
auto work = [input, output, num_dims, &dim_size, &threshold, &dim_range, isd](
int64_t start, int64_t end) {
const int64_t isd_range = std::max<int64_t>(dim_range[isd], 1);
const int64_t isd_stride = isd_range / std::max<int64_t>(dim_size[isd], 1);
const int64_t start_remainder = (start % 2) * threshold[isd] * isd_stride;
const int64_t end_remainder = (end % 2) * threshold[isd] * isd_stride;
start = (start / 2) * isd_range + start_remainder;
end = (end / 2) * isd_range + end_remainder;
const T* in_ptr = &input[0];
T* out_ptr = &output[0];
in_ptr += start;
out_ptr += start;
absl::InlinedVector<int, 4> indices(num_dims);
int64_t remainder_offset = 0;
for (int i = 0; i < num_dims; i++) {
const int64_t stride = dim_range[i] / dim_size[i];
const int shift = dim_size[i] - threshold[i];
const int indx = (start / stride) % dim_size[i];
indices[i] = indx;
int out_indx = (indx + shift) % dim_size[i];
if (i > isd) {
out_indx = 0;
remainder_offset += (out_indx - indx) * stride;
}
out_ptr += (out_indx - indx) * stride;
}
for (int i = num_dims - 1; i > isd; i--) indices[i] = 0;
int isd_indx_skip = 0;
int64_t group_size = 0;
if (indices[isd] < threshold[isd]) {
isd_indx_skip = threshold[isd] - indices[isd];
group_size = isd_indx_skip * isd_stride + remainder_offset;
} else {
isd_indx_skip = dim_size[isd] - indices[isd];
group_size = isd_indx_skip * isd_stride + remainder_offset;
}
int64_t i = start;
while (i < end) {
memcpy(out_ptr, in_ptr, group_size * sizeof(T));
i += group_size;
out_ptr += group_size;
in_ptr += group_size;
for (int j = isd; j >= 0; j--) {
int inc = 1;
if (j == isd) inc = isd_indx_skip;
const int indx = (indices[j] + inc) % dim_size[j];
indices[j] = indx;
if (indx != 0) {
if (indx == threshold[j]) {
out_ptr -= dim_range[j];
}
break;
} else if (threshold[j] != 0) {
out_ptr += dim_range[j];
}
}
if (indices[isd] < threshold[isd]) {
isd_indx_skip = threshold[isd] - indices[isd];
group_size = isd_indx_skip * isd_stride;
} else {
isd_indx_skip = dim_size[isd] - indices[isd];
group_size = isd_indx_skip * isd_stride;
}
}
};
auto worker_threads = context->device()->tensorflow_cpu_worker_threads();
const int64_t ave_group_size = dim_range[isd] / 2;
const int64_t total_work =
2 * num_elements / std::max<int64_t>(dim_range[isd], 1);
const int64_t cost_per_group = 25000 * sizeof(T) * ave_group_size;
Shard(worker_threads->num_threads, worker_threads->workers, total_work,
cost_per_group, std::move(work));
}
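// CPU specialization: picks the memcpy path when DataTypeCanUseMemcpy allows
// it, otherwise falls back to the element-wise roll.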
template <typename T>
struct Roll<CPUDevice, T> {
void operator()(const OpKernelContext* context, const int64_t num_elements,
const int num_dims, const absl::Span<const int32> dim_size,
const T* input, T* output,
const absl::Span<const int32> threshold,
const absl::Span<const int64_t> dim_range,
const int64_t isd) {
if (DataTypeCanUseMemcpy(DataTypeToEnum<T>::v())) {
DoRollWithMemcpy<T>(context, num_elements, num_dims, dim_size, input,
output, threshold, dim_range, isd);
} else {
DoRoll<T>(context, num_elements, num_dims, dim_size, input, output,
threshold, dim_range);
}
};
};
}
#define REGISTER_CPU(type) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tshift") \
.TypeConstraint<int32>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<CPUDevice, type, int32, int32>) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tshift") \
.TypeConstraint<int32>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<CPUDevice, type, int64, int32>) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tshift") \
.TypeConstraint<int64_t>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<CPUDevice, type, int32, int64>) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tshift") \
.TypeConstraint<int64_t>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<CPUDevice, type, int64, int64>)
TF_CALL_ALL_TYPES(REGISTER_CPU);
#undef REGISTER_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tshift") \
.TypeConstraint<int32>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<GPUDevice, type, int32, int32>) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tshift") \
.TypeConstraint<int32>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<GPUDevice, type, int64, int32>) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tshift") \
.TypeConstraint<int64_t>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<GPUDevice, type, int32, int64>) \
REGISTER_KERNEL_BUILDER(Name("Roll") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tshift") \
.TypeConstraint<int64_t>("Taxis") \
.HostMemory("shift") \
.HostMemory("axis"), \
RollOp<GPUDevice, type, int64, int64>)
TF_CALL_int32(REGISTER_KERNEL);
TF_CALL_int64(REGISTER_KERNEL);
TF_CALL_uint32(REGISTER_KERNEL);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
TF_CALL_COMPLEX_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#endif
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class RollOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "Roll")
.Input(FakeInput(data_type))
.Input(FakeInput(index_type))
.Input(FakeInput(index_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(RollOpTest, ScalarIndices) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({}), {3});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {2, 3, 4, 0, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, ScalarIndices_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(TensorShape({5}), {"a", "b", "c", "d", "e"});
AddInputFromArray<int32>(TensorShape({}), {3});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({5}));
test::FillValues<tstring>(&expected, {"c", "d", "e", "a", "b"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, ScalarIndices_Complex) {
MakeOp(DT_COMPLEX64, DT_INT32);
AddInputFromArray<std::complex<float>>(
TensorShape({5}), {std::complex<float>(0, 10), std::complex<float>(1, 11),
std::complex<float>(2, 12), std::complex<float>(3, 13),
std::complex<float>(4, 14)});
AddInputFromArray<int32>(TensorShape({}), {3});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_COMPLEX64, TensorShape({5}));
test::FillValues<std::complex<float>>(
&expected, {std::complex<float>(2, 12), std::complex<float>(3, 13),
std::complex<float>(4, 14), std::complex<float>(0, 10),
std::complex<float>(1, 11)});
test::ExpectTensorEqual<std::complex<float>>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_TwoD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({3, 5}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14});
AddInputFromArray<int32>(TensorShape({2}), {2, -1});
AddInputFromArray<int32>(TensorShape({2}), {0, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3, 5}));
test::FillValues<float>(&expected,
{6, 7, 8, 9, 5, 11, 12, 13, 14, 10, 1, 2, 3, 4, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_TwoD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(TensorShape({3, 5}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<int32>(TensorShape({2}), {2, -1});
AddInputFromArray<int32>(TensorShape({2}), {0, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({3, 5}));
test::FillValues<tstring>(&expected, {"g", "h", "i", "j", "f", "l", "m", "n",
"o", "k", "b", "c", "d", "e", "a"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_ThreeD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 2, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
AddInputFromArray<int32>(TensorShape({3}), {1, -1, -1});
AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 3}));
test::FillValues<float>(&expected, {10, 11, 9, 7, 8, 6, 4, 5, 3, 1, 2, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(
TensorShape({2, 2, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
AddInputFromArray<int32>(TensorShape({3}), {1, -1, -1});
AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({2, 2, 3}));
test::FillValues<tstring>(
&expected, {"k", "l", "j", "h", "i", "g", "e", "f", "d", "b", "c", "a"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_TwoD64) {
MakeOp(DT_FLOAT, DT_INT64);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14});
AddInputFromArray<int64_t>(TensorShape({2}), {-1, 4});
AddInputFromArray<int64_t>(TensorShape({2}), {0, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected,
{5, 3, 4, 8, 6, 7, 11, 9, 10, 14, 12, 13, 2, 0, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_TwoD64_NoMemcpy) {
MakeOp(DT_STRING, DT_INT64);
AddInputFromArray<tstring>(TensorShape({5, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<int64_t>(TensorShape({2}), {-1, 4});
AddInputFromArray<int64_t>(TensorShape({2}), {0, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({5, 3}));
test::FillValues<tstring>(&expected, {"f", "d", "e", "i", "g", "h", "l", "j",
"k", "o", "m", "n", "c", "a", "b"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_ThreeD64) {
MakeOp(DT_FLOAT, DT_INT64);
AddInputFromArray<float>(TensorShape({4, 1, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
AddInputFromArray<int64_t>(TensorShape({3}), {4, 3, 2});
AddInputFromArray<int64_t>(TensorShape({3}), {0, 1, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 1, 3}));
test::FillValues<float>(&expected, {1, 2, 0, 4, 5, 3, 7, 8, 6, 10, 11, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Simple_ThreeD64_NoMemcpy) {
MakeOp(DT_STRING, DT_INT64);
AddInputFromArray<tstring>(
TensorShape({4, 1, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
AddInputFromArray<int64_t>(TensorShape({3}), {4, 3, 2});
AddInputFromArray<int64_t>(TensorShape({3}), {0, 1, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({4, 1, 3}));
test::FillValues<tstring>(
&expected, {"b", "c", "a", "e", "f", "d", "h", "i", "g", "k", "l", "j"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, ZeroShift_ThreeD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 2, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 3}));
test::FillValues<float>(&expected, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, ZeroShift_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(
TensorShape({2, 2, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({2, 2, 3}));
test::FillValues<tstring>(
&expected, {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, ZeroSize_ThreeD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 0, 0}), {});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 0, 0}));
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, ZeroSize_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(TensorShape({5, 0, 0}), {});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({5, 0, 0}));
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, OneSize_ThreeD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({1, 1, 1}), {5});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1}));
test::FillValues<float>(&expected, {5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, OneSize_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(TensorShape({1, 1, 1}), {"a"});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({1, 1, 1}));
test::FillValues<tstring>(&expected, {"a"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, MultiShifts_TwoD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({3, 5}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14});
AddInputFromArray<int32>(TensorShape({4}), {-2, 2, -1, 1});
AddInputFromArray<int32>(TensorShape({4}), {1, 0, 0, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3, 5}));
test::FillValues<float>(&expected,
{11, 12, 13, 14, 10, 1, 2, 3, 4, 0, 6, 7, 8, 9, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, MultiShifts_TwoD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);
AddInputFromArray<tstring>(TensorShape({3, 5}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<int32>(TensorShape({4}), {-2, 2, -1, 1});
AddInputFromArray<int32>(TensorShape({4}), {1, 0, 0, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({3, 5}));
test::FillValues<tstring>(&expected, {"l", "m", "n", "o", "k", "b", "c", "d",
"e", "a", "g", "h", "i", "j", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(RollOpTest, Error_InputMustBeVectorOrHigher) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({}), {7});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(), "input must be 1-D or higher"))
<< s;
}
TEST_F(RollOpTest, Error_AxisMustBeScalarOrVector) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({1, 2}), {0, 1});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(), "axis must be a scalar or a 1-D vector"))
<< s;
}
TEST_F(RollOpTest, Error_ShiftMustBeScalarOrVector) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({1, 2}), {0, 1});
AddInputFromArray<int32>(TensorShape({}), {1});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(), "shift must be a scalar or a 1-D vector"))
<< s;
}
TEST_F(RollOpTest, Error_ShiftAndAxisMustBeSameSize) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({1}), {1});
AddInputFromArray<int32>(TensorShape({2}), {0, 1});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(), "shift and axis must have the same size"))
<< s;
}
TEST_F(RollOpTest, Error_AxisOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {1});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(), "is out of range")) << s;
}
static Graph* RollGraph(const TensorShape& shape, int isd) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_FLOAT, shape);
input.flat<float>().setRandom();
const int dims = static_cast<int>(input.dims());
Tensor shift(DT_INT32, TensorShape({dims}));
for (int i = 0; i < dims; i++) {
shift.flat<int32>()(i) = (i <= isd) ? 2 : 0;
}
Tensor axis(DT_INT32, TensorShape({dims}));
for (int i = 0; i < dims; i++) {
axis.flat<int32>()(i) = i;
}
test::graph::Roll(g, test::graph::Constant(g, input),
test::graph::Constant(g, shift),
test::graph::Constant(g, axis));
return g;
}
#define BM_ROLL_OUTER(DEVICE) \
static void BM_##DEVICE##_roll_outer(::testing::benchmark::State& state) { \
const int rows = state.range(0); \
const int columns = state.range(1); \
\
TensorShape shape{rows, columns}; \
test::Benchmark(#DEVICE, RollGraph(shape, 0), false) \
.Run(state); \
const int64_t num_items = \
static_cast<int64_t>(state.iterations()) * shape.num_elements(); \
state.SetItemsProcessed(num_items); \
state.SetBytesProcessed(num_items * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_roll_outer) \
->UseRealTime() \
->ArgPair(256, 256) \
->ArgPair(512, 512) \
->ArgPair(1024, 1024) \
->ArgPair(2048, 2048)
#define BM_ROLL_ALL(DEVICE) \
static void BM_##DEVICE##_roll_all(::testing::benchmark::State& state) { \
const int rows = state.range(0); \
const int columns = state.range(1); \
\
TensorShape shape{rows, columns}; \
test::Benchmark(#DEVICE, RollGraph(shape, 1), false) \
.Run(state); \
const int64_t num_items = \
static_cast<int64_t>(state.iterations()) * shape.num_elements(); \
state.SetItemsProcessed(num_items); \
state.SetBytesProcessed(num_items * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_roll_all) \
->UseRealTime() \
->ArgPair(256, 256) \
->ArgPair(512, 512) \
->ArgPair(1024, 1024) \
->ArgPair(2048, 2048)
BM_ROLL_OUTER(cpu);
BM_ROLL_ALL(cpu);
}
} |
1,126 | cpp | tensorflow/tensorflow | diag_op | tensorflow/core/kernels/diag_op.cc | tensorflow/core/kernels/diag_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DIAG_OP_H_
#define TENSORFLOW_CORE_KERNELS_DIAG_OP_H_
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct DiagFunctor {
Status operator()(OpKernelContext* context, const int64_t size, const T* in,
T* out);
};
template <typename Device, typename T>
struct DiagPartFunctor {
Status operator()(OpKernelContext* context, const int64_t size, const T* in,
T* out);
};
}
}
#endif
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/diag_op.h"
#include <algorithm>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
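// Diag: given an input of shape [d0, ..., dk-1], produces an output of shape
// [d0, ..., dk-1, d0, ..., dk-1] with the input on the generalized diagonal
// (where the two index groups are equal) and zeros elsewhere.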
template <typename Device, typename T>
class DiagOp : public OpKernel {
public:
explicit DiagOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& diagonal = context->input(0);
const int num_dims = diagonal.dims();
OP_REQUIRES(
context, 0 != num_dims,
errors::InvalidArgument("Input must be at least rank 1, got 0"));
TensorShape out_shape;
for (int i = 0; i < num_dims; ++i) {
OP_REQUIRES_OK(context, out_shape.AddDimWithStatus(diagonal.dim_size(i)));
}
for (int i = 0; i < num_dims; ++i) {
OP_REQUIRES_OK(context, out_shape.AddDimWithStatus(diagonal.dim_size(i)));
}
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, out_shape, &output_tensor));
functor::DiagFunctor<Device, T> diagFunc;
Status s =
diagFunc(context, diagonal.NumElements(), diagonal.flat<T>().data(),
output_tensor->flat<T>().data());
OP_REQUIRES_OK(context, s);
}
};
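// DiagPart: the inverse of Diag. Requires an even-rank input whose first and
// second halves of dimensions match, and extracts the generalized diagonal.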
template <typename Device, typename T>
class DiagPartOp : public OpKernel {
public:
explicit DiagPartOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& tensor = context->input(0);
const int num_dims = tensor.dims();
const int out_dims = num_dims / 2;
    OP_REQUIRES(context, 0 == num_dims % 2,
                errors::InvalidArgument(
                    "The rank of the tensor should be even and positive, "
                    "got shape ",
                    tensor.shape().DebugString()));
for (int i = 0; i < out_dims; i++) {
OP_REQUIRES(
context, tensor.dim_size(i) == tensor.dim_size(i + out_dims),
errors::InvalidArgument("Invalid shape ",
tensor.shape().DebugString(), ": dimensions ",
i, " and ", i + out_dims, " do not match."));
}
TensorShape out_shape;
for (int i = 0; i < out_dims; ++i) {
OP_REQUIRES_OK(context, out_shape.AddDimWithStatus(tensor.dim_size(i)));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output));
functor::DiagPartFunctor<Device, T> diagPartFunc;
Status s = diagPartFunc(context, out_shape.num_elements(),
tensor.flat<T>().data(), output->flat<T>().data());
OP_REQUIRES_OK(context, s);
}
};
namespace functor {
template <typename T>
struct DiagFunctor<CPUDevice, T> {
EIGEN_ALWAYS_INLINE Status operator()(OpKernelContext* context,
const int64_t size, const T* in,
T* out) {
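    // Each shard zero-fills its rows of the size x size output and then
    // writes the diagonal entries, which sit at stride (size + 1).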
auto subDiag = [in, out, size](int64_t start, int64_t limit) {
std::fill(out + size * start, out + size * limit, T());
for (int64_t index = start; index < limit; ++index) {
out[(1 + size) * index] = in[index];
}
};
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, size, 5 * size,
subDiag);
return OkStatus();
}
};
template <typename T>
struct DiagPartFunctor<CPUDevice, T> {
EIGEN_ALWAYS_INLINE Status operator()(OpKernelContext* context,
const int64_t size, const T* in,
T* out) {
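    // The main diagonal of a flattened size x size matrix lives at stride
    // (size + 1), so out[i] = in[(1 + size) * i].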
auto subDiagPart = [in, out, size](int64_t start, int64_t limit) {
for (int64_t index = start; index < limit; ++index) {
out[index] = in[(1 + size) * index];
}
};
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, size, 5,
subDiagPart);
return OkStatus();
}
};
}
#define REGISTER_DIAGOP(T) \
REGISTER_KERNEL_BUILDER( \
Name("Diag").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
DiagOp<CPUDevice, T>)
TF_CALL_double(REGISTER_DIAGOP);
TF_CALL_float(REGISTER_DIAGOP);
TF_CALL_int32(REGISTER_DIAGOP);
TF_CALL_int64(REGISTER_DIAGOP);
TF_CALL_COMPLEX_TYPES(REGISTER_DIAGOP);
TF_CALL_half(REGISTER_DIAGOP);
#undef REGISTER_DIAGOP
#define REGISTER_DIAGPARTOP(T) \
REGISTER_KERNEL_BUILDER( \
Name("DiagPart").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
DiagPartOp<CPUDevice, T>)
TF_CALL_double(REGISTER_DIAGPARTOP);
TF_CALL_float(REGISTER_DIAGPARTOP);
TF_CALL_int32(REGISTER_DIAGPARTOP);
TF_CALL_int64(REGISTER_DIAGPARTOP);
TF_CALL_COMPLEX_TYPES(REGISTER_DIAGPARTOP);
TF_CALL_half(REGISTER_DIAGPARTOP);
#undef REGISTER_DIAGPARTOP
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
extern template struct DiagFunctor<GPUDevice, double>;
extern template struct DiagFunctor<GPUDevice, float>;
extern template struct DiagFunctor<GPUDevice, int32>;
extern template struct DiagFunctor<GPUDevice, int64_t>;
extern template struct DiagFunctor<GPUDevice, complex64>;
extern template struct DiagFunctor<GPUDevice, complex128>;
}
#define REGISTER_DIAGOP_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("Diag").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
DiagOp<GPUDevice, T>)
TF_CALL_double(REGISTER_DIAGOP_GPU);
TF_CALL_float(REGISTER_DIAGOP_GPU);
TF_CALL_int32(REGISTER_DIAGOP_GPU);
TF_CALL_int64(REGISTER_DIAGOP_GPU);
TF_CALL_COMPLEX_TYPES(REGISTER_DIAGOP_GPU);
TF_CALL_half(REGISTER_DIAGOP_GPU);
#undef REGISTER_DIAGOP_GPU
namespace functor {
extern template struct DiagPartFunctor<GPUDevice, double>;
extern template struct DiagPartFunctor<GPUDevice, float>;
extern template struct DiagPartFunctor<GPUDevice, int32>;
extern template struct DiagPartFunctor<GPUDevice, int64_t>;
extern template struct DiagPartFunctor<GPUDevice, complex64>;
extern template struct DiagPartFunctor<GPUDevice, complex128>;
extern template struct DiagPartFunctor<GPUDevice, Eigen::half>;
}
#define REGISTER_DIAGPARTOP_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("DiagPart").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
DiagPartOp<GPUDevice, T>)
TF_CALL_double(REGISTER_DIAGPARTOP_GPU);
TF_CALL_float(REGISTER_DIAGPARTOP_GPU);
TF_CALL_int32(REGISTER_DIAGPARTOP_GPU);
TF_CALL_int64(REGISTER_DIAGPARTOP_GPU);
TF_CALL_COMPLEX_TYPES(REGISTER_DIAGPARTOP_GPU);
TF_CALL_half(REGISTER_DIAGPARTOP_GPU);
#undef REGISTER_DIAGPARTOP_GPU
#endif
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename T>
static Graph* Diag(int n, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in(type, TensorShape({n}));
in.flat<T>().setRandom();
Node* out = test::graph::Diag(g, test::graph::Constant(g, in), type);
test::graph::DiagPart(g, out, type);
return g;
}
#define BM_DiagDev(N, T, TFTYPE, DEVICE) \
static void BM_Diag##_##N##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Diag<T>(N, TFTYPE), false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N * N); \
} \
BENCHMARK(BM_Diag##_##N##_##TFTYPE##_##DEVICE);
#define BM_Diag(N) \
BM_DiagDev(N, int, DT_INT32, cpu); \
BM_DiagDev(N, float, DT_FLOAT, cpu); \
BM_DiagDev(N, std::complex<float>, DT_COMPLEX64, cpu); \
BM_DiagDev(N, int, DT_INT32, gpu); \
BM_DiagDev(N, float, DT_FLOAT, gpu); \
BM_DiagDev(N, std::complex<float>, DT_COMPLEX64, gpu);
BM_Diag(16);
BM_Diag(128);
BM_Diag(512);
} |
1,127 | cpp | tensorflow/tensorflow | mirror_pad_op | tensorflow/core/kernels/image/mirror_pad_op.cc | tensorflow/core/kernels/image/mirror_pad_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_MIRROR_PAD_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_MIRROR_PAD_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace Eigen {
template <typename PaddingDimensions, typename XprType>
class TensorMirrorPadOp;
namespace internal {
template <typename PaddingDimensions, typename XprType>
struct traits<TensorMirrorPadOp<PaddingDimensions, XprType>>
: public traits<XprType> {
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef std::remove_reference_t<Nested> _Nested;
static constexpr int NumDimensions = XprTraits::NumDimensions;
static constexpr int Layout = XprTraits::Layout;
};
template <typename PaddingDimensions, typename XprType>
struct eval<TensorMirrorPadOp<PaddingDimensions, XprType>, Eigen::Dense> {
typedef const TensorMirrorPadOp<PaddingDimensions, XprType>& type;
};
template <typename PaddingDimensions, typename XprType>
struct nested<
TensorMirrorPadOp<PaddingDimensions, XprType>, 1,
typename eval<TensorMirrorPadOp<PaddingDimensions, XprType>>::type> {
typedef TensorMirrorPadOp<PaddingDimensions, XprType> type;
};
}
template <typename PaddingDimensions, typename XprType>
class TensorMirrorPadOp
: public TensorBase<TensorMirrorPadOp<PaddingDimensions, XprType>,
ReadOnlyAccessors> {
public:
typedef typename Eigen::internal::traits<TensorMirrorPadOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename Eigen::internal::nested<TensorMirrorPadOp>::type Nested;
typedef typename Eigen::internal::traits<TensorMirrorPadOp>::StorageKind
StorageKind;
typedef typename Eigen::internal::traits<TensorMirrorPadOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMirrorPadOp(
const XprType& expr, const PaddingDimensions& padding_dims, Index offset)
: xpr_(expr), padding_dims_(padding_dims), offset_(offset) {}
EIGEN_DEVICE_FUNC
const PaddingDimensions& padding() const { return padding_dims_; }
EIGEN_DEVICE_FUNC
Index offset() const { return offset_; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const {
return xpr_;
}
protected:
typename XprType::Nested xpr_;
const PaddingDimensions padding_dims_;
const Index offset_;
};
template <typename PaddingDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorMirrorPadOp<PaddingDimensions, ArgType>,
Device> {
typedef TensorMirrorPadOp<PaddingDimensions, ArgType> XprType;
typedef typename XprType::Index Index;
static constexpr int Dims = internal::array_size<PaddingDimensions>::value;
typedef DSizes<Index, Dims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
BlockAccess = false,
BlockAccessV2 = false,
PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = true,
RawAccess = false
};
typedef internal::TensorBlockNotImplemented TensorBlock;
EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: impl_(op.expression(), device), padding_(op.padding()) {
EIGEN_STATIC_ASSERT(Dims > 0, YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(op.offset() == 0 || op.offset() == 1);
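    // offset == 0 (SYMMETRIC) mirrors including the border element;
    // offset == 1 (REFLECT) mirrors excluding it.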
left_offset_ = -1 + op.offset();
right_offset_ = -1 - op.offset();
dimensions_ = impl_.dimensions();
for (int dim = 0; dim < Dims; ++dim) {
eigen_assert(padding_[dim].first + op.offset() <= dimensions_[dim]);
eigen_assert(padding_[dim].second + op.offset() <= dimensions_[dim]);
dimensions_[dim] += padding_[dim].first + padding_[dim].second;
}
const auto& input_dims = impl_.dimensions();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
input_strides_[0] = 1;
output_strides_[0] = 1;
for (int i = 0; i < Dims - 1; ++i) {
input_strides_[i + 1] = input_strides_[i] * input_dims[i];
output_strides_[i + 1] = output_strides_[i] * dimensions_[i];
}
} else {
input_strides_[numext::maxi(0, Dims - 1)] = 1;
output_strides_[numext::maxi(0, Dims - 1)] = 1;
for (int i = Dims - 1; i > 0; --i) {
input_strides_[i - 1] = input_strides_[i] * input_dims[i];
output_strides_[i - 1] = output_strides_[i] * dimensions_[i];
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
return dimensions_;
}
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
impl_.evalSubExprsIfNeeded(nullptr);
return true;
}
EIGEN_STRONG_INLINE void cleanup() { impl_.cleanup(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType
coeff(Index index) const {
eigen_assert(index < dimensions().TotalSize());
const Index input_index = ToInputIndex(index);
return impl_.coeff(input_index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType
coeff(array<Index, Dims> coords) const {
for (int dim = 0; dim < Dims; ++dim) {
coords[dim] = ToInputCoord(coords[dim], dim);
}
ReadInputHelper<TensorEvaluator<ArgType, Device>::CoordAccess> helper;
return helper(coords, input_strides_, impl_);
}
template <int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType
packet(Index index) const {
constexpr int kPacketSize =
internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT(kPacketSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index + kPacketSize <= dimensions().TotalSize());
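    // Find the innermost padded dimension (in memory order). If the whole
    // packet lies inside the unpadded interior along that dimension, a single
    // vectorized load of the input suffices; otherwise fall back to
    // per-coefficient reads below.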
int dim = -1;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int k = 0; k < Dims; ++k) {
if (padding_[k].first != 0 || padding_[k].second != 0) {
dim = k;
break;
}
}
} else {
for (int k = Dims - 1; k >= 0; --k) {
if (padding_[k].first != 0 || padding_[k].second != 0) {
dim = k;
break;
}
}
}
const Index input_index = ToInputIndex(index);
if (dim < 0) {
return impl_.template packet<Unaligned>(input_index);
}
const Index left = padding_[dim].first * output_strides_[dim];
const Index right =
(dimensions_[dim] - padding_[dim].second) * output_strides_[dim];
const Index index_mod = index % (dimensions_[dim] * output_strides_[dim]);
if (left <= index_mod && (index_mod + kPacketSize - 1) < right) {
return impl_.template packet<Unaligned>(input_index);
}
EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[kPacketSize];
values[0] = impl_.coeff(input_index);
for (int i = 1; i < kPacketSize; ++i) {
values[i] = coeff(index + i);
}
PacketReturnType result = internal::pload<PacketReturnType>(values);
return result;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
constexpr int kPacketSize =
internal::unpacket_traits<PacketReturnType>::size;
const double compute_cost = Dims * (7 * TensorOpCost::AddCost<Index>() +
2 * TensorOpCost::MulCost<Index>() +
TensorOpCost::DivCost<Index>());
return impl_.costPerCoeff(vectorized) +
TensorOpCost(1, 0, compute_cost, vectorized, kPacketSize);
}
EIGEN_DEVICE_FUNC Scalar* data() const { return nullptr; }
protected:
using Coords = array<Index, Dims>;
template <bool CoordAccess, bool dummy = true>
struct ReadInputHelper;
template <bool dummy>
struct ReadInputHelper<false, dummy> {
template <typename Eval>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index
operator()(const Coords& coord, const Coords& strides, const Eval& eval) {
Index index = 0;
for (int k = 0; k < Dims; ++k) {
index += coord[k] * strides[k];
}
return eval.coeff(index);
}
};
template <bool dummy>
struct ReadInputHelper<true, dummy> {
template <typename Eval>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index
operator()(const Coords& coord, const Coords& strides, const Eval& eval) {
return eval.coeff(coord);
}
};
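  // Maps an output coordinate along `dim` back to an input coordinate,
  // reflecting coordinates that fall inside the left or right padding.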
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index ToInputCoord(Index k,
int dim) const {
const Index m = impl_.dimensions()[dim];
k -= padding_[dim].first;
if (k < 0) {
return -k + left_offset_;
}
if (k < m) {
return k;
}
return m - (k - m) + right_offset_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index
ToInputIndex(const Coords& coords) const {
Index input_index = 0;
for (int dim = 0; dim < Dims; ++dim) {
input_index += ToInputCoord(coords[dim], dim) * input_strides_[dim];
}
return input_index;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index ToInputIndex(Index index) const {
Index input_index = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int dim = Dims - 1; dim > 0; --dim) {
const Index k = index / output_strides_[dim];
index -= k * output_strides_[dim];
input_index += ToInputCoord(k, dim) * input_strides_[dim];
}
input_index += ToInputCoord(index, 0);
} else {
for (int dim = 0; dim < Dims - 1; ++dim) {
const Index k = index / output_strides_[dim];
index -= k * output_strides_[dim];
input_index += ToInputCoord(k, dim) * input_strides_[dim];
}
input_index += ToInputCoord(index, Dims - 1);
}
return input_index;
}
TensorEvaluator<ArgType, Device> impl_;
PaddingDimensions padding_;
Dimensions dimensions_;
array<Index, Dims> input_strides_;
array<Index, Dims> output_strides_;
Index left_offset_;
Index right_offset_;
};
}
namespace tensorflow {
namespace functor {
template <typename Device, typename T, typename Tpaddings, int Dims>
struct MirrorPad {
void operator()(const Device& device,
typename TTypes<T, Dims, int32>::Tensor output,
typename TTypes<T, Dims, int32>::ConstTensor input,
typename TTypes<Tpaddings>::ConstMatrix padding, int offset) {
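    // Builds the (before, after) padding pair for each dimension and
    // evaluates a TensorMirrorPadOp expression over the input.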
Eigen::array<Eigen::IndexPair<int32>, Dims> padding_dims;
for (int i = 0; i < Dims; ++i) {
padding_dims[i] = Eigen::IndexPair<int32>(padding(i, 0), padding(i, 1));
}
output.device(device) = MirrorPadOp(input, padding_dims, offset);
}
template <typename PaddingDimensions, typename Derived>
static const Eigen::TensorMirrorPadOp<PaddingDimensions, const Derived>
MirrorPadOp(
const Eigen::TensorBase<Derived, Eigen::ReadOnlyAccessors>& tensor,
const PaddingDimensions& padding, int offset) {
return Eigen::TensorMirrorPadOp<PaddingDimensions, const Derived>(
static_cast<const Derived&>(tensor), padding, offset);
}
};
template <typename Device, typename T, typename Tpaddings, int Dims>
struct MirrorPadGrad {
void operator()(const Device& device,
typename TTypes<T, Dims, int32>::Tensor output,
typename TTypes<T, Dims, int32>::ConstTensor input,
typename TTypes<Tpaddings>::ConstMatrix paddings, int offset,
typename TTypes<T, Dims, int32>::Tensor scratch) {
scratch.device(device) = input;
Eigen::array<int32, Dims> lhs_offsets;
Eigen::array<int32, Dims> rhs_offsets;
Eigen::array<int32, Dims> extents;
Eigen::array<bool, Dims> reverses;
for (int i = 0; i < Dims; ++i) {
lhs_offsets[i] = 0;
rhs_offsets[i] = 0;
extents[i] = scratch.dimension(i);
reverses[i] = false;
}
for (int i = 0; i < Dims; ++i) {
reverses[i] = true;
if (paddings(i, 0) > 0) {
rhs_offsets[i] = 0;
lhs_offsets[i] = paddings(i, 0) + offset;
extents[i] = paddings(i, 0);
scratch.slice(lhs_offsets, extents).device(device) +=
scratch.slice(rhs_offsets, extents).reverse(reverses);
}
if (paddings(i, 1) > 0) {
rhs_offsets[i] = scratch.dimension(i) - paddings(i, 1);
lhs_offsets[i] = rhs_offsets[i] - paddings(i, 1) - offset;
extents[i] = paddings(i, 1);
scratch.slice(lhs_offsets, extents).device(device) +=
scratch.slice(rhs_offsets, extents).reverse(reverses);
}
reverses[i] = false;
lhs_offsets[i] = paddings(i, 0);
rhs_offsets[i] = paddings(i, 0);
extents[i] = output.dimension(i);
}
output.device(device) = scratch.slice(rhs_offsets, extents);
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/mirror_pad_op.h"
#include <string>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/mirror_pad_mode.h"
namespace tensorflow {
template <typename Device, typename T, typename Tpaddings>
class MirrorPadOp : public OpKernel {
public:
explicit MirrorPadOp(OpKernelConstruction* context) : OpKernel(context) {
MirrorPadMode mode;
OP_REQUIRES_OK(context, context->GetAttr("mode", &mode));
switch (mode) {
case MirrorPadMode::SYMMETRIC: {
offset_ = 0;
break;
}
case MirrorPadMode::REFLECT: {
offset_ = 1;
break;
}
default:
OP_REQUIRES(context, false,
errors::InvalidArgument(
"mode must be either REFLECT or SYMMETRIC."));
}
}
~MirrorPadOp() override = default;
void Compute(OpKernelContext* context) override {
const Tensor& in0 = context->input(0);
const Tensor& in1 = context->input(1);
const int dims = in0.dims();
constexpr int kMinDims = 0;
constexpr int kMaxDims = 5;
OP_REQUIRES(context, kMinDims <= dims && dims <= kMaxDims,
errors::Unimplemented("inputs rank not in [", kMinDims, ",",
kMaxDims, "]: ", dims));
OP_REQUIRES(
context,
TensorShapeUtils::IsMatrix(in1.shape()) && in1.dim_size(1) == 2,
errors::InvalidArgument("paddings must be a matrix with 2 columns: ",
in1.shape().DebugString()));
OP_REQUIRES(
context, dims == in1.dim_size(0),
errors::InvalidArgument(
"The first dimension of paddings must be the rank of inputs",
in1.shape().DebugString(), ", ", in0.shape().DebugString()));
TensorShape output_shape;
typename TTypes<Tpaddings>::ConstMatrix paddings = in1.matrix<Tpaddings>();
for (int d = 0; d < dims; ++d) {
const Tpaddings before = paddings(d, 0);
const Tpaddings after = paddings(d, 1);
OP_REQUIRES(context, before >= 0 && after >= 0,
errors::InvalidArgument(
"paddings must be non-negative: ", before, " ", after));
if (offset_ == 0) {
OP_REQUIRES(context,
before <= in0.dim_size(d) && after <= in0.dim_size(d),
errors::InvalidArgument("paddings must be no greater "
"than the dimension size: ",
before, ", ", after,
" greater than ", in0.dim_size(d)));
} else if (offset_ == 1) {
OP_REQUIRES(
context, before < in0.dim_size(d) && after < in0.dim_size(d),
errors::InvalidArgument("paddings must be less than"
" the dimension size: ",
before, ", ", after, " not less than ",
in0.dim_size(d)));
}
output_shape.AddDim(before + in0.dim_size(d) + after);
}
if (output_shape.num_elements() == in0.NumElements()) {
Tensor out;
CHECK(out.CopyFrom(in0, output_shape));
context->set_output(0, out);
return;
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
#define MIRROR_PAD_CASE(i) \
case i: { \
functor::MirrorPad<Device, T, Tpaddings, i>()( \
context->eigen_device<Device>(), To32Bit(output->tensor<T, i>()), \
To32Bit(in0.tensor<T, i>()), paddings, offset_); \
break; \
}
switch (dims) {
MIRROR_PAD_CASE(1)
MIRROR_PAD_CASE(2)
MIRROR_PAD_CASE(3)
MIRROR_PAD_CASE(4)
MIRROR_PAD_CASE(5)
default:
OP_REQUIRES(context, false,
errors::InvalidArgument("Unsupported rank: ",
in0.shape().DebugString()));
}
#undef MIRROR_PAD_CASE
}
private:
int offset_;
};
using CpuDevice = Eigen::ThreadPoolDevice;
using GpuDevice = Eigen::GpuDevice;
namespace functor {
#define DECLARE_CPU_SPEC(T, Tpaddings, i) \
template <> \
void MirrorPad<CpuDevice, T, Tpaddings, i>::operator()( \
const CpuDevice&, typename TTypes<T, i, int32>::Tensor, \
typename TTypes<T, i, int32>::ConstTensor, \
TTypes<Tpaddings>::ConstMatrix, int); \
extern template struct MirrorPad<CpuDevice, T, Tpaddings, i>;
#define DECLARE_CPU_SPECS(T) \
DECLARE_CPU_SPEC(T, int32, 1); \
DECLARE_CPU_SPEC(T, int32, 2); \
DECLARE_CPU_SPEC(T, int32, 3); \
DECLARE_CPU_SPEC(T, int32, 4); \
DECLARE_CPU_SPEC(T, int32, 5); \
DECLARE_CPU_SPEC(T, int64_t, 1); \
DECLARE_CPU_SPEC(T, int64_t, 2); \
DECLARE_CPU_SPEC(T, int64_t, 3); \
DECLARE_CPU_SPEC(T, int64_t, 4); \
DECLARE_CPU_SPEC(T, int64_t, 5);
TF_CALL_POD_TYPES(DECLARE_CPU_SPECS);
TF_CALL_QUANTIZED_TYPES(DECLARE_CPU_SPECS);
TF_CALL_tstring(DECLARE_CPU_SPECS);
#undef DECLARE_CPU_SPEC
#undef DECLARE_CPU_SPECS
}
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("MirrorPad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tpaddings") \
.HostMemory("paddings"), \
MirrorPadOp<CpuDevice, type, int32>); \
REGISTER_KERNEL_BUILDER(Name("MirrorPad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.HostMemory("paddings"), \
MirrorPadOp<CpuDevice, type, int64>);
TF_CALL_POD_TYPES(REGISTER_KERNEL);
TF_CALL_QUANTIZED_TYPES(REGISTER_KERNEL);
TF_CALL_tstring(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC(T, Tpaddings, i) \
template <> \
void MirrorPad<GpuDevice, T, Tpaddings, i>::operator()( \
const GpuDevice&, typename TTypes<T, i, int32>::Tensor, \
typename TTypes<T, i, int32>::ConstTensor, \
TTypes<Tpaddings>::ConstMatrix, int); \
extern template struct MirrorPad<GpuDevice, T, Tpaddings, i>;
#define DECLARE_GPU_SPECS(T) \
DECLARE_GPU_SPEC(T, int32, 1); \
DECLARE_GPU_SPEC(T, int32, 2); \
DECLARE_GPU_SPEC(T, int32, 3); \
DECLARE_GPU_SPEC(T, int32, 4); \
DECLARE_GPU_SPEC(T, int32, 5); \
DECLARE_GPU_SPEC(T, int64_t, 1); \
DECLARE_GPU_SPEC(T, int64_t, 2); \
DECLARE_GPU_SPEC(T, int64_t, 3); \
DECLARE_GPU_SPEC(T, int64_t, 4); \
DECLARE_GPU_SPEC(T, int64_t, 5);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPEC
}
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("MirrorPad") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.HostMemory("paddings"), \
MirrorPadOp<GpuDevice, T, int32>); \
REGISTER_KERNEL_BUILDER(Name("MirrorPad") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.HostMemory("paddings"), \
MirrorPadOp<GpuDevice, T, int64>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
#endif
template <typename Device, typename T, typename Tpaddings>
class MirrorPadGradOp : public OpKernel {
public:
explicit MirrorPadGradOp(OpKernelConstruction* context) : OpKernel(context) {
MirrorPadMode mode;
OP_REQUIRES_OK(context, context->GetAttr("mode", &mode));
switch (mode) {
case MirrorPadMode::SYMMETRIC: {
offset_ = 0;
break;
}
case MirrorPadMode::REFLECT: {
offset_ = 1;
break;
}
default:
OP_REQUIRES(context, false,
errors::InvalidArgument(
"mode must be either REFLECT or SYMMETRIC."));
}
}
~MirrorPadGradOp() override = default;
void Compute(OpKernelContext* context) override {
const Tensor& in0 = context->input(0);
const Tensor& in1 = context->input(1);
const int dims = in0.dims();
constexpr int kMinDims = 0;
constexpr int kMaxDims = 5;
OP_REQUIRES(context, kMinDims <= dims && dims <= kMaxDims,
errors::Unimplemented("inputs rank not in [", kMinDims, ",",
kMaxDims, "]: ", dims));
OP_REQUIRES(
context,
TensorShapeUtils::IsMatrix(in1.shape()) && in1.dim_size(1) == 2,
errors::InvalidArgument("paddings must be a matrix with 2 columns: ",
in1.shape().DebugString()));
OP_REQUIRES(
context, dims == in1.dim_size(0),
errors::InvalidArgument(
"The first dimension of paddings must be the rank of inputs",
in1.shape().DebugString(), " ", in0.shape().DebugString()));
TensorShape output_shape;
typename TTypes<Tpaddings>::ConstMatrix paddings = in1.matrix<Tpaddings>();
for (int d = 0; d < dims; ++d) {
const int64_t before = paddings(d, 0);
const int64_t after = paddings(d, 1);
OP_REQUIRES(context, before >= 0 && after >= 0,
errors::InvalidArgument(
"Paddings must be non-negative: ", before, ", ", after));
const int64_t in_size = in0.dim_size(d);
const int64_t total_padding = before + after;
OP_REQUIRES(
context, total_padding < in_size && total_padding >= 0,
errors::InvalidArgument(
"Total paddings must be less than the input dimension size: ",
total_padding, " was not less than ", in_size));
const int64_t out_size = in_size - total_padding;
if (offset_ == 0) {
OP_REQUIRES(context, before <= out_size && after <= out_size,
errors::InvalidArgument("paddings must be no greater "
"than the output dimension size: ",
before, ", ", after,
" greater than ", out_size));
} else if (offset_ == 1) {
OP_REQUIRES(context, before < out_size && after < out_size,
errors::InvalidArgument("paddings must be less than"
" the output dimension size: ",
before, ", ", after,
" not less than ", out_size));
}
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(out_size));
}
if (output_shape == in0.shape()) {
context->set_output(0, in0);
return;
}
Tensor scratch;
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
in0.shape(), &scratch));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
#define MIRROR_PAD_GRAD_CASE(k) \
case k: { \
functor::MirrorPadGrad<Device, T, Tpaddings, k>()( \
context->eigen_device<Device>(), To32Bit(output->tensor<T, k>()), \
To32Bit(in0.tensor<T, k>()), paddings, offset_, \
To32Bit(scratch.tensor<T, k>())); \
break; \
}
switch (dims) {
MIRROR_PAD_GRAD_CASE(1);
MIRROR_PAD_GRAD_CASE(2);
MIRROR_PAD_GRAD_CASE(3);
MIRROR_PAD_GRAD_CASE(4);
MIRROR_PAD_GRAD_CASE(5);
default:
OP_REQUIRES(context, false,
errors::InvalidArgument("Unsupported rank: ",
in0.shape().DebugString()));
}
#undef MIRROR_PAD_GRAD_CASE
}
private:
int offset_;
};
namespace functor {
#define DECLARE_CPU_SPEC(T, Tpaddings, k) \
template <> \
void MirrorPadGrad<CpuDevice, T, Tpaddings, k>::operator()( \
const CpuDevice&, typename TTypes<T, k, int32>::Tensor, \
typename TTypes<T, k, int32>::ConstTensor, \
TTypes<Tpaddings>::ConstMatrix, int, \
typename TTypes<T, k, int32>::Tensor); \
extern template struct MirrorPadGrad<CpuDevice, T, Tpaddings, k>;
#define DECLARE_CPU_SPECS(T) \
DECLARE_CPU_SPEC(T, int32, 1); \
DECLARE_CPU_SPEC(T, int32, 2); \
DECLARE_CPU_SPEC(T, int32, 3); \
DECLARE_CPU_SPEC(T, int32, 4); \
DECLARE_CPU_SPEC(T, int32, 5); \
DECLARE_CPU_SPEC(T, int64_t, 1); \
DECLARE_CPU_SPEC(T, int64_t, 2); \
DECLARE_CPU_SPEC(T, int64_t, 3); \
DECLARE_CPU_SPEC(T, int | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class MirrorPadOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(const string& mode) {
TF_EXPECT_OK(NodeDefBuilder("mirror_pad_op", "MirrorPad")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_INT32))
.Attr("mode", mode)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
#define REGISTER_TEST(T) \
TEST_F(MirrorPadOpTest, TestMirrorPadReflect##T) { \
MakeOp<T>("REFLECT"); \
AddInputFromArray<T>(TensorShape({1, 2, 3, 1}), {1, 2, 3, 4, 5, 6}); \
AddInputFromArray<int32>(TensorShape({4, 2}), {0, 0, 1, 1, 2, 2, 0, 0}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 4, 7, 1})); \
test::FillValues<T>(&expected, \
{6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1, \
6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
} \
\
TEST_F(MirrorPadOpTest, TestMirrorPadSymmetric##T) { \
MakeOp<T>("SYMMETRIC"); \
AddInputFromArray<T>(TensorShape({1, 2, 1, 3}), {1, 2, 3, 4, 5, 6}); \
AddInputFromArray<int32>(TensorShape({4, 2}), {1, 1, 0, 0, 0, 0, 2, 2}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({3, 2, 1, 7})); \
test::FillValues<T>( \
&expected, \
{2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5, 2, 1, 1, 2, 3, 3, 2, \
5, 4, 4, 5, 6, 6, 5, 2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(quint8)
REGISTER_TEST(qint8)
REGISTER_TEST(qint32)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
TEST_F(MirrorPadOpTest, TestMirrorPadReflectLargeInput) {
MakeOp<float>("REFLECT");
const int kInput = 1000;
const int kPad = 10;
const int kOutput = kInput + 2 * kPad;
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i;
else if (kPad <= i && i < kInput + kPad)
return i - kPad;
else if (kInput + kPad <= i && i < kOutput)
return 2 * kInput + kPad - 2 - i;
else
return -1;
});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(MirrorPadOpTest, TestMirrorPadSymmetricLargeInput) {
MakeOp<float>("SYMMETRIC");
const int kInput = 1000;
const int kPad = 10;
const int kOutput = kInput + 2 * kPad;
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i - 1;
else if (kPad <= i && i < kInput + kPad)
return i - kPad;
else if (kInput + kPad <= i && i < kOutput)
return 2 * kInput + kPad - 1 - i;
else
return -1;
});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class MirrorPadGradOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(const string& mode) {
TF_EXPECT_OK(NodeDefBuilder("mirror_pad_grad_op", "MirrorPadGrad")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_INT32))
.Attr("mode", mode)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
#define REGISTER_TEST(T) \
TEST_F(MirrorPadGradOpTest, TestMirrorPadGradReflect##T) { \
MakeOp<T>("REFLECT"); \
AddInput<T>(TensorShape({1, 4, 7, 1}), [](int i) -> T { return i % 7; }); \
AddInputFromArray<int32>(TensorShape({4, 2}), {0, 0, 1, 1, 2, 2, 0, 0}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 2, 3, 1})); \
test::FillValues<T>(&expected, {16, 18, 8, 16, 18, 8}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
} \
\
TEST_F(MirrorPadGradOpTest, TestMirrorPadGradSymmetric##T) { \
MakeOp<T>("SYMMETRIC"); \
AddInput<T>(TensorShape({3, 2, 1, 7}), [](int i) -> T { return i % 7; }); \
AddInputFromArray<int32>(TensorShape({4, 2}), {1, 1, 0, 0, 0, 0, 2, 2}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 2, 1, 3})); \
test::FillValues<T>(&expected, {9, 27, 27, 9, 27, 27}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
} |
1,128 | cpp | tensorflow/tensorflow | quantize_and_dequantize_op | tensorflow/core/kernels/quantize_and_dequantize_op.cc | tensorflow/core/kernels/quantize_and_dequantize_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_QUANTIZE_AND_DEQUANTIZE_OP_H_
#define TENSORFLOW_CORE_KERNELS_QUANTIZE_AND_DEQUANTIZE_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/cwise_ops.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
enum QuantizerRoundMode {
ROUND_HALF_UP,
ROUND_HALF_TO_EVEN,
};
namespace functor {
template <typename Device, typename T>
struct QuantizeAndDequantizeOneScaleFunctor {
void operator()(const Device& d, typename TTypes<T>::ConstVec input,
bool signed_input, int num_bits, bool range_given,
Tensor* input_min_tensor, Tensor* input_max_tensor,
QuantizerRoundMode round_mode, bool narrow_range,
typename TTypes<T>::Vec output);
};
template <typename Device, typename T>
struct QuantizeAndDequantizePerChannelFunctor {
void operator()(const Device& d, typename TTypes<T, 3>::ConstTensor input,
bool signed_input, int num_bits, bool range_given,
Tensor* input_min_tensor, Tensor* input_max_tensor,
QuantizerRoundMode round_mode, bool narrow_range,
typename TTypes<T, 3>::Tensor output);
};
template <typename Device, typename T>
struct QuantizeAndDequantizeOneScaleGradientFunctor {
void operator()(const Device& d, typename TTypes<T>::ConstFlat gradient,
typename TTypes<T>::ConstFlat input,
typename TTypes<T>::ConstScalar input_min,
typename TTypes<T>::ConstScalar input_max,
typename TTypes<T>::Flat input_backprop,
typename TTypes<T>::Scalar input_min_backprop,
typename TTypes<T>::Scalar input_max_backprop);
};
template <typename Device, typename T>
struct QuantizeAndDequantizePerChannelGradientFunctor {
void operator()(const Device& d, typename TTypes<T, 3>::ConstTensor gradient,
typename TTypes<T, 3>::ConstTensor input,
const Tensor* input_min_tensor,
const Tensor* input_max_tensor,
typename TTypes<T, 3>::Tensor input_backprop,
typename TTypes<T>::Flat input_min_backprop,
typename TTypes<T>::Flat input_max_backprop);
};
template <typename Device, typename T, typename Func,
typename Vec = typename TTypes<T>::Vec,
typename ConstVec = typename TTypes<T>::ConstVec>
void ClampScaleAndRound(const Device& d, ConstVec input, T min_range,
T max_range, T scale, T inverse_scale, Func round_func,
Vec output) {
output.device(d) = (input.cwiseMin(max_range).cwiseMax(min_range) * scale)
.unaryExpr(round_func) *
inverse_scale;
}
template <typename Device, typename T, typename Vec = typename TTypes<T>::Vec,
typename ConstVec = typename TTypes<T>::ConstVec>
void ClampScaleAndRound(const Device& d, ConstVec input, T min_range,
T max_range, T scale, T inverse_scale,
QuantizerRoundMode round_mode, Vec output) {
switch (round_mode) {
case ROUND_HALF_TO_EVEN:
ClampScaleAndRound(d, input, min_range, max_range, scale, inverse_scale,
Eigen::internal::scalar_round_half_to_even_op<T>(),
output);
break;
case ROUND_HALF_UP:
ClampScaleAndRound(d, input, min_range, max_range, scale, inverse_scale,
Eigen::internal::scalar_round_up_op<T>(), output);
break;
}
}
template <typename Device, typename T, typename Func,
typename Vec = typename TTypes<T>::Vec,
typename ConstVec = typename TTypes<T>::ConstVec>
void ScaleAndRound(const Device& d, ConstVec input, T scale, T inverse_scale,
Func round_func, Vec output) {
output.device(d) = (input * scale).unaryExpr(round_func) * inverse_scale;
}
template <typename Device, typename T, typename Vec = typename TTypes<T>::Vec,
typename ConstVec = typename TTypes<T>::ConstVec>
void ScaleAndRound(const Device& d, ConstVec input, T scale, T inverse_scale,
QuantizerRoundMode round_mode, Vec output) {
switch (round_mode) {
case ROUND_HALF_TO_EVEN:
ScaleAndRound(d, input, scale, inverse_scale,
Eigen::internal::scalar_round_half_to_even_op<T>(), output);
break;
case ROUND_HALF_UP:
ScaleAndRound(d, input, scale, inverse_scale,
Eigen::internal::scalar_round_up_op<T>(), output);
break;
}
}
template <typename T>
void ComputeQuantizationRange(bool signed_input, int num_bits,
QuantizerRoundMode round_mode, bool narrow_range,
T* min_range, T* max_range, T* scale,
T* inverse_scale) {
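  // The quantized range is [min_quantized, max_quantized], e.g. [-128, 127]
  // for signed 8-bit input (or [-127, 127] with narrow_range). The scale is
  // taken from whichever side of the float range is the tighter constraint,
  // and the opposite endpoint is recomputed so that both endpoints map
  // exactly onto quantized values.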
const int64_t min_quantized =
signed_input ? narrow_range ? -(1ULL << (num_bits - 1)) + 1
: -(1ULL << (num_bits - 1))
: 0;
const int64_t max_quantized =
signed_input ? (1ULL << (num_bits - 1)) - 1 : (1ULL << num_bits) - 1;
const T scale_from_min_side = (min_quantized * *min_range > 0)
? min_quantized / *min_range
: std::numeric_limits<T>::max();
const T scale_from_max_side = (max_quantized * *max_range > 0)
? max_quantized / *max_range
: std::numeric_limits<T>::max();
if (scale_from_min_side < scale_from_max_side) {
*scale = scale_from_min_side;
*inverse_scale = *min_range / min_quantized;
*max_range = max_quantized * *inverse_scale;
} else {
*scale = scale_from_max_side;
*inverse_scale = *max_range / max_quantized;
*min_range = min_quantized * *inverse_scale;
}
}
template <typename Device, typename T>
struct QuantizeAndDequantizeOneScaleImpl {
static void Compute(const Device& d, typename TTypes<T>::ConstVec input,
bool signed_input, int num_bits, bool range_given,
Tensor* input_min_tensor, Tensor* input_max_tensor,
QuantizerRoundMode round_mode, bool narrow_range,
typename TTypes<T>::Vec output) {
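    // Quantize-dequantize with a single scale. If no range is given, the
    // min/max are reduced from the input on the device and copied back to
    // the host before the scale is computed.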
T min_range;
T max_range;
auto input_min = input_min_tensor->scalar<T>();
auto input_max = input_max_tensor->scalar<T>();
if (!range_given) {
input_min.device(d) = input.minimum();
input_max.device(d) = input.maximum();
d.memcpyDeviceToHost(&min_range, input_min.data(), sizeof(T));
d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T));
} else {
min_range = input_min_tensor->scalar<T>()();
max_range = input_max_tensor->scalar<T>()();
}
T scale, inverse_scale;
ComputeQuantizationRange(signed_input, num_bits, round_mode, narrow_range,
&min_range, &max_range, &scale, &inverse_scale);
if (range_given) {
ClampScaleAndRound(d, input, min_range, max_range, scale, inverse_scale,
round_mode, output);
} else {
ScaleAndRound(d, input, scale, inverse_scale, round_mode, output);
}
}
};
template <typename Device, typename T>
struct QuantizeAndDequantizePerChannelImpl {
static void Compute(const Device& d, typename TTypes<T, 3>::ConstTensor input,
bool signed_input, int num_bits, bool range_given,
Tensor* input_min_tensor, Tensor* input_max_tensor,
QuantizerRoundMode round_mode, bool narrow_range,
typename TTypes<T, 3>::Tensor output) {
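    // The input has been reshaped to (outer, channel, inner) with the
    // quantization axis along dimension 1; each channel slice gets its own
    // range and scale.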
using Index = typename tensorflow::TTypes<T>::ConstTensor::Index;
int num_channels = input.dimension(1);
auto input_min = input_min_tensor->vec<T>();
auto input_max = input_max_tensor->vec<T>();
std::vector<T> min_range(num_channels);
std::vector<T> max_range(num_channels);
if (!range_given) {
Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<2> > reduce_dims;
input_min.device(d) = input.minimum(reduce_dims);
input_max.device(d) = input.maximum(reduce_dims);
d.memcpyDeviceToHost(min_range.data(), input_min.data(),
num_channels * sizeof(T));
d.memcpyDeviceToHost(max_range.data(), input_max.data(),
num_channels * sizeof(T));
} else {
std::memcpy(min_range.data(), input_min_tensor->vec<T>().data(),
num_channels * sizeof(T));
std::memcpy(max_range.data(), input_max_tensor->vec<T>().data(),
num_channels * sizeof(T));
}
for (Index i = 0; i < num_channels; ++i) {
const auto input_chip = input.template chip<1>(i);
auto output_chip = output.template chip<1>(i);
T scale, inverse_scale;
ComputeQuantizationRange(signed_input, num_bits, round_mode, narrow_range,
&min_range[i], &max_range[i], &scale,
&inverse_scale);
if (range_given) {
ClampScaleAndRound(d, input_chip, min_range[i], max_range[i], scale,
inverse_scale, round_mode, output_chip);
} else {
ScaleAndRound(d, input_chip, scale, inverse_scale, round_mode,
output_chip);
}
}
}
};
template <typename Device, typename T>
struct QuantizeAndDequantizeOneScaleGradientImpl {
static void Compute(const Device& d, typename TTypes<T>::ConstFlat gradient,
typename TTypes<T>::ConstFlat input,
typename TTypes<T>::ConstScalar input_min,
typename TTypes<T>::ConstScalar input_max,
typename TTypes<T>::Flat input_backprop,
typename TTypes<T>::Scalar input_min_backprop,
typename TTypes<T>::Scalar input_max_backprop) {
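    // Straight-through estimator: pass gradients through where the input
    // lies inside [input_min, input_max], zero them elsewhere, and emit zero
    // gradients for the range inputs.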
const T min_val = input_min();
const T max_val = input_max();
const auto in_range =
(input >= min_val && input <= max_val)
.select(input.constant(1.0f), input.constant(0.0f));
input_backprop.device(d) = gradient * in_range;
input_min_backprop.device(d) = input_min_backprop.constant(0.0f);
input_max_backprop.device(d) = input_max_backprop.constant(0.0f);
}
};
template <typename Device, typename T>
struct QuantizeAndDequantizePerChannelGradientImpl {
static void Compute(const Device& d,
typename TTypes<T, 3>::ConstTensor gradient,
typename TTypes<T, 3>::ConstTensor input,
const Tensor* input_min_tensor,
const Tensor* input_max_tensor,
typename TTypes<T, 3>::Tensor input_backprop,
typename TTypes<T>::Flat input_min_backprop,
typename TTypes<T>::Flat input_max_backprop) {
using Index = typename tensorflow::TTypes<T>::ConstTensor::Index;
auto input_min = input_min_tensor->vec<T>();
auto input_max = input_max_tensor->vec<T>();
int num_channels = input.dimension(1);
for (Index i = 0; i < num_channels; ++i) {
const auto gradient_chip = gradient.template chip<1>(i);
const auto input_chip = input.template chip<1>(i);
const T min_val = input_min(i);
const T max_val = input_max(i);
const auto in_range =
(input_chip >= min_val && input_chip <= max_val)
.select(input_chip.constant(1.0f), input_chip.constant(0.0f));
input_backprop.template chip<1>(i).device(d) = gradient_chip * in_range;
}
input_min_backprop.device(d) = input_min_backprop.constant(0.0f);
input_max_backprop.device(d) = input_max_backprop.constant(0.0f);
}
};
}
}
#endif
#include "tensorflow/core/framework/op_requires.h"
#define EIGEN_USE_THREADS
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/quantize_and_dequantize_op.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
using CpuDevice = ::Eigen::ThreadPoolDevice;
using GpuDevice = ::Eigen::GpuDevice;
using ::tensorflow::errors::InvalidArgument;
}
template <typename Device, typename T>
class QuantizeAndDequantizeV2Op : public OpKernel {
public:
explicit QuantizeAndDequantizeV2Op(OpKernelConstruction* ctx)
: OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("signed_input", &signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("num_bits", &num_bits_));
OP_REQUIRES(ctx, num_bits_ > 0 && num_bits_ < (signed_input_ ? 62 : 63),
InvalidArgument("num_bits is out of range: ", num_bits_,
" with signed_input_ ", signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("range_given", &range_given_));
string round_mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string));
OP_REQUIRES(
ctx,
(round_mode_string == "HALF_UP" || round_mode_string == "HALF_TO_EVEN"),
InvalidArgument("Round mode string must be "
"'HALF_UP' or "
"'HALF_TO_EVEN', is '" +
round_mode_string + "'"));
if (round_mode_string == "HALF_UP") {
round_mode_ = ROUND_HALF_UP;
} else if (round_mode_string == "HALF_TO_EVEN") {
round_mode_ = ROUND_HALF_TO_EVEN;
}
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
OP_REQUIRES(ctx, axis_ >= -1,
InvalidArgument("Axis must be at least -1. Found ", axis_));
OP_REQUIRES(ctx, (axis_ == -1 || axis_ < input.shape().dims()),
InvalidArgument("Shape must be at least rank ", axis_ + 1,
" but is rank ", input.shape().dims()));
const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_);
Tensor input_min_tensor;
Tensor input_max_tensor;
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
if (range_given_) {
input_min_tensor = ctx->input(1);
input_max_tensor = ctx->input(2);
OP_REQUIRES(ctx, input_min_tensor.dims() == 0,
InvalidArgument("input_min must be a scalar."));
OP_REQUIRES(ctx, input_max_tensor.dims() == 0,
InvalidArgument("input_max must be a scalar."));
if (axis_ == -1) {
auto min_val = input_min_tensor.scalar<T>()();
auto max_val = input_max_tensor.scalar<T>()();
OP_REQUIRES(ctx, min_val <= max_val,
InvalidArgument("Invalid range: input_min ", min_val,
" > input_max ", max_val));
} else {
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(input_min_tensor.shape()),
InvalidArgument("Shape must be rank 1 for input_min_tensor when the"
" axis is specified"));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(input_max_tensor.shape()),
InvalidArgument("Shape must be rank 1 for input_max_tensor when the"
" axis is specified"));
OP_REQUIRES(
ctx, input_min_tensor.dim_size(0) == depth,
InvalidArgument("input_min_tensor has incorrect size, was ",
input_min_tensor.dim_size(0), " expected ", depth,
" to match dim ", axis_, " of the input ",
input_min_tensor.shape()));
OP_REQUIRES(
ctx, input_max_tensor.dim_size(0) == depth,
InvalidArgument("input_max_tensor has incorrect size, was ",
input_max_tensor.dim_size(0), " expected ", depth,
" to match dim ", axis_, " of the input ",
input_max_tensor.shape()));
}
} else {
auto range_shape = (axis_ == -1) ? TensorShape({}) : TensorShape({depth});
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
range_shape, &input_min_tensor));
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
range_shape, &input_max_tensor));
}
if (axis_ == -1) {
functor::QuantizeAndDequantizeOneScaleFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), input.flat<T>(), signed_input_, num_bits_,
range_given_, &input_min_tensor, &input_max_tensor, round_mode_,
narrow_range_, output->flat<T>());
} else {
functor::QuantizeAndDequantizePerChannelFunctor<Device, T> f;
f(ctx->eigen_device<Device>(),
input.template flat_inner_outer_dims<T, 3>(axis_ - 1), signed_input_,
num_bits_, range_given_, &input_min_tensor, &input_max_tensor,
round_mode_, narrow_range_,
output->template flat_inner_outer_dims<T, 3>(axis_ - 1));
}
}
private:
int num_bits_;
int axis_;
QuantizerRoundMode round_mode_;
bool signed_input_;
bool range_given_;
bool narrow_range_;
};
template <typename Device, typename T>
class QuantizeAndDequantizeV4GradientOp : public OpKernel {
public:
explicit QuantizeAndDequantizeV4GradientOp(OpKernelConstruction* ctx)
: OpKernel::OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& gradient = ctx->input(0);
const Tensor& input = ctx->input(1);
Tensor* input_backprop = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, input.shape(), &input_backprop));
OP_REQUIRES(ctx, axis_ >= -1,
InvalidArgument("Axis must be at least -1. Found ", axis_));
OP_REQUIRES(ctx, (axis_ == -1 || axis_ < input.shape().dims()),
InvalidArgument(
"Axis should be -1 or 0 or a positive value less than ",
input.shape().dims(), "but given axis value was ", axis_));
OP_REQUIRES(ctx, input.IsSameSize(gradient),
InvalidArgument("gradient and input must be the same size"));
const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_);
const Tensor& input_min_tensor = ctx->input(2);
OP_REQUIRES(ctx,
input_min_tensor.dims() == 0 || input_min_tensor.dims() == 1,
InvalidArgument(
"Input min tensor must have dimension 0 or 1. Received ",
input_min_tensor.dims(), "."));
const Tensor& input_max_tensor = ctx->input(3);
OP_REQUIRES(ctx,
input_max_tensor.dims() == 0 || input_max_tensor.dims() == 1,
InvalidArgument(
"Input max tensor must have dimension 0 or 1. Received ",
input_max_tensor.dims(), "."));
if (axis_ != -1) {
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(input_min_tensor.shape()),
InvalidArgument("Shape must be rank 1 for input_min_tensor when the"
" axis is specified"));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(input_max_tensor.shape()),
InvalidArgument("Shape must be rank 1 for input_max_tensor when the"
" axis is specified"));
OP_REQUIRES(ctx, input_min_tensor.dim_size(0) == depth,
InvalidArgument("min has incorrect size, expected ", depth,
" was ", input_min_tensor.dim_size(0)));
OP_REQUIRES(ctx, input_max_tensor.dim_size(0) == depth,
InvalidArgument("max has incorrect size, expected ", depth,
" was ", input_max_tensor.dim_size(0)));
}
TensorShape min_max_shape(input_min_tensor.shape());
Tensor* input_min_backprop;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(1, min_max_shape, &input_min_backprop));
Tensor* input_max_backprop;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(2, min_max_shape, &input_max_backprop));
if (axis_ == -1) {
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(input_min_tensor.shape()),
InvalidArgument("input_min must be a scalar if axis is unspecified"));
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(input_max_tensor.shape()),
InvalidArgument("input_max must be a scalar if axis is unspecified"));
functor::QuantizeAndDequantizeOneScaleGradientFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), gradient.template flat<T>(),
input.template flat<T>(), input_min_tensor.scalar<T>(),
input_max_tensor.scalar<T>(), input_backprop->template flat<T>(),
input_min_backprop->template scalar<T>(),
input_max_backprop->template scalar<T>());
} else {
functor::QuantizeAndDequantizePerChannelGradientFunctor<Device, T> f;
f(ctx->eigen_device<Device>(),
gradient.template flat_inner_outer_dims<T, 3>(axis_ - 1),
input.template flat_inner_outer_dims<T, 3>(axis_ - 1),
&input_min_tensor, &input_max_tensor,
input_backprop->template flat_inner_outer_dims<T, 3>(axis_ - 1),
input_min_backprop->template flat<T>(),
input_max_backprop->template flat<T>());
}
}
private:
int axis_;
};
template <typename Device, typename T>
class QuantizeAndDequantizeV3Op : public OpKernel {
public:
explicit QuantizeAndDequantizeV3Op(OpKernelConstruction* ctx)
: OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("signed_input", &signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("range_given", &range_given_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
OP_REQUIRES(ctx, -input.dims() <= axis_ && axis_ < input.dims(),
InvalidArgument(
"Axis requested is larger than input dimensions. Axis: ",
axis_, " Input Dimensions: ", input.dims()));
const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_);
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
const Tensor num_bits_tensor = ctx->input(3);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(num_bits_tensor.shape()),
InvalidArgument("Invalid shape. The `num_bits` tensor should "
"be a scalar. Got dimensions: ",
num_bits_tensor.dims()));
const int num_bits_val = num_bits_tensor.scalar<int32>()();
OP_REQUIRES(ctx,
num_bits_val > 0 && num_bits_val < (signed_input_ ? 62 : 63),
InvalidArgument("num_bits is out of range: ", num_bits_val,
" with `signed_input_` ", signed_input_));
Tensor input_min_tensor;
Tensor input_max_tensor;
if (range_given_) {
input_min_tensor = ctx->input(1);
input_max_tensor = ctx->input(2);
if (axis_ == -1) {
const auto min_val = input_min_tensor.scalar<T>()();
const auto max_val = input_max_tensor.scalar<T>()();
OP_REQUIRES(ctx, min_val <= max_val,
InvalidArgument("Invalid range: input_min ", min_val,
" > input_max ", max_val));
} else {
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(input_min_tensor.shape()),
InvalidArgument("Shape must be rank 1 for input_min_tensor when the"
" axis is specified"));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(input_max_tensor.shape()),
InvalidArgument("Shape must be rank 1 for input_max_tensor when the"
" axis is specified"));
OP_REQUIRES(
ctx, input_min_tensor.dim_size(0) == depth,
InvalidArgument("input_min_tensor has incorrect size, was ",
input_min_tensor.dim_size(0), " expected ", depth,
" to match dim ", axis_, " of the input ",
input_min_tensor.shape()));
OP_REQUIRES(
ctx, input_max_tensor.dim_size(0) == depth,
InvalidArgument("input_max_tensor has incorrect size, was ",
input_max_tensor.dim_size(0), " expected ", depth,
" to match dim ", axis_, " of the input ",
input_max_tensor.shape()));
}
} else {
auto range_shape = (axis_ == -1) ? TensorShape({}) : TensorShape({depth});
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
range_shape, &input_min_tensor));
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
range_shape, &input_max_tensor));
}
if (axis_ == -1) {
functor::QuantizeAndDequantizeOneScaleFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), input.flat<T>(), signed_input_,
num_bits_val, range_given_, &input_min_tensor, &input_max_tensor,
ROUND_HALF_TO_EVEN, narrow_range_, output->flat<T>());
} else {
functor::QuantizeAndDequantizePerChannelFunctor<Device, T> f;
f(ctx->eigen_device<Device>(),
input.template flat_inner_outer_dims<T, 3>(axis_ - 1), signed_input_,
num_bits_val, range_given_, &input_min_tensor, &input_max_tensor,
ROUND_HALF_TO_EVEN, narrow_range_,
output->template flat_inner_outer_dims<T, 3>(axis_ - 1));
}
}
private:
int axis_;
bool signed_input_;
bool range_given_;
bool narrow_range_;
};
template <typename Device, typename T>
class QuantizeAndDequantizeOp : public OpKernel {
public:
explicit QuantizeAndDequantizeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("signed_input", &signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("num_bits", &num_bits_));
OP_REQUIRES(ctx, num_bits_ > 0 && num_bits_ < (signed_input_ ? 62 : 63),
InvalidArgument("num_bits is out of range: ", num_bits_,
" with signed_input_ ", signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("range_given", &range_given_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("input_min", &input_min_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("input_max", &input_max_));
if (range_given_) {
OP_REQUIRES(ctx, input_min_ <= input_max_,
InvalidArgument("Invalid range: input_min ", input_min_,
" > input_max ", input_max_));
}
}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
Tensor input_min_tensor(DataTypeToEnum<T>::value, TensorShape());
Tensor input_max_tensor(DataTypeToEnum<T>::value, TensorShape());
input_min_tensor.template scalar<T>()() = static_cast<T>(input_min_);
input_max_tensor.template scalar<T>()() = static_cast<T>(input_max_);
functor::QuantizeAndDequantizeOneScaleFunctor<Device, T> functor;
functor(ctx->eigen_device<Device>(), input.flat<T>(), signed_input_,
num_bits_, range_given_, &input_min_tensor, &input_max_tensor,
ROUND_HALF_TO_EVEN, false, output->flat<T>());
}
private:
bool signed_input_;
int num_bits_;
bool range_given_ | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::MatchesRegex;
class QuantizeAndDequantizeTest : public OpsTestBase {};
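// The test parameter is the quantization axis; the suite is instantiated
// further below with axis in {-1, 1, 3}, where -1 means a single scale for the
// whole tensor.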
struct ParameterizedQuantizeAndDequantizeTest
: public OpsTestBase,
public ::testing::WithParamInterface<int> {};
TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1}), {-3.5});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {-3.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1}), {-3.5});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {-3.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
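// Builds a tensor of shape `dims` by drawing entries from `data` with a fixed
// seed and multiplying each entry by (slice_index + 1) along `axis`, so every
// slice ends up with a different dynamic range. With axis == -1 the whole
// tensor is treated as a single slice.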
template <typename T>
std::vector<T> ScalePerSliceAlongAxis(std::vector<int64_t> dims, int axis,
const std::vector<T>& data) {
uint32 seed = 123;
int64_t out_size = 1;
for (int dim : dims) {
out_size *= dim;
}
int minor_size = 1;
for (int i = axis + 1; i < dims.size(); ++i) {
minor_size *= dims[i];
}
std::vector<T> out(out_size);
int num_slices = (axis == -1) ? 1 : dims[axis];
for (int out_idx = 0; out_idx < out_size; ++out_idx) {
int in_idx = rand_r(&seed) % data.size();
int multiplier = ((out_idx / minor_size) % num_slices) + 1;
out[out_idx] = data[in_idx] * multiplier;
}
return out;
}
TEST_P(ParameterizedQuantizeAndDequantizeTest, Convert_4D_tensor_with_int8) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(
&expected,
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 0.5}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
TEST_P(ParameterizedQuantizeAndDequantizeTest,
Convert_4D_tensor_with_int8_round_half_up) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("round_mode", "HALF_UP")
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {5, 7, 11, 13};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(&expected, ScalePerSliceAlongAxis<float>(
dims, axis,
{-1, -0.5, 0, 38.0 / 128, 102.0 / 128,
71.0 / 128, 65.0 / 128}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
TEST_P(ParameterizedQuantizeAndDequantizeTest,
Convert_4D_tensor_with_int8_round_half_up_narrow_range) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("round_mode", "HALF_UP")
.Attr("narrow_range", true)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(
&expected,
ScalePerSliceAlongAxis<float>(dims, axis,
{-1, -63.0 / 127, 0, 38.0 / 127,
102.0 / 127, 70.0 / 127, 64.0 / 127}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected,
{-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_P(ParameterizedQuantizeAndDequantizeTest,
Convert_4D_tensor_with_int8_narrow_range_V3) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Attr("narrow_range", true)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(
&expected,
ScalePerSliceAlongAxis<float>(dims, axis,
{-1, -64.0 / 127, 0, 38.0 / 127,
102.0 / 127, 70.0 / 127, 64.0 / 127}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
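// The expected V4 gradient passes the incoming gradient through wherever the
// corresponding input lies inside its slice's [input_min, input_max] and is
// zero elsewhere.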
TEST_P(ParameterizedQuantizeAndDequantizeTest, GradientV4_op) {
const int axis = GetParam();
TF_ASSERT_OK(NodeDefBuilder("qdq_v4_grad_op", "QuantizeAndDequantizeV4Grad")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
auto gradients = ScalePerSliceAlongAxis<float>(
dims, axis, {1, -2, -3, 4, 5, 6, -7, -8, -9, -10, 11});
AddInputFromArray<float>(TensorShape(dims), gradients);
auto inputs = ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.55, 0.6});
AddInputFromArray<float>(TensorShape(dims), inputs);
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> input_min_values(num_slices), input_max_values(num_slices);
for (int i = 0; i < num_slices; ++i) {
input_max_values[i] = 0.8f + i * 0.4f;
input_min_values[i] = -input_max_values[i];
}
AddInputFromArray<float>(range_shape, input_min_values);
AddInputFromArray<float>(range_shape, input_max_values);
std::vector<float> expected_vals(inputs.size());
int minor_size = 1;
for (int i = axis + 1; i < dims.size(); ++i) {
minor_size *= dims[i];
}
for (int i = 0; i < inputs.size(); ++i) {
int slice_idx = (i / minor_size) % num_slices;
expected_vals[i] = ((inputs[i] >= input_min_values[slice_idx]) &&
(inputs[i] <= input_max_values[slice_idx]))
? gradients[i]
: 0;
}
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(&expected, expected_vals);
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
INSTANTIATE_TEST_SUITE_P(All, ParameterizedQuantizeAndDequantizeTest,
::testing::Values(-1, 1, 3));
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 4)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3125, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {-1, -0.5, 0, 0.25, 0.75, 0.5});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_round_half_up) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 4)
.Attr("range_given", false)
.Attr("round_mode", "HALF_UP")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3125, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {-1, -0.5, 0, 0.375, 0.75, 0.5});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {-1, -0.5, 0, 0.25, 0.75, 0.5});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4}),
{-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
AddInputFromArray<float>(TensorShape({}), {-1.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(
&expected, {-102.0 / 127, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127,
70.0 / 127, -128.0 / 127, 1});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest,
Convert_2D_tensor_with_int8_range_given_round_half_up) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Attr("round_mode", "HALF_UP")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4}),
{-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
AddInputFromArray<float>(TensorShape({}), {-1.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(
&expected, {-102.0 / 127, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127,
70.0 / 127, -128.0 / 127, 1});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4}),
{-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
AddInputFromArray<float>(TensorShape({}), {-1.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(
&expected, {-102.0 / 127, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127,
70.0 / 127, -128.0 / 127, 1});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", false)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 76.0 / 255, 204.0 / 255});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest,
Convert_4D_tensor_with_uint8_range_given_round_half_up) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", false)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Attr("round_mode", "HALF_UP")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 77.0 / 255, 204.0 / 255});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", false)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 76.0 / 255, 204.0 / 255});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", false)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 0, 0});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", false)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 0, 0});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("num_bits", 8)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Invalid range: input_min 1 > input_max 0"))
<< s;
}
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Invalid range: input_min 1 > input_max 0"))
<< s;
}
TEST_F(QuantizeAndDequantizeTest, Invalid_axis_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("range_given", false)
.Attr("axis", static_cast<int32_t>(-2147483648))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
EXPECT_THAT(
RunOpKernel(),
StatusIs(
error::INVALID_ARGUMENT,
MatchesRegex("Axis requested is larger than input dimensions.*")));
}
#define BM_SIMPLE_QUAN_DEQUAN(DEVICE) \
static void BM_SIMPLE_QUAN_DEQUAN_##DEVICE( \
::testing::benchmark::Sta |
1,129 | cpp | tensorflow/tensorflow | sequence_ops | tensorflow/core/kernels/sequence_ops.cc | tensorflow/core/kernels/sequence_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SEQUENCE_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SEQUENCE_OPS_H_
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct RangeFunctor {
void operator()(OpKernelContext* context, int64_t size, T start, T delta,
typename TTypes<T>::Flat output) const;
};
}
}
#endif
#include "tensorflow/core/kernels/sequence_ops.h"
#include <cmath>
#include <type_traits>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
namespace functor {
template <typename T>
struct RangeFunctor<CPUDevice, T> {
void operator()(OpKernelContext* context, int64_t size, T start, T delta,
typename TTypes<T>::Flat output) const {
(void)context;
for (int64_t i = 0; i < size; ++i) {
output(i) = start + static_cast<T>(i) * delta;
}
}
};
}
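// Produces the sequence start, start + delta, start + 2 * delta, ... strictly
// before `limit`; e.g. Range(0, 10, 2) yields [0, 2, 4, 6, 8]. The output
// length is ceil(|limit - start| / |delta|), computed with integer divup for
// integral types and with a range-checked floating-point ceil otherwise.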
template <typename Device, typename T>
class RangeOp : public OpKernel {
public:
explicit RangeOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& start_in = context->input(0);
const Tensor& limit_in = context->input(1);
const Tensor& delta_in = context->input(2);
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(start_in.shape()) ||
(TensorShapeUtils::IsVector(start_in.shape()) &&
start_in.shape().dim_size(0) == 1),
errors::InvalidArgument("start must be a scalar, not shape ",
start_in.shape().DebugString()));
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(limit_in.shape()) ||
(TensorShapeUtils::IsVector(limit_in.shape()) &&
limit_in.shape().dim_size(0) == 1),
errors::InvalidArgument("limit must be a scalar, not shape ",
limit_in.shape().DebugString()));
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(delta_in.shape()) ||
(TensorShapeUtils::IsVector(delta_in.shape()) &&
delta_in.shape().dim_size(0) == 1),
errors::InvalidArgument("delta must be a scalar, not shape ",
delta_in.shape().DebugString()));
const T start = start_in.scalar<T>()();
const T limit = limit_in.scalar<T>()();
const T delta = delta_in.scalar<T>()();
OP_REQUIRES(context, delta != 0,
errors::InvalidArgument("Requires delta != 0: ", delta));
if (delta > 0) {
OP_REQUIRES(
context, start <= limit,
errors::InvalidArgument(
"Requires start <= limit when delta > 0: ", start, "/", limit));
} else {
OP_REQUIRES(
context, start >= limit,
errors::InvalidArgument(
"Requires start >= limit when delta < 0: ", start, "/", limit));
}
int64_t size;
if constexpr (std::is_integral<T>::value) {
size = Eigen::divup(Eigen::numext::abs(limit - start),
Eigen::numext::abs(delta));
} else {
auto size_auto =
Eigen::numext::ceil(Eigen::numext::abs((limit - start) / delta));
OP_REQUIRES(
context, size_auto <= std::numeric_limits<int64_t>::max(),
errors::InvalidArgument("Requires ((limit - start) / delta) <= ",
std::numeric_limits<int64_t>::max()));
size = static_cast<int64_t>(size_auto);
}
TensorShape shape;
OP_REQUIRES_OK(context, shape.AddDimWithStatus(size));
Tensor* out = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out));
if (size == 0) return;
auto flat = out->flat<T>();
functor::RangeFunctor<Device, T>()(context, size, start, delta, flat);
}
};
#define REGISTER_KERNEL(DEV, DEV_TYPE, TYPE) \
REGISTER_KERNEL_BUILDER(Name("Range") \
.Device(DEV) \
.HostMemory("start") \
.HostMemory("limit") \
.HostMemory("delta") \
.TypeConstraint<TYPE>("Tidx"), \
RangeOp<DEV_TYPE, TYPE>);
#define REGISTER_CPU_KERNEL(T) REGISTER_KERNEL(DEVICE_CPU, CPUDevice, T)
#define REGISTER_GPU_KERNEL(T) REGISTER_KERNEL(DEVICE_GPU, GPUDevice, T)
TF_CALL_float(REGISTER_CPU_KERNEL);
TF_CALL_double(REGISTER_CPU_KERNEL);
TF_CALL_int32(REGISTER_CPU_KERNEL);
TF_CALL_int64(REGISTER_CPU_KERNEL);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_CALL_float(REGISTER_GPU_KERNEL);
TF_CALL_double(REGISTER_GPU_KERNEL);
TF_CALL_int64(REGISTER_GPU_KERNEL);
#endif
REGISTER_KERNEL_BUILDER(Name("Range")
.Device(DEVICE_DEFAULT)
.HostMemory("start")
.HostMemory("limit")
.HostMemory("delta")
.HostMemory("output")
.TypeConstraint<int32_t>("Tidx"),
RangeOp<CPUDevice, int32_t>);
#undef REGISTER_KERNEL
#undef REGISTER_CPU_KERNEL
#undef REGISTER_GPU_KERNEL
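// Emits `num` evenly spaced values from `start` to `stop`, inclusive of both
// endpoints, with step (stop - start) / (num - 1); e.g. LinSpace(3, 7, 3)
// yields [3, 5, 7]. The last element is written as exactly `stop` so rounding
// error cannot accumulate into the final endpoint.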
template <typename T, typename Tnum>
class LinSpaceOp : public OpKernel {
public:
explicit LinSpaceOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& start_in = context->input(0);
const Tensor& stop_in = context->input(1);
const Tensor& num_in = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(start_in.shape()),
errors::InvalidArgument("start must be a scalar, not shape ",
start_in.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(stop_in.shape()),
errors::InvalidArgument("stop must be a scalar, not shape ",
stop_in.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_in.shape()),
errors::InvalidArgument("num must be a scalar, not shape ",
num_in.shape().DebugString()));
const T start = start_in.scalar<T>()();
const T stop = stop_in.scalar<T>()();
const Tnum num = num_in.scalar<Tnum>()();
OP_REQUIRES(context, num > 0,
errors::InvalidArgument("Requires num > 0: ", num));
Tensor* out = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({num}), &out));
auto flat = out->flat<T>();
flat(0) = start;
if (num > 1) {
const T step = (stop - start) / (num - 1);
for (Tnum i = 1; i < num - 1; ++i) flat(i) = start + step * i;
flat(num - 1) = stop;
}
}
};
#define REGISTER_KERNEL(DEV, T, Tidx) \
REGISTER_KERNEL_BUILDER(Name("LinSpace") \
.Device(DEV) \
.TypeConstraint<T>("T") \
.TypeConstraint<Tidx>("Tidx") \
.HostMemory("start") \
.HostMemory("stop") \
.HostMemory("num") \
.HostMemory("output"), \
LinSpaceOp<T, Tidx>);
#define REGISTER_KERNEL_ALL_NUMS(dev, T) \
REGISTER_KERNEL(dev, T, int32); \
REGISTER_KERNEL(dev, T, int64_t)
#define REGISTER_CPU_KERNEL(T) REGISTER_KERNEL_ALL_NUMS(DEVICE_CPU, T)
TF_CALL_float(REGISTER_CPU_KERNEL);
TF_CALL_double(REGISTER_CPU_KERNEL);
#define REGISTER_DEFAULT_KERNEL(T) REGISTER_KERNEL_ALL_NUMS(DEVICE_DEFAULT, T)
TF_CALL_float(REGISTER_DEFAULT_KERNEL);
TF_CALL_double(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
#undef REGISTER_CPU_KERNEL
#undef REGISTER_KERNEL_ALL_NUMS
#undef REGISTER_KERNEL
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RangeOpTest : public OpsTestBase {
protected:
void MakeOp(DataType input_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "Range")
.Input(FakeInput(input_type))
.Input(FakeInput(input_type))
.Input(FakeInput(input_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
class LinSpaceOpTest : public OpsTestBase {
protected:
void MakeOp(DataType input_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "LinSpace")
.Input(FakeInput(input_type))
.Input(FakeInput(input_type))
.Input(FakeInput(index_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(RangeOpTest, Simple_D32) {
MakeOp(DT_INT32);
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<int32>(TensorShape({}), {10});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({5}));
test::FillValues<int32>(&expected, {0, 2, 4, 6, 8});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(RangeOpTest, Simple_Float) {
MakeOp(DT_FLOAT);
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {0.3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {0.5, 0.8, 1.1, 1.4, 1.7});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RangeOpTest, Large_Double) {
MakeOp(DT_DOUBLE);
AddInputFromArray<double>(TensorShape({}), {0.0});
AddInputFromArray<double>(TensorShape({}), {10000});
AddInputFromArray<double>(TensorShape({}), {0.5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({20000}));
std::vector<double> result;
for (int32_t i = 0; i < 20000; ++i) result.push_back(i * 0.5);
test::FillValues<double>(&expected, absl::Span<const double>(result));
test::ExpectTensorEqual<double>(expected, *GetOutput(0));
}
TEST_F(LinSpaceOpTest, Simple_D32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<float>(TensorShape({}), {7.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {3.0, 5.0, 7.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(LinSpaceOpTest, Exact_Endpoints) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {42});
TF_ASSERT_OK(RunOpKernel());
Tensor output = *GetOutput(0);
float expected_start = 0.0;
float start = output.flat<float>()(0);
EXPECT_EQ(expected_start, start) << expected_start << " vs. " << start;
float expected_stop = 1.0;
float stop = output.flat<float>()(output.NumElements() - 1);
EXPECT_EQ(expected_stop, stop) << expected_stop << " vs. " << stop;
}
TEST_F(LinSpaceOpTest, Single_D64) {
MakeOp(DT_FLOAT, DT_INT64);
AddInputFromArray<float>(TensorShape({}), {9.0});
AddInputFromArray<float>(TensorShape({}), {100.0});
AddInputFromArray<int64_t>(TensorShape({}), {1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {9.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(LinSpaceOpTest, Simple_Double) {
MakeOp(DT_DOUBLE, DT_INT32);
AddInputFromArray<double>(TensorShape({}), {5.0});
AddInputFromArray<double>(TensorShape({}), {6.0});
AddInputFromArray<int32>(TensorShape({}), {6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({6}));
test::FillValues<double>(&expected, {5.0, 5.2, 5.4, 5.6, 5.8, 6.0});
test::ExpectTensorEqual<double>(expected, *GetOutput(0));
}
}
} |
1,130 | cpp | tensorflow/tensorflow | bincount_op | tensorflow/core/kernels/bincount_op.cc | tensorflow/core/kernels/bincount_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BINCOUNT_OP_H_
#define TENSORFLOW_CORE_KERNELS_BINCOUNT_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename Tidx, typename T, bool binary_count>
struct BincountFunctor {
static Status Compute(OpKernelContext* context,
const typename TTypes<Tidx, 1>::ConstTensor& arr,
const typename TTypes<T, 1>::ConstTensor& weights,
typename TTypes<T, 1>::Tensor& output,
const Tidx num_bins);
};
template <typename Device, typename Tidx, typename T, bool binary_count>
struct BincountReduceFunctor {
static Status Compute(OpKernelContext* context,
const typename TTypes<Tidx, 2>::ConstTensor& in,
const typename TTypes<T, 2>::ConstTensor& weights,
typename TTypes<T, 2>::Tensor& out,
const Tidx num_bins);
};
}
}
#endif
#include <atomic>
#include "tensorflow/core/platform/errors.h"
#define EIGEN_USE_THREADS
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/bincount_op.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/sparse_utils.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
using thread::ThreadPool;
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
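// Binary (presence/absence) bincount on CPU: each worker thread marks the bins
// it sees in its own row of a [num_threads, num_bins] boolean scratch matrix,
// and the rows are then OR-reduced ("any" over dimension 0) into the output.
// Values >= num_bins are silently dropped; negative values are rejected up
// front.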
template <typename Tidx, typename T>
struct BincountFunctor<CPUDevice, Tidx, T, true> {
static Status Compute(OpKernelContext* context,
const typename TTypes<Tidx, 1>::ConstTensor& arr,
const typename TTypes<T, 1>::ConstTensor& weights,
typename TTypes<T, 1>::Tensor& output,
const Tidx num_bins) {
Tensor all_nonneg_t;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes()));
all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) =
(arr >= Tidx(0)).all();
if (!all_nonneg_t.scalar<bool>()()) {
return errors::InvalidArgument("Input arr must be non-negative!");
}
ThreadPool* thread_pool =
context->device()->tensorflow_cpu_worker_threads()->workers;
const int64_t num_threads = thread_pool->NumThreads() + 1;
Tensor partial_bins_t;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_BOOL, TensorShape({num_threads, num_bins}), &partial_bins_t));
auto partial_bins = partial_bins_t.matrix<bool>();
partial_bins.setZero();
thread_pool->ParallelForWithWorkerId(
arr.size(), 8 ,
[&](int64_t start_ind, int64_t limit_ind, int64_t worker_id) {
for (int64_t i = start_ind; i < limit_ind; i++) {
Tidx value = arr(i);
if (value < num_bins) {
partial_bins(worker_id, value) = true;
}
}
});
Eigen::array<int, 1> reduce_dim({0});
output.device(context->eigen_cpu_device()) =
partial_bins.any(reduce_dim).cast<T>();
return OkStatus();
}
};
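// Weighted / counting bincount on CPU. The single-threaded path accumulates
// directly into the output; the multi-threaded path gives every worker its own
// row of partial bins and sums them over dimension 0 at the end, so no atomics
// are needed.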
template <typename Tidx, typename T>
struct BincountFunctor<CPUDevice, Tidx, T, false> {
static Status Compute(OpKernelContext* context,
const typename TTypes<Tidx, 1>::ConstTensor& arr,
const typename TTypes<T, 1>::ConstTensor& weights,
typename TTypes<T, 1>::Tensor& output,
const Tidx num_bins) {
Tensor all_nonneg_t;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes()));
all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) =
(arr >= Tidx(0)).all();
if (!all_nonneg_t.scalar<bool>()()) {
return errors::InvalidArgument("Input arr must be non-negative!");
}
ThreadPool* thread_pool =
context->device()->tensorflow_cpu_worker_threads()->workers;
const int64_t num_threads = thread_pool->NumThreads() + 1;
const Tidx* arr_data = arr.data();
const std::ptrdiff_t arr_size = arr.size();
const T* weight_data = weights.data();
if (weights.size() && weights.size() != arr_size) {
return errors::InvalidArgument(
"Input indices and weights must have the same size.");
}
if (num_threads == 1) {
output.setZero();
T* output_data = output.data();
if (weights.size()) {
for (int64_t i = 0; i < arr_size; i++) {
const Tidx value = arr_data[i];
if (value < num_bins) {
output_data[value] += weight_data[i];
}
}
} else {
for (int64_t i = 0; i < arr_size; i++) {
const Tidx value = arr_data[i];
if (value < num_bins) {
output_data[value] += T(1);
}
}
}
} else {
Tensor partial_bins_t;
TF_RETURN_IF_ERROR(context->allocate_temp(
DataTypeToEnum<T>::value, TensorShape({num_threads, num_bins}),
&partial_bins_t));
auto partial_bins = partial_bins_t.matrix<T>();
partial_bins.setZero();
thread_pool->ParallelForWithWorkerId(
arr_size, 8 ,
[&](int64_t start_ind, int64_t limit_ind, int64_t worker_id) {
if (weights.size()) {
for (int64_t i = start_ind; i < limit_ind; i++) {
Tidx value = arr_data[i];
if (value < num_bins) {
partial_bins(worker_id, value) += weight_data[i];
}
}
} else {
for (int64_t i = start_ind; i < limit_ind; i++) {
Tidx value = arr_data[i];
if (value < num_bins) {
partial_bins(worker_id, value) += T(1);
}
}
}
});
Eigen::array<int, 1> reduce_dim({0});
output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dim);
}
return OkStatus();
}
};
template <typename Tidx, typename T, bool binary_output>
struct BincountReduceFunctor<CPUDevice, Tidx, T, binary_output> {
static Status Compute(OpKernelContext* context,
const typename TTypes<Tidx, 2>::ConstTensor& in,
const typename TTypes<T, 2>::ConstTensor& weights,
typename TTypes<T, 2>::Tensor& out,
const Tidx num_bins) {
std::atomic<int> err_neg_val = 0;
const int num_rows = out.dimension(0);
const int num_cols = in.dimension(1);
ThreadPool* thread_pool =
context->device()->tensorflow_cpu_worker_threads()->workers;
thread_pool->ParallelForWithWorkerId(
num_rows, 8 ,
[&](int64_t start_row, int64_t end_row, int64_t worker_id) {
for (int64_t i = start_row; i < end_row; ++i) {
for (int64_t j = 0; j < num_cols; ++j) {
Tidx value = in(i, j);
if (value < 0) {
err_neg_val = value;
} else if (value < num_bins) {
if (binary_output) {
out(i, value) = T(1);
} else {
if (weights.size()) {
out(i, value) += weights(i, j);
} else {
out(i, value) += T(1);
}
}
}
}
}
});
if (err_neg_val < 0) {
return errors::InvalidArgument(absl::StrCat(
"Input 'in' must be non-negative! Negative input value found: ",
static_cast<int>(err_neg_val)));
}
return OkStatus();
}
};
}
template <typename Device, typename T>
class BincountOp : public OpKernel {
public:
explicit BincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& arr_t = ctx->input(0);
const Tensor& size_tensor = ctx->input(1);
OP_REQUIRES(ctx, size_tensor.dims() == 0,
errors::InvalidArgument("Shape must be rank 0 but is rank ",
size_tensor.dims()));
int32_t size = size_tensor.scalar<int32_t>()();
OP_REQUIRES(
ctx, size >= 0,
errors::InvalidArgument("size (", size, ") must be non-negative"));
const Tensor& weights_t = ctx->input(2);
const auto arr = arr_t.flat<int32_t>();
const auto weights = weights_t.flat<T>();
Tensor* output_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({size}), &output_t));
auto output = output_t->flat<T>();
OP_REQUIRES_OK(ctx,
functor::BincountFunctor<Device, int32_t, T, false>::Compute(
ctx, arr, weights, output, size));
}
};
#define REGISTER_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Bincount").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
BincountOp<CPUDevice, type>)
TF_CALL_NUMBER_TYPES(REGISTER_KERNELS);
#undef REGISTER_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNELS(type) \
REGISTER_KERNEL_BUILDER(Name("Bincount") \
.Device(DEVICE_GPU) \
.HostMemory("size") \
.TypeConstraint<type>("T"), \
BincountOp<GPUDevice, type>)
TF_CALL_int32(REGISTER_KERNELS);
TF_CALL_float(REGISTER_KERNELS);
#undef REGISTER_KERNELS
#endif
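// DenseBincount accepts `data` of rank at most 2. Rank <= 1 input produces a
// single histogram of length `size`; rank-2 input produces one histogram per
// row, shaped [num_rows, size]. With binary_output the kernel writes 1 for any
// bin that occurs instead of accumulating counts or weights.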
template <typename Device, typename Tidx, typename T>
class DenseBincountOp : public OpKernel {
public:
explicit DenseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
ctx, !OpDeterminismRequired(),
errors::Unimplemented(
"Determinism is not yet supported in GPU implementation of "
"DenseBincount."));
}
}
void Compute(OpKernelContext* ctx) override {
const Tensor& data = ctx->input(0);
OP_REQUIRES(ctx, data.dims() <= 2,
errors::InvalidArgument(
"Shape must be at most rank 2 but is rank ", data.dims()));
const Tensor& size_t = ctx->input(1);
const Tensor& weights = ctx->input(2);
OP_REQUIRES(ctx, size_t.dims() == 0,
errors::InvalidArgument("Shape must be rank 0 but is rank ",
size_t.dims()));
OP_REQUIRES(ctx,
weights.shape() == data.shape() || weights.NumElements() == 0,
errors::InvalidArgument(
"`weights` must be the same shape as `arr` or a length-0 "
"`Tensor`, in which case it acts as all weights equal to "
"1. Received ",
weights.shape().DebugString()));
Tidx size = size_t.scalar<Tidx>()();
OP_REQUIRES(
ctx, size >= 0,
errors::InvalidArgument("size (", size, ") must be non-negative"));
Tensor* out_t;
functor::SetZeroFunctor<Device, T> fill;
if (data.dims() <= 1) {
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &out_t));
auto out = out_t->flat<T>();
fill(ctx->eigen_device<Device>(), out);
if (binary_output_) {
OP_REQUIRES_OK(
ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute(
ctx, data.flat<Tidx>(), weights.flat<T>(), out, size));
} else {
OP_REQUIRES_OK(
ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute(
ctx, data.flat<Tidx>(), weights.flat<T>(), out, size));
}
} else if (data.dims() == 2) {
const int64_t num_rows = data.dim_size(0);
auto weight_matrix =
(weights.NumElements() == 0)
? weights.shaped<T, 2>(gtl::InlinedVector<int64_t, 2>(2, 0))
: weights.matrix<T>();
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t));
auto out = out_t->matrix<T>();
fill(ctx->eigen_device<Device>(), out_t->flat<T>());
if (binary_output_) {
OP_REQUIRES_OK(
ctx, functor::BincountReduceFunctor<Device, Tidx, T, true>::Compute(
ctx, data.matrix<Tidx>(), weight_matrix, out, size));
} else {
OP_REQUIRES_OK(
ctx,
functor::BincountReduceFunctor<Device, Tidx, T, false>::Compute(
ctx, data.matrix<Tidx>(), weight_matrix, out, size));
}
}
}
private:
bool binary_output_;
};
#define REGISTER_KERNELS(Tidx, T) \
REGISTER_KERNEL_BUILDER(Name("DenseBincount") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<Tidx>("Tidx"), \
DenseBincountOp<CPUDevice, Tidx, T>);
#define REGISTER_CPU_KERNELS(T) \
REGISTER_KERNELS(int32, T); \
REGISTER_KERNELS(int64_t, T);
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#undef REGISTER_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNELS(Tidx, T) \
REGISTER_KERNEL_BUILDER(Name("DenseBincount") \
.Device(DEVICE_GPU) \
.HostMemory("size") \
.TypeConstraint<T>("T") \
.TypeConstraint<Tidx>("Tidx"), \
DenseBincountOp<GPUDevice, Tidx, T>);
#define REGISTER_GPU_KERNELS(T) \
REGISTER_KERNELS(int32, T); \
REGISTER_KERNELS(int64_t, T);
TF_CALL_int32(REGISTER_GPU_KERNELS);
TF_CALL_float(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS
#undef REGISTER_KERNELS
#endif
template <typename Device, typename Tidx, typename T>
class SparseBincountOp : public OpKernel {
public:
explicit SparseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& indices = ctx->input(0);
const Tensor& values = ctx->input(1);
const auto values_flat = values.flat<Tidx>();
const Tensor& dense_shape = ctx->input(2);
const Tensor& size_t = ctx->input(3);
const auto weights = ctx->input(4).flat<T>();
const int64_t weights_size = weights.size();
OP_REQUIRES(ctx, size_t.dims() == 0,
errors::InvalidArgument("Shape must be rank 0 but is rank ",
size_t.dims()));
Tidx size = size_t.scalar<Tidx>()();
OP_REQUIRES(
ctx, size >= 0,
errors::InvalidArgument("size (", size, ") must be non-negative"));
OP_REQUIRES_OK(ctx, sparse_utils::ValidateSparseTensor<int64_t>(
indices, values, dense_shape,
sparse_utils::IndexValidation::kUnordered));
bool is_1d = dense_shape.NumElements() == 1;
Tensor* out_t;
functor::SetZeroFunctor<Device, T> fill;
if (is_1d) {
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &out_t));
auto out = out_t->flat<T>();
fill(ctx->eigen_device<Device>(), out);
if (binary_output_) {
OP_REQUIRES_OK(ctx,
functor::BincountFunctor<Device, Tidx, T, true>::Compute(
ctx, values_flat, weights, out, size));
} else {
OP_REQUIRES_OK(
ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute(
ctx, values_flat, weights, out, size));
}
} else {
const auto shape = dense_shape.flat<int64_t>();
const int64_t num_rows = shape(0);
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t));
const auto out = out_t->matrix<T>();
fill(ctx->eigen_device<Device>(), out_t->flat<T>());
const auto indices_mat = indices.matrix<int64_t>();
for (int64_t i = 0; i < indices_mat.dimension(0); ++i) {
const int64_t batch = indices_mat(i, 0);
const Tidx bin = values_flat(i);
OP_REQUIRES(
ctx, batch < out.dimension(0),
errors::InvalidArgument("Index out of bound. `batch` (", batch,
") must be less than the dimension size (",
out.dimension(0), ")."));
if (bin < size) {
if (binary_output_) {
out(batch, bin) = T(1);
} else {
if (weights_size) {
out(batch, bin) += weights(i);
} else {
out(batch, bin) += T(1);
}
}
}
}
}
}
private:
bool binary_output_;
};
#define REGISTER_KERNELS(Tidx, T) \
REGISTER_KERNEL_BUILDER(Name("SparseBincount") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<Tidx>("Tidx"), \
SparseBincountOp<CPUDevice, Tidx, T>);
#define REGISTER_CPU_KERNELS(T) \
REGISTER_KERNELS(int32, T); \
REGISTER_KERNELS(int64_t, T);
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#undef REGISTER_KERNELS
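// RaggedBincount: `splits` partitions `values` into rows of a ragged tensor,
// and each row gets its own histogram. For example, splits = [0, 2, 5] assigns
// values[0:2] to row 0 and values[2:5] to row 1; the splits must start at 0
// and end at the total number of values.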
template <typename Device, typename Tidx, typename T>
class RaggedBincountOp : public OpKernel {
public:
explicit RaggedBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_));
}
void Compute(OpKernelContext* ctx) override {
const auto splits = ctx->input(0).flat<int64_t>();
const auto values = ctx->input(1).flat<Tidx>();
const Tensor& size_t = ctx->input(2);
const auto weights = ctx->input(3).flat<T>();
const int64_t weights_size = weights.size();
OP_REQUIRES(ctx, size_t.dims() == 0,
errors::InvalidArgument("Shape must be rank 0 but is rank ",
size_t.dims()));
Tidx size = size_t.scalar<Tidx>()();
OP_REQUIRES(
ctx, size >= 0,
errors::InvalidArgument("size (", size, ") must be non-negative"));
int num_rows = splits.size() - 1;
int num_values = values.size();
int batch_idx = 0;
OP_REQUIRES(ctx, splits.size() > 0,
errors::InvalidArgument("Splits must be non-empty"));
OP_REQUIRES(ctx, splits(0) == 0,
errors::InvalidArgument("Splits must start with 0, not with ",
splits(0)));
OP_REQUIRES(ctx, splits(num_rows) == num_values,
errors::InvalidArgument(
"Splits must end with the number of values, got ",
splits(num_rows), " instead of ", num_values));
Tensor* out_t;
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t));
functor::SetZeroFunctor<Device, T> fill;
fill(ctx->eigen_device<Device>(), out_t->flat<T>());
const auto out = out_t->matrix<T>();
for (int idx = 0; idx < num_values; ++idx) {
while (idx >= splits(batch_idx)) {
batch_idx++;
}
Tidx bin = values(idx);
OP_REQUIRES(ctx, bin >= 0,
errors::InvalidArgument("Input must be non-negative"));
if (bin < size) {
if (binary_output_) {
out(batch_idx - 1, bin) = T(1);
} else {
T value = (weights_size > 0) ? weights(idx) : T(1);
out(batch_idx - 1, bin) += value;
}
}
}
}
private:
bool binary_output_;
};
#define REGISTER_KERNELS(Tidx, T) \
REGISTER_KERNEL_BUILDER(Name("RaggedBincount") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<Tidx>("Tidx"), \
RaggedBincountOp<CPUDevice, Tidx, T>);
#define REGISTER_CPU_KERNELS(T) \
REGISTER_KERNELS(int32, T); \
REGISTER_KERNELS(int64_t, T);
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#undef REGISTER_KERNELS
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* Bincount(int arr_size, int nbins) {
Graph* g = new Graph(OpRegistry::Global());
Tensor arr(DT_INT32, TensorShape({arr_size}));
arr.flat<int32>() = arr.flat<int32>().setRandom().abs();
Tensor size(DT_INT32, TensorShape({static_cast<int32>(1)}));
size.flat<int32>()(0) = static_cast<int32>(nbins);
Tensor weights(DT_INT32, TensorShape({0}));
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Bincount")
.Input(test::graph::Constant(g, arr))
.Input(test::graph::Constant(g, size))
.Input(test::graph::Constant(g, weights))
.Attr("T", DT_INT32)
.Finalize(g, &node));
return g;
}
#define BM_BincountDev(K, NBINS, type) \
static void BM_Bincount##_##type##_##K##_##NBINS( \
::testing::benchmark::State& state) { \
test::Benchmark(#type, Bincount(K * 1024, NBINS), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * K * \
1024); \
} \
BENCHMARK(BM_Bincount##_##type##_##K##_##NBINS);
BM_BincountDev(32, 1000, cpu);
BM_BincountDev(32, 2000, cpu);
BM_BincountDev(32, 5000, cpu);
BM_BincountDev(64, 1000, cpu);
BM_BincountDev(64, 2000, cpu);
BM_BincountDev(64, 5000, cpu);
BM_BincountDev(128, 1000, cpu);
BM_BincountDev(128, 2000, cpu);
BM_BincountDev(128, 5000, cpu);
BM_BincountDev(32, 1000, gpu);
BM_BincountDev(32, 2000, gpu);
BM_BincountDev(32, 5000, gpu);
BM_BincountDev(64, 1000, gpu);
BM_BincountDev(64, 2000, gpu);
BM_BincountDev(64, 5000, gpu);
BM_BincountDev(128, 1000, gpu);
BM_BincountDev(128, 2000, gpu);
BM_BincountDev(128, 5000, gpu);
} |
1,131 | cpp | tensorflow/tensorflow | segment_reduction_ops | tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc | tensorflow/core/kernels/segment_reduction_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
class OpKernelContext;
bool UseDeterministicSegmentReductions();
bool DisableSegmentReductionOpDeterminismExceptions();
enum class SparseSegmentReductionOperation { kSum, kMean, kSqrtN };
namespace functor {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
struct Sum {
template <typename T>
__host__ __device__ T operator()(const T& a, const T& b) const {
return a + b;
}
};
struct Prod {
template <typename T>
__host__ __device__ T operator()(const T& a, const T& b) const {
return a * b;
}
};
struct Min {
template <typename T>
__host__ __device__ T operator()(const T& a, const T& b) const {
return min(a, b);
}
};
struct Max {
template <typename T>
__host__ __device__ T operator()(const T& a, const T& b) const {
return max(a, b);
}
};
template <typename ReduceOp, typename T>
struct ReduceOpIsAssociative {};
template <typename T>
struct ReduceOpIsAssociative<functor::Sum, T> : std::is_integral<T> {};
template <typename T>
struct ReduceOpIsAssociative<functor::Prod, T> : std::is_integral<T> {};
template <typename T>
struct ReduceOpIsAssociative<functor::Max, T> : std::true_type {};
template <typename T>
struct ReduceOpIsAssociative<functor::Min, T> : std::true_type {};
typedef Eigen::GpuDevice GPUDevice;
template <typename T, typename Index, typename InitialValueF,
typename EmptySegmentValueF, typename ReductionF>
struct SegmentReductionFunctor {
void operator()(OpKernelContext* ctx, const GPUDevice& d,
const Index output_rows, const TensorShape& segment_ids_shape,
bool is_mean, typename TTypes<Index>::ConstFlat segment_ids,
const Index data_size, const T* data,
typename TTypes<T, 2>::Tensor output);
static constexpr bool atomic_reduction_is_associative =
ReduceOpIsAssociative<ReductionF, T>::value;
};
#endif
template <typename Device, typename T, typename Index, typename InitialValueF,
typename ReductionF>
struct UnsortedSegmentFunctor {
void operator()(OpKernelContext* ctx, const TensorShape& segment_ids_shape,
typename TTypes<Index>::ConstFlat segment_ids,
typename TTypes<T, 2>::ConstTensor data,
typename TTypes<T, 2>::Tensor output);
};
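// Identity elements used to initialize the output before reducing: Zero for
// sum, One for prod, Lowest for max, and Highest for min.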
template <typename T>
struct Zero {
EIGEN_STRONG_INLINE T operator()() const { return T(0); }
};
template <typename T>
struct One {
EIGEN_STRONG_INLINE T operator()() const { return T(1); }
};
template <typename T>
struct Lowest {
EIGEN_STRONG_INLINE T operator()() const {
return Eigen::NumTraits<T>::lowest();
}
};
template <typename T>
struct Highest {
EIGEN_STRONG_INLINE T operator()() const {
return Eigen::NumTraits<T>::highest();
}
};
template <typename T, typename Index, typename SegmentId>
struct SparseSegmentReductionFunctor {
Status operator()(OpKernelContext* context, bool is_mean, bool is_sqrtn,
T default_value, typename TTypes<T, 2>::ConstTensor input,
typename TTypes<Index>::ConstVec indices,
typename TTypes<SegmentId>::ConstVec segment_ids,
typename TTypes<T, 2>::Tensor output);
};
template <class Device, typename T, typename Index, typename SegmentId>
struct SparseSegmentGradFunctor {
void operator()(OpKernelContext* context,
SparseSegmentReductionOperation operation,
typename TTypes<T>::ConstMatrix input_flat,
typename TTypes<Index>::ConstVec indices_vec,
typename TTypes<SegmentId>::ConstVec segment_vec,
Tensor* output);
};
template <class Device, typename T, typename Index, typename SegmentId>
struct SparseSegmentGradV2Functor {
void operator()(OpKernelContext* context,
SparseSegmentReductionOperation operation,
typename TTypes<T>::ConstMatrix input_flat,
typename TTypes<Index>::ConstVec indices_vec,
typename TTypes<SegmentId>::ConstVec segment_vec,
const TensorShape& dense_output_shape,
typename AsyncOpKernel::DoneCallback done);
};
}
}
#endif
#include <vector>
#include "tensorflow/compiler/tf2xla/lib/scatter.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/lib/constants.h"
#include "xla/client/value_inference.h"
#include "xla/client/xla_builder.h"
namespace tensorflow {
namespace {
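// XLA lowering of segment reductions: a buffer whose leading dimension is
// num_segments is broadcast-filled with the reduction's initial value, and the
// data is then scattered into it with the subclass-provided Combine. As an
// illustration, an unsorted segment sum of data [1, 2, 3, 4] with segment ids
// [0, 0, 1, 1] and num_segments = 2 produces [3, 7].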
class SegmentReduce : public XlaOpKernel {
public:
explicit SegmentReduce(OpKernelConstruction* ctx, bool indices_are_sorted)
: XlaOpKernel(ctx), indices_are_sorted_(indices_are_sorted) {
DataType dtype;
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype));
OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(dtype, &type_));
}
virtual xla::XlaOp InitialValue(xla::XlaBuilder* builder) = 0;
virtual xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) = 0;
void Compile(XlaOpKernelContext* ctx) override {
auto data = ctx->Input(0);
TensorShape data_shape = ctx->InputShape(0);
auto indices = ctx->Input(1);
TensorShape indices_shape = ctx->InputShape(1);
int64_t num_segments;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsIntScalar(
2, &num_segments, xla::ValueInferenceMode::kUpperBound));
OP_REQUIRES(ctx, data_shape.dims() >= indices_shape.dims(),
errors::InvalidArgument(type_string(),
" requires that indices' rank be"
" less than or equal to data's rank."));
for (int d = 0; d < indices_shape.dims(); ++d) {
OP_REQUIRES(
ctx, (data_shape.dim_size(d) == indices_shape.dim_size(d)),
errors::InvalidArgument(type_string(),
" requires indices shape to be prefix"
" of data_shape, but dimension ",
d, " differs ", data_shape.dim_size(d),
" vs. ", indices_shape.dim_size(d)));
}
xla::XlaBuilder* builder = ctx->builder();
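    // The output buffer has shape [num_segments] + data_shape[indices_rank:]
    // and is initialized with the reduction's identity.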
TensorShape buffer_shape = data_shape;
buffer_shape.RemoveDimRange(0, indices_shape.dims());
buffer_shape.InsertDim(0, num_segments);
auto buffer =
xla::Broadcast(InitialValue(builder), buffer_shape.dim_sizes());
std::vector<xla::XlaOp> buffer_dims;
std::vector<bool> buffer_dims_are_dynamic;
bool num_segments_is_dynamic;
OP_REQUIRES_OK(
ctx, ctx->ResolveInputDynamismIntoPred(2, &num_segments_is_dynamic));
buffer_dims.insert(buffer_dims.begin(), ctx->Input(2));
buffer_dims_are_dynamic.insert(buffer_dims_are_dynamic.begin(),
num_segments_is_dynamic);
for (int64_t i = indices_shape.dims(); i < data_shape.dims(); ++i) {
buffer_dims.push_back(xla::GetDimensionSize(data, i));
buffer_dims_are_dynamic.push_back(
ctx->InputXlaShape(0)->is_dynamic_dimension(i));
}
for (int64_t i = 0; i < buffer_dims.size(); ++i) {
if (buffer_dims_are_dynamic[i]) {
buffer = xla::SetDimensionSize(buffer, buffer_dims[i], i);
}
}
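    // Scatter the data into the buffer, combining rows that land in the same
    // segment with the subclass-provided reduction.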
auto combiner = [this](xla::XlaOp a, xla::XlaOp b,
xla::XlaBuilder* builder) { return Combine(a, b); };
auto result = XlaScatter(buffer, data, indices,
false, indices_are_sorted_,
combiner, builder);
OP_REQUIRES_OK(ctx, result.status());
ctx->SetOutput(0, result.value());
}
protected:
xla::PrimitiveType type_;
bool indices_are_sorted_;
};
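// Concrete reductions; the indices_are_sorted template argument distinguishes
// the sorted SegmentXxxV2 ops from the UnsortedSegmentXxx ops.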
template <bool indices_are_sorted>
class SegmentSum : public SegmentReduce {
public:
explicit SegmentSum(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override { return a + b; };
};
REGISTER_XLA_OP(Name("SegmentSumV2").CompileTimeConstantInput("num_segments"),
SegmentSum<true>);
REGISTER_XLA_OP(
Name("UnsortedSegmentSum").CompileTimeConstantInput("num_segments"),
SegmentSum<false>);
template <bool indices_are_sorted>
class SegmentProd : public SegmentReduce {
public:
explicit SegmentProd(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::One(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override { return a * b; };
};
REGISTER_XLA_OP(
Name("UnsortedSegmentProd").CompileTimeConstantInput("num_segments"),
SegmentProd<false>);
REGISTER_XLA_OP(Name("SegmentProdV2").CompileTimeConstantInput("num_segments"),
SegmentProd<true>);
template <bool indices_are_sorted>
class SegmentMin : public SegmentReduce {
public:
explicit SegmentMin(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MaxFiniteValue(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override {
return xla::Min(a, b);
};
};
REGISTER_XLA_OP(
Name("UnsortedSegmentMin").CompileTimeConstantInput("num_segments"),
SegmentMin<false>);
REGISTER_XLA_OP(Name("SegmentMinV2").CompileTimeConstantInput("num_segments"),
SegmentMin<true>);
template <bool indices_are_sorted>
class SegmentMax : public SegmentReduce {
public:
explicit SegmentMax(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MinFiniteValue(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override {
return xla::Max(a, b);
};
};
REGISTER_XLA_OP(
Name("UnsortedSegmentMax").CompileTimeConstantInput("num_segments"),
SegmentMax<false>);
REGISTER_XLA_OP(Name("SegmentMaxV2").CompileTimeConstantInput("num_segments"),
SegmentMax<true>);
}
} | #include <functional>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
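// Benchmarks a CPU unsorted segment reduction over a [num_rows, num_cols]
// float input whose segment ids cycle through [0, segment_size).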
static void BM_UnsortedSegmentReduction(::testing::benchmark::State& state,
const string& reduction, int num_rows,
int num_cols, int segment_size) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
absl::InlinedVector<TensorValue, 4> reduction_inputs;
TensorShape shape1({num_rows, num_cols});
Tensor input(DT_FLOAT, shape1);
reduction_inputs.push_back({nullptr, &input});
TensorShape shape2({num_rows});
Tensor indices(DT_INT32, shape2);
test::FillFn<int>(&indices,
[&segment_size](int i) -> int { return i % segment_size; });
reduction_inputs.push_back({nullptr, &indices});
Tensor num_segments(DT_INT32, TensorShape({}));
num_segments.scalar<int>()() = segment_size;
reduction_inputs.push_back({nullptr, &num_segments});
NodeDef reduction_node_def;
TF_CHECK_OK(NodeDefBuilder(reduction, reduction)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Finalize(&reduction_node_def));
Status status;
std::unique_ptr<OpKernel> reduction_op(
CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(),
reduction_node_def, TF_GRAPH_DEF_VERSION, &status));
OpKernelContext::Params params;
params.device = device.get();
params.frame_iter = FrameAndIter(0, 0);
params.inputs = reduction_inputs;
params.op_kernel = reduction_op.get();
std::vector<AllocatorAttributes> attrs;
test::SetOutputAttrs(¶ms, &attrs);
std::unique_ptr<OpKernelContext> reduction_context(
new OpKernelContext(¶ms));
reduction_op->Compute(reduction_context.get());
TF_CHECK_OK(reduction_context->status());
for (auto s : state) {
delete reduction_context->release_output(0).tensor;
reduction_op->Compute(reduction_context.get());
}
int64_t bytes_per_iter =
static_cast<int64_t>(num_rows * num_cols * sizeof(float));
state.SetBytesProcessed(bytes_per_iter * state.iterations());
}
#define BM_UnsortedReduce(O, R, C, S) \
static void BM_##O##_##R##_##C##_##S(::testing::benchmark::State& state) { \
BM_UnsortedSegmentReduction(state, #O, R, C, S); \
} \
BENCHMARK(BM_##O##_##R##_##C##_##S);
#define BM_UnsortedReduce_Arg(R, C, S) \
BM_UnsortedReduce(UnsortedSegmentSum, R, C, S);
BM_UnsortedReduce_Arg(4096, 1024, 1);
BM_UnsortedReduce_Arg(4096, 1024, 128);
template <typename Index>
static void BM_SegmentReduction(::testing::benchmark::State& state,
const string& reduction, Index num_rows,
Index num_cols, Index segment_size) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
absl::InlinedVector<TensorValue, 4> reduction_inputs;
TensorShape shape1({num_rows, num_cols});
Tensor input1(DT_FLOAT, shape1);
reduction_inputs.push_back({nullptr, &input1});
TensorShape shape2({num_rows});
Tensor input2(DataTypeToEnum<Index>::v(), shape2);
test::FillFn<Index>(&input2, [&num_rows, &segment_size](Index i) -> Index {
return std::min(i / segment_size, num_rows - 1);
});
reduction_inputs.push_back({nullptr, &input2});
NodeDef reduction_node_def;
TF_CHECK_OK(NodeDefBuilder(reduction, reduction)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DataTypeToEnum<Index>::v()))
.Finalize(&reduction_node_def));
Status status;
std::unique_ptr<OpKernel> reduction_op(
CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(),
reduction_node_def, TF_GRAPH_DEF_VERSION, &status));
OpKernelContext::Params params;
params.device = device.get();
params.frame_iter = FrameAndIter(0, 0);
params.inputs = reduction_inputs;
params.op_kernel = reduction_op.get();
std::vector<AllocatorAttributes> attrs;
test::SetOutputAttrs(¶ms, &attrs);
std::unique_ptr<OpKernelContext> reduction_context(
new OpKernelContext(¶ms));
reduction_op->Compute(reduction_context.get());
TF_CHECK_OK(reduction_context->status());
for (auto s : state) {
delete reduction_context->release_output(0).tensor;
reduction_op->Compute(reduction_context.get());
}
int64_t bytes_per_iter =
static_cast<int64_t>(num_rows * num_cols * sizeof(float));
state.SetBytesProcessed(bytes_per_iter * state.iterations());
}
#define BM_Reduce(O, R, C, S) \
static void BM_Reduce_##O##_##R##_##C##_##S##_int32( \
::testing::benchmark::State & state) { \
BM_SegmentReduction<int32>(state, #O, R, C, S); \
} \
static void BM_Reduce_##O##_##R##_##C##_##S##_int64( \
::testing::benchmark::State & state) { \
BM_SegmentReduction<int64_t>(state, #O, R, C, S); \
} \
BENCHMARK(BM_Reduce_##O##_##R##_##C##_##S##_int32); \
BENCHMARK(BM_Reduce_##O##_##R##_##C##_##S##_int64);
#define BM_Reduce_Arg(R, C, S) \
BM_Reduce(SegmentSum, R, C, S); \
BM_Reduce(SegmentMean, R, C, S);
BM_Reduce_Arg(64, 32, 1);
BM_Reduce_Arg(4096, 128, 1);
BM_Reduce_Arg(16, 8, 2);
BM_Reduce_Arg(64, 32, 2);
BM_Reduce_Arg(4096, 32, 2);
BM_Reduce_Arg(4096, 128, 2);
template <DataType T>
static void SparseSegmentMeanGradHelper(::testing::benchmark::State& state,
float uniqueness, int size) {
typedef typename EnumToDataType<T>::Type DT;
Graph* g = new Graph(OpRegistry::Global());
CHECK_LE(uniqueness, 1.0);
CHECK_GT(uniqueness, 0.0);
const int kNumIndices = size;
Tensor indices(DT_INT32, TensorShape({kNumIndices}));
auto indices_flat = indices.flat<int32>();
Tensor segments(DT_INT32, TensorShape({kNumIndices}));
auto segments_flat = segments.flat<int32>();
int kUniqueIndices = uniqueness * kNumIndices;
Tensor output_dim0(DT_INT32, TensorShape({}));
output_dim0.scalar<int32>()() = kUniqueIndices;
for (int i = 0; i < kNumIndices; ++i) {
indices_flat(i) = (i * 31) % kUniqueIndices;
segments_flat(i) = i * .8;
}
const int kDim1 = segments_flat(kNumIndices - 1) + 1;
const int kDim2 = 128;
Tensor input(T, TensorShape({kDim1, kDim2}));
input.flat<DT>().setRandom();
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "SparseSegmentMeanGrad")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, indices))
.Input(test::graph::Constant(g, segments))
.Input(test::graph::Constant(g, output_dim0))
.Attr("T", T)
.Finalize(g, &node));
test::Benchmark("cpu", g, false).Run(state);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(kDim1 * kDim2) * sizeof(float));
}
static void BM_SparseSegmentMeanGrad_Low_FP32(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_FLOAT>(state, 1.0, size);
}
static void BM_SparseSegmentMeanGrad_High_FP32(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_FLOAT>(state, 0.01, size);
}
static void BM_SparseSegmentMeanGrad_Low_BF16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_BFLOAT16>(state, 1.0, size);
}
static void BM_SparseSegmentMeanGrad_High_BF16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_BFLOAT16>(state, 0.01, size);
}
static void BM_SparseSegmentMeanGrad_Low_FP16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_HALF>(state, 1.0, size);
}
static void BM_SparseSegmentMeanGrad_High_FP16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_HALF>(state, 0.01, size);
}
BENCHMARK(BM_SparseSegmentMeanGrad_Low_FP32)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_High_FP32)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_Low_BF16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_High_BF16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_Low_FP16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_High_FP16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
} |
1,132 | cpp | tensorflow/tensorflow | sendrecv_ops | tensorflow/core/kernels/sendrecv_ops.cc | tensorflow/core/kernels/sendrecv_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SENDRECV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SENDRECV_OPS_H_
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
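// Kernels backing the Send/Recv (and _HostSend/_HostRecv) ops: SendOp hands a
// tensor to the rendezvous under a key built from the node's attributes, and
// RecvOp asynchronously waits for the matching tensor.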
class SendOp : public OpKernel {
public:
explicit SendOp(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
string TraceString(const OpKernelContext& ctx, bool verbose) const override;
private:
string key_prefix_;
Rendezvous::ParsedKey parsed_key_;
bool hostmem_sendrecv_;
SendOp(const SendOp&) = delete;
void operator=(const SendOp&) = delete;
};
class RecvOp : public AsyncOpKernel {
public:
explicit RecvOp(OpKernelConstruction* ctx);
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
string TraceString(const OpKernelContext& ctx, bool verbose) const override;
private:
string key_prefix_;
Rendezvous::ParsedKey parsed_key_;
bool hostmem_sendrecv_;
RecvOp(const RecvOp&) = delete;
void operator=(const RecvOp&) = delete;
};
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
REGISTER_OP("_Send")
.Input("tensor: T")
.Attr("T: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Sends the named tensor from send_device to recv_device.
tensor: The tensor to send.
tensor_name: The name of the tensor to send.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
REGISTER_OP("Send")
.Input("tensor: T")
.Attr("T: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("_Recv")
.Output("tensor: tensor_type")
.Attr("tensor_type: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetIsDistributedCommunication()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Receives the named tensor from send_device on recv_device.
tensor: The tensor to receive.
tensor_name: The name of the tensor to receive.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
REGISTER_OP("Recv")
.Output("tensor: tensor_type")
.Attr("tensor_type: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetIsDistributedCommunication()
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("_HostSend")
.Input("tensor: T")
.Attr("T: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Sends the named tensor from send_device to recv_device.
_HostSend requires its input on host memory whereas _Send requires its
input on device memory.
tensor: The tensor to send.
tensor_name: The name of the tensor to send.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
REGISTER_OP("_HostRecv")
.Output("tensor: tensor_type")
.Attr("tensor_type: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetIsDistributedCommunication()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Receives the named tensor from send_device on recv_device.
_HostRecv produces its output on host memory whereas _Recv produces its
output on device memory.
tensor: The tensor to receive.
tensor_name: The name of the tensor to receive.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
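// Rendezvous stub whose Send/Recv complete immediately, so the benchmarks
// measure only the overhead of the Send/Recv kernels themselves.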
class DummyRendezvous : public Rendezvous {
Status Send(const ParsedKey& key, const Args& args, const Tensor& val,
const bool is_dead) override {
return absl::OkStatus();
}
void RecvAsync(const ParsedKey& key, const Args& args,
DoneCallback done) override {
static Tensor* t = new Tensor(DT_FLOAT, TensorShape({0}));
done(absl::OkStatus(), args, args, *t, false);
}
void StartAbort(const Status& status) override {}
};
static Graph* Send() {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(DT_FLOAT, TensorShape({0}));
test::graph::Send(g, test::graph::Constant(g, in0), "T", "/cpu:0", 1,
"/cpu:0");
test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0");
return g;
}
static Graph* Recv() {
Graph* g = new Graph(OpRegistry::Global());
test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0");
return g;
}
void BM_Send(::testing::benchmark::State& state) {
test::Benchmark("cpu", Send(), nullptr, nullptr, new DummyRendezvous, "",
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Send)->UseRealTime();
void BM_Recv(::testing::benchmark::State& state) {
test::Benchmark("cpu", Recv(), nullptr, nullptr, new DummyRendezvous, "",
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Recv)->UseRealTime();
}
} |
1,133 | cpp | tensorflow/tensorflow | const_op | tensorflow/cc/ops/const_op.cc | tensorflow/cc/ops/const_op_test.cc | #ifndef TENSORFLOW_CC_OPS_CONST_OP_H_
#define TENSORFLOW_CC_OPS_CONST_OP_H_
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
namespace ops {
Output Const(const Scope& scope, const Input::Initializer& val);
Output ConstFromProto(const Scope& scope, const TensorProto& proto);
NodeBuilder::NodeOut AsNodeOut(const Scope& scope, const Input& inp);
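// Typed Const: builds a Const node from `val` and, when the initializer's
// dtype differs from T, appends a Cast node so the result has element type T.
// For example, Const<int64_t>(scope, 10) creates an int32 constant from the
// literal and casts it to int64.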
template <typename T>
Output Const(const Scope& scope, const Input::Initializer& val) {
auto orig_const_output = Const(scope, val);
if (!scope.ok()) return Output();
typedef typename Input::Initializer::RealType<T>::type DstT;
if (val.tensor.dtype() == DataTypeToEnum<DstT>::v()) {
return orig_const_output;
}
if (val.tensor.NumElements() == 0) {
Tensor t(DataTypeToEnum<DstT>::v(), val.tensor.shape());
return Const(scope, Input::Initializer(t));
}
auto orig_const = AsNodeOut(scope, orig_const_output);
const auto cast_op_name = scope.GetUniqueNameForOp("Cast");
auto cast_builder = NodeBuilder(cast_op_name, "Cast")
.Input(orig_const)
.Attr("DstT", DataTypeToEnum<DstT>::v());
scope.UpdateBuilder(&cast_builder);
Node* ret;
scope.UpdateStatus(cast_builder.Finalize(scope.graph(), &ret));
if (!scope.ok()) return Output();
scope.UpdateStatus(scope.DoShapeInference(ret));
return Output(ret, 0);
}
template <typename T>
Output Const(const Scope& scope, const T& v, const TensorShape shape) {
return Const(scope, Input::Initializer(v, shape));
}
template <typename T>
Output Const(const Scope& scope, const std::initializer_list<T>& v,
const TensorShape shape) {
return Const(scope, Input::Initializer(v, shape));
}
std::vector<NodeBuilder::NodeOut> AsNodeOutList(const Scope& scope,
const InputList& inp);
}
}
#endif
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
namespace ops {
namespace {
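// Shared helper that builds the Const node with the given value/dtype attrs
// and runs shape inference on it.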
template <typename T>
Output ConstHelper(const Scope& scope, const T& value, DataType dtype) {
if (!scope.ok()) return Output();
Node* ret;
Graph* graph = scope.graph();
const string unique_name = scope.GetUniqueNameForOp("Const");
auto builder = NodeBuilder(unique_name, "Const")
.Attr("value", value)
.Attr("dtype", dtype);
scope.UpdateBuilder(&builder);
scope.UpdateStatus(builder.Finalize(graph, &ret));
if (!scope.ok()) return Output();
scope.UpdateStatus(scope.DoShapeInference(ret));
if (!scope.ok()) return Output();
return Output(ret);
}
}
Output Const(const Scope& scope, const Input::Initializer& val) {
if (!val.status.ok()) {
scope.UpdateStatus(val.status);
return Output();
}
return ConstHelper(scope, val.tensor, val.tensor.dtype());
}
Output ConstFromProto(const Scope& scope, const TensorProto& proto) {
return ConstHelper(scope, proto, proto.dtype());
}
NodeBuilder::NodeOut AsNodeOut(const Scope& scope, const Input& inp) {
if (!inp.status().ok()) {
scope.UpdateStatus(inp.status());
return NodeBuilder::NodeOut(inp.node(), inp.index());
}
if (inp.node()) {
return NodeBuilder::NodeOut(inp.node(), inp.index());
}
if (!inp.node_name().empty()) {
return NodeBuilder::NodeOut(inp.node_name(), inp.index(), inp.data_type());
}
auto transformed = Input{
Const(scope.NewSubScope("Const"), Input::Initializer(inp.tensor()))};
return NodeBuilder::NodeOut{transformed.node(), transformed.index()};
}
std::vector<NodeBuilder::NodeOut> AsNodeOutList(const Scope& scope,
const InputList& inp) {
std::vector<NodeBuilder::NodeOut> out;
for (const auto& i : inp) {
const auto node_out = AsNodeOut(scope, i);
if (!scope.ok()) {
return {};
}
out.push_back(node_out);
}
return out;
}
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
template <typename T>
void ExpectNodeEqual(const Node* n, gtl::ArraySlice<T> values,
TensorShape shape) {
EXPECT_TRUE(n->IsConstant());
Tensor tensor;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor));
DataType dtype;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype));
EXPECT_EQ(tensor.dtype(), dtype);
test::ExpectTensorEqual<T>(tensor, test::AsTensor(values, shape));
}
void ExpectTypeAndShape(const Node* n, DataType expected_dtype,
TensorShape expected_shape) {
EXPECT_TRUE(n->IsConstant());
Tensor tensor;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor));
DataType dtype;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype));
EXPECT_EQ(dtype, expected_dtype);
EXPECT_EQ(expected_shape, TensorShape(tensor.shape()));
}
}
TEST(ConstOpTest, Basic) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
TF_EXPECT_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_FLOAT);
ExpectNodeEqual<float>(c.node(), {42.0f}, {});
}
TEST(ConstOpTest, MultiDim) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, {{2.0}, {3.0}});
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {2.0, 3.0}, {2, 1});
}
TEST(ConstOpTest, Empty) {
Scope root = Scope::NewRootScope();
auto c1 = ops::Const(root, {});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c1.node(), DT_FLOAT, {0});
auto c2 = ops::Const(root, {{}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c2.node(), DT_FLOAT, {1, 0});
auto c3 = ops::Const(root, {{{}, {}}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c3.node(), DT_FLOAT, {1, 2, 0});
auto c4 = ops::Const<int>(root, {{{}}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c4.node(), DT_INT32, {1, 1, 0});
ops::Const(root, {{}, {{}}});
EXPECT_FALSE(root.status().ok());
}
TEST(ConstOpTest, WithExplicitShape) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0, {2, 2});
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {42.0, 42.0, 42.0, 42.0}, {2, 2});
auto d = ops::Const(root, {"1", "2", "3", "4", "5", "6"}, {2, 3});
TF_CHECK_OK(root.status());
EXPECT_EQ(d.op().output_type(0), DT_STRING);
ExpectNodeEqual<tstring>(d.node(), {"1", "2", "3", "4", "5", "6"}, {2, 3});
}
TEST(ConstOpTest, FromProto) {
Scope root = Scope::NewRootScope();
TensorProto proto;
proto.set_dtype(DT_DOUBLE);
TensorShape({2, 2}).AsProto(proto.mutable_tensor_shape());
for (int i = 0; i < 4; ++i) {
proto.add_double_val(static_cast<double>(i));
}
auto c = ops::ConstFromProto(root, proto);
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {0.0, 1.0, 2.0, 3.0}, {2, 2});
}
TEST(ConstOpTest, InvalidInitializer) {
Scope root = Scope::NewRootScope();
ops::Const(root, {{2.0}, {"df"}});
EXPECT_FALSE(root.status().ok());
}
TEST(ConstOpTest, Names) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, {{2.0}, {3.0}});
EXPECT_EQ(c.node()->name(), "Const");
auto c_1 = ops::Const(root, {{2.0}, {3.0}});
EXPECT_EQ(c_1.node()->name(), "Const_1");
auto x = ops::Const(root.WithOpName("x"), 1);
EXPECT_EQ(x.node()->name(), "x");
auto x_1 = ops::Const(root.WithOpName("x"), 1);
EXPECT_EQ(x_1.node()->name(), "x_1");
Scope child = root.NewSubScope("c");
auto c_y = ops::Const(child.WithOpName("y"), 1);
EXPECT_EQ(c_y.node()->name(), "c/y");
auto c_y_1 = ops::Const(child.WithOpName("y"), 1);
EXPECT_EQ(c_y_1.node()->name(), "c/y_1");
}
TEST(ConstOpTest, TemplatedConst) {
Scope root = Scope::NewRootScope();
auto c1 = ops::Const<int>(root, {1, 2});
ExpectTypeAndShape(c1.node(), DT_INT32, {2});
auto c2 = ops::Const<tstring>(root, {{"this"}, {"is"}, {"a"}, {"constant"}});
ExpectTypeAndShape(c2.node(), DT_STRING, {4, 1});
}
} |
1,134 | cpp | tensorflow/tensorflow | slice_op | tensorflow/core/kernels/slice_op.cc | tensorflow/core/kernels/slice_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SLICE_OP_H_
#define TENSORFLOW_CORE_KERNELS_SLICE_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
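// Device-generic slice functor: copies input.slice(slice_indices, slice_sizes)
// into output, switching to 32-bit indexing when the tensors are small enough.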
template <typename Device, typename T, int NDIMS>
struct Slice {
void operator()(const Device& d, typename TTypes<T, NDIMS>::Tensor output,
typename TTypes<T, NDIMS>::ConstTensor input,
const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_indices,
const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_sizes) {
MaybeWith32BitIndexing<Device>(
[&](auto output32, auto input32, auto slice_indices32,
auto slice_sizes32) {
output32.device(d) = input32.slice(slice_indices32, slice_sizes32);
},
output, input, slice_indices, slice_sizes);
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/slice_op.h"
#include "absl/base/prefetch.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
namespace tensorflow {
namespace {
void IntTensorToInt64Vec(const Tensor& tensor,
absl::InlinedVector<int64_t, 4>* out) {
out->resize(tensor.NumElements());
int64_t* out_ptr = out->data();
if (tensor.dtype() == DT_INT32) {
const int32* tensor_ptr = tensor.flat<int32>().data();
for (int64_t i = 0; i < tensor.NumElements(); ++i) {
out_ptr[i] = tensor_ptr[i];
}
} else if (tensor.dtype() == DT_INT64) {
const int64_t* tensor_ptr = tensor.flat<int64_t>().data();
for (int64_t i = 0; i < tensor.NumElements(); ++i) {
out_ptr[i] = tensor_ptr[i];
}
} else {
LOG(FATAL) << "begin must be either int32 or int64";
}
}
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
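// Validates the begin/size inputs against the input shape, resolves size == -1
// to "the rest of the dimension", and reports whether the slice is the
// identity or a slice along dimension 0 only.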
void SharedSliceValidation(OpKernelContext* context, const Tensor& input,
TensorShape* output_shape, bool* is_identity,
bool* slice_dim0,
absl::InlinedVector<int64_t, 4>* begin,
absl::InlinedVector<int64_t, 4>* size) {
const Tensor& begin_tensor = context->input(1);
const Tensor& size_tensor = context->input(2);
OP_REQUIRES(
context,
TensorShapeUtils::IsVector(begin_tensor.shape()) &&
TensorShapeUtils::IsVector(size_tensor.shape()) &&
begin_tensor.NumElements() == input.dims() &&
size_tensor.NumElements() == input.dims(),
errors::InvalidArgument(
"Expected begin and size arguments to be 1-D tensors of size ",
input.dims(), ", but got shapes ", begin_tensor.shape().DebugString(),
" and ", size_tensor.shape().DebugString(), " instead."));
const int input_dims = input.dims();
IntTensorToInt64Vec(begin_tensor, begin);
IntTensorToInt64Vec(size_tensor, size);
for (int i = 0; i < input_dims; ++i) {
if ((*size)[i] == -1) {
(*size)[i] = input.dim_size(i) - (*begin)[i];
}
}
*is_identity = true;
*slice_dim0 = true;
for (int i = 0; i < input_dims; ++i) {
int64_t b = (*begin)[i];
int64_t s = (*size)[i];
if (input.dim_size(i) == 0) {
OP_REQUIRES(
context, b == 0 && s == 0,
errors::InvalidArgument("Expected begin[", i, "] == 0 (got ", b,
") and size[", i, "] == 0 ", "(got ", s,
") when ", "input.dim_size(", i, ") == 0"));
} else {
OP_REQUIRES(context, 0 <= b && b <= input.dim_size(i),
errors::InvalidArgument("Expected begin[", i, "] in [0, ",
input.dim_size(i), "], but got ", b));
OP_REQUIRES(
context, 0 <= s && b + s <= input.dim_size(i),
errors::InvalidArgument("Expected size[", i, "] in [0, ",
input.dim_size(i) - b, "], but ", "got ", s));
}
OP_REQUIRES_OK(context, output_shape->AddDimWithStatus(s));
const bool take_all = (b == 0) && (s == input.dim_size(i));
(*is_identity) &= take_all;
(*slice_dim0) &= (i == 0) || take_all;
}
}
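// Handles the copy-free cases: an identity slice forwards the input tensor and
// an aligned slice along dimension 0 aliases the input buffer; otherwise the
// output is allocated for the general path.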
template <typename T>
static void SharedSliceCommonCases(OpKernelContext* context,
const Tensor& input,
absl::InlinedVector<int64, 4>* begin,
absl::InlinedVector<int64, 4>* size,
Tensor** result, bool* done) {
bool is_identity = true;
bool slice_dim0 = true;
TensorShape output_shape;
*done = false;
SharedSliceValidation(context, input, &output_shape, &is_identity,
&slice_dim0, begin, size);
if (!context->status().ok()) return;
if (is_identity) {
VLOG(1) << "Slice identity";
context->set_output(0, input);
*done = true;
return;
}
if (slice_dim0 &&
IsDim0SliceAligned<T>(input.shape(), (*begin)[0], (*size)[0])) {
VLOG(1) << "Slice dim 0: " << input.shape().DebugString();
CHECK_GE(input.dims(), 1);
context->set_output(0, input.Slice((*begin)[0], (*begin)[0] + (*size)[0]));
*done = true;
return;
}
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, result));
}
template <typename Device, typename T>
class SliceOp : public OpKernel {
public:
explicit SliceOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
absl::InlinedVector<int64_t, 4> begin;
absl::InlinedVector<int64_t, 4> size;
const Tensor& input = context->input(0);
Tensor* result = nullptr;
bool done = false;
SharedSliceCommonCases<T>(context, input, &begin, &size, &result, &done);
if (!context->status().ok() || done == true) return;
const int input_dims = input.dims();
if (result->NumElements() > 0) {
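      // Fast path for 2-D CPU slices of memcpy-friendly types: copy each row
      // with memcpy and prefetch the next input/output rows.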
if (std::is_same<Device, CPUDevice>::value && input_dims == 2 &&
DataTypeCanUseMemcpy(DataTypeToEnum<T>::v())) {
auto input_t = input.tensor<T, 2>();
auto output_t = result->tensor<T, 2>();
const int64_t row_begin = begin[0];
const int64_t col_begin = begin[1];
const int64_t row_size = size[0];
const int64_t col_size = size[1];
for (int i = 0; i < row_size; ++i) {
const int64_t row = row_begin + i;
if (i + 1 < size[0]) {
absl::PrefetchToLocalCache(&output_t(i + 1, 0));
absl::PrefetchToLocalCache(&input_t(row + 1, col_begin));
}
memcpy(&output_t(i, 0), &input_t(row, col_begin),
col_size * sizeof(T));
}
return;
}
#define HANDLE_DIM(NDIM) \
if (input_dims == NDIM) { \
HandleCase<NDIM>(context, begin, size, input, result); \
return; \
}
HANDLE_DIM(1);
HANDLE_DIM(2);
HANDLE_DIM(3);
HANDLE_DIM(4);
HANDLE_DIM(5);
HANDLE_DIM(6);
HANDLE_DIM(7);
HANDLE_DIM(8);
#undef HANDLE_DIM
OP_REQUIRES(
context, false,
errors::Unimplemented("SliceOp : Unhandled input dimensions"));
}
}
private:
template <int NDIM>
void HandleCase(OpKernelContext* context, absl::Span<const int64_t> begin,
absl::Span<const int64_t> size, const Tensor& input,
Tensor* result) {
Eigen::DSizes<Eigen::DenseIndex, NDIM> indices;
Eigen::DSizes<Eigen::DenseIndex, NDIM> sizes;
for (int i = 0; i < NDIM; ++i) {
indices[i] = begin[i];
sizes[i] = size[i];
}
functor::Slice<Device, T, NDIM>()(context->eigen_device<Device>(),
result->tensor<T, NDIM>(),
input.tensor<T, NDIM>(), indices, sizes);
}
};
}
namespace functor {
#define DECLARE_CPU_SPEC(T, NDIM) \
template <> \
void Slice<CPUDevice, T, NDIM>::operator()( \
const CPUDevice& d, typename TTypes<T, NDIM>::Tensor output, \
typename TTypes<T, NDIM>::ConstTensor input, \
const Eigen::DSizes<Eigen::DenseIndex, NDIM>& indices, \
const Eigen::DSizes<Eigen::DenseIndex, NDIM>& sizes); \
extern template struct Slice<CPUDevice, T, NDIM>;
#define DECLARE_FOR_N(T) \
DECLARE_CPU_SPEC(T, 1); \
DECLARE_CPU_SPEC(T, 2); \
DECLARE_CPU_SPEC(T, 3); \
DECLARE_CPU_SPEC(T, 4); \
DECLARE_CPU_SPEC(T, 5); \
DECLARE_CPU_SPEC(T, 6); \
DECLARE_CPU_SPEC(T, 7); \
DECLARE_CPU_SPEC(T, 8);
TF_CALL_ALL_TYPES(DECLARE_FOR_N);
#undef DECLARE_FOR_N
#undef DECLARE_CPU_SPEC
}
#define REGISTER_SLICE(type) \
REGISTER_KERNEL_BUILDER(Name("Slice") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.HostMemory("begin") \
.HostMemory("size"), \
SliceOp<CPUDevice, type>)
TF_CALL_POD_STRING_TYPES(REGISTER_SLICE);
TF_CALL_QUANTIZED_TYPES(REGISTER_SLICE);
TF_CALL_float8_e5m2(REGISTER_SLICE);
TF_CALL_float8_e4m3fn(REGISTER_SLICE);
#undef REGISTER_SLICE
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC(T, NDIM) \
template <> \
void Slice<GPUDevice, T, NDIM>::operator()( \
const GPUDevice& d, typename TTypes<T, NDIM>::Tensor output, \
typename TTypes<T, NDIM>::ConstTensor input, \
const Eigen::DSizes<Eigen::DenseIndex, NDIM>& indices, \
const Eigen::DSizes<Eigen::DenseIndex, NDIM>& sizes); \
extern template struct Slice<GPUDevice, T, NDIM>;
#define DECLARE_FOR_N(T) \
DECLARE_GPU_SPEC(T, 1); \
DECLARE_GPU_SPEC(T, 2); \
DECLARE_GPU_SPEC(T, 3); \
DECLARE_GPU_SPEC(T, 4); \
DECLARE_GPU_SPEC(T, 5); \
DECLARE_GPU_SPEC(T, 6); \
DECLARE_GPU_SPEC(T, 7); \
DECLARE_GPU_SPEC(T, 8);
TF_CALL_int8(DECLARE_FOR_N);
TF_CALL_int32(DECLARE_FOR_N);
TF_CALL_int64(DECLARE_FOR_N);
TF_CALL_GPU_ALL_TYPES(DECLARE_FOR_N);
#undef DECLARE_FOR_N
#undef DECLARE_GPU_SPEC
}
#define REGISTER_GPU(type) \
REGISTER_KERNEL_BUILDER(Name("Slice") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.HostMemory("begin") \
.HostMemory("size"), \
SliceOp<GPUDevice, type>)
TF_CALL_int8(REGISTER_GPU);
TF_CALL_int64(REGISTER_GPU);
TF_CALL_GPU_ALL_TYPES(REGISTER_GPU);
#undef REGISTER_GPU
#endif
REGISTER_KERNEL_BUILDER(Name("Slice")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("begin")
.HostMemory("size")
.HostMemory("output"),
SliceOp<CPUDevice, int32>);
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
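// Benchmarks slicing a [kDim, size] block starting at offset (10, 10) out of a
// larger 2-D tensor on CPU with the single-threaded executor.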
template <typename T>
static void SliceHelper(::testing::benchmark::State& state) {
const int size = state.range(0);
Graph* g = new Graph(OpRegistry::Global());
DataType dt = DataTypeToEnum<T>::v();
int kDim = 100;
int kMaxSize = 15000;
CHECK_LT(size, kMaxSize);
Tensor begin(DT_INT32, TensorShape({2}));
begin.flat<int32>()(0) = 10;
begin.flat<int32>()(1) = 10;
Tensor sizes(DT_INT32, TensorShape({2}));
sizes.flat<int32>()(0) = kDim;
sizes.flat<int32>()(1) = size;
Tensor input(dt, TensorShape({2 * kDim, kMaxSize}));
input.flat<T>().setRandom();
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Slice")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, begin))
.Input(test::graph::Constant(g, sizes))
.Attr("T", dt)
.Finalize(g, &node));
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR", false)
.Run(state);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim *
size * sizeof(T));
}
void BM_SliceFloat(::testing::benchmark::State& state) {
SliceHelper<float>(state);
}
BENCHMARK(BM_SliceFloat)->UseRealTime()->Arg(100)->Arg(1000)->Arg(10000);
void BM_SliceBFloat16(::testing::benchmark::State& state) {
SliceHelper<bfloat16>(state);
}
BENCHMARK(BM_SliceBFloat16)->UseRealTime()->Arg(100)->Arg(1000)->Arg(10000);
}
} |
1,135 | cpp | tensorflow/tensorflow | training_ops | tensorflow/core/kernels/training_ops.cc | tensorflow/core/kernels/training_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_TRAINING_OPS_H_
#define TENSORFLOW_CORE_KERNELS_TRAINING_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace functor {
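// Functors implementing the per-element update rules of the training ops
// (gradient descent, Adagrad/Adadelta, FTRL, momentum, Adam/AdaMax, RMSProp,
// AddSign/PowerSign), specialized by device, element type and, for the sparse
// variants, index type.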
template <typename Device, typename T>
struct ApplyGradientDescent {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::ConstScalar alpha,
typename TTypes<T>::ConstFlat delta);
};
template <typename Device, typename T>
struct ApplyAdadelta {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::Flat accum_update,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar rho,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T, typename Tindex>
struct SparseApplyAdadelta {
void operator()(const Device& d, typename TTypes<T>::Matrix var,
typename TTypes<T>::Matrix accum,
typename TTypes<T>::Matrix accum_update,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar rho,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstMatrix grad,
typename TTypes<Tindex>::ConstFlat indices);
};
template <typename Device, typename T>
struct FobosElasticNet {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyProximalGradientDescent {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyAdagrad {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstFlat grad, bool update_slots);
};
template <typename Device, typename T>
struct ApplyAdagradV2 {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad, bool update_slots);
};
template <typename Device, typename T>
struct ApplyAdagradDA {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat gradient_accum,
typename TTypes<T>::Flat gradient_squared_accum,
typename TTypes<T>::ConstScalar lr, int64_t global_step,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T, typename Tindex, bool has_epsilon>
struct SparseApplyAdagrad {
Status operator()(const Device& d, typename TTypes<T>::Matrix var,
typename TTypes<T>::Matrix accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstMatrix grad,
typename TTypes<Tindex>::ConstVec indices,
int64_t inner_dim, bool update_slots);
};
template <typename Device, typename T>
struct ApplyProximalAdagrad {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T, typename Tindex>
struct SparseApplyProximalAdagrad {
Status operator()(const Device& d, typename TTypes<T>::Matrix var,
typename TTypes<T>::Matrix accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstMatrix grad,
typename TTypes<Tindex>::ConstVec indices,
int64_t inner_dim);
};
template <typename Device, typename T>
struct ApplyFtrl {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::Flat linear,
typename TTypes<T>::ConstFlat grad,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstScalar lr_power);
};
template <typename Device, typename T>
struct ApplyFtrlMultiplyLinearByLr {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::Flat linear,
typename TTypes<T>::ConstFlat grad,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstScalar lr_power);
};
template <typename Device, typename T>
struct ApplyFtrlV2 {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::Flat linear,
typename TTypes<T>::ConstFlat grad,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstScalar l2_shrinkage,
typename TTypes<T>::ConstScalar lr_power);
};
template <typename Device, typename T>
struct ApplyFtrlV2MultiplyLinearByLr {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::Flat linear,
typename TTypes<T>::ConstFlat grad,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstScalar l2_shrinkage,
typename TTypes<T>::ConstScalar lr_power);
};
template <typename Device, typename T, typename Tindex, bool has_l2_shrinkage>
struct SparseApplyFtrl {
Status operator()(const Device& d, typename TTypes<T>::Matrix var_flat,
typename TTypes<T>::Matrix accum_flat,
typename TTypes<T>::Matrix linear_flat,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar l1,
typename TTypes<T>::ConstScalar l2,
typename TTypes<T>::ConstScalar l2_shrinkage,
typename TTypes<T>::ConstScalar lr_power,
typename TTypes<T>::ConstMatrix grad_flat,
typename TTypes<Tindex>::ConstVec indices_vec,
int64_t inner_dim, bool multiply_linear_by_lr);
};
template <typename Device, typename T>
struct ApplyMomentum {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstFlat grad,
typename TTypes<T>::ConstScalar momentum, bool use_nesterov);
};
template <typename Device, typename T>
struct ApplyKerasMomentum {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstFlat grad,
typename TTypes<T>::ConstScalar momentum, bool use_nesterov);
};
template <typename Device, typename T, typename Tindex>
struct SparseApplyKerasMomentum {
Tindex operator()(const Device& d, typename TTypes<T>::Matrix var,
typename TTypes<T>::Matrix accum,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstMatrix grad,
typename TTypes<Tindex>::ConstFlat indices,
typename TTypes<T>::ConstScalar momentum,
bool use_nesterov);
};
template <typename Device, typename T>
struct ApplyAdam {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat m, typename TTypes<T>::Flat v,
typename TTypes<T>::ConstScalar beta1_power,
typename TTypes<T>::ConstScalar beta2_power,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar beta1,
typename TTypes<T>::ConstScalar beta2,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad, bool use_nesterov);
};
template <typename Device, typename T>
struct ApplyAdamWithAmsgrad {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat m, typename TTypes<T>::Flat v,
typename TTypes<T>::Flat vhat,
typename TTypes<T>::ConstScalar beta1_power,
typename TTypes<T>::ConstScalar beta2_power,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar beta1,
typename TTypes<T>::ConstScalar beta2,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyAdaMax {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat m, typename TTypes<T>::Flat v,
typename TTypes<T>::ConstScalar beta1_power,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar beta1,
typename TTypes<T>::ConstScalar beta2,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyRMSProp {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat ms, typename TTypes<T>::Flat mom,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar rho,
typename TTypes<T>::ConstScalar momentum,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyCenteredRMSProp {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat mg, typename TTypes<T>::Flat ms,
typename TTypes<T>::Flat mom,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar rho,
typename TTypes<T>::ConstScalar momentum,
typename TTypes<T>::ConstScalar epsilon,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyAddSign {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat m,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar alpha,
typename TTypes<T>::ConstScalar sign_decay,
typename TTypes<T>::ConstScalar beta,
typename TTypes<T>::ConstFlat grad);
};
template <typename Device, typename T>
struct ApplyPowerSign {
void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat m,
typename TTypes<T>::ConstScalar lr,
typename TTypes<T>::ConstScalar logbase,
typename TTypes<T>::ConstScalar sign_decay,
typename TTypes<T>::ConstScalar beta,
typename TTypes<T>::ConstFlat grad);
};
}
}
#endif
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
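// Returns the shape of the value held by a ref/resource input when handle
// shape information is available, otherwise the input's own shape (or an
// unknown shape for resource inputs).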
template <bool is_resource>
ShapeHandle ShapeOrHandleShape(InferenceContext* c, int input) {
auto* handle_data = c->input_handle_shapes_and_types(input);
if (handle_data != nullptr && !handle_data->empty() &&
(*handle_data)[0].dtype != DT_INVALID) {
return (*handle_data)[0].shape;
}
return c->input(input);
}
template <>
ShapeHandle ShapeOrHandleShape<true>(InferenceContext* c, int input) {
auto* handle_data = c->input_handle_shapes_and_types(input);
if (handle_data != nullptr && !handle_data->empty() &&
(*handle_data)[0].dtype != DT_INVALID) {
return (*handle_data)[0].shape;
}
return c->UnknownShape();
}
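// Merges the shape of the grad input at grad_idx into *s; for sparse ops the
// indices input (grad_idx + 1) must be a rank-1 tensor whose length matches
// grad's first dimension, which is otherwise left unconstrained.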
template <bool is_sparse, bool is_resource>
static Status HandleGradAndIndicesInputs(InferenceContext* c, int grad_idx,
ShapeHandle* s) {
ShapeHandle grad = ShapeOrHandleShape<is_resource>(c, grad_idx);
if (!is_sparse) {
TF_RETURN_IF_ERROR(c->Merge(*s, grad, s));
return absl::OkStatus();
}
ShapeHandle indices;
TF_RETURN_IF_ERROR(c->WithRank(c->input(grad_idx + 1), 1, &indices));
DimensionHandle unused;
const auto rank = c->Rank(grad);
if (!rank) {
return absl::InvalidArgumentError(absl::StrCat(
"Argument grad must not be a scalar. ", "Got grad with rank ", rank));
}
TF_RETURN_IF_ERROR(c->Merge(c->Dim(indices, 0), c->Dim(grad, 0), &unused));
ShapeHandle grad_unknown_first;
TF_RETURN_IF_ERROR(
c->ReplaceDim(grad, 0, c->UnknownDim(), &grad_unknown_first));
TF_RETURN_IF_ERROR(c->Merge(*s, grad_unknown_first, s));
return absl::OkStatus();
}
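// Shape function for (Resource)ApplyGradientDescent: alpha must be a scalar
// and delta must merge with var's shape, which is also the output shape.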
template <bool is_resource>
static Status ApplyGradientDescentShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->Merge(s, c->input(2), &s));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyGradientDescent")
.Input("var: Ref(T)")
.Input("alpha: T")
.Input("delta: T")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.SetShapeFn(ApplyGradientDescentShapeFn<false>);
REGISTER_OP("ResourceApplyGradientDescent")
.Input("var: resource")
.Input("alpha: T")
.Input("delta: T")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.SetShapeFn(ApplyGradientDescentShapeFn<true>);
template <bool is_sparse, bool is_resource>
Status ApplyProximalGradientDescentShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, /*grad_idx=*/4, &s));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyProximalGradientDescent")
.Input("var: Ref(T)")
.Input("alpha: T")
.Input("l1: T")
.Input("l2: T")
.Input("delta: T")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalGradientDescentShapeFn<false, false>);
REGISTER_OP("SparseApplyProximalGradientDescent")
.Input("var: Ref(T)")
.Input("alpha: T")
.Input("l1: T")
.Input("l2: T")
.Input("grad: T")
.Input("indices: Tindices")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalGradientDescentShapeFn<true, false>);
REGISTER_OP("ResourceApplyProximalGradientDescent")
.Input("var: resource")
.Input("alpha: T")
.Input("l1: T")
.Input("l2: T")
.Input("delta: T")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalGradientDescentShapeFn<false, true>);
REGISTER_OP("ResourceSparseApplyProximalGradientDescent")
.Input("var: resource")
.Input("alpha: T")
.Input("l1: T")
.Input("l2: T")
.Input("grad: T")
.Input("indices: Tindices")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalGradientDescentShapeFn<true, true>);
template <bool is_sparse, bool is_resource>
static Status ApplyAdadeltaShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 1), &s));
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 2), &s));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, /*grad_idx=*/6, &s));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyAdadelta")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("accum_update: Ref(T)")
.Input("lr: T")
.Input("rho: T")
.Input("epsilon: T")
.Input("grad: T")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.SetShapeFn(
ApplyAdadeltaShapeFn<false, false>);
REGISTER_OP("SparseApplyAdadelta")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("accum_update: Ref(T)")
.Input("lr: T")
.Input("rho: T")
.Input("epsilon: T")
.Input("grad: T")
.Input("indices: Tindices")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(
ApplyAdadeltaShapeFn<true, false>);
REGISTER_OP("ResourceApplyAdadelta")
.Input("var: resource")
.Input("accum: resource")
.Input("accum_update: resource")
.Input("lr: T")
.Input("rho: T")
.Input("epsilon: T")
.Input("grad: T")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.SetShapeFn(
ApplyAdadeltaShapeFn<false, true>);
REGISTER_OP("ResourceSparseApplyAdadelta")
.Input("var: resource")
.Input("accum: resource")
.Input("accum_update: resource")
.Input("lr: T")
.Input("rho: T")
.Input("epsilon: T")
.Input("grad: T")
.Input("indices: Tindices")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ApplyAdadeltaShapeFn<true, true>);
template <bool is_sparse, bool is_resource>
static Status ApplyAdagradShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 1), &s));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, /*grad_idx=*/3, &s));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyAdagrad")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("lr: T")
.Input("grad: T")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
.SetShapeFn(
ApplyAdagradShapeFn<false, false>);
REGISTER_OP("ResourceApplyAdagrad")
.Input("var: resource")
.Input("accum: resource")
.Input("lr: T")
.Input("grad: T")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
.SetShapeFn(ApplyAdagradShapeFn<false, true>);
REGISTER_OP("SparseApplyAdagrad")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("lr: T")
.Input("grad: T")
.Input("indices: Tindices")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
.SetShapeFn(ApplyAdagradShapeFn<true, false>);
REGISTER_OP("ResourceSparseApplyAdagrad")
.Input("var: resource")
.Input("accum: resource")
.Input("lr: T")
.Input("grad: T")
.Input("indices: Tindices")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
.SetShapeFn(ApplyAdagradShapeFn<true, true>);
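// Common shape function for the (Sparse/Resource)ApplyAdagradV2 ops: same as
// Adagrad, plus a scalar epsilon input before grad.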
template <bool is_sparse, bool is_resource>
static Status ApplyAdagradV2ShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 1), &s));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, 4 /* grad_idx */, &s));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyAdagradV2")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("lr: T")
.Input("epsilon: T")
.Input("grad: T")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
    .SetShapeFn(ApplyAdagradV2ShapeFn</*is_sparse=*/false,
                                      /*is_resource=*/false>);
REGISTER_OP("ResourceApplyAdagradV2")
.Input("var: resource")
.Input("accum: resource")
.Input("lr: T")
.Input("epsilon: T")
.Input("grad: T")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
    .SetShapeFn(ApplyAdagradV2ShapeFn</*is_sparse=*/false,
                                      /*is_resource=*/true>);
REGISTER_OP("SparseApplyAdagradV2")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("lr: T")
.Input("epsilon: T")
.Input("grad: T")
.Input("indices: Tindices")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
    .SetShapeFn(ApplyAdagradV2ShapeFn</*is_sparse=*/true,
                                      /*is_resource=*/false>);
REGISTER_OP("ResourceSparseApplyAdagradV2")
.Input("var: resource")
.Input("accum: resource")
.Input("lr: T")
.Input("epsilon: T")
.Input("grad: T")
.Input("indices: Tindices")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("update_slots: bool = true")
    .SetShapeFn(ApplyAdagradV2ShapeFn</*is_sparse=*/true,
                                      /*is_resource=*/true>);
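// Common shape function for the (Sparse/Resource)ApplyProximalAdagrad ops:
// var and accum must have compatible shapes, lr/l1/l2 must be scalars, and
// grad (plus indices in the sparse case) is checked against var.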
template <bool is_sparse, bool is_resource>
static Status ApplyProximalAdagradShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 1), &s));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, 5 /* grad_idx */, &s));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyProximalAdagrad")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("grad: T")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalAdagradShapeFn</*is_sparse=*/false,
                                            /*is_resource=*/false>);
REGISTER_OP("ResourceApplyProximalAdagrad")
.Input("var: resource")
.Input("accum: resource")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("grad: T")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalAdagradShapeFn</*is_sparse=*/false,
                                            /*is_resource=*/false>);
REGISTER_OP("SparseApplyProximalAdagrad")
.Input("var: Ref(T)")
.Input("accum: Ref(T)")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("grad: T")
.Input("indices: Tindices")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalAdagradShapeFn</*is_sparse=*/true,
                                            /*is_resource=*/false>);
REGISTER_OP("ResourceSparseApplyProximalAdagrad")
.Input("var: resource")
.Input("accum: resource")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("grad: T")
.Input("indices: Tindices")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyProximalAdagradShapeFn</*is_sparse=*/true,
                                            /*is_resource=*/true>);
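// Common shape function for the (Sparse/Resource)ApplyAdagradDA ops: var and
// both accumulators must have compatible shapes, grad (plus indices in the
// sparse case) is checked against var, and lr/l1/l2/global_step must be
// scalars.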
template <bool is_sparse, bool is_resource>
static Status ApplyAdagradDAShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(c->Merge(s, ShapeOrHandleShape<is_resource>(c, 1),
&s));
TF_RETURN_IF_ERROR(c->Merge(s, ShapeOrHandleShape<is_resource>(c, 2),
&s));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, 3 /* grad_idx */, &s));
int idx = is_sparse ? 5 : 4;
TF_RETURN_IF_ERROR(c->WithRank(c->input(idx++), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(idx++), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(idx++), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(idx++), 0, &unused));
if (c->num_outputs() > 0) {
c->set_output(0, s);
}
return absl::OkStatus();
}
REGISTER_OP("ApplyAdagradDA")
.Input("var: Ref(T)")
.Input("gradient_accumulator: Ref(T)")
.Input("gradient_squared_accumulator: Ref(T)")
.Input("grad: T")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("global_step: int64")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyAdagradDAShapeFn</*is_sparse=*/false,
                                      /*is_resource=*/false>);
REGISTER_OP("SparseApplyAdagradDA")
.Input("var: Ref(T)")
.Input("gradient_accumulator: Ref(T)")
.Input("gradient_squared_accumulator: Ref(T)")
.Input("grad: T")
.Input("indices: Tindices")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("global_step: int64")
.Output("out: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyAdagradDAShapeFn</*is_sparse=*/true,
                                      /*is_resource=*/false>);
REGISTER_OP("ResourceApplyAdagradDA")
.Input("var: resource")
.Input("gradient_accumulator: resource")
.Input("gradient_squared_accumulator: resource")
.Input("grad: T")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("global_step: int64")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyAdagradDAShapeFn</*is_sparse=*/false,
                                      /*is_resource=*/true>);
REGISTER_OP("ResourceSparseApplyAdagradDA")
.Input("var: resource")
.Input("gradient_accumulator: resource")
.Input("gradient_squared_accumulator: resource")
.Input("grad: T")
.Input("indices: Tindices")
.Input("lr: T")
.Input("l1: T")
.Input("l2: T")
.Input("global_step: int64")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
    .SetShapeFn(ApplyAdagradDAShapeFn</*is_sparse=*/true,
                                      /*is_resource=*/true>);
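// Common shape function for the (Sparse/Resource)ApplyFtrl ops: var and the
// two accumulator inputs must have compatible shapes, grad (plus indices in
// the sparse case) is checked against var, and the trailing hyperparameters
// must be scalars.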
template <bool is_sparse, bool is_resource>
static Status ApplyFtrlShapeFn(InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s = ShapeOrHandleShape<is_resource>(c, 0);
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 1), &s));
TF_RETURN_IF_ERROR(
c->Merge(s, ShapeOrHandleShape<is_resource>(c, 2), &s));
TF_RETURN_IF_ERROR(HandleGradAndIndicesInputs<is_sparse, is_resource>(
      c, 3 /* grad_idx */, &s));
int idx = is_sparse ? 5 : 4;
TF_RETURN_IF_ERRO | #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
static void TestGradAndIndicesErrorHandling(const ShapeInferenceTestOp& op,
string shape_spec_middle,
const string& shape_spec_end = "") {
auto shape_spec = [&shape_spec_middle, shape_spec_end](
const char* var_spec, const char* grad_indices_spec) {
return strings::StrCat(var_spec, ";", shape_spec_middle, ";",
grad_indices_spec, shape_spec_end);
};
INFER_ERROR("Dimension 1 in both shapes must be equal", op,
shape_spec("[?,1]", "[?,2];[?]"));
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op,
shape_spec("?", "[2,?];[1]"));
INFER_ERROR("must be equal rank", op, shape_spec("[1]", "[?,2];[?]"));
INFER_ERROR("Shape must be rank 1 but is rank 2", op,
shape_spec("[?]", "[?];[1,2]"));
}
TEST(TrainingOpsTest, ApplyGradientDescent_ShapeFn) {
ShapeInferenceTestOp op("ApplyGradientDescent");
INFER_OK(op, "[1,?];[];[?,2]", "[d0_0,d2_1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?];?");
}
TEST(TrainingOpsTest, ApplyProximalGradientDescent_ShapeFn) {
ShapeInferenceTestOp op("ApplyProximalGradientDescent");
INFER_OK(op, "[1,?];[];[];[];[?,2]", "[d0_0,d4_1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[];[];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?");
}
TEST(TrainingOpsTest, SparseApplyProximalGradientDescent_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyProximalGradientDescent");
INFER_OK(op, "[1,?];[];[];[];[?,2];[3]", "[d0_0,d4_1]");
TestGradAndIndicesErrorHandling(op, "[];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?");
}
TEST(TrainingOpsTest, ApplyAdadelta_ShapeFn) {
ShapeInferenceTestOp op("ApplyAdadelta");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[?,?,?,4]",
"[d0_0,d1_1,d2_2,d6_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[1];[];[];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?");
}
TEST(TrainingOpsTest, SparseApplyAdadelta_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyAdadelta");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[?,?,?,4];?",
"[d0_0,d1_1,d2_2,d6_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[];[];[];[1];?");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[];[];[];[1];?");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,1];[?,1];[];[];[];[?,2];?");
TestGradAndIndicesErrorHandling(op, "?;?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?");
}
TEST(TrainingOpsTest, ApplyAdagrad_ShapeFn) {
ShapeInferenceTestOp op("ApplyAdagrad");
INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3]", "[d0_0,d1_1,d3_2]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?");
}
TEST(TrainingOpsTest, SparseApplyAdagrad_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyAdagrad");
INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3];?", "[d0_0,d1_1,d3_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,2];[];[?,1];?");
INFER_ERROR("Shapes must be equal rank, but are 2 and 3", op,
"[?,1];[?,1];[];[?,?,2];?");
TestGradAndIndicesErrorHandling(op, "?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?");
}
TEST(TrainingOpsTest, ApplyProximalAdagrad_ShapeFn) {
ShapeInferenceTestOp op("ApplyProximalAdagrad");
INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[?,?,3]", "[d0_0,d1_1,d5_2]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[];[];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?");
}
TEST(TrainingOpsTest, SparseApplyProximalAdagrad_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyProximalAdagrad");
INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[?,?,3];?", "[d0_0,d1_1,d5_2]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[];[];[];[?,1];?");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,1];[];[];[];[?,2];?");
TestGradAndIndicesErrorHandling(op, "?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?");
}
TEST(TrainingOpsTest, ApplyFtrl_ShapeFn) {
ShapeInferenceTestOp op("ApplyFtrl");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[?,?,?,4];[];[];[];[]",
"[d0_0,d1_1,d2_2,d3_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[1];[];[];[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[1];[];[];[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[1];[2];[];[];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;?;[?]");
}
TEST(TrainingOpsTest, SparseApplyFtrl_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyFtrl");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[?,?,?,4];?;[];[];[];[]",
"[d0_0,d1_1,d2_2,d3_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[?,1];?;[];[];[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[?,1];?;[];[];[];[]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,1];[?,1];[?,2];?;[];[];[];[]");
TestGradAndIndicesErrorHandling(op, "?;?", ";?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;?;[?];?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;?;?;[?]");
}
TEST(TrainingOpsTest, ApplyMomentum_ShapeFn) {
ShapeInferenceTestOp op("ApplyMomentum");
INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3];[]", "[d0_0,d1_1,d3_2]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[];[1];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[];[2];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?]");
}
TEST(TrainingOpsTest, SparseApplyMomentum_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyMomentum");
INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3];?;[]", "[d0_0,d1_1,d3_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,2];[];[?,1];?;[]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,1];[];[?,2];?;[]");
TestGradAndIndicesErrorHandling(op, "?;?", ";?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?]");
}
TEST(TrainingOpsTest, ApplyAdam_ShapeFn) {
ShapeInferenceTestOp op("ApplyAdam");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[];[];[];[?,?,?,4]",
"[d0_0,d1_1,d2_2,d9_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[];[];[];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[];[];[];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[1];[];[];[];[];[];[];[2]");
const char err[] = "Shape must be rank 0 but is rank 1";
INFER_ERROR(err, op, "?;?;?;[?];?;?;?;?;?;?");
INFER_ERROR(err, op, "?;?;?;?;[?];?;?;?;?;?");
INFER_ERROR(err, op, "?;?;?;?;?;[?];?;?;?;?");
INFER_ERROR(err, op, "?;?;?;?;?;?;[?];?;?;?");
INFER_ERROR(err, op, "?;?;?;?;?;?;?;[?];?;?");
INFER_ERROR(err, op, "?;?;?;?;?;?;?;?;[?];?");
}
TEST(TrainingOpsTest, ApplyRMSProp_ShapeFn) {
ShapeInferenceTestOp op("ApplyRMSProp");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[];[?,?,?,4]",
"[d0_0,d1_1,d2_2,d7_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[1];[];[];[];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?");
}
TEST(TrainingOpsTest, SparseApplyRMSProp_ShapeFn) {
ShapeInferenceTestOp op("SparseApplyRMSProp");
INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[];[?,?,?,4];?",
"[d0_0,d1_1,d2_2,d7_3]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[1];[];[];[];[];[1];?");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[2];[];[];[];[];[1];?");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op,
"[?,1];[?,1];[?,1];[];[];[];[];[?,2];?");
TestGradAndIndicesErrorHandling(op, "?;?;?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?;?");
}
TEST(TrainingOpsTest, ApplyAddSign_ShapeFn) {
ShapeInferenceTestOp op("ApplyAddSign");
INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[];[?,?,2]", "[d0_0,d1_1,d6_2]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[];[];[];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?");
}
TEST(TrainingOpsTest, ApplyPowerSign_ShapeFn) {
ShapeInferenceTestOp op("ApplyPowerSign");
INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[];[?,?,2]", "[d0_0,d1_1,d6_2]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[2];[];[];[];[];[1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"[1];[1];[];[];[];[];[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?");
}
} |
1,136 | cpp | tensorflow/tensorflow | one_hot_op | tensorflow/core/kernels/one_hot_op.cc | tensorflow/core/kernels/one_hot_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_ONE_HOT_OP_H_
#define TENSORFLOW_CORE_KERNELS_ONE_HOT_OP_H_
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace generator {
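// Generator used by the generic OneHot functor: for an output coordinate
// (prefix, depth, suffix) it yields on_value when indices(prefix, suffix)
// equals the depth coordinate, and off_value otherwise.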
template <typename T, typename TI>
class OneGenerator {
public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
OneGenerator(const typename TTypes<TI>::ConstMatrix& indices,
const typename TTypes<T>::ConstScalar& on_value,
const typename TTypes<T>::ConstScalar& off_value)
: indices_(indices), on_value_(on_value), off_value_(off_value) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
operator()(const Eigen::array<Eigen::DenseIndex, 3>& pre_depth_suff) const {
return (indices_(pre_depth_suff[0], pre_depth_suff[2]) == pre_depth_suff[1])
? on_value_()
: off_value_();
}
private:
const typename TTypes<TI>::ConstMatrix indices_;
const typename TTypes<T>::ConstScalar on_value_;
const typename TTypes<T>::ConstScalar off_value_;
};
}
namespace functor {
template <typename Device, typename T, typename TI>
struct OneHot {
EIGEN_ALWAYS_INLINE static void Compute(
const Device& d, const typename TTypes<TI>::ConstMatrix& indices,
const typename TTypes<T>::ConstScalar& on_value,
const typename TTypes<T>::ConstScalar& off_value,
typename TTypes<T, 3>::Tensor* output) {
generator::OneGenerator<T, TI> generator(indices, on_value, off_value);
output->device(d) = output->generate(generator);
}
};
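// CPU specialization: fills the output with off_value, then scatters on_value
// in parallel, skipping any index that falls outside [0, depth).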
template <typename T, typename TI>
struct OneHot<CPUDevice, T, TI> {
EIGEN_ALWAYS_INLINE static void Compute(
const CPUDevice& d, const typename TTypes<TI>::ConstMatrix& indices,
const typename TTypes<T>::ConstScalar& on_value,
const typename TTypes<T>::ConstScalar& off_value,
typename TTypes<T, 3>::Tensor* output) {
output->device(d) = output->constant(off_value());
Eigen::Index prefix_size = output->dimensions()[0];
Eigen::Index depth_size = output->dimensions()[1];
Eigen::Index suffix_size = output->dimensions()[2];
double bytes_loaded = sizeof(T);
double bytes_stored = sizeof(T);
double cycles = 0.0;
const Eigen::TensorOpCost cost(bytes_loaded, bytes_stored, cycles);
if (suffix_size == 1) {
const auto func = [&](Eigen::Index start, Eigen::Index end) -> void {
for (Eigen::Index i = start; i < end; ++i) {
const TI depth = internal::SubtleMustCopy(indices(i, 0));
if (FastBoundsCheck(depth, depth_size)) {
(*output)(i, depth, 0) = on_value();
}
}
};
d.parallelFor(prefix_size, cost, func);
} else {
const auto func = [&](Eigen::Index start, Eigen::Index end) -> void {
for (Eigen::Index i = start; i < end; ++i) {
const Eigen::Index d0 = i / suffix_size;
const Eigen::Index d1 = i - (d0 * suffix_size);
const TI depth = internal::SubtleMustCopy(indices(d0, d1));
if (FastBoundsCheck(depth, depth_size)) {
(*output)(d0, depth, d1) = on_value();
}
}
};
d.parallelFor(prefix_size * suffix_size, cost * suffix_size, func);
}
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/one_hot_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
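// OneHotOp checks that depth, on_value and off_value are scalars and that
// axis is -1 or in [0, output rank), inserts the depth dimension at axis,
// then evaluates the functor on indices reshaped to [prefix, suffix] and the
// output reshaped to [prefix, depth, suffix].
// Example: indices = [1, 3], depth = 4, axis = -1, on_value = 1.0,
// off_value = 0.0 produces [[0, 1, 0, 0], [0, 0, 0, 1]].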
template <typename Device, typename T, typename TI>
class OneHotOp : public OpKernel {
public:
explicit OneHotOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& indices = ctx->input(0);
const Tensor& depth = ctx->input(1);
const Tensor& on_value = ctx->input(2);
const Tensor& off_value = ctx->input(3);
const TensorShape& indices_shape = indices.shape();
const int indices_dims = indices_shape.dims();
const int output_dims = indices_dims + 1;
OP_REQUIRES(
ctx, axis_ == -1 || (axis_ >= 0 && axis_ < output_dims),
errors::InvalidArgument("Expected axis to be -1 or between [0, ",
output_dims, "). But received: ", axis_));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(depth.shape()),
errors::InvalidArgument("depth must be a scalar, but got: ",
depth.shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(on_value.shape()),
errors::InvalidArgument("on_value must be a scalar, but got: ",
on_value.shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(off_value.shape()),
errors::InvalidArgument("off_value must be a scalar, but got: ",
off_value.shape().DebugString()));
const int axis = (axis_ == -1) ? indices_dims : axis_;
const int32_t depth_v = depth.scalar<int32>()();
OP_REQUIRES(
ctx, depth_v >= 0,
errors::InvalidArgument("depth must be non-negative, got: ", depth_v));
OP_REQUIRES(
ctx,
MultiplyWithoutOverflow(indices_shape.num_elements(), depth_v) >= 0,
errors::InvalidArgument("OneHot result would have shape ",
indices_shape.DebugString(), " + [", depth_v,
"], which exceeds 2**63 - 1 elements"));
TensorShape output_shape = indices_shape;
output_shape.InsertDim(axis, depth_v);
auto on_value_t = on_value.scalar<T>();
auto off_value_t = off_value.scalar<T>();
Tensor* output;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &output));
if (output_shape.num_elements() > 0) {
int64_t prefix_dim_size = 1;
for (int i = 0; i < axis; ++i) {
prefix_dim_size *= indices_shape.dim_size(i);
}
int64_t suffix_dim_size = indices_shape.num_elements() / prefix_dim_size;
auto indices_t =
indices.shaped<TI, 2>({prefix_dim_size, suffix_dim_size});
auto output_t =
output->shaped<T, 3>({prefix_dim_size, depth_v, suffix_dim_size});
functor::OneHot<Device, T, TI>::Compute(ctx->eigen_device<Device>(),
indices_t, on_value_t,
off_value_t, &output_t);
}
}
private:
int32 axis_;
OneHotOp(const OneHotOp&) = delete;
void operator=(const OneHotOp&) = delete;
};
#define REGISTER_ONE_HOT_INDEX(type, index_type) \
REGISTER_KERNEL_BUILDER(Name("OneHot") \
.Device(DEVICE_CPU) \
.TypeConstraint<index_type>("TI") \
.TypeConstraint<type>("T") \
.HostMemory("depth"), \
OneHotOp<CPUDevice, type, index_type>);
#define REGISTER_ONE_HOT(type) \
REGISTER_ONE_HOT_INDEX(type, uint8); \
REGISTER_ONE_HOT_INDEX(type, int8); \
REGISTER_ONE_HOT_INDEX(type, int32); \
REGISTER_ONE_HOT_INDEX(type, int64_t)
TF_CALL_ALL_TYPES(REGISTER_ONE_HOT);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU_SPEC_INDEX(T, TI) \
template <> \
void OneHot<GPUDevice, T, TI>::Compute( \
const GPUDevice& d, const typename TTypes<TI>::ConstMatrix& indices, \
const typename TTypes<T>::ConstScalar& on_value, \
const typename TTypes<T>::ConstScalar& off_value, \
typename TTypes<T, 3>::Tensor* output); \
extern template struct OneHot<GPUDevice, T, TI>;
#define DECLARE_GPU_SPEC(T) \
DECLARE_GPU_SPEC_INDEX(T, uint8); \
DECLARE_GPU_SPEC_INDEX(T, int8); \
DECLARE_GPU_SPEC_INDEX(T, int32); \
DECLARE_GPU_SPEC_INDEX(T, int64_t);
TF_CALL_int8(DECLARE_GPU_SPEC);
TF_CALL_int32(DECLARE_GPU_SPEC);
TF_CALL_int64(DECLARE_GPU_SPEC);
TF_CALL_GPU_ALL_TYPES(DECLARE_GPU_SPEC);
#undef DECLARE_GPU_SPEC_INDEX
#undef DECLARE_GPU_SPEC
}
#define REGISTER_ONE_HOT_GPU_INDEX(type, index_type) \
REGISTER_KERNEL_BUILDER(Name("OneHot") \
.Device(DEVICE_GPU) \
.TypeConstraint<index_type>("TI") \
.TypeConstraint<type>("T") \
.HostMemory("depth"), \
OneHotOp<GPUDevice, type, index_type>);
#define REGISTER_ONE_HOT_GPU(type) \
REGISTER_ONE_HOT_GPU_INDEX(type, uint8); \
REGISTER_ONE_HOT_GPU_INDEX(type, int8); \
REGISTER_ONE_HOT_GPU_INDEX(type, int32); \
REGISTER_ONE_HOT_GPU_INDEX(type, int64_t);
TF_CALL_int8(REGISTER_ONE_HOT_GPU);
TF_CALL_int32(REGISTER_ONE_HOT_GPU);
TF_CALL_int64(REGISTER_ONE_HOT_GPU);
TF_CALL_GPU_ALL_TYPES(REGISTER_ONE_HOT_GPU);
#undef REGISTER_ONE_HOT_GPU_INDEX
#undef REGISTER_ONE_HOT_GPU
#endif
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* OneHot(int batch_size, int num_classes, int axis) {
Graph* g = new Graph(OpRegistry::Global());
Tensor indices(DT_INT32, TensorShape({batch_size}));
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dist(0, num_classes - 1);
auto indices_t = indices.flat<int32>();
for (int i = 0; i < batch_size; ++i) {
indices_t(i) = dist(gen);
}
Tensor depth(DT_INT32, TensorShape({}));
depth.scalar<int32>()() = num_classes;
Tensor on_value(DT_FLOAT, TensorShape({}));
on_value.scalar<float>()() = 1.0f;
Tensor off_value(DT_FLOAT, TensorShape({}));
off_value.scalar<float>()() = 0.0f;
test::graph::Multi(g, "OneHot",
{
test::graph::Constant(g, indices),
test::graph::Constant(g, depth),
test::graph::Constant(g, on_value),
test::graph::Constant(g, off_value),
})
->AddAttr("axis", axis);
return g;
}
#define BM_OneHot(BATCH, CLASS, AXIS, DEVICE) \
static void BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, OneHot(BATCH, CLASS, AXIS), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \
CLASS); \
} \
BENCHMARK(BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE);
BM_OneHot(32, 512, 1, cpu);
BM_OneHot(64, 512, 1, cpu);
BM_OneHot(128, 512, 1, cpu);
BM_OneHot(32, 1024, 1, cpu);
BM_OneHot(64, 1024, 1, cpu);
BM_OneHot(128, 1024, 1, cpu);
BM_OneHot(32, 10000, 1, cpu);
BM_OneHot(64, 10000, 1, cpu);
BM_OneHot(128, 10000, 1, cpu);
BM_OneHot(32, 512, 0, cpu);
BM_OneHot(64, 512, 0, cpu);
BM_OneHot(128, 512, 0, cpu);
BM_OneHot(32, 1024, 0, cpu);
BM_OneHot(64, 1024, 0, cpu);
BM_OneHot(128, 1024, 0, cpu);
BM_OneHot(32, 10000, 0, cpu);
BM_OneHot(64, 10000, 0, cpu);
BM_OneHot(128, 10000, 0, cpu);
} |
1,137 | cpp | tensorflow/tensorflow | while_op | tensorflow/compiler/tf2xla/kernels/while_op.cc | tensorflow/core/kernels/while_op_test.cc | #ifndef TENSORFLOW_COMPILER_TF2XLA_KERNELS_WHILE_OP_H_
#define TENSORFLOW_COMPILER_TF2XLA_KERNELS_WHILE_OP_H_
#include <vector>
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/core/framework/attr_value.pb.h"
namespace tensorflow {
class XlaWhileOp : public XlaOpKernel {
public:
explicit XlaWhileOp(OpKernelConstruction* ctx);
void Compile(XlaOpKernelContext* ctx) override;
private:
NameAttrList cond_name_attr_;
NameAttrList body_name_attr_;
bool has_token_input_output_;
std::vector<string> token_input_nodes_;
string original_node_name_;
bool propagate_compile_time_consts_ = false;
XlaWhileOp(const XlaWhileOp&) = delete;
void operator=(const XlaWhileOp&) = delete;
};
}
#endif
#include "tensorflow/compiler/tf2xla/kernels/while_op.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2xla/kernels/if_while_utils.h"
#include "tensorflow/compiler/tf2xla/kernels/tensor_list_utils.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/tf2xla/xla_resource.h"
#include "xla/client/client.h"
#include "xla/client/lib/tuple.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
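// Checks that all DT_RESOURCE arguments of the loop body come after every
// non-resource argument.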
Status VerifyResourceArgsGroupedAtEnd(XlaOpKernelContext* ctx,
const NameAttrList& body_name_attr) {
const FunctionBody* body;
TF_RETURN_IF_ERROR(ctx->compiler()->FindFunctionBody(body_name_attr, &body));
bool has_seen_resource = false;
for (int i = 0; i < body->arg_types.size(); i++) {
DataType arg_type = body->arg_types[i];
if (has_seen_resource) {
if (arg_type != DT_RESOURCE) {
return errors::InvalidArgument(
"Expect input resources are grouped in the end of while body ",
body_name_attr.name(), ", but the ", i, "-th argument ",
body->arg_nodes[i]->name(), " is not a resource.");
}
} else {
if (arg_type == DT_RESOURCE) {
has_seen_resource = true;
}
}
}
return absl::OkStatus();
}
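// Builds one XlaCompiler::Argument per op input and reports whether any
// resource variables or TensorLists are still uninitialized and whether any
// TensorArray resources are present.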
Status MakeXlaCompilerArgumentsFromInputs(
XlaOpKernelContext* ctx, std::vector<XlaCompiler::Argument>* args,
bool* has_uninitialized_vars, bool* has_tensor_arrays,
bool* has_uninitialized_tensor_lists) {
VLOG(2) << "Num inputs " << ctx->num_inputs();
args->resize(ctx->num_inputs());
*has_uninitialized_vars = false;
*has_tensor_arrays = false;
*has_uninitialized_tensor_lists = false;
for (int i = 0; i < ctx->num_inputs(); ++i) {
VLOG(2) << " Input " << i << " type: " << DataTypeString(ctx->input_type(i))
<< " shape: " << ctx->InputShape(i).DebugString();
XlaCompiler::Argument& arg = (*args)[i];
DataType type = ctx->input_type(i);
if (type == DT_RESOURCE) {
XlaResource* resource;
TF_RETURN_IF_ERROR(ctx->GetResourceInput(i, &resource));
XlaCompiler::PopulateArgumentFromResource(*resource, &arg);
if (arg.resource_kind == XlaResource::kTensorArray) {
*has_tensor_arrays = true;
}
if (!arg.initialized) {
*has_uninitialized_vars = true;
}
VLOG(2) << " resource " << resource->name()
<< " type: " << DataTypeString(arg.type)
<< " shape: " << arg.ShapeHumanString()
<< " initialized: " << arg.initialized;
} else {
arg.kind = XlaCompiler::Argument::kParameter;
arg.type = type;
TF_ASSIGN_OR_RETURN(arg.shape, ctx->builder()->GetShape(ctx->Input(i)));
if (IsTensorListInput(ctx, i)) {
TF_RETURN_IF_ERROR(
IsTensorListInitialized(ctx->Input(i), &arg.initialized));
if (!arg.initialized) {
*has_uninitialized_tensor_lists = true;
}
}
}
}
return absl::OkStatus();
}
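// Fills loop_invariants[i] with whether the i-th argument of the loop body is
// loop invariant, i.e. returned unchanged by the body.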
void GetLoopInvariants(XlaOpKernelContext* ctx,
const NameAttrList& body_name_attr,
std::vector<bool>* const loop_invariants) {
const FunctionBody* body;
OP_REQUIRES_OK(ctx, ctx->compiler()->FindFunctionBody(body_name_attr, &body));
const tensorflow::FunctionLibraryDefinition* fld =
ctx->compiler()->flib_runtime()->GetFunctionLibraryDefinition();
for (int i = 0; i < body->ret_nodes.size(); i++) {
absl::StatusOr<bool> is_loop_invariant = IsLoopInvariant(body, i, fld);
OP_REQUIRES_OK(ctx, is_loop_invariant.status());
(*loop_invariants)[i] = *is_loop_invariant;
VLOG(2) << "Arg " << i << " of " << body_name_attr.name() << " is "
<< ((*loop_invariants)[i] ? "" : "not ") << "loop invariant";
}
}
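// Rewrites loop-invariant, non-resource arguments that must be compile-time
// constants in either the condition or the body as constant arguments, and
// records which argument indices were converted.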
Status ConvertLoopInvariantsToConst(
XlaOpKernelContext* ctx, const NameAttrList& body_name_attr,
const NameAttrList& cond_name_attr,
std::vector<XlaCompiler::Argument>* args,
std::vector<bool>* compile_time_const_arg_indices,
int* num_compile_time_const_args, xla::Client* client) {
std::vector<bool> loop_invariants(ctx->num_inputs());
GetLoopInvariants(ctx, body_name_attr, &loop_invariants);
std::vector<bool> body_must_be_const_nodes;
const FunctionBody* body;
std::vector<bool> cond_must_be_const_nodes;
const FunctionBody* cond;
TF_RETURN_IF_ERROR(FindMustBeConstNodes(ctx, body_name_attr,
&body_must_be_const_nodes, &body));
TF_RETURN_IF_ERROR(FindMustBeConstNodes(ctx, cond_name_attr,
&cond_must_be_const_nodes, &cond));
auto should_convert_to_const = [&](int arg_idx) {
XlaCompiler::Argument& arg = (*args)[arg_idx];
return arg.kind != XlaCompiler::Argument::kResource &&
loop_invariants[arg_idx] &&
(body_must_be_const_nodes[body->arg_nodes[arg_idx]->id()] ||
cond_must_be_const_nodes[cond->arg_nodes[arg_idx]->id()]);
};
absl::InlinedVector<int, 5> converted_constants =
ConvertCompileTimeConstArgumentsToConst(ctx, args,
0,
should_convert_to_const);
VLOG(2) << "Converted args to constants: {"
<< absl::StrJoin(converted_constants, ",") << "}";
for (int arg_idx : converted_constants) {
compile_time_const_arg_indices->at(arg_idx) = true;
(*num_compile_time_const_args)++;
}
return absl::OkStatus();
}
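// Verifies that the loop body's tuple input shape is compatible with its
// output shape once compile-time constant outputs are excluded and the
// optional token output is taken into account.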
Status VerifyBodyInputAndOutputShapeMatch(
XlaOpKernelContext* ctx,
const std::vector<bool>& compile_time_const_arg_indices,
const XlaCompiler::CompilationResult& body, bool has_token_input_output) {
xla::Shape body_input_shape = body.xla_input_shapes[0];
xla::Shape body_output_shape;
body_output_shape.set_element_type(xla::TUPLE);
for (int i = 0; i < ctx->num_outputs(); i++) {
if (!compile_time_const_arg_indices[i]) {
*(body_output_shape.add_tuple_shapes()) =
body.xla_output_shape.tuple_shapes(i);
}
}
if (has_token_input_output) {
*(body_output_shape.add_tuple_shapes()) =
body.xla_output_shape.tuple_shapes(ctx->num_inputs());
}
if (!xla::ShapeUtil::Compatible(body_input_shape, body_output_shape)) {
return errors::InvalidArgument(
"Input and output shapes of loop body do not match: ",
xla::ShapeUtil::HumanString(body_input_shape), " vs. ",
xla::ShapeUtil::HumanString(body_output_shape));
}
return absl::OkStatus();
}
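// Wraps the compiled condition so the wrapper returns just the predicate
// (element 0 of the condition's output tuple), as xla::While expects a
// scalar-predicate condition.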
absl::StatusOr<xla::XlaComputation> BuildWrappedCond(
XlaOpKernelContext* ctx, const XlaCompiler::CompilationResult& cond) {
xla::Shape cond_input_shape = cond.xla_input_shapes[0];
std::unique_ptr<xla::XlaBuilder> cb =
ctx->builder()->CreateSubBuilder("cond_wrapper");
auto inputs = xla::Parameter(cb.get(), 0, cond_input_shape, "inputs");
auto outputs = xla::Call(cb.get(), *cond.computation, {inputs});
xla::GetTupleElement(outputs, 0);
return cb->Build();
}
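// Wraps the compiled body when its signature needs adjusting: outputs that
// correspond to compile-time constant arguments are dropped, dimensions that
// are dynamic in the input shape but static in the output shape are marked
// dynamic again via SetDimensionSize so input and output shapes match, and
// the token output is forwarded when present. If nothing needs rewriting, the
// body computation is returned unchanged.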
absl::StatusOr<xla::XlaComputation> BuildWrappedBody(
XlaOpKernelContext* ctx, const XlaCompiler::CompilationResult& body,
const std::vector<bool>& compile_time_const_arg_indices,
int num_compile_time_const_args, bool has_token_input_output) {
if (num_compile_time_const_args <= 0 &&
body.xla_input_shapes[0] == body.xla_output_shape) {
return xla::XlaComputation(body.computation->proto());
}
xla::XlaComputation body_wrapper;
std::unique_ptr<xla::XlaBuilder> cb =
ctx->builder()->CreateSubBuilder("body_wrapper");
xla::Shape body_input_shape = body.xla_input_shapes[0];
auto inputs = xla::Parameter(cb.get(), 0, body_input_shape, "inputs");
auto outputs = xla::Call(cb.get(), *body.computation, {inputs});
std::vector<xla::XlaOp> non_compile_time_const_outputs;
int input_num = 0;
for (int i = 0; i < compile_time_const_arg_indices.size(); i++) {
if (!compile_time_const_arg_indices[i]) {
xla::XlaOp output = xla::GetTupleElement(outputs, i);
const xla::Shape& input_shape = body_input_shape.tuple_shapes(input_num);
const xla::Shape& output_shape = body.xla_output_shape.tuple_shapes(i);
TF_RET_CHECK(xla::ShapeUtil::Compatible(input_shape, output_shape));
if (input_shape != output_shape) {
TF_ASSIGN_OR_RETURN(xla::ShapeTree<xla::XlaOp> disassembled_tuple,
xla::DisassembleTuple(output));
disassembled_tuple.ForEachMutableElement(
[&](const xla::ShapeIndex& index, xla::XlaOp* element) {
const xla::Shape& output_subshape =
xla::ShapeUtil::GetSubshape(output_shape, index);
if (output_subshape.IsArray()) {
const xla::Shape& input_subshape =
xla::ShapeUtil::GetSubshape(input_shape, index);
for (int d = 0; d < output_subshape.rank(); ++d) {
if (input_subshape.is_dynamic_dimension(d) &&
!output_subshape.is_dynamic_dimension(d)) {
*element = xla::SetDimensionSize(
*element,
xla::ConstantR0(
cb.get(),
static_cast<int32_t>(output_shape.dimensions()[d])),
d);
}
}
}
});
output =
xla::AssembleTuple(output.builder(), std::move(disassembled_tuple));
}
non_compile_time_const_outputs.push_back(output);
++input_num;
}
}
if (has_token_input_output) {
non_compile_time_const_outputs.push_back(
xla::GetTupleElement(outputs, ctx->num_outputs()));
}
xla::Tuple(cb.get(), non_compile_time_const_outputs);
return cb->Build();
}
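// Emits the xla::While instruction and assembles a full output tuple in op
// output order, re-inserting the compile-time constant inputs that were left
// out of the loop carry and appending the token output when present.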
xla::XlaOp BuildWhile(XlaOpKernelContext* ctx,
const xla::XlaComputation& wrapped_cond,
const xla::XlaComputation& wrapped_body,
const xla::XlaOp initial_values,
const std::vector<int>& input_mapping,
const std::vector<bool>& compile_time_const_arg_indices,
int num_compile_time_const_args,
bool has_token_input_output) {
xla::XlaOp while_result =
xla::While(wrapped_cond, wrapped_body, initial_values);
std::vector<xla::XlaOp> padded_while_outputs(ctx->num_outputs());
int while_result_index = 0;
for (int i = 0; i < ctx->num_inputs(); i++) {
if (!compile_time_const_arg_indices[i]) {
padded_while_outputs[input_mapping[while_result_index]] =
xla::GetTupleElement(while_result, while_result_index);
while_result_index++;
} else {
padded_while_outputs[i] = ctx->Input(i);
}
}
if (has_token_input_output) {
padded_while_outputs.push_back(xla::GetTupleElement(
while_result, ctx->num_inputs() - num_compile_time_const_args));
}
return xla::Tuple(ctx->builder(), padded_while_outputs);
}
}
XlaWhileOp::XlaWhileOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
const NameAttrList* name_attr;
OP_REQUIRES_OK(ctx, ctx->GetAttr("cond", &name_attr));
cond_name_attr_ = *name_attr;
OP_REQUIRES_OK(ctx, ctx->GetAttr("body", &name_attr));
body_name_attr_ = *name_attr;
if (!ctx->GetAttr(kXlaTokenInputNodesAttrName, &token_input_nodes_).ok()) {
has_token_input_output_ = false;
} else {
has_token_input_output_ = !token_input_nodes_.empty();
}
if (ctx->HasAttr(kPropagateCompileTimeConsts)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kPropagateCompileTimeConsts,
&propagate_compile_time_consts_));
}
if (!ctx->GetAttr(kXlaOriginalOutsideCompilationNodeName,
&original_node_name_)
.ok())
original_node_name_ = name();
}
void XlaWhileOp::Compile(XlaOpKernelContext* ctx) {
VLOG(1) << "WhileOp::Compile";
OP_REQUIRES_OK(ctx, VerifyResourceArgsGroupedAtEnd(ctx, body_name_attr_));
std::vector<XlaCompiler::Argument> arguments;
bool has_uninitialized_vars;
bool has_tensor_arrays;
bool has_uninitialized_tensor_lists;
OP_REQUIRES_OK(ctx, MakeXlaCompilerArgumentsFromInputs(
ctx, &arguments, &has_uninitialized_vars,
&has_tensor_arrays, &has_uninitialized_tensor_lists));
xla::XlaBuilder* builder = ctx->builder();
XlaCompiler* compiler = ctx->compiler();
std::vector<bool> compile_time_const_arg_indices(ctx->num_inputs());
int num_compile_time_const_args = 0;
if (propagate_compile_time_consts_) {
OP_REQUIRES_OK(ctx, ConvertLoopInvariantsToConst(
ctx, body_name_attr_, cond_name_attr_, &arguments,
&compile_time_const_arg_indices,
&num_compile_time_const_args, compiler->client()));
}
VLOG(1) << "Compiling body";
XlaCompiler::CompileOptions body_options;
body_options.use_tuple_arg = true;
body_options.return_updated_values_for_all_resources = true;
body_options.is_entry_computation = false;
body_options.add_token_input_output = has_token_input_output_;
auto body = std::make_unique<XlaCompiler::CompilationResult>();
OP_REQUIRES_OK(ctx, compiler->CompileFunction(body_options, body_name_attr_,
arguments, body.get()));
OP_REQUIRES_OK(
ctx, ctx->xla_context()->RecordCollectiveInfoFromNestedCompilationResult(
*body.get()));
if (has_uninitialized_vars || has_tensor_arrays ||
has_uninitialized_tensor_lists) {
VLOG(2) << "Recompiling loop body: has_uninitialized_vars: "
<< has_uninitialized_vars
<< " has_tensor_arrays: " << has_tensor_arrays
<< " has_uninitialized_tensor_lists: "
<< has_uninitialized_tensor_lists;
for (int i = 0; i < body->resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& update = body->resource_updates[i];
XlaResource* resource;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(update.input_index, &resource));
XlaCompiler::Argument& arg = arguments[update.input_index];
if (!arg.initialized) {
VLOG(2) << "Update shape for argument " << update.input_index << " "
<< update.shape.DebugString();
arg.initialized = true;
arg.shape = update.shape;
OP_REQUIRES_OK(ctx,
resource->SetTypeAndShape(update.type, update.shape));
OP_REQUIRES_OK(ctx, resource->SetZeroValue(builder));
}
for (const string& grad_source : update.tensor_array_gradients_accessed) {
VLOG(4) << "TensorArray " << resource->name() << " accessed gradient "
<< grad_source;
XlaResource* gradient;
OP_REQUIRES_OK(ctx, resource->GetOrCreateTensorArrayGradient(
grad_source, builder, &gradient));
}
for (const auto& gradient : resource->tensor_array_gradients()) {
arg.tensor_array_gradients.insert(gradient.first);
}
}
xla::Shape body_output_shape = body->xla_output_shape;
OP_REQUIRES(ctx, body_output_shape.IsTuple(),
errors::FailedPrecondition(
"xla_output_shape of while body must be a tuple."));
for (int i = 0; i < arguments.size(); i++) {
XlaCompiler::Argument& arg = arguments[i];
if (arg.initialized || !IsTensorListInput(ctx, i)) {
continue;
}
arg.shape = body_output_shape.tuple_shapes(i);
arg.initialized = true;
}
VLOG(1) << "Recompiling body with corrected resource shapes";
*body = {};
OP_REQUIRES_OK(ctx, compiler->CompileFunction(body_options, body_name_attr_,
arguments, body.get()));
}
VLOG(1) << "Compiling condition";
XlaCompiler::CompileOptions cond_options;
cond_options.use_tuple_arg = true;
cond_options.is_entry_computation = false;
cond_options.add_token_input_output = has_token_input_output_;
XlaCompiler::CompilationResult cond;
OP_REQUIRES_OK(ctx, compiler->CompileFunction(cond_options, cond_name_attr_,
arguments, &cond));
OP_REQUIRES(ctx, body->xla_input_shapes.size() == 1,
errors::FailedPrecondition("Expected one input shape"));
xla::Shape body_input_shape = body->xla_input_shapes[0];
OP_REQUIRES(ctx, body_input_shape.IsTuple(),
errors::FailedPrecondition("Expected tuple shape"));
OP_REQUIRES(ctx, cond.xla_input_shapes.size() == 1,
errors::FailedPrecondition("Expected one input shape"));
xla::Shape cond_input_shape = cond.xla_input_shapes[0];
OP_REQUIRES(ctx, cond_input_shape.IsTuple(),
errors::FailedPrecondition("Expected tuple shape"));
VLOG(2) << "Body shape: " << xla::ShapeUtil::HumanString(body_input_shape)
<< " -> " << xla::ShapeUtil::HumanString(body->xla_output_shape);
VLOG(2) << "Cond shape: " << xla::ShapeUtil::HumanString(cond_input_shape)
<< " -> " << xla::ShapeUtil::HumanString(cond.xla_output_shape);
OP_REQUIRES(ctx,
xla::ShapeUtil::Compatible(body_input_shape, cond_input_shape),
errors::InvalidArgument(
"Input shapes of loop body and condition do not match: ",
xla::ShapeUtil::HumanString(body_input_shape), " vs. ",
xla::ShapeUtil::HumanString(cond_input_shape)));
OP_REQUIRES_OK(ctx, VerifyBodyInputAndOutputShapeMatch(
ctx, compile_time_const_arg_indices, *body.get(),
has_token_input_output_));
xla::Shape expected_cond_output_shape_without_side_effect =
xla::ShapeUtil::MakeTupleShape(
{xla::ShapeUtil::MakeShape(xla::PRED, {})});
xla::Shape expected_cond_output_shape_with_side_effect =
xla::ShapeUtil::MakeTupleShape({xla::ShapeUtil::MakeShape(xla::PRED, {}),
xla::ShapeUtil::MakeTokenShape()});
OP_REQUIRES(ctx,
xla::ShapeUtil::Compatible(
cond.xla_output_shape,
expected_cond_output_shape_without_side_effect) ||
xla::ShapeUtil::Compatible(
cond.xla_output_shape,
expected_cond_output_shape_with_side_effect),
errors::InvalidArgument(
"Output shape of loop condition should be (pred[]) or "
"(pred[], token[]), got: ",
xla::ShapeUtil::HumanString(cond.xla_output_shape)));
int num_inputs = body->input_mapping.size();
std::vector<xla::XlaOp> inputs(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
int input_num = body->input_mapping[i];
if (has_token_input_output_ && i == num_inputs - 1) {
std::vector<xla::XlaOp> token_inputs;
token_inputs.reserve(token_input_nodes_.size());
for (const string& node_name : token_input_nodes_) {
auto token_or = compiler->GetNodeToken(node_name);
OP_REQUIRES_OK(ctx, token_or.status());
token_inputs.push_back(token_or.value());
}
inputs[i] = xla::AfterAll(builder, token_inputs);
} else if (ctx->input_type(input_num) == DT_RESOURCE) {
XlaResource* resource;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(input_num, &resource));
OP_REQUIRES_OK(ctx, resource->Pack(&inputs[i], builder));
} else if (IsTensorListInput(ctx, input_num)) {
xla::XlaOp input = ctx->Input(input_num);
auto input_shape_or = ctx->builder()->GetShape(input);
OP_REQUIRES_OK(ctx, input_shape_or.status());
xla::Shape input_shape = input_shape_or.value();
const xla::Shape& list_shape = body_input_shape.tuple_shapes(i);
if (input_shape != list_shape) {
std::vector<std::vector<xla::XlaOp>> list_dynamic_dims;
for (int i = 0; i < list_shape.tuple_shapes_size() - 1; ++i) {
std::vector<xla::XlaOp> dynamic_dims;
const xla::Shape& shape = list_shape.tuple_shapes(i);
if (shape.is_dynamic_dimension(0)) {
xla::XlaOp leading_dim_size = xla::GetDimensionSize(input, 0);
dynamic_dims.push_back(leading_dim_size);
} else {
int32_t dim_size = shape.dimensions(0);
dynamic_dims.push_back(
xla::ConstantR0<int32>(ctx->builder(), dim_size));
}
for (int64_t dim = 1; dim < shape.dimensions_size(); ++dim) {
int32_t dim_size = shape.dimensions(dim);
if (shape.is_dynamic_dimension(dim)) {
dim_size = 0;
}
dynamic_dims.push_back(
xla::ConstantR0<int32_t>(ctx->builder(), dim_size));
}
list_dynamic_dims.push_back(dynamic_dims);
}
OP_REQUIRES_OK(
ctx, CreateZerosTensorListWithShape(ctx->builder(), list_shape,
list_dynamic_dims, &inputs[i]));
} else {
inputs[i] = ctx->Input(input_num);
}
} else {
inputs[i] = ctx->Input(input_num);
}
}
xla::XlaOp init = xla::Tuple(builder, inputs);
VLOG(1) << "Building while loop";
absl::StatusOr<xla::XlaComputation> cond_result = BuildWrappedCond(ctx, cond);
OP_REQUIRES_OK(ctx, cond_result.status());
xla::XlaComputation wrapped_cond = std::move(cond_result.value());
absl::StatusOr<xla::XlaComputation> body_result =
BuildWrappedBody(ctx, *body.get(), compile_time_const_arg_indices,
num_compile_time_const_args, has_token_input_output_);
OP_REQUIRES_OK(ctx, body_result.status());
xla::XlaComputation wrapped_body = std::move(body_result.value());
xla::XlaOp while_result =
BuildWhile(ctx, wrapped_cond, wrapped_body, init, body->input_mapping,
compile_time_const_arg_indices, num_compile_time_const_args,
has_token_input_output_);
int resource_index = 0;
for (int i = 0; i < ctx->num_outputs(); ++i) {
if (ctx->input_type(i) != DT_RESOURCE) {
if (IsTensorListInput(ctx, i)) {
ctx->SetTensorListOutput(i, xla::GetTupleElement(while_result, i));
} else {
ctx->SetOutput(i, xla::GetTupleElement(while_result, i));
}
++resource_index;
} else {
break;
}
}
if (has_token_input_output_) {
xla::XlaOp token_output =
xla::GetTupleElement(while_result, ctx->num_outputs());
auto shape_or = builder->GetShape(token_output);
OP_REQUIRES_OK(ctx, shape_or.status());
OP_REQUIRES(ctx, shape_or.value().IsToken( | #include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
namespace tensorflow {
namespace {
class WhileOpTest : public OpsTestBase {
protected:
WhileOpTest() {}
void SetUp() override {
stream_executor::test_util::PopulateDefaultPlatform(&platform_,
&platform_fns_);
stream_executor::test_util::PopulateDefaultDeviceFns(&device_fns_);
stream_executor::test_util::PopulateDefaultStreamExecutor(&se_);
stream_executor::test_util::PopulateDefaultTimerFns(&timer_fns_);
}
void TearDown() override {}
SP_Platform platform_;
SP_PlatformFns platform_fns_;
SP_DeviceFns device_fns_;
SP_StreamExecutor se_;
SP_TimerFns timer_fns_;
};
FunctionDef LessThanOrEqualToNWithCast(int64_t N) {
typedef FunctionDefHelper FDH;
const Tensor kN = test::AsScalar<int64_t>(N);
return FDH::Define(
"LessThanOrEqualToNWithCast",
{"x: T"},
{"z: bool"},
{"T: {float, double, int32, int64}"},
{
{{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT64}}},
{{"y"}, "_HostCast", {"N"}, {{"SrcT", DT_INT64}, {"DstT", DT_INT32}}},
{{"x_cst"}, "_HostCast", {"x"}, {{"SrcT", "$T"}, {"DstT", DT_INT32}}},
{{"z"}, "LessEqual", {"x_cst", "y"}, {{"T", DT_INT32}}},
});
}
FunctionDef XTimesTwoWithCast() {
typedef FunctionDefHelper FDH;
const Tensor kTwo = test::AsScalar<int64_t>(2);
return FDH::Define(
"XTimesTwoWithCast",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"two_cst"},
"_HostCast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT32}}},
{{"x_cst"}, "_HostCast", {"x"}, {{"SrcT", "$T"}, {"DstT", DT_INT32}}},
{{"y_cast"}, "Mul", {"x_cst", "two_cst"}, {{"T", DT_INT32}}},
{{"y"},
"_HostCast",
{"y_cast"},
{{"SrcT", DT_INT32}, {"DstT", "$T"}}},
});
}
TEST_F(WhileOpTest, WhileOpCPUBuildWithPluggableDevice) {
const std::string platform_name = "MY_TEST";
const std::string platform_type = "FAKE";
platform_.name = platform_name.c_str();
platform_.type = platform_type.c_str();
static bool memcpy_d2h_called = false;
se_.memcpy_dtoh = [](const SP_Device* device, SP_Stream stream,
void* host_dst, const SP_DeviceMemoryBase* device_src,
uint64_t size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
memcpy_d2h_called = true;
std::memcpy(host_dst, device_src->opaque, size);
};
se_.memcpy_htod = [](const SP_Device* const device, SP_Stream stream,
SP_DeviceMemoryBase* const device_dst,
const void* host_src, uint64_t size,
TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
std::memcpy(device_dst->opaque, host_src, size);
};
se_.host_memory_allocate = [](const SP_Device* const device, uint64_t size) {
#if EIGEN_MAX_ALIGN_BYTES == 0
return malloc(size);
#else
return tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES);
#endif
};
se_.host_memory_deallocate = [](const SP_Device* const device, void* mem) {
free(mem);
};
se_.allocate = [](const SP_Device* const device, uint64_t size,
int64_t memory_space, SP_DeviceMemoryBase* const mem) {
mem->struct_size = SP_DEVICE_MEMORY_BASE_STRUCT_SIZE;
#if EIGEN_MAX_ALIGN_BYTES == 0
mem->opaque = malloc(size);
#else
mem->opaque = tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES);
#endif
mem->size = size;
};
se_.deallocate = [](const SP_Device* const device,
SP_DeviceMemoryBase* const mem) {
free(mem->opaque);
mem->opaque = nullptr;
mem->size = 0;
};
static SE_EventStatus event_status = SE_EVENT_COMPLETE;
se_.create_event = [](const SP_Device* const device, SP_Event* event,
TF_Status* const status) -> void {
*event = new SP_Event_st(666);
};
se_.destroy_event = [](const SP_Device* const device,
SP_Event event) -> void { delete event; };
se_.get_event_status = [](const SP_Device* const device,
SP_Event event) -> SE_EventStatus {
EXPECT_EQ(event->event_id, 666);
return event_status;
};
std::unique_ptr<stream_executor::CPlatform> cplatform(
new stream_executor::CPlatform(
std::move(platform_), stream_executor::test_util::DestroyPlatform,
std::move(platform_fns_),
stream_executor::test_util::DestroyPlatformFns,
std::move(device_fns_), std::move(se_), std::move(timer_fns_)));
TF_CHECK_OK(
stream_executor::PlatformManager::RegisterPlatform(std::move(cplatform)));
DeviceFactory::Register(
platform_type, new PluggableDeviceFactory(platform_type, platform_name),
220, true);
std::unique_ptr<Device> plug_device(
DeviceFactory::NewDevice(platform_type, {}, "/job:a/replica:0"));
OpsTestBase::SetDevice(platform_type.c_str(), std::move(plug_device));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDef x_times_two = XTimesTwoWithCast();
FunctionDef less_than_or_eq = LessThanOrEqualToNWithCast(8);
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = x_times_two;
*f_lib_proto.add_function() = less_than_or_eq;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToNWithCast");
(*cond_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT);
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwoWithCast");
(*body_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* node;
TF_EXPECT_OK(NodeBuilder("while_test", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_FLOAT})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Finalize(root.graph(), &node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(node)), Output(node));
TF_ASSERT_OK(root.DoShapeInference(node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
ClientSession session(root);
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1.f));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(c.node())}, &out_tensors));
ASSERT_EQ(memcpy_d2h_called, true);
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<float>()(), 16.f);
}
}
}
} |
1,138 | cpp | tensorflow/tensorflow | scan_ops | tensorflow/core/kernels/scan_ops.cc | tensorflow/core/kernels/scan_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SCAN_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SCAN_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
typedef Eigen::Index Index;
template <typename Device, typename Reducer, typename T>
struct Scan {
void operator()(const Device& d, typename TTypes<T, 3>::ConstTensor in,
typename TTypes<T, 3>::Tensor out, const Reducer& reducer,
const bool reverse, const bool exclusive) {
Eigen::array<bool, 3> dims;
dims[0] = false;
dims[1] = reverse;
dims[2] = false;
MaybeWith32BitIndexing<Device>(
[&](auto in32, auto out32) {
out32.device(d) =
in32.reverse(dims).scan(1, reducer, exclusive).reverse(dims);
},
in, out);
}
};
template <typename T>
struct LogSumExp {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& a,
const T& b) const {
auto mi = Eigen::internal::scalar_min_op<T>()(a, b);
auto ma = Eigen::internal::scalar_max_op<T>()(a, b);
auto sub = Eigen::internal::scalar_difference_op<T>();
auto add = Eigen::internal::scalar_sum_op<T>();
auto exp = Eigen::internal::scalar_exp_op<T>();
auto log1p = Eigen::internal::scalar_log1p_op<T>();
auto cmp_lt =
Eigen::internal::scalar_cmp_op<T, T, Eigen::internal::cmp_LT>();
auto logsumexp = add(log1p(exp(sub(mi, ma))), ma);
return cmp_lt(ma, Eigen::NumTraits<T>::lowest()) ? ma : logsumexp;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const T& a,
const T& b) const {
auto mi = Eigen::internal::pmin(a, b);
auto ma = Eigen::internal::pmax(a, b);
using Eigen::internal::padd;
using Eigen::internal::pcmp_lt;
using Eigen::internal::pexp;
using Eigen::internal::plog1p;
using Eigen::internal::pset1;
using Eigen::internal::psub;
auto logsumexp = padd(plog1p(pexp(psub(mi, ma))), ma);
return pselect(pcmp_lt(ma, pset1(Eigen::NumTraits<T>::lowest())), ma,
logsumexp);
}
};
template <typename T>
struct LogSumExpReducer {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
LogSumExp<T> logsumexp;
*accum = logsumexp(*accum, t);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p,
Packet* accum) const {
LogSumExp<T> logsumexp;
*accum = logsumexp.packetOp(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return -Eigen::NumTraits<T>::infinity();
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return Eigen::internal::pset1(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
finalizePacket(const Packet& vaccum) const {
return vaccum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T
finalizeBoth(const T saccum, const Packet& vaccum) const {
auto max_reducer = Eigen::internal::MaxReducer<T, Eigen::PropagateNaN>();
auto sum_reducer = Eigen::internal::SumReducer<T>();
auto exp = Eigen::internal::scalar_exp_op<T>();
auto cmp_lt =
Eigen::internal::scalar_cmp_op<T, T, Eigen::internal::cmp_LT>();
auto log = Eigen::internal::scalar_log_op<T>();
auto add = Eigen::internal::scalar_sum_op<T>();
using Eigen::internal::pexp;
using Eigen::internal::psub;
auto ma = max_reducer.finalizeBoth(saccum, vaccum);
auto logsumexp = add(log(sum_reducer.finalizeBoth(
exp(saccum - ma), pexp(psub(vaccum, pset1(ma))))),
ma);
return cmp_lt(ma, Eigen::NumTraits<T>::lowest()) ? initialize() : logsumexp;
}
};
}
}
#endif
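// A minimal standalone sketch of the identity behind the LogSumExp functor
// above: log(exp(a) + exp(b)) = max(a, b) + log1p(exp(min(a, b) - max(a, b))),
// so the only exponential is evaluated at a non-positive argument and cannot
// overflow. Illustrative only; it assumes nothing beyond the C++ standard
// library and is not part of the header.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
static double LogSumExp2(double a, double b) {
  const double hi = std::max(a, b);
  const double lo = std::min(a, b);
  if (hi == -std::numeric_limits<double>::infinity()) return hi;
  return hi + std::log1p(std::exp(lo - hi));
}
int main() {
  std::printf("%f\n", LogSumExp2(1000.0, 1000.0));
  std::printf("%f\n", LogSumExp2(-1.0, -2.0));
  return 0;
}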
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/scan_ops.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, class T, typename Reducer, typename Tidx>
class ScanOp : public OpKernel {
public:
explicit ScanOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("reverse", &reverse_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("exclusive", &exclusive_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
const Tensor& tensor_axis = ctx->input(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tensor_axis.shape()),
errors::InvalidArgument("ScanOp: axis must be a scalar, not ",
tensor_axis.shape().DebugString()));
const Tidx axis_arg =
internal::SubtleMustCopy(tensor_axis.scalar<Tidx>()());
const Tidx axis = (axis_arg < 0) ? input.dims() + axis_arg : axis_arg;
OP_REQUIRES(ctx, FastBoundsCheck(axis, input.dims()),
errors::InvalidArgument(
"ScanOp: Expected scan axis in the range [", -input.dims(),
", ", input.dims(), "), but got ", axis));
const TensorShape& output_shape = input.shape();
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &output));
if (output_shape.num_elements() == 0) return;
const Device& d = ctx->eigen_device<Device>();
Reducer reducer;
int64_t reduced_shape[3] = {1, 1, 1};
for (Tidx i = 0; i < axis; ++i) {
reduced_shape[0] *= input.dim_size(i);
}
reduced_shape[1] = input.dim_size(axis);
for (Tidx i = axis + 1; i < input.dims(); ++i) {
reduced_shape[2] *= input.dim_size(i);
}
functor::Scan<Device, Reducer, T>()(d, input.shaped<T, 3>(reduced_shape),
output->shaped<T, 3>(reduced_shape),
reducer, reverse_, exclusive_);
}
private:
bool reverse_;
bool exclusive_;
};
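// A minimal sketch of the shape collapse performed in Compute() above: dims
// before the scan axis fold into reduced_shape[0], the axis itself becomes
// reduced_shape[1], and trailing dims fold into reduced_shape[2]. The helper
// name and the sample shape here are illustrative, not part of the kernel.
#include <cstdint>
#include <cstdio>
#include <vector>
static void CollapseForScan(const std::vector<int64_t>& dims, int axis,
                            int64_t reduced[3]) {
  reduced[0] = reduced[1] = reduced[2] = 1;
  for (int i = 0; i < axis; ++i) reduced[0] *= dims[i];
  reduced[1] = dims[axis];
  for (int i = axis + 1; i < static_cast<int>(dims.size()); ++i) {
    reduced[2] *= dims[i];
  }
}
int main() {
  int64_t reduced[3];
  CollapseForScan({2, 3, 4, 5}, 2, reduced);
  std::printf("%lld x %lld x %lld\n", static_cast<long long>(reduced[0]),
              static_cast<long long>(reduced[1]),
              static_cast<long long>(reduced[2]));
  return 0;
}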
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE(REDUCER, T) \
template <> \
void Scan<GPUDevice, REDUCER, T>::operator()( \
const GPUDevice& d, TTypes<T, 3>::ConstTensor in, \
TTypes<T, 3>::Tensor out, const REDUCER& reducer, const bool reverse, \
const bool exclusive); \
extern template struct Scan<GPUDevice, REDUCER, T>;
#define DECLARE_FOR_ALL_REDUCERS(T) \
DECLARE(Eigen::internal::SumReducer<T>, T); \
DECLARE(Eigen::internal::ProdReducer<T>, T);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_FOR_ALL_REDUCERS);
DECLARE_FOR_ALL_REDUCERS(int32);
DECLARE_FOR_ALL_REDUCERS(int64_t);
#undef DECLARE_FOR_ALL_REDUCERS
#define DECLARE_FOR_LOGSUMEXP_REDUCER(T) DECLARE(LogSumExpReducer<T>, T);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_FOR_LOGSUMEXP_REDUCER);
#undef DECLARE_FOR_LOGSUMEXP_REDUCER
#undef DECLARE
}
#endif
#define REGISTER_CPU_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Cumsum") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tidx"), \
ScanOp<CPUDevice, type, Eigen::internal::SumReducer<type>, int32>) \
REGISTER_KERNEL_BUILDER( \
Name("Cumsum") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tidx"), \
ScanOp<CPUDevice, type, Eigen::internal::SumReducer<type>, int64>)
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Cumsum") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tidx") \
.HostMemory("axis"), \
ScanOp<GPUDevice, type, Eigen::internal::SumReducer<type>, int32>) \
REGISTER_KERNEL_BUILDER( \
Name("Cumsum") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tidx") \
.HostMemory("axis"), \
ScanOp<GPUDevice, type, Eigen::internal::SumReducer<type>, int64>)
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNELS);
REGISTER_GPU_KERNELS(int32);
REGISTER_GPU_KERNELS(int64_t);
#undef REGISTER_GPU_KERNELS
#endif
#define REGISTER_CPU_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Cumprod") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tidx"), \
ScanOp<CPUDevice, type, Eigen::internal::ProdReducer<type>, int32>) \
REGISTER_KERNEL_BUILDER( \
Name("Cumprod") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tidx"), \
ScanOp<CPUDevice, type, Eigen::internal::ProdReducer<type>, int64>)
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNELS(type) \
REGISTER_KERNEL_BUILDER( \
Name("Cumprod") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tidx") \
.HostMemory("axis"), \
ScanOp<GPUDevice, type, Eigen::internal::ProdReducer<type>, int32>) \
REGISTER_KERNEL_BUILDER( \
Name("Cumprod") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tidx") \
.HostMemory("axis"), \
ScanOp<GPUDevice, type, Eigen::internal::ProdReducer<type>, int64>)
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNELS);
REGISTER_GPU_KERNELS(int32);
REGISTER_GPU_KERNELS(int64_t);
#undef REGISTER_GPU_KERNELS
#endif
#define REGISTER_CUMLOGSUMEXP_KERNEL(device, device_type, type, type_idx) \
REGISTER_KERNEL_BUILDER( \
Name("CumulativeLogsumexp") \
.Device(device) \
.TypeConstraint<type>("T") \
.TypeConstraint<type_idx>("Tidx") \
.HostMemory("axis"), \
ScanOp<device_type, type, functor::LogSumExpReducer<type>, type_idx>)
#define REGISTER_CPU_KERNELS(type) \
REGISTER_CUMLOGSUMEXP_KERNEL(DEVICE_CPU, CPUDevice, type, int32) \
REGISTER_CUMLOGSUMEXP_KERNEL(DEVICE_CPU, CPUDevice, type, int64_t)
TF_CALL_FLOAT_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNELS(type) \
REGISTER_CUMLOGSUMEXP_KERNEL(DEVICE_GPU, GPUDevice, type, int32) \
REGISTER_CUMLOGSUMEXP_KERNEL(DEVICE_GPU, GPUDevice, type, int64_t)
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS
#endif
#undef REGISTER_CUMLOGSUMEXP_KERNEL
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename T>
static Graph* LargeOneDCumsum(int num_x, bool reverse = false) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<T>::value, TensorShape({num_x}));
data.flat<T>().setRandom();
Tensor axes(DT_INT32, TensorShape({}));
axes.flat<int32>()(0) = 0;
test::graph::Cumsum(g, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ColCumsum(int num_x, int num_y, bool reverse = false) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({}));
axes.flat<int32>()(0) = 0;
test::graph::Cumsum(g, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* RowCumsum(int num_x, int num_y, bool reverse = false) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({}));
axes.flat<int32>()(0) = 1;
test::graph::Cumsum(g, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDYCumsum(int num_y, int num_z, bool reverse = false) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({32, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({}));
axes.flat<int32>()(0) = 1;
test::graph::Cumsum(g, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
template <typename T>
static void LargeOneDimensional(::testing::benchmark::State& state,
const string& device, int num_x,
bool reverse = false) {
test::Benchmark(device, LargeOneDCumsum<T>(num_x, reverse),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
sizeof(T));
}
static void DoRowCumsum(::testing::benchmark::State& state,
const string& device, int num_x, int num_y,
bool reverse = false) {
test::Benchmark(device, RowCumsum(num_x, num_y, reverse),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void DoColCumsum(::testing::benchmark::State& state,
const string& device, int num_x, int num_y,
bool reverse = false) {
test::Benchmark(device, ColCumsum(num_x, num_y, reverse),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DYCumsum(::testing::benchmark::State& state,
const string& device, int num_x, int num_y,
bool reverse = false) {
test::Benchmark(device, ThreeDYCumsum(num_x, num_y, reverse),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void BM_OneDCumsumGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
LargeOneDimensional<float>(state, "gpu", num_x);
}
BENCHMARK(BM_OneDCumsumGPU)->Range(1, 1 << 21);
static void BM_OneDCumsumGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
LargeOneDimensional<Eigen::half>(state, "gpu", num_x);
}
BENCHMARK(BM_OneDCumsumGPUHalf)->Range(1, 1 << 21);
static void BM_Sum2DRowCumsumGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoRowCumsum(state, "gpu", num_x, num_y);
}
BENCHMARK(BM_Sum2DRowCumsumGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DColumnCumsumGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoColCumsum(state, "gpu", num_x, num_y);
}
BENCHMARK(BM_Sum2DColumnCumsumGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum3DYCumsumGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DYCumsum(state, "gpu", num_x, num_y);
}
BENCHMARK(BM_Sum3DYCumsumGPU)->RangePair(64, 4096, 64, 4096);
static void BM_OneDCumsumGPU_reverse(::testing::benchmark::State& state) {
const int num_x = state.range(0);
LargeOneDimensional<float>(state, "gpu", num_x, true);
}
BENCHMARK(BM_OneDCumsumGPU_reverse)->Range(1, 1 << 21);
static void BM_Sum2DRowCumsumGPU_reverse(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoRowCumsum(state, "gpu", num_x, num_y, true);
}
BENCHMARK(BM_Sum2DRowCumsumGPU_reverse)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DColumnCumsumGPU_reverse(
::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoColCumsum(state, "gpu", num_x, num_y, true);
}
BENCHMARK(BM_Sum2DColumnCumsumGPU_reverse)->RangePair(1, 8192, 1, 8192);
static void BM_Sum3DYCumsumGPU_reverse(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DYCumsum(state, "gpu", num_x, num_y, true);
}
BENCHMARK(BM_Sum3DYCumsumGPU_reverse)->RangePair(32, 2048, 32, 2048);
} |
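// A rough standalone analogue of the reporting pattern used in the benchmarks
// above (SetItemsProcessed / SetBytesProcessed so the framework prints
// throughput next to wall time). This sketch targets the open-source Google
// Benchmark API rather than TensorFlow's test::Benchmark harness; the library
// and header are assumptions for illustration.
#include <cstdint>
#include <vector>
#include "benchmark/benchmark.h"
static void BM_SimpleCumsum(benchmark::State& state) {
  const int64_t n = state.range(0);
  std::vector<float> v(n, 1.0f);
  for (auto _ : state) {
    float acc = 0.f;
    for (int64_t i = 0; i < n; ++i) {
      acc += v[i];
      v[i] = acc;
    }
    benchmark::DoNotOptimize(v.data());
  }
  state.SetItemsProcessed(state.iterations() * n);
  state.SetBytesProcessed(state.iterations() * n * sizeof(float));
}
BENCHMARK(BM_SimpleCumsum)->Range(1 << 10, 1 << 20);
BENCHMARK_MAIN();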
1,139 | cpp | tensorflow/tensorflow | conv_ops | tensorflow/core/kernels/conv_ops.cc | tensorflow/core/kernels/conv_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_CONV_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/util/tensor_format.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/kernels/conv_ops_gpu.h"
#include "tensorflow/core/platform/stream_executor.h"
#endif
namespace tensorflow {
class OpKernelContext;
template <typename Device, typename T>
struct LaunchConv2DOp {
void operator()(OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune,
const Tensor& input, const Tensor& filter, int row_dilation,
int col_dilation, int row_stride, int col_stride,
const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format);
};
template <typename Device, typename T>
struct LaunchConvOp {
void operator()(OpKernelContext* context, bool cudnn_use_autotune,
const Tensor& input, const Tensor& filter,
const std::vector<int64>& dilations,
const std::vector<int64>& strides, Padding padding,
const std::vector<int64_t>& explicit_paddings,
TensorFormat data_format, Tensor* output);
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename T>
struct LaunchConv2DOp<Eigen::GpuDevice, T> {
void operator()(OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune,
const Tensor& input, const Tensor& filter, int row_dilation,
int col_dilation, int row_stride, int col_stride,
const Padding& padding,
const std::vector<int64_t>& explicit_paddings, Tensor* output,
TensorFormat data_format);
};
template <typename T>
struct LaunchConvOp<Eigen::GpuDevice, T> {
void operator()(OpKernelContext* context, bool cudnn_use_autotune,
const Tensor& input, const Tensor& filter,
const std::vector<int64>& dilations,
const std::vector<int64>& strides, const Padding padding,
const std::vector<int64_t>& explicit_paddings,
TensorFormat data_format, Tensor* output);
};
#endif
template <class T, size_t size>
struct Im2ColBufferResource : public ResourceBase {
Im2ColBufferResource<T, size>() {
data = static_cast<T*>(port::Malloc(size * sizeof(T)));
}
~Im2ColBufferResource<T, size>() { port::Free(data); }
mutex mu;
T* data;
string DebugString() const { return "Im2ColBufferResource"; }
};
struct Conv2DParameters {
std::vector<int32> dilations;
std::vector<int32> strides;
Padding padding;
TensorFormat data_format;
std::vector<int64_t> explicit_paddings;
};
struct Conv2DDimensions {
int batch;
int input_rows;
int input_cols;
int in_depth;
int filter_rows;
int filter_cols;
int patch_depth;
int out_depth;
int stride_rows;
int stride_cols;
int dilation_rows;
int dilation_cols;
int64_t out_rows;
int64_t out_cols;
int64_t pad_rows_before;
int64_t pad_rows_after;
int64_t pad_cols_before;
int64_t pad_cols_after;
};
Status InitConv2DParameters(const OpKernelConstruction* context,
Conv2DParameters* params);
Status ComputeConv2DDimension(const Conv2DParameters& params,
const Tensor& input, const Tensor& filter,
Conv2DDimensions* dimensions);
}
#endif
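// A rough sketch of the windowed-output arithmetic that populates out_rows and
// out_cols in Conv2DDimensions: VALID keeps only fully covered positions, SAME
// pads so the output is ceil(input / stride), and dilation widens the
// effective filter. The helper below is illustrative and follows the usual
// convention; it stands in for, but is not, the TensorFlow utility used above.
#include <cstdint>
#include <cstdio>
static int64_t WindowedOutputSize(int64_t input, int64_t filter,
                                  int64_t dilation, int64_t stride,
                                  bool same_padding) {
  const int64_t effective_filter = (filter - 1) * dilation + 1;
  if (same_padding) return (input + stride - 1) / stride;
  return (input - effective_filter + stride) / stride;
}
int main() {
  std::printf("%lld\n", static_cast<long long>(
                            WindowedOutputSize(32, 3, 1, 1, false)));
  std::printf("%lld\n", static_cast<long long>(
                            WindowedOutputSize(32, 3, 2, 2, true)));
  return 0;
}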
#define USE_EIGEN_TENSOR
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/conv_ops.h"
#include <string.h>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/kernel_shape_util.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
#define TF_REQUIRES(EXP, STATUS) \
do { \
if (!TF_PREDICT_TRUE(EXP)) return (STATUS); \
} while (false)
Status InitConv2DParameters(const OpKernelConstruction* context,
Conv2DParameters* params) {
TF_RETURN_IF_ERROR(context->GetAttr("dilations", ¶ms->dilations));
TF_RETURN_IF_ERROR(context->GetAttr("strides", ¶ms->strides));
TF_RETURN_IF_ERROR(context->GetAttr("padding", ¶ms->padding));
if (context->HasAttr("explicit_paddings")) {
TF_RETURN_IF_ERROR(
context->GetAttr("explicit_paddings", ¶ms->explicit_paddings));
}
string data_format_string;
TF_RETURN_IF_ERROR(context->GetAttr("data_format", &data_format_string));
TF_REQUIRES(FormatFromString(data_format_string, &params->data_format),
errors::InvalidArgument("Invalid data format"));
const auto& strides = params->strides;
const auto& dilations = params->dilations;
const auto& data_format = params->data_format;
TF_REQUIRES(dilations.size() == 4,
errors::InvalidArgument("Sliding window dilations field must "
"specify 4 dimensions"));
TF_REQUIRES(strides.size() == 4,
errors::InvalidArgument("Sliding window strides field must "
"specify 4 dimensions"));
const int64_t stride_n = GetTensorDim(strides, data_format, 'N');
const int64_t stride_c = GetTensorDim(strides, data_format, 'C');
const int64_t stride_h = GetTensorDim(strides, data_format, 'H');
const int64_t stride_w = GetTensorDim(strides, data_format, 'W');
TF_REQUIRES(
stride_n == 1 && stride_c == 1,
errors::Unimplemented("Current implementation does not yet support "
"strides in the batch and depth dimensions."));
TF_REQUIRES(stride_h > 0 && stride_w > 0,
errors::InvalidArgument(
"Row and column strides should be larger than 0."));
const int64_t dilation_n = GetTensorDim(dilations, data_format, 'N');
const int64_t dilation_c = GetTensorDim(dilations, data_format, 'C');
const int64_t dilation_h = GetTensorDim(dilations, data_format, 'H');
const int64_t dilation_w = GetTensorDim(dilations, data_format, 'W');
TF_REQUIRES(
dilation_n == 1 && dilation_c == 1,
errors::Unimplemented("Current implementation does not yet support "
"dilations in the batch and depth dimensions."));
TF_REQUIRES(
dilation_h > 0 && dilation_w > 0,
errors::InvalidArgument("Dilated rates should be larger than 0."));
int num_dims = data_format == TensorFormat::FORMAT_NCHW_VECT_C ? 5 : 4;
TF_RETURN_IF_ERROR(CheckValidPadding(
params->padding, params->explicit_paddings, num_dims, data_format));
return absl::OkStatus();
}
Status ComputeConv2DDimension(const Conv2DParameters& params,
const Tensor& input, const Tensor& filter,
Conv2DDimensions* dimensions) {
int required_dims =
params.data_format == TensorFormat::FORMAT_NCHW_VECT_C ? 5 : 4;
TF_REQUIRES(
input.dims() == required_dims,
errors::InvalidArgument("convolution input must be ", required_dims,
"-dimensional: ", input.shape().DebugString()));
TF_REQUIRES(
filter.dims() == required_dims,
errors::InvalidArgument("convolution filter must be ", required_dims,
"-dimensional: ", filter.shape().DebugString()));
for (int i = 0; i < required_dims - 1; i++) {
TF_REQUIRES(
FastBoundsCheck(filter.dim_size(i), std::numeric_limits<int>::max()),
errors::InvalidArgument("filter too large"));
}
FilterTensorFormat filter_format =
params.data_format == TensorFormat::FORMAT_NCHW_VECT_C
? FilterTensorFormat::FORMAT_OIHW_VECT_I
: FilterTensorFormat::FORMAT_HWIO;
const int64_t in_depth_raw = GetTensorDim(input, params.data_format, 'C');
const int64_t patch_depth_raw = GetFilterDim(filter, filter_format, 'I');
TF_REQUIRES(FastBoundsCheck(in_depth_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Input depth too large"));
TF_REQUIRES(FastBoundsCheck(patch_depth_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Patch depth too large"));
const int in_depth = static_cast<int>(in_depth_raw);
const int patch_depth = static_cast<int>(patch_depth_raw);
TF_REQUIRES(patch_depth > 0,
errors::InvalidArgument(
"filter depth must be stricly positive, got ", patch_depth));
TF_REQUIRES(in_depth % patch_depth == 0,
errors::InvalidArgument(
"input depth must be evenly divisible by filter depth: ",
in_depth, " vs ", patch_depth));
const int out_depth =
static_cast<int>(GetFilterDim(filter, filter_format, 'O'));
const int64_t input_rows_raw = GetTensorDim(input, params.data_format, 'H');
TF_REQUIRES(FastBoundsCheck(input_rows_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Input rows too large"));
const int input_rows = static_cast<int>(input_rows_raw);
const int filter_rows =
static_cast<int>(GetFilterDim(filter, filter_format, 'H'));
const int64_t input_cols_raw = GetTensorDim(input, params.data_format, 'W');
TF_REQUIRES(FastBoundsCheck(input_cols_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("Input cols too large"));
const int input_cols = static_cast<int>(input_cols_raw);
const int filter_cols =
static_cast<int>(GetFilterDim(filter, filter_format, 'W'));
const int64_t batch_raw = GetTensorDim(input, params.data_format, 'N');
TF_REQUIRES(FastBoundsCheck(batch_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("batch is too large"));
const int batch = static_cast<int>(batch_raw);
const int stride_rows = GetTensorDim(params.strides, params.data_format, 'H');
const int stride_cols = GetTensorDim(params.strides, params.data_format, 'W');
const int dilation_rows =
GetTensorDim(params.dilations, params.data_format, 'H');
const int dilation_cols =
GetTensorDim(params.dilations, params.data_format, 'W');
int64_t pad_rows_before, pad_rows_after, pad_cols_before, pad_cols_after;
if (params.padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(params.explicit_paddings, params.data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(params.explicit_paddings, params.data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
int64_t out_rows = 0, out_cols = 0;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose(
input_rows, filter_rows, dilation_rows, stride_rows, params.padding,
&out_rows, &pad_rows_before, &pad_rows_after));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose(
input_cols, filter_cols, dilation_cols, stride_cols, params.padding,
&out_cols, &pad_cols_before, &pad_cols_after));
dimensions->batch = batch;
dimensions->input_rows = input_rows;
dimensions->input_cols = input_cols;
dimensions->in_depth = in_depth;
dimensions->filter_rows = filter_rows;
dimensions->filter_cols = filter_cols;
dimensions->patch_depth = patch_depth;
dimensions->out_depth = out_depth;
dimensions->stride_rows = stride_rows;
dimensions->stride_cols = stride_cols;
dimensions->dilation_rows = dilation_rows;
dimensions->dilation_cols = dilation_cols;
dimensions->out_rows = out_rows;
dimensions->out_cols = out_cols;
dimensions->pad_rows_before = pad_rows_before;
dimensions->pad_rows_after = pad_rows_after;
dimensions->pad_cols_before = pad_cols_before;
dimensions->pad_cols_after = pad_cols_after;
return absl::OkStatus();
}
#undef TF_REQUIRES
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
int64_t GetDnnWorkspaceLimit(const string& envvar_in_mb,
int64_t default_value_in_bytes) {
const char* workspace_limit_in_mb_str = getenv(envvar_in_mb.c_str());
if (workspace_limit_in_mb_str != nullptr &&
strcmp(workspace_limit_in_mb_str, "") != 0) {
int64_t scratch_limit_in_mb = -1;
if (strings::safe_strto64(workspace_limit_in_mb_str,
&scratch_limit_in_mb)) {
return scratch_limit_in_mb * (1 << 20);
} else {
LOG(WARNING) << "Invalid value for env-var " << envvar_in_mb << ": "
<< workspace_limit_in_mb_str;
}
}
return default_value_in_bytes;
}
int64_t GetDnnWorkspaceLimitOrDefault() {
return GetDnnWorkspaceLimit("TF_CUDNN_WORKSPACE_LIMIT_IN_MB",
1LL << 33);
}
#endif
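// A minimal sketch of the same environment-variable lookup using only the C
// standard library (strtoll in place of strings::safe_strto64). The fallback
// behaviour on a malformed value is an assumption here; the code above logs a
// warning and then returns the default.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
static int64_t WorkspaceLimitBytes(const char* envvar, int64_t default_bytes) {
  const char* value = std::getenv(envvar);
  if (value == nullptr || value[0] == '\0') return default_bytes;
  char* end = nullptr;
  const long long mb = std::strtoll(value, &end, 10);
  if (end == value || *end != '\0') return default_bytes;
  return static_cast<int64_t>(mb) << 20;
}
int main() {
  std::printf("%lld bytes\n",
              static_cast<long long>(WorkspaceLimitBytes(
                  "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", int64_t{1} << 33)));
  return 0;
}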
} | #include <cmath>
#include <optional>
#include <string>
#include <type_traits>
#include <vector>
#include "absl/algorithm/container.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/kernel_shape_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/conv_ops_gpu.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
class FusedResizePadConvOpTest : public OpsTestBase {
protected:
template <typename T>
void HandwrittenConv(DataType dtype) {
const int stride = 1;
TF_EXPECT_OK(NodeDefBuilder("fused_resize_op", "FusedResizeAndPadConv2D")
.Input(FakeInput(dtype))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(dtype))
.Attr("T", dtype)
.Attr("resize_align_corners", false)
.Attr("mode", "REFLECT")
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
Tensor image(dtype, {image_batch_count, image_height, image_width, depth});
test::FillValues<T>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
const int filter_size = 3;
const int filter_count = 1;
Tensor filter(dtype, {filter_size, filter_size, depth, filter_count});
test::FillValues<T>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9});
const int resized_width = image_width;
const int resized_height = image_height;
const int top_padding = 0;
const int bottom_padding = 0;
const int left_padding = 0;
const int right_padding = 0;
AddInputFromArray<T>(image.shape(), image.flat<T>());
AddInputFromArray<int32>(TensorShape({2}), {resized_height, resized_width});
AddInputFromArray<int32>(
TensorShape({4, 2}),
{0, 0, top_padding, bottom_padding, left_padding, right_padding, 0, 0});
AddInputFromArray<T>(filter.shape(), filter.flat<T>());
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
Tensor expected(dtype, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<T>(
&expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
test::ExpectTensorNear<T>(expected, output, 1e-5);
}
template <typename T>
void CompareFusedAndSeparate(int input_width, int input_height,
int input_depth, int resize_width,
int resize_height, int y_padding, int x_padding,
int filter_size, int filter_count,
bool resize_align_corners,
const string& pad_mode, int stride,
const string& padding, DataType dtype) {
Scope root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT,
TensorShape({1, input_height, input_width, input_depth}));
test::FillIota<float>(&input_data, 1.0f);
Output input =
Const(root.WithOpName("input"), Input::Initializer(input_data));
Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype);
Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
input_depth, filter_count}));
test::FillIota<float>(&filter_data, 1.0f);
Output filter =
Const(root.WithOpName("filter"), Input::Initializer(filter_data));
Output casted_filter =
Cast(root.WithOpName("casted_filter"), filter, dtype);
Output resize_size =
Const(root.WithOpName("resize_size"), {resize_height, resize_width});
Output resize =
ResizeBilinear(root.WithOpName("resize"), input, resize_size,
ResizeBilinear::AlignCorners(resize_align_corners));
Output casted_resize = Cast(root.WithOpName("cast"), resize, dtype);
Output paddings =
Const(root.WithOpName("paddings"),
{{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}});
Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_resize,
paddings, pad_mode);
Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter,
{1, stride, stride, 1}, padding);
Output fused_conv = FusedResizeAndPadConv2D(
root.WithOpName("fused_conv"), casted_input, resize_size, paddings,
casted_filter, pad_mode, {1, stride, stride, 1}, padding,
FusedResizeAndPadConv2D::ResizeAlignCorners(resize_align_corners));
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
TF_ASSERT_OK(session->Run({}, {"conv"}, {}, &unfused_tensors));
std::vector<Tensor> fused_tensors;
TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors));
test::ExpectClose(unfused_tensors[0], fused_tensors[0]);
}
template <typename T>
void CompareFusedPadOnlyAndSeparate(int input_width, int input_height,
int input_depth, int y_padding,
int x_padding, int filter_size,
int filter_count, const string& pad_mode,
int stride, const string& padding,
DataType dtype) {
Scope root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT,
TensorShape({1, input_height, input_width, input_depth}));
test::FillIota<float>(&input_data, 1.0f);
Output input =
Const(root.WithOpName("input"), Input::Initializer(input_data));
Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype);
Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
input_depth, filter_count}));
test::FillIota<float>(&filter_data, 1.0f);
Output filter =
Const(root.WithOpName("filter"), Input::Initializer(filter_data));
Output casted_filter =
Cast(root.WithOpName("casted_filter"), filter, dtype);
Output paddings =
Const(root.WithOpName("paddings"),
{{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}});
Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_input,
paddings, pad_mode);
Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter,
{1, stride, stride, 1}, padding);
Output fused_conv = FusedPadConv2D(
root.WithOpName("fused_conv"), casted_input, paddings, casted_filter,
pad_mode, {1, stride, stride, 1}, padding);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
TF_ASSERT_OK(session->Run({}, {"conv"}, {}, &unfused_tensors));
std::vector<Tensor> fused_tensors;
TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors));
test::ExpectClose(unfused_tensors[0], fused_tensors[0]);
}
};
TEST_F(FusedResizePadConvOpTest, HandwrittenConvHalf) {
HandwrittenConv<Eigen::half>(DT_HALF);
}
TEST_F(FusedResizePadConvOpTest, HandwrittenConvFloat) {
HandwrittenConv<float>(DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, HandwrittenConvDouble) {
HandwrittenConv<double>(DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, IdentityComparativeHalf) {
CompareFusedAndSeparate<Eigen::half>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_HALF);
}
TEST_F(FusedResizePadConvOpTest, IdentityComparativeFloat) {
CompareFusedAndSeparate<float>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, IdentityComparativeDouble) {
CompareFusedAndSeparate<double>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, ConvOnlyComparative) {
CompareFusedAndSeparate<float>(10, 10, 3, 10, 10, 0, 0, 4, 4, false,
"REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeOnlyComparative) {
CompareFusedAndSeparate<float>(10, 10, 1, 20, 20, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndConvComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndConvStridedComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 2,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvValidComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
"VALID", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlyComparative) {
CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlyWithChannelsComparative) {
CompareFusedAndSeparate<float>(4, 4, 3, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadComparative) {
CompareFusedAndSeparate<float>(4, 4, 1, 6, 6, 2, 2, 1, 1, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlySymmetricComparative) {
CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "SYMMETRIC",
1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparative) {
CompareFusedAndSeparate<float>(4, 4, 3, 6, 6, 2, 2, 1, 1, false, "SYMMETRIC",
1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparativeLarge) {
CompareFusedAndSeparate<float>(1000, 1000, 3, 1006, 1006, 2, 2, 1, 1, false,
"SYMMETRIC", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeHalf) {
CompareFusedPadOnlyAndSeparate<Eigen::half>(10, 10, 1, 0, 0, 1, 1, "REFLECT",
1, "SAME", DT_HALF);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeBFloat16) {
CompareFusedPadOnlyAndSeparate<bfloat16>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
"SAME", DT_BFLOAT16);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeFloat) {
CompareFusedPadOnlyAndSeparate<float>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeDouble) {
CompareFusedPadOnlyAndSeparate<double>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
"SAME", DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, NoResizeConvOnlyComparative) {
CompareFusedPadOnlyAndSeparate<float>(10, 10, 3, 0, 0, 4, 4, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyComparative) {
CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyWithChannelsComparative) {
CompareFusedPadOnlyAndSeparate<float>(4, 4, 3, 2, 2, 1, 1, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlySymmetricComparative) {
CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "SYMMETRIC", 1,
"SAME", DT_FLOAT);
}
class ConvOpTest : public OpsTestBase {
protected:
void HandwrittenConv() {
const int stride = 1;
TF_EXPECT_OK(NodeDefBuilder("conv_op", "Conv2D")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
Tensor image(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
const int filter_size = 3;
const int filter_count = 1;
Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9});
AddInputFromArray<float>(image.shape(), image.flat<float>());
AddInputFromArray<float>(filter.shape(), filter.flat<float>());
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(
&expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
test::ExpectTensorNear<float>(expected, output, 1e-5);
}
void AnisotropicStrides() {
const int stride_width = 3;
const int stride_height = 1;
TF_EXPECT_OK(NodeDefBuilder("conv_op", "Conv2D")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Attr("strides", {1, stride_height, stride_width, 1})
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const int depth = 1;
const int image_width = 6;
const int image_height = 3;
const int image_batch_count = 1;
Tensor image(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image, {
3, 2, 1, -1, -2, -3,
4, 3, 2, -2, -3, -4,
5, 4, 3, -3, -4, -5,
});
const int filter_size = 2;
const int filter_count = 1;
Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter, {
1, 2,
3, 4,
});
AddInputFromArray<float>(image.shape(), image.flat<float>());
AddInputFromArray<float>(filter.shape(), filter.flat<float>());
TF_ASSERT_OK(RunOpKernel());
const int expected_width = 2;
const int expected_height = 2;
Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(&expected, {31, -23, 41, -33});
const Tensor& output = *GetOutput(0);
test::ExpectTensorNear<float>(expected, output, 1e-5);
}
};
TEST_F(ConvOpTest, HandwrittenConv) { HandwrittenConv(); }
TEST_F(ConvOpTest, AnisotropicStride) { AnisotropicStrides(); }
template <typename T>
class FusedConv2DOpTest : public OpsTestBase {
protected:
static constexpr int kDepth = 4;
static constexpr int kImageWidth = 32;
static constexpr int kImageHeight = 32;
static constexpr int kImageBatchCount = 8;
static constexpr bool kIsInt8 =
std::is_same<T, int8>::value || std::is_same<T, qint8>::value;
using BiasAddGraphRunner =
std::function<void(const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out)>;
using BatchNormGraphRunner = std::function<void(
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data, Tensor* out)>;
static bool HasGpuDevice() {
tensorflow::SessionOptions session_options;
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
std::vector<DeviceAttributes> available_devices;
[&]() { TF_ASSERT_OK(session->ListDevices(&available_devices)); }();
const bool has_gpu_device =
absl::c_any_of(available_devices, [](const DeviceAttributes& device) {
return device.device_type() == DEVICE_GPU;
});
return has_gpu_device;
}
void RunAndFetch(const tensorflow::Scope& root, const std::string& fetch,
Tensor* output, bool allow_gpu_device,
const NodeDef* fetch_node = nullptr) {
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
if (fetch_node) {
*graph.add_node() = *fetch_node;
}
tensorflow::SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
tensorflow::RewriterConfig* cfg =
session_options.config.mutable_graph_options()
->mutable_rewrite_options();
cfg->set_constant_folding(tensorflow::RewriterConfig::OFF);
cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF);
cfg->set_remapping(tensorflow::RewriterConfig::OFF);
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
const bool has_gpu_device = HasGpuDevice();
const bool place_all_on_gpu = allow_gpu_device && has_gpu_device;
const std::string device =
place_all_on_gpu ? "/device:GPU:0" : "/device:CPU:0";
for (NodeDef& mutable_node : *graph.mutable_node()) {
mutable_node.set_device(device);
}
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
TF_ASSERT_OK(session->Run({}, {fetch}, {}, &unfused_tensors));
*output = unfused_tensors[0];
}
void RunConv2DWithBias(const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, const std::string& padding,
const std::vector<int>& explicit_paddings,
Tensor* output, bool allow_gpu_device = false,
int stride = 1) {
RunConv2DWithBiasAndActivation(input_data, filter_data, bias_data,
std::nullopt, padding, explicit_paddings,
output, allow_gpu_device, stride);
}
template <typename From, typename To>
static Tensor Cast(
const Tensor& from, const std::function<To(From)>& cast = [](From v) {
return static_cast<To>(v);
}) {
Tensor to(DataTypeToEnum<To>::v(), from.shape());
for (int i = 0; i < from.NumElements(); ++i) {
to.flat<To>()(i) = cast(from.flat<From>()(i));
}
return to;
}
void RunConv2DWithBiasAndActivation(
Tensor input_data, Tensor filter_data, Tensor bias_data,
std::optional<std::string> activation_type, const std::string& padding,
const std::vector<int>& explicit_paddings, Tensor* output,
bool allow_gpu_device = false, int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
if (kIsInt8) {
input_data = Cast<T, float>(input_data);
filter_data = Cast<T, float>(filter_data);
bias_data = Cast<T, float>(bias_data);
}
ops::Conv2D conv = ops::Conv2D(
root.WithOpName("conv"),
ops::Const(root.WithOpName("input"), Input::Initializer(input_data)),
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding,
ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), conv,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
if (activation_type.has_value()) {
if (*activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_bias);
} else if (*activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_bias);
} else if (*activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_bias);
} else if (*activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_bias);
} else {
ops::Identity(root.WithOpName("with_activation"), with_bias);
}
}
RunAndFetch(root,
activation_type.has_value() ? "with_activation" : "with_bias",
output, allow_gpu_device);
if (kIsInt8) {
*output = Cast<float, T>(
*output, [](float v) { return static_cast<T>(std::lround(v)); });
}
}
void RunConv2DWithBatchNorm(
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data,
const std::string& padding, const std::vector<int>& explicit_paddings,
Tensor* output, bool allow_gpu_device = false, int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
ops::Conv2D conv = ops::Conv2D(
root.WithOpName("conv"),
ops::Const(root.WithOpName("input"), Input::Initializer(input_data)),
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding,
ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings));
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm with_fused_batch_norm = ops::FusedBatchNorm(
root.WithOpName("with_fused_batch_norm"), conv,
ops::Const(root.WithOpName("scale"), Input::Initializer(scale_data)),
ops::Const(root.WithOpName("offset"), Input::Initializer(offset_data)),
ops::Const(root.WithOpName("mean"), Input::Initializer(mean_data)),
ops::Const(root.WithOpName("var"), Input::Initializer(variance_data)),
attr);
RunAndFetch(root, "with_fused_batch_norm", output, allow_gpu_device);
}
void RunConv2DWithBatchNormAndActivation(
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data,
const string& activation_type, const std::string& padding,
const std::vector<int>& explicit_paddings, Tensor* output,
bool allow_gpu_device = false, int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
ops::Conv2D conv = ops::Conv2D(
root.WithOpName("conv"),
ops::Const(root.WithOpName("input"), Input::Initializer(input_data)),
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding,
ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings));
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm with_fused_batch_norm = ops::FusedBatchNorm(
root.WithOpName("with_fused_batch_norm"), conv,
ops::Const(root.WithOpName("scale"), Input::Initializer(scale_data)),
ops::Const(root.WithOpName("offset"), Input::Initializer(offset_data)),
ops::Const(root.WithOpName("mean"), Input::Initializer(mean_data)),
ops::Const(root.WithOpName("var"), Input::Initializer(variance_data)),
attr);
if (activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_fused_batch_norm.y);
} else if (activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_fused_batch_norm.y);
} else if (activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_fused_batch_norm.y);
} else if (activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"),
with_fused_batch_norm.y);
} else {
ops::Identity(root.WithOpName("with_activation"),
with_fused_batch_norm.y);
}
RunAndFetch(root, "with_activation", output, allow_gpu_device);
}
void RunFusedConv2DOp(Tensor input_data, Tensor filter_data,
std::vector<Tensor> args_data,
const std::vector<std::string>& fused_ops,
const std::string& padding,
const std::vector<int>& explicit_paddings,
Tensor* output, bool allow_gpu_device = false,
int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
DataType dtype = DataTypeToEnum<T>::v();
const bool has_gpu_device = HasGpuDevice();
const bool has_extra_parameters = kIsInt8;
const bool has_float_bias = kIsInt8;
DataType dtype_args =
has_float_bias ? DataTypeToEnum<float>::v() : DataTypeToEnum<T>::v();
const int n = GetTensorDim(input_data, FORMAT_NHWC, 'N');
const int h = GetTensorDim(input_data, FORMAT_NHWC, 'H');
const int w = GetTensorDim(input_data, FORMAT_NHWC, 'W');
const int kh = GetFilterDim(filter_data, FORMAT_HWIO, 'H');
const int kw = GetFilterDim(filter_data, FORMAT_HWIO, 'W');
const int ic = GetFilterDim(filter_data, FORMAT_HWIO, 'I');
const int oc = GetFilterDim(filter_data, FORMAT_HWIO, 'O');
const int v = (kIsInt8 && allow_gpu_device && has_gpu_device) ? 4 : 1;
if (v > 1) {
{
TensorShape shape;
TF_EXPECT_OK(
ShapeFromFormatWithStatus(FORMAT_NCHW_VECT_C, n, h, w, ic, &shape));
Tensor input_data_nchwv(dtype, shape);
input_data_nchwv.tensor<T, 5>() =
input_data.shaped<T, 5>({n, h, w, ic / v, v})
.shuffle(Eigen::array<int, 5>{0, 3, 1, 2, 4});
input_data = input_data_nchwv;
}
{
Tensor filter_data_oihwv(
dtype,
ShapeFromFilterTensorFormat(FORMAT |
1,140 | cpp | tensorflow/tensorflow | cast_op | tensorflow/core/kernels/cast_op.cc | tensorflow/core/kernels/cast_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CAST_OP_H_
#define TENSORFLOW_CORE_KERNELS_CAST_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bfloat16.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/types.h"
#ifdef SPECIALIZE_FOR_GPUS
#define SPECIALIZE_CAST(DEVICE, OUT_TYPE, IN_TYPE) \
template <typename Device> \
struct CastFunctor<Device, OUT_TYPE, IN_TYPE> { \
void operator()(const Device& d, \
typename TTypes<OUT_TYPE>::Flat out_tensor, \
typename TTypes<IN_TYPE>::ConstFlat in_tensor, \
bool truncate = false) { \
if (truncate) { \
out_tensor.device(d) = \
in_tensor.unaryExpr(LSBZeroSetter<IN_TYPE, OUT_TYPE>()) \
.template cast<OUT_TYPE>(); \
} else { \
out_tensor.device(d) = in_tensor.template cast<OUT_TYPE>(); \
} \
} \
}; \
template struct CastFunctor<DEVICE, OUT_TYPE, IN_TYPE>;
#else
#define SPECIALIZE_CAST(DEVICE, OUT_TYPE, IN_TYPE) \
template <> \
struct CastFunctor<DEVICE, OUT_TYPE, IN_TYPE> { \
void operator()(const DEVICE& d, \
typename TTypes<OUT_TYPE>::Flat out_tensor, \
typename TTypes<IN_TYPE>::ConstFlat in_tensor, \
bool truncate = false) { \
if (truncate) { \
out_tensor.device(d) = \
in_tensor.unaryExpr(LSBZeroSetter<IN_TYPE, OUT_TYPE>()) \
.template cast<OUT_TYPE>(); \
} else { \
out_tensor.device(d) = in_tensor.template cast<OUT_TYPE>(); \
} \
} \
};
#endif
#define CAST_FUNCTORS(devname) \
SPECIALIZE_CAST(devname, float, double) \
SPECIALIZE_CAST(devname, float, std::complex<double>) \
SPECIALIZE_CAST(devname, std::complex<float>, std::complex<double>) \
SPECIALIZE_CAST(devname, std::complex<float>, double) \
SPECIALIZE_CAST(devname, Eigen::half, double) \
SPECIALIZE_CAST(devname, Eigen::half, float) \
SPECIALIZE_CAST(devname, Eigen::half, std::complex<double>) \
SPECIALIZE_CAST(devname, Eigen::half, std::complex<float>) \
SPECIALIZE_CAST(devname, bfloat16, float) \
SPECIALIZE_CAST(devname, float8_e5m2, double) \
SPECIALIZE_CAST(devname, float8_e5m2, float) \
SPECIALIZE_CAST(devname, float8_e5m2, bfloat16) \
SPECIALIZE_CAST(devname, float8_e5m2, Eigen::half) \
SPECIALIZE_CAST(devname, float8_e5m2, float8_e4m3fn) \
SPECIALIZE_CAST(devname, float8_e4m3fn, double) \
SPECIALIZE_CAST(devname, float8_e4m3fn, float) \
SPECIALIZE_CAST(devname, float8_e4m3fn, bfloat16) \
SPECIALIZE_CAST(devname, float8_e4m3fn, Eigen::half) \
template <typename OUT_TYPE, typename IN_TYPE> \
struct CastFunctor<devname, OUT_TYPE, IN_TYPE> { \
void operator()(const devname& d, \
typename TTypes<OUT_TYPE>::Flat out_tensor, \
typename TTypes<IN_TYPE>::ConstFlat in_tensor, \
bool truncate = false) { \
out_tensor.device(d) = in_tensor.template cast<OUT_TYPE>(); \
} \
};
#if defined(MLIR_GENERATED_GPU_KERNELS_ENABLED)
#define CAST_FUNCTORS_SUBSET(devname) \
SPECIALIZE_CAST(devname, float, double) \
SPECIALIZE_CAST(devname, Eigen::half, float) \
SPECIALIZE_CAST(devname, bfloat16, float) \
SPECIALIZE_CAST(devname, float8_e5m2, double) \
SPECIALIZE_CAST(devname, float8_e5m2, float) \
SPECIALIZE_CAST(devname, float8_e5m2, bfloat16) \
SPECIALIZE_CAST(devname, float8_e5m2, Eigen::half) \
SPECIALIZE_CAST(devname, float8_e5m2, float8_e4m3fn) \
SPECIALIZE_CAST(devname, float8_e4m3fn, double) \
SPECIALIZE_CAST(devname, float8_e4m3fn, float) \
SPECIALIZE_CAST(devname, float8_e4m3fn, bfloat16) \
SPECIALIZE_CAST(devname, float8_e4m3fn, Eigen::half) \
template <typename OUT_TYPE, typename IN_TYPE> \
struct CastFunctor<devname, OUT_TYPE, IN_TYPE> { \
void operator()(const devname& d, \
typename TTypes<OUT_TYPE>::Flat out_tensor, \
typename TTypes<IN_TYPE>::ConstFlat in_tensor, \
bool truncate = false) { \
out_tensor.device(d) = in_tensor.template cast<OUT_TYPE>(); \
} \
};
#endif
namespace tensorflow {
typedef std::function<void(OpKernelContext*, const Tensor&, Tensor*,
bool trunc)>
CastFunctorType;
class CastOpBase : public OpKernel {
public:
explicit CastOpBase(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
protected:
DataType src_dtype_;
DataType dst_dtype_;
DataType external_src_dtype_;
DataType external_dst_dtype_;
bool use_truncation_;
CastFunctorType work_ = nullptr;
Status Unimplemented();
CastOpBase(const CastOpBase&) = delete;
void operator=(const CastOpBase&) = delete;
};
class CpuCastOp : public CastOpBase {
public:
explicit CpuCastOp(OpKernelConstruction* ctx);
private:
Status Prepare();
};
namespace functor {
template <typename I>
constexpr int MantissaWidth() {
return std::numeric_limits<I>::digits;
}
template <>
constexpr int MantissaWidth<Eigen::half>() {
return 10 + 1;
}
template <>
constexpr int MantissaWidth<bfloat16>() {
return 7 + 1;
}
template <typename Device, typename Tout, typename Tin>
void Cast(const Device& d, typename TTypes<Tout>::Flat o,
typename TTypes<Tin>::ConstFlat i) {
o.device(d) = i.template cast<Tout>();
}
template <typename Device, typename Tout, typename Tin>
struct CastFunctor {
void operator()(const Device& d, typename TTypes<Tout>::Flat o,
typename TTypes<Tin>::ConstFlat i, bool truncate = false);
};
template <typename I>
typename std::enable_if<sizeof(I) == 8, void>::type EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE static LSBZeroSetterHelper(I& t, int n) {
if (!Eigen::numext::isnan(t)) {
uint64_t* p = reinterpret_cast<uint64_t*>(&t);
*p &= (0xFFFFFFFFFFFFFFFF << n);
}
}
template <typename I>
typename std::enable_if<sizeof(I) == 4, void>::type EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE static LSBZeroSetterHelper(I& t, int n) {
if (!Eigen::numext::isnan(t)) {
uint32_t* p = reinterpret_cast<uint32_t*>(&t);
*p &= (0xFFFFFFFF << n);
}
}
template <typename I>
typename std::enable_if<sizeof(I) == 2, void>::type EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE static LSBZeroSetterHelper(I& t, int n) {
if (!Eigen::numext::isnan(t)) {
uint16_t* p = reinterpret_cast<uint16_t*>(&t);
*p &= (0xFFFF << n);
}
}
template <typename I>
typename std::enable_if<sizeof(I) == 1, void>::type EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE static LSBZeroSetterHelper(I& t, int n) {
if (!Eigen::numext::isnan(t)) {
uint8_t* p = reinterpret_cast<uint8_t*>(&t);
*p &= (0xFF << n);
}
}
template <typename I, typename O>
struct LSBZeroSetter {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE I operator()(const I& a) const {
constexpr int bits = MantissaWidth<I>() - MantissaWidth<O>();
static_assert(
bits > 0,
"The output type must have fewer mantissa bits than the input type\n");
I t = a;
LSBZeroSetterHelper(t, bits);
return t;
}
};
template <typename I, typename O>
struct LSBZeroSetter<std::complex<I>, std::complex<O>> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<I> operator()(
const std::complex<I>& a) const {
constexpr int bits = MantissaWidth<I>() - MantissaWidth<O>();
static_assert(
bits > 0,
"The output type must have fewer mantissa bits than the input type\n");
I re = Eigen::numext::real(a);
I img = Eigen::numext::imag(a);
LSBZeroSetterHelper(re, bits);
LSBZeroSetterHelper(img, bits);
std::complex<I> toReturn(re, img);
return toReturn;
}
};
template <typename I, typename O>
struct LSBZeroSetter<std::complex<I>, O> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<I> operator()(
const std::complex<I>& a) const {
constexpr int bits = MantissaWidth<I>() - MantissaWidth<O>();
static_assert(
bits > 0,
"The output type must have fewer mantissa bits than the input type\n");
I re = Eigen::numext::real(a);
I img = Eigen::numext::imag(a);
LSBZeroSetterHelper(re, bits);
LSBZeroSetterHelper(img, bits);
std::complex<I> toReturn(re, img);
return toReturn;
}
};
}
}
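// Eigen::internal::scalar_cast_op specializations for casts involving
// std::complex: complex->real keeps the real part, real->complex zero-fills
// the imaginary part, and complex->complex casts both components.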
namespace Eigen {
namespace internal {
template <typename From, typename To>
struct scalar_cast_op<std::complex<From>, To> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE To
operator()(const std::complex<From>& a) const {
return static_cast<To>(a.real());
}
};
template <typename From>
struct scalar_cast_op<std::complex<From>, bool> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(
const std::complex<From>& a) const {
return static_cast<bool>(a.real());
}
};
template <typename From, typename To>
struct scalar_cast_op<From, std::complex<To>> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<To> operator()(
const From& a) const {
return std::complex<To>(static_cast<To>(a), To(0));
}
};
template <typename From, typename To>
struct scalar_cast_op<std::complex<From>, std::complex<To>> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<To> operator()(
const std::complex<From>& a) const {
return std::complex<To>(static_cast<To>(a.real()),
static_cast<To>(a.imag()));
}
};
template <typename From, typename To>
struct functor_traits_complex_impl {
enum { Cost = NumTraits<To>::AddCost, PacketAccess = false };
};
template <typename From>
struct functor_traits<scalar_cast_op<std::complex<From>, bool>>
: functor_traits_complex_impl<std::complex<From>, bool> {};
template <typename From, typename To>
struct functor_traits<scalar_cast_op<std::complex<From>, To>>
: functor_traits_complex_impl<std::complex<From>, To> {};
template <typename From, typename To>
struct functor_traits<scalar_cast_op<From, std::complex<To>>>
: functor_traits_complex_impl<From, std::complex<To>> {};
template <typename From, typename To>
struct functor_traits<scalar_cast_op<std::complex<From>, std::complex<To>>>
: functor_traits_complex_impl<std::complex<From>, std::complex<To>> {};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/cast_op_impl.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
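// CURRY_TYPES2(FN, arg0) expands FN(arg0, T) for every supported destination
// type T; it drives the per-source-type GPU kernel registrations below.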
#define CURRY_TYPES2(FN, arg0) \
FN(arg0, bool); \
FN(arg0, uint8); \
FN(arg0, uint16); \
FN(arg0, uint32); \
FN(arg0, uint64); \
FN(arg0, int8); \
FN(arg0, int16); \
FN(arg0, int32); \
FN(arg0, int64_t); \
FN(arg0, Eigen::half); \
FN(arg0, bfloat16); \
FN(arg0, float); \
FN(arg0, double); \
FN(arg0, std::complex<float>); \
FN(arg0, std::complex<double>)
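// Reads SrcT/DstT/Truncate and maps quantized external dtypes (DT_QUINT8,
// DT_QINT8, DT_QINT16, DT_QUINT16, DT_QINT32) onto the plain integer dtypes
// that the cast functors actually operate on.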
CastOpBase::CastOpBase(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("SrcT", &external_src_dtype_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("DstT", &external_dst_dtype_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Truncate", &use_truncation_));
if (external_dst_dtype_ == DT_QUINT8) {
dst_dtype_ = DT_UINT8;
} else if (external_dst_dtype_ == DT_QINT8) {
dst_dtype_ = DT_INT8;
} else if (external_dst_dtype_ == DT_QINT32) {
dst_dtype_ = DT_INT32;
} else if (external_dst_dtype_ == DT_QINT16) {
dst_dtype_ = DT_INT16;
} else if (external_dst_dtype_ == DT_QUINT16) {
dst_dtype_ = DT_UINT16;
} else {
dst_dtype_ = external_dst_dtype_;
}
if (external_src_dtype_ == DT_QUINT8) {
src_dtype_ = DT_UINT8;
} else if (external_src_dtype_ == DT_QINT8) {
src_dtype_ = DT_INT8;
} else if (external_src_dtype_ == DT_QINT32) {
src_dtype_ = DT_INT32;
} else if (external_src_dtype_ == DT_QINT16) {
src_dtype_ = DT_INT16;
} else if (external_src_dtype_ == DT_QUINT16) {
src_dtype_ = DT_UINT16;
} else {
src_dtype_ = external_src_dtype_;
}
}
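// A null work_ means the cast is an identity, so the input is forwarded.
// For quantized external dtypes the buffers are bitcast to the underlying
// integer dtypes, the cast is run, and the external dtype is restored on the
// output tensor.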
void CastOpBase::Compute(OpKernelContext* ctx) {
const Tensor& inp = ctx->input(0);
if (work_ == nullptr) {
ctx->set_output(0, inp);
} else if (external_src_dtype_ != src_dtype_ ||
external_dst_dtype_ != dst_dtype_) {
Tensor in;
OP_REQUIRES_OK(ctx, in.BitcastFrom(inp, src_dtype_, inp.shape()));
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, in.shape(), &out));
out->set_dtype(dst_dtype_);
work_(ctx, in, out, use_truncation_);
out->set_dtype(external_dst_dtype_);
} else {
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, inp.shape(), &out));
work_(ctx, inp, out, use_truncation_);
}
}
Status CastOpBase::Unimplemented() {
return errors::Unimplemented("Cast ", DataTypeString(external_src_dtype_),
" to ", DataTypeString(external_dst_dtype_),
" is not supported");
}
CpuCastOp::CpuCastOp(OpKernelConstruction* ctx) : CastOpBase(ctx) {
OP_REQUIRES_OK(ctx, Prepare());
}
Status CpuCastOp::Prepare() {
if (external_src_dtype_ == external_dst_dtype_) {
work_ = nullptr;
return absl::OkStatus();
}
if (src_dtype_ == DT_BOOL) {
work_ = GetCpuCastFromBool(dst_dtype_);
} else if (src_dtype_ == DT_UINT8) {
work_ = GetCpuCastFromUint8(dst_dtype_);
} else if (src_dtype_ == DT_UINT16) {
work_ = GetCpuCastFromUint16(dst_dtype_);
} else if (src_dtype_ == DT_UINT32) {
work_ = GetCpuCastFromUint32(dst_dtype_);
} else if (src_dtype_ == DT_UINT64) {
work_ = GetCpuCastFromUint64(dst_dtype_);
} else if (src_dtype_ == DT_INT8) {
work_ = GetCpuCastFromInt8(dst_dtype_);
} else if (src_dtype_ == DT_INT16) {
work_ = GetCpuCastFromInt16(dst_dtype_);
} else if (src_dtype_ == DT_INT32) {
work_ = GetCpuCastFromInt32(dst_dtype_);
} else if (src_dtype_ == DT_INT64) {
work_ = GetCpuCastFromInt64(dst_dtype_);
} else if (src_dtype_ == DT_HALF) {
work_ = GetCpuCastFromHalf(dst_dtype_);
} else if (src_dtype_ == DT_FLOAT) {
work_ = GetCpuCastFromFloat(dst_dtype_);
} else if (src_dtype_ == DT_DOUBLE) {
work_ = GetCpuCastFromDouble(dst_dtype_);
} else if (src_dtype_ == DT_COMPLEX64) {
work_ = GetCpuCastFromComplex64(dst_dtype_);
} else if (src_dtype_ == DT_COMPLEX128) {
work_ = GetCpuCastFromComplex128(dst_dtype_);
} else if (src_dtype_ == DT_BFLOAT16) {
work_ = GetCpuCastFromBfloat(dst_dtype_);
} else if (src_dtype_ == DT_FLOAT8_E5M2) {
work_ = GetCpuCastFromFloat8e5m2(dst_dtype_);
} else if (src_dtype_ == DT_FLOAT8_E4M3FN) {
work_ = GetCpuCastFromFloat8e4m3fn(dst_dtype_);
} else if (src_dtype_ == DT_INT4) {
work_ = GetCpuCastFromInt4(dst_dtype_);
} else if (src_dtype_ == DT_UINT4) {
work_ = GetCpuCastFromUint4(dst_dtype_);
}
return work_ == nullptr ? Unimplemented() : absl::OkStatus();
}
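// GPU cast kernel, compiled only for CUDA/ROCm builds; Prepare() mirrors the
// CPU dispatch but uses the GPU functor getters.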
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
class GpuCastOp : public CastOpBase {
public:
explicit GpuCastOp(OpKernelConstruction* ctx) : CastOpBase(ctx) {
OP_REQUIRES_OK(ctx, Prepare());
}
private:
Status Prepare() {
if (external_src_dtype_ == external_dst_dtype_) {
work_ = nullptr;
return OkStatus();
}
if (src_dtype_ == DT_BOOL) {
work_ = GetGpuCastFromBool(dst_dtype_);
} else if (src_dtype_ == DT_UINT8) {
work_ = GetGpuCastFromUint8(dst_dtype_);
} else if (src_dtype_ == DT_UINT16) {
work_ = GetGpuCastFromUint16(dst_dtype_);
} else if (src_dtype_ == DT_UINT32) {
work_ = GetGpuCastFromUint32(dst_dtype_);
} else if (src_dtype_ == DT_UINT64) {
work_ = GetGpuCastFromUint64(dst_dtype_);
} else if (src_dtype_ == DT_INT8) {
work_ = GetGpuCastFromInt8(dst_dtype_);
} else if (src_dtype_ == DT_INT16) {
work_ = GetGpuCastFromInt16(dst_dtype_);
} else if (src_dtype_ == DT_INT32) {
work_ = GetGpuCastFromInt32(dst_dtype_);
} else if (src_dtype_ == DT_INT64) {
work_ = GetGpuCastFromInt64(dst_dtype_);
} else if (src_dtype_ == DT_HALF) {
work_ = GetGpuCastFromHalf(dst_dtype_);
} else if (src_dtype_ == DT_FLOAT) {
work_ = GetGpuCastFromFloat(dst_dtype_);
} else if (src_dtype_ == DT_DOUBLE) {
work_ = GetGpuCastFromDouble(dst_dtype_);
} else if (src_dtype_ == DT_COMPLEX64) {
work_ = GetGpuCastFromComplex64(dst_dtype_);
} else if (src_dtype_ == DT_COMPLEX128) {
work_ = GetGpuCastFromComplex128(dst_dtype_);
} else if (src_dtype_ == DT_BFLOAT16) {
work_ = GetGpuCastFromBfloat(dst_dtype_);
} else if (src_dtype_ == DT_FLOAT8_E5M2) {
work_ = GetGpuCastFromFloat8e5m2(dst_dtype_);
} else if (src_dtype_ == DT_FLOAT8_E4M3FN) {
work_ = GetGpuCastFromFloat8e4m3fn(dst_dtype_);
} else if (src_dtype_ == DT_INT4) {
work_ = GetGpuCastFromInt4(dst_dtype_);
} else if (src_dtype_ == DT_UINT4) {
work_ = GetGpuCastFromUint4(dst_dtype_);
}
return work_ == nullptr ? Unimplemented() : OkStatus();
}
};
#endif
#undef CAST_CASE
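// Kernel registrations. When MLIR-generated GPU kernels are enabled, only the
// casts targeting bfloat16 are registered from the full type table above; the
// bfloat16-source, float8, and int4 casts are always registered below.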
REGISTER_KERNEL_BUILDER(Name("Cast").Device(DEVICE_CPU), CpuCastOp);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define REGISTER_CAST_GPU(srctype, dsttype) \
REGISTER_KERNEL_BUILDER(Name("Cast") \
.TypeConstraint<srctype>("SrcT") \
.TypeConstraint<dsttype>("DstT") \
.Device(DEVICE_GPU), \
GpuCastOp)
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED)
CURRY_TYPES2(REGISTER_CAST_GPU, bool);
CURRY_TYPES2(REGISTER_CAST_GPU, int8);
CURRY_TYPES2(REGISTER_CAST_GPU, int16);
CURRY_TYPES2(REGISTER_CAST_GPU, int32);
CURRY_TYPES2(REGISTER_CAST_GPU, int64);
CURRY_TYPES2(REGISTER_CAST_GPU, uint8);
CURRY_TYPES2(REGISTER_CAST_GPU, uint16);
CURRY_TYPES2(REGISTER_CAST_GPU, uint32);
CURRY_TYPES2(REGISTER_CAST_GPU, uint64);
CURRY_TYPES2(REGISTER_CAST_GPU, Eigen::half);
CURRY_TYPES2(REGISTER_CAST_GPU, float);
CURRY_TYPES2(REGISTER_CAST_GPU, double);
CURRY_TYPES2(REGISTER_CAST_GPU, std::complex<float>);
CURRY_TYPES2(REGISTER_CAST_GPU, std::complex<double>);
#else
REGISTER_CAST_GPU(bool, bfloat16);
REGISTER_CAST_GPU(int8, bfloat16);
REGISTER_CAST_GPU(int16, bfloat16);
REGISTER_CAST_GPU(int32, bfloat16);
REGISTER_CAST_GPU(int64, bfloat16);
REGISTER_CAST_GPU(uint8, bfloat16);
REGISTER_CAST_GPU(uint16, bfloat16);
REGISTER_CAST_GPU(uint32, bfloat16);
REGISTER_CAST_GPU(uint64, bfloat16);
REGISTER_CAST_GPU(Eigen::half, bfloat16);
REGISTER_CAST_GPU(float, bfloat16);
REGISTER_CAST_GPU(double, bfloat16);
REGISTER_CAST_GPU(std::complex<float>, bfloat16);
REGISTER_CAST_GPU(std::complex<double>, bfloat16);
#endif
CURRY_TYPES2(REGISTER_CAST_GPU, bfloat16);
REGISTER_CAST_GPU(float, float8_e5m2);
REGISTER_CAST_GPU(float, float8_e4m3fn);
REGISTER_CAST_GPU(bfloat16, float8_e5m2);
REGISTER_CAST_GPU(bfloat16, float8_e4m3fn);
REGISTER_CAST_GPU(Eigen::half, float8_e5m2);
REGISTER_CAST_GPU(Eigen::half, float8_e4m3fn);
REGISTER_CAST_GPU(float8_e5m2, float);
REGISTER_CAST_GPU(float8_e5m2, bfloat16);
REGISTER_CAST_GPU(float8_e5m2, Eigen::half);
REGISTER_CAST_GPU(float8_e5m2, float8_e5m2);
REGISTER_CAST_GPU(float8_e5m2, float8_e4m3fn);
REGISTER_CAST_GPU(float8_e4m3fn, float);
REGISTER_CAST_GPU(float8_e4m3fn, bfloat16);
REGISTER_CAST_GPU(float8_e4m3fn, Eigen::half);
REGISTER_CAST_GPU(float8_e4m3fn, float8_e5m2);
REGISTER_CAST_GPU(float8_e4m3fn, float8_e4m3fn);
REGISTER_CAST_GPU(int4, int4);
REGISTER_CAST_GPU(int4, int8);
REGISTER_CAST_GPU(int4, int16);
REGISTER_CAST_GPU(int4, int32);
REGISTER_CAST_GPU(int4, int64_t);
REGISTER_CAST_GPU(int4, uint4);
REGISTER_CAST_GPU(int4, uint8);
REGISTER_CAST_GPU(int4, uint16);
REGISTER_CAST_GPU(int4, uint32);
REGISTER_CAST_GPU(int4, uint64_t);
REGISTER_CAST_GPU(int8, int4);
REGISTER_CAST_GPU(int16, int4);
REGISTER_CAST_GPU(int32, int4);
REGISTER_CAST_GPU(int64_t, int4);
REGISTER_CAST_GPU(uint4, int4);
REGISTER_CAST_GPU(uint8, int4);
REGISTER_CAST_GPU(uint16, int4);
REGISTER_CAST_GPU(uint32, int4);
REGISTER_CAST_GPU(uint64_t, int4);
REGISTER_CAST_GPU(uint4, int8);
REGISTER_CAST_GPU(uint4, int16);
REGISTER_CAST_GPU(uint4, int32);
REGISTER_CAST_GPU(uint4, int64_t);
REGISTER_CAST_GPU(uint4, uint4);
REGISTER_CAST_GPU(uint4, uint8);
REGISTER_CAST_GPU(uint4, uint16);
REGISTER_CAST_GPU(uint4, uint32);
REGISTER_CAST_GPU(uint4, uint64_t);
REGISTER_CAST_GPU(int8, uint4);
REGISTER_CAST_GPU(int16, uint4);
REGISTER_CAST_GPU(int32, uint4);
REGISTER_CAST_GPU(int64_t, uint4);
REGISTER_CAST_GPU(uint8, uint4);
REGISTER_CAST_GPU(uint16, uint4);
REGISTER_CAST_GPU(uint32, uint4);
REGISTER_CAST_GPU(uint64_t, uint4);
#undef REGISTER_CAST_GPU
#endif
#undef CURRY_TYPES2
REGISTER_KERNEL_BUILDER(Name("_HostCast").Device(DEVICE_CPU), CpuCastOp);
REGISTER_KERNEL_BUILDER(
Name("_HostCast").Device(DEVICE_DEFAULT).HostMemory("x").HostMemory("y"),
CpuCastOp);
} | #include <cstdint>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
using Eigen::half;
namespace tensorflow {
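// Builds a graph containing a single Cast of a random constant of shape
// {64, 64, num / (64 * 64)} from Src to Dst; used by the benchmarks below.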
template <typename Src, typename Dst>
static Graph* Cast(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<Src>::value,
TensorShape({64, 64, num / (64 * 64)}));
data.flat<Src>().setRandom();
test::graph::Cast(g, test::graph::Constant(g, data),
DataTypeToEnum<Dst>::value);
return g;
}
class CastOpTest : public OpsTestBase {
protected:
void MakeOp(DataType src, DataType dst, bool trunc) {
if (trunc) {
TF_EXPECT_OK(NodeDefBuilder("cast_op", "Cast")
.Input(FakeInput(src))
.Attr("SrcT", src)
.Attr("DstT", dst)
.Attr("Truncate", true)
.Finalize(node_def()));
} else {
TF_EXPECT_OK(NodeDefBuilder("cast_op", "Cast")
.Input(FakeInput(src))
.Attr("SrcT", src)
.Attr("DstT", dst)
.Finalize(node_def()));
}
TF_EXPECT_OK(InitOp());
}
template <typename INPUT, typename OUTPUT>
void CheckCast(bool trunc) {
DataType in_type = DataTypeToEnum<INPUT>::v();
DataType out_type = DataTypeToEnum<OUTPUT>::v();
MakeOp(in_type, out_type, trunc);
AddInputFromArray<INPUT>(TensorShape({1, 2, 2, 1}),
{INPUT(1), INPUT(2), INPUT(3), INPUT(4)});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), out_type, TensorShape({1, 2, 2, 1}));
test::FillValues<OUTPUT>(&expected,
{OUTPUT(1), OUTPUT(2), OUTPUT(3), OUTPUT(4)});
test::ExpectTensorEqual<OUTPUT>(expected, *GetOutput(0));
}
};
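// TEST_CAST(in, out) expands to two tests per type pair, one with
// Truncate=false and one with Truncate=true. Note that the token pasting
// `TestCastTruncate_##_##in##_##out` yields a double underscore in the
// truncating test's name (e.g. TestCastTruncate__float_half).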
#define TEST_CAST(in, out) \
TEST_F(CastOpTest, TestCast##_##in##_##out) { CheckCast<in, out>(false); } \
TEST_F(CastOpTest, TestCastTruncate_##_##in##_##out) { \
CheckCast<in, out>(true); \
}
#define TEST_ALL_CASTS_FROM(in) \
TEST_CAST(in, uint8) \
TEST_CAST(in, uint16) \
TEST_CAST(in, uint32) \
TEST_CAST(in, uint64) \
TEST_CAST(in, int8) \
TEST_CAST(in, int16) \
TEST_CAST(in, int32) \
TEST_CAST(in, int64_t) \
TEST_CAST(in, half) \
TEST_CAST(in, float) \
TEST_CAST(in, double) \
TEST_CAST(in, bfloat16) \
TEST_CAST(in, quint8) \
TEST_CAST(in, qint8) \
TEST_CAST(in, qint32) \
TEST_CAST(in, qint16) \
TEST_CAST(in, quint16)
TEST_ALL_CASTS_FROM(uint8)
TEST_ALL_CASTS_FROM(uint16)
TEST_ALL_CASTS_FROM(uint32)
TEST_ALL_CASTS_FROM(uint64)
TEST_ALL_CASTS_FROM(int16)
TEST_ALL_CASTS_FROM(int32)
TEST_ALL_CASTS_FROM(int64_t)
TEST_ALL_CASTS_FROM(half)
TEST_ALL_CASTS_FROM(float)
TEST_ALL_CASTS_FROM(double)
TEST_ALL_CASTS_FROM(bfloat16)
TEST_ALL_CASTS_FROM(quint8)
TEST_ALL_CASTS_FROM(qint8)
TEST_ALL_CASTS_FROM(qint32)
TEST_ALL_CASTS_FROM(qint16)
TEST_ALL_CASTS_FROM(quint16)
#undef TEST_ALL_CASTS_FROM
#define TEST_INT_CASTS_FROM(in) \
TEST_CAST(in, uint8) \
TEST_CAST(in, uint16) \
TEST_CAST(in, uint32) \
TEST_CAST(in, uint64) \
TEST_CAST(in, int8) \
TEST_CAST(in, int16) \
TEST_CAST(in, int32) \
TEST_CAST(in, int64_t)
#define TEST_INT_CASTS_TO(out) \
TEST_CAST(uint8, out) \
TEST_CAST(uint16, out) \
TEST_CAST(uint32, out) \
TEST_CAST(uint64, out) \
TEST_CAST(int8, out) \
TEST_CAST(int16, out) \
TEST_CAST(int32, out) \
TEST_CAST(int64_t, out)
TEST_INT_CASTS_FROM(int4)
TEST_INT_CASTS_FROM(uint4)
TEST_INT_CASTS_TO(int4)
TEST_INT_CASTS_TO(uint4)
TEST_CAST(int4, int4)
TEST_CAST(int4, uint4)
TEST_CAST(uint4, int4)
TEST_CAST(uint4, uint4)
#undef TEST_INT_CASTS_FROM
#undef TEST_INT_CASTS_TO
#undef TEST_CAST
static void BM_cpu_float_int64(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", Cast<float, int64_t>(num), false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(int64_t)));
}
BENCHMARK(BM_cpu_float_int64)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_gpu_float_int64(::testing::benchmark::State& state) {
const int num = state.range(0);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
test::Benchmark("gpu", Cast<float, int64_t>(num), false)
.Run(state);
#endif
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(int64_t)));
}
BENCHMARK(BM_gpu_float_int64)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_cpu_bool_float(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", Cast<bool, float>(num), false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(bool) + sizeof(float)));
}
BENCHMARK(BM_cpu_bool_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_gpu_bool_float(::testing::benchmark::State& state) {
const int num = state.range(0);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
test::Benchmark("gpu", Cast<bool, float>(num), false)
.Run(state);
#endif
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(bool) + sizeof(float)));
}
BENCHMARK(BM_gpu_bool_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_cpu_float_bfloat16(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", Cast<float, bfloat16>(num),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(bfloat16)));
}
BENCHMARK(BM_cpu_float_bfloat16)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_cpu_bfloat16_float(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", Cast<bfloat16, float>(num),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(bfloat16)));
}
BENCHMARK(BM_cpu_bfloat16_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_cpu_float_half(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", Cast<float, Eigen::half>(num),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(Eigen::half)));
}
BENCHMARK(BM_cpu_float_half)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_cpu_half_float(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", Cast<Eigen::half, float>(num),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(Eigen::half)));
}
BENCHMARK(BM_cpu_half_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_gpu_float_half(::testing::benchmark::State& state) {
const int num = state.range(0);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
test::Benchmark("gpu", Cast<float, Eigen::half>(num),
false)
.Run(state);
#endif
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(Eigen::half)));
}
BENCHMARK(BM_gpu_float_half)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
static void BM_gpu_half_float(::testing::benchmark::State& state) {
const int num = state.range(0);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
test::Benchmark("gpu", Cast<Eigen::half, float>(num),
false)
.Run(state);
#endif
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num *
(sizeof(float) + sizeof(Eigen::half)));
}
BENCHMARK(BM_gpu_half_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20);
} |
1,141 | cpp | tensorflow/tensorflow | scatter_nd_op | tensorflow/core/kernels/scatter_nd_op.cc | tensorflow/core/kernels/scatter_nd_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SCATTER_ND_OP_H_
#define TENSORFLOW_CORE_KERNELS_SCATTER_ND_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
class OpKernelContext;
namespace scatter_nd_op {
enum class UpdateOp { ASSIGN, ADD, SUB, MIN, MAX };
}
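// ScatterNdFunctor applies a scatter for a fixed index depth IXDIM;
// DoScatterNd is the shape-checked entry point used by the kernels in the
// .cc file. When `allocate` is true the output tensor is created by the
// functor rather than updated in place.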
namespace functor {
template <typename Device, typename T, typename Index,
scatter_nd_op::UpdateOp op, int IXDIM>
struct ScatterNdFunctor {
Index operator()(
const Device& d, const Index slice_size,
const Eigen::array<Eigen::DenseIndex, IXDIM> output_shape_prefix,
typename TTypes<T, 2>::Tensor Tparams,
typename TTypes<Index, 2>::ConstTensor Tindices,
typename TTypes<T, 2>::ConstTensor Tupdates,
typename TTypes<T, 2>::Tensor Toutput);
};
template <typename Device, typename T, typename Index,
scatter_nd_op::UpdateOp Op>
Status DoScatterNd(OpKernelContext* c, const Tensor& indices,
const Tensor& updates, const TensorShape& shape, Tensor* out,
bool allocate);
}
}
#endif
#define EIGEN_USE_THREADS
#include <string>
#include <type_traits>
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#include "tensorflow/core/platform/stream_executor.h"
#endif
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/dense_update_functor.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/inplace_ops_functor.h"
#include "tensorflow/core/kernels/scatter_nd_op.h"
#include "tensorflow/core/kernels/scatter_nd_util.h"
#include "tensorflow/core/kernels/training_op_helpers.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bad_indices_policy.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
constexpr char kBadIndicesPolicyAtrr[] = "bad_indices_policy";
}
namespace functor {
template <typename Device, typename T, typename Index,
scatter_nd_op::UpdateOp Op>
Status DoScatterNd(OpKernelContext* c, const Tensor& indices,
const Tensor& updates, const TensorShape& shape, Tensor* out,
bool allocate, BadIndicesPolicy bad_indices_policy);
}
bool ValidEmptyOutputShape(int64_t num_inputs, int64_t num_indices,
int64_t num_updates) {
if (num_indices == 0 && num_updates == 0) {
return true;
}
return (num_inputs != 0 && num_indices != 0 && num_updates != 0);
}
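// ScatterNd kernel: scatters `updates` at `indices` into a freshly created
// tensor of the requested shape. Duplicate indices accumulate because the ADD
// update op is used. bad_indices_policy controls whether out-of-range indices
// raise an error or are ignored (ERROR is rejected on GPU devices).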
template <typename Device, typename T, typename Index>
class ScatterNdOp : public OpKernel {
public:
explicit ScatterNdOp(OpKernelConstruction* c) : OpKernel(c) {
const DataType dt = DataTypeToEnum<T>::v();
const DataType index_t = DataTypeToEnum<Index>::v();
OP_REQUIRES_OK(c, c->MatchSignature({index_t, dt, index_t}, {dt}));
std::string bad_indices_policy_str;
OP_REQUIRES_OK(c,
c->GetAttr(kBadIndicesPolicyAtrr, &bad_indices_policy_str));
absl::StatusOr<BadIndicesPolicy> bad_indices_policy =
BadIndicesPolicyFromString(bad_indices_policy_str);
OP_REQUIRES_OK(c, bad_indices_policy.status());
bad_indices_policy_ = *bad_indices_policy;
if constexpr (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
c, bad_indices_policy_ != BadIndicesPolicy::kError,
errors::InvalidArgument(
"ERROR bad_indices_policy is not supported on GPU devices."));
}
}
void Compute(OpKernelContext* c) override {
const Tensor& indices = c->input(0);
const Tensor& updates = c->input(1);
const Tensor& shape_input = c->input(2);
OP_REQUIRES(c, indices.shape().dims() >= 1,
errors::InvalidArgument(
"Indices shape must have rank at least one. Found:",
indices.shape().DebugString()));
OP_REQUIRES(c, updates.shape().dims() >= 1,
errors::InvalidArgument(
"Updates shape must have rank at least one. Found:",
updates.shape().DebugString()));
auto vec = shape_input.flat<Index>();
TensorShape shape;
OP_REQUIRES_OK(c,
TensorShapeUtils::MakeShape(vec.data(), vec.size(), &shape));
OP_REQUIRES(c,
ValidEmptyOutputShape(shape_input.NumElements(),
indices.shape().num_elements(),
updates.shape().num_elements()),
errors::InvalidArgument(
"Indices and updates specified for empty output shape"));
const int64_t outer_dims = indices.shape().dims() - 1;
for (int i = 0; i < outer_dims; ++i) {
OP_REQUIRES(
c, indices.shape().dim_size(i) == updates.shape().dim_size(i),
errors::InvalidArgument(
"Dimensions [0,", outer_dims,
") of indices[shape=", indices.shape().DebugString(),
"] must match dimensions [0,", outer_dims,
") of updates[shape=", updates.shape().DebugString(), "]"));
}
const int64_t ix = indices.shape().dim_size(outer_dims);
OP_REQUIRES(c, updates.shape().dims() - outer_dims == shape.dims() - ix,
errors::InvalidArgument(
"Dimensions [", ix, ",", shape.dims(), ") of input[shape=",
shape.DebugString(), "] must match dimensions [",
outer_dims, ",", updates.shape().dims(),
") of updates[shape=", updates.shape().DebugString(), "]"));
for (int i = 0; i + outer_dims < updates.shape().dims(); ++i) {
OP_REQUIRES(
c, updates.shape().dim_size(i + outer_dims) == shape.dim_size(ix + i),
errors::InvalidArgument("Dimensions [", ix, ",", shape.dims(),
") of input[shape=", shape.DebugString(),
"] must match dimensions [", outer_dims, ",",
updates.shape().dims(), ") of updates[shape=",
updates.shape().DebugString(), "]"));
}
OP_REQUIRES(c, shape_input.dims() == 1,
errors::InvalidArgument("Shape must be a vector"));
Tensor out;
OP_REQUIRES_OK(
c, functor::DoScatterNd<Device, T, Index, scatter_nd_op::UpdateOp::ADD>(
c, indices, updates, shape, &out, true ,
bad_indices_policy_));
c->set_output(0, out);
}
private:
BadIndicesPolicy bad_indices_policy_ = BadIndicesPolicy::kDefault;
};
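// TensorScatterUpdate/Add/Sub (and related) kernels: a functional scatter on
// a value tensor. The input buffer is forwarded when it can be reused and
// copied otherwise, and the update is then applied in place on the output.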
template <typename Device, typename T, typename Index,
scatter_nd_op::UpdateOp op>
class TensorScatterOp : public OpKernel {
public:
explicit TensorScatterOp(OpKernelConstruction* c) : OpKernel(c) {
const DataType dt = DataTypeToEnum<T>::v();
const DataType index_t = DataTypeToEnum<Index>::v();
OP_REQUIRES_OK(c, c->MatchSignature({dt, index_t, dt}, {dt}));
}
void Compute(OpKernelContext* c) override {
const Tensor& input = c->input(0);
const Tensor& indices = c->input(1);
const Tensor& updates = c->input(2);
OP_REQUIRES(c, indices.shape().dims() >= 1,
errors::InvalidArgument(
"Indices shape must have rank at least one. Found:",
indices.shape().DebugString()));
OP_REQUIRES(c, updates.shape().dims() >= 1,
errors::InvalidArgument(
"Updates shape must have rank at least one. Found:",
updates.shape().DebugString()));
TensorShape shape = input.shape();
OP_REQUIRES(c,
ValidEmptyOutputShape(shape.num_elements(),
indices.shape().num_elements(),
updates.shape().num_elements()),
errors::InvalidArgument(
"Indices and updates specified for empty output shape"));
const int64_t outer_dims = indices.shape().dims() - 1;
for (int i = 0; i < outer_dims; ++i) {
OP_REQUIRES(c, indices.shape().dim_size(i) == updates.shape().dim_size(i),
errors::InvalidArgument(
"Outer dimensions of indices and update must match. "
"Indices shape: ",
indices.shape().DebugString(),
", updates shape:", updates.shape().DebugString()));
}
const int64_t ix = indices.shape().dim_size(outer_dims);
OP_REQUIRES(
c, updates.shape().dims() - outer_dims == shape.dims() - ix,
errors::InvalidArgument("Inner dimensions of output shape must match "
"inner dimensions of updates shape. Output: ",
shape.DebugString(),
" updates: ", updates.shape().DebugString()));
for (int i = 0; i + outer_dims < updates.shape().dims(); ++i) {
OP_REQUIRES(
c, updates.shape().dim_size(i + outer_dims) == shape.dim_size(ix + i),
errors::InvalidArgument(
"The inner ", shape.dims() - ix,
" dimensions of output.shape=", shape.DebugString(),
" must match the inner ", updates.shape().dims() - outer_dims,
" dimensions of updates.shape=", updates.shape().DebugString()));
}
AllocatorAttributes alloc_attr;
MemoryType memory_type = DEVICE_MEMORY;
if (std::is_same<Device, CPUDevice>::value) {
alloc_attr.set_on_host(true);
memory_type = HOST_MEMORY;
} else {
memory_type = DEVICE_MEMORY;
}
std::unique_ptr<Tensor> forwarded_input =
c->forward_input(0, 0, input.dtype(), shape, memory_type, alloc_attr);
if (forwarded_input == nullptr) {
Tensor* out;
OP_REQUIRES_OK(c, c->allocate_output(0, input.shape(), &out));
OP_REQUIRES_OK(c, tensorflow::functor::DoCopy(c->eigen_device<Device>(),
input, out));
OP_REQUIRES_OK(c,
functor::DoScatterNd<Device, T, Index, op>(
c, indices, updates, shape, out, false ));
} else {
OP_REQUIRES_OK(c, functor::DoScatterNd<Device, T, Index, op>(
c, indices, updates, shape, forwarded_input.get(),
false ));
c->set_output(0, *forwarded_input);
}
}
};
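// In-place scatter for resource variables, ref variables, and plain tensors.
// Resource inputs lock the variable's mutex, ref inputs honor the use_locking
// attribute, and dense inputs try to forward the input buffer to the output
// (falling back to an explicit copy).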
template <typename Device, typename T, typename Index,
scatter_nd_op::UpdateOp op>
class ScatterNdUpdateOp : public OpKernel {
public:
explicit ScatterNdUpdateOp(OpKernelConstruction* c) : OpKernel(c) {
const DataType dt = DataTypeToEnum<T>::v();
const DataType dt_ref = DataTypeToEnum<T>::ref();
const DataType index_t = DataTypeToEnum<Index>::v();
dtype_ = c->input_type(0);
if (c->input_type(0) == DT_RESOURCE) {
} else if (IsRefType(c->input_type(0))) {
OP_REQUIRES_OK(c, c->MatchSignature({dt_ref, index_t, dt}, {dt_ref}));
OP_REQUIRES_OK(c, c->GetAttr("use_locking", &use_exclusive_lock_));
} else {
OP_REQUIRES_OK(c, c->MatchSignature({dt, index_t, dt}, {dt}));
use_exclusive_lock_ = false;
}
}
void Compute(OpKernelContext* c) override {
if (dtype_ == DT_RESOURCE) {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
OP_REQUIRES_OK(c, EnsureSparseVariableAccess<Device, T>(c, v.get()));
mutex_lock m(*v->mu());
DoCompute(c);
} else if (use_exclusive_lock_) {
DCHECK(IsRefType(c->input_dtype(0)));
mutex_lock l(*c->input_ref_mutex(0));
DoCompute(c);
} else {
DoCompute(c);
}
}
private:
DataType dtype_;
bool use_exclusive_lock_;
void DoCompute(OpKernelContext* c) {
const Tensor& indices = c->input(1);
const Tensor& updates = c->input(2);
Tensor params;
TensorShape params_shape;
if (dtype_ == DT_RESOURCE) {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
Tensor* t = v->tensor();
params = *t;
params_shape = params.shape();
} else if (IsRefType(c->input_dtype(0))) {
params = c->mutable_input(0, use_exclusive_lock_);
params_shape = params.shape();
c->forward_ref_input_to_ref_output(0, 0);
OP_REQUIRES(c, params.IsInitialized(),
errors::FailedPrecondition("Null ref for params"));
} else {
Tensor* params_ptr;
params_shape = c->input(0).shape();
if (!c->forward_input_to_output_with_shape(0, 0, params_shape,
¶ms_ptr)) {
OP_REQUIRES_OK(c, c->allocate_output(0, params_shape, ¶ms_ptr));
params = *params_ptr;
functor::DenseUpdate<Device, T, ASSIGN> copy;
const Tensor& input_copy = c->input(0);
copy(c->eigen_device<Device>(), params.flat<T>(), input_copy.flat<T>());
} else {
params = *params_ptr;
}
}
OP_REQUIRES_OK(
c, functor::DoScatterNd<Device, T, Index, op>(
c, indices, updates, params_shape, ¶ms, false ));
}
};
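// Explicit GPU instantiations of DoScatterNd plus the kernel-registration
// macros. The *_INT32_GPU variants register CPU implementations on
// DEVICE_DEFAULT with all tensors pinned to host memory, presumably because
// int32 tensors are normally placed in host memory.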
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_SCATTER_ND_ASSIGN_FUNCTION_GPU(type) \
template Status functor::DoScatterNd<GPUDevice, type, int64, \
scatter_nd_op::UpdateOp::ASSIGN>( \
OpKernelContext*, Tensor const&, Tensor const&, TensorShape const&, \
Tensor*, bool);
REGISTER_SCATTER_ND_ASSIGN_FUNCTION_GPU(float)
REGISTER_SCATTER_ND_ASSIGN_FUNCTION_GPU(double)
REGISTER_SCATTER_ND_ASSIGN_FUNCTION_GPU(complex64)
REGISTER_SCATTER_ND_ASSIGN_FUNCTION_GPU(complex128)
#undef REGISTER_SCATTER_ND_ASSIGN_FUNCTION_GPU
#endif
#define REGISTER_SCATTER_ND_KERNEL_INDEX(type, index_type, dev, name) \
REGISTER_KERNEL_BUILDER(Name(name) \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("shape"), \
ScatterNdOp<dev##Device, type, index_type>)
#define REGISTER_SCATTER_ND_KERNEL_INDEX_INT32_GPU(index_type, name) \
REGISTER_KERNEL_BUILDER(Name(name) \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<int32>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("indices") \
.HostMemory("updates") \
.HostMemory("shape") \
.HostMemory("output"), \
ScatterNdOp<CPUDevice, int32, index_type>)
#define REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, index_type, dev, name, \
op) \
REGISTER_KERNEL_BUILDER( \
Name(name) \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("T") \
.TypeConstraint<index_type>("Tindices"), \
ScatterNdUpdateOp<dev##Device, type, index_type, op>)
#define REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX_INT32_GPU(index_type, name, \
op) \
REGISTER_KERNEL_BUILDER(Name(name) \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<int32>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("ref") \
.HostMemory("indices") \
.HostMemory("updates") \
.HostMemory("output_ref"), \
ScatterNdUpdateOp<CPUDevice, int32, index_type, op>)
#define REGISTER_SCATTER_ND_NON_ALIASING_UPDATE_KERNEL_INDEX_INT32_GPU( \
index_type, name, op) \
REGISTER_KERNEL_BUILDER(Name(name) \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<int32>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("input") \
.HostMemory("indices") \
.HostMemory("updates") \
.HostMemory("output"), \
ScatterNdUpdateOp<CPUDevice, int32, index_type, op>)
#define REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX(type, index_type, \
dev, name, op) \
REGISTER_KERNEL_BUILDER( \
Name(name) \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("ref"), \
ScatterNdUpdateOp<dev##Device, type, index_type, op>)
#define REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX_INT32_GPU(index_type, \
name, op) \
REGISTER_KERNEL_BUILDER(Name(name) \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<int32>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("ref") \
.HostMemory("indices") \
.HostMemory("updates"), \
ScatterNdUpdateOp<CPUDevice, int32, index_type, op>)
#define REGISTER_SCATTER_ND_KERNEL(type, dev, name) \
REGISTER_SCATTER_ND_KERNEL_INDEX(type, int32, dev, name); \
REGISTER_SCATTER_ND_KERNEL_INDEX(type, int64_t, dev, name)
#define REGISTER_SCATTER_ND_KERNEL_INT32_GPU(name) \
REGISTER_SCATTER_ND_KERNEL_INDEX_INT32_GPU(int32, name); \
REGISTER_SCATTER_ND_KERNEL_INDEX_INT32_GPU(int64_t, name)
#define REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, name, op) \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, int32, dev, name, op); \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, int64_t, dev, name, op)
#define REGISTER_SCATTER_ND_UPDATE_KERNEL_INT32_GPU(name, op) \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX_INT32_GPU(int32, name, op); \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX_INT32_GPU(int64_t, name, op)
#define REGISTER_SCATTER_ND_NON_ALIASING_UPDATE_KERNEL_INT32_GPU(name, op) \
REGISTER_SCATTER_ND_NON_ALIASING_UPDATE_KERNEL_INDEX_INT32_GPU(int32, name, \
op); \
REGISTER_SCATTER_ND_NON_ALIASING_UPDATE_KERNEL_INDEX_INT32_GPU(int64_t, \
name, op)
#define REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL(type, dev, name, op) \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX(type, int32, dev, name, \
op); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX(type, int64_t, dev, name, op)
#define REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INT32_GPU(name, op) \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX_INT32_GPU(int32, name, op); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX_INT32_GPU(int64_t, name, op)
#define REGISTER_SCATTER_ND_ADD_SUB(type, dev) \
REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdAdd", \
scatter_nd_op::UpdateOp::ADD); \
REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdNonAliasingAdd", \
scatter_nd_op::UpdateOp::ADD); \
REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdSub", \
scatter_nd_op::UpdateOp::SUB); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL( \
type, dev, "ResourceScatterNdAdd", scatter_nd_op::UpdateOp::ADD); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL( \
type, dev, "ResourceScatterNdSub", scatter_nd_op::UpdateOp::SUB);
#define REGISTER_SCATTER_ND_ADD_SUB_INT32_GPU() \
REGISTER_SCATTER_ND_NON_ALIASING_UPDATE_KERNEL_INT32_GPU( \
"ScatterNdNonAliasingAdd", scatter_nd_op::UpdateOp::ADD); \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INT32_GPU("ScatterNdAdd", \
scatter_nd_op::UpdateOp::ADD); \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INT32_GPU("ScatterNdSub", \
scatter_nd_op::UpdateOp::SUB); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INT32_GPU( \
"ResourceScatterNdAdd", scatter_nd_op::UpdateOp::ADD); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INT32_GPU( \
"ResourceScatterNdSub", scatter_nd_op::UpdateOp::SUB);
#define REGISTER_SCATTER_ND(type, dev) \
REGISTER_SCATTER_ND_KERNEL(type, dev, "ScatterNd");
#define REGISTER_SCATTER_ND_INT32_GPU() \
REGISTER_SCATTER_ND_KERNEL_INT32_GPU("ScatterNd");
#define REGISTER_SCATTER_ND_UPDATE(type, dev) \
REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdUpdate", \
scatter_nd_op::UpdateOp::ASSIGN); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL( \
type, dev, "ResourceScatterNdUpdate", scatter_nd_op::UpdateOp::ASSIGN);
#define REGISTER_SCATTER_ND_UPDATE_INT32_GPU() \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INT32_GPU( \
"ScatterNdUpdate", scatter_nd_op::UpdateOp::ASSIGN); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INT32_GPU( \
"ResourceScatterNdUpdate", scatter_nd_op::UpdateOp::ASSIGN);
#define REGISTER_SCATTER_ND_MIN_MAX(type, dev) \
REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdMax", \
scatter_nd_op::UpdateOp::MAX); \
REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdMin", \
scatter_nd_op::UpdateOp::MIN); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL( \
type, dev, "ResourceScatterNdMin", scatter_nd_op::UpdateOp::MIN); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL( \
type, dev, "ResourceScatterNdMax", scatter_nd_op::UpdateOp::MAX);
#define REGISTER_SCATTER_ND_MIN_MAX_INT32_GPU() \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INT32_GPU("ScatterNdMax", \
scatter_nd_op::UpdateOp::MAX); \
REGISTER_SCATTER_ND_UPDATE_KERNEL_INT32_GPU("ScatterNdMin", \
scatter_nd_op::UpdateOp::MIN); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INT32_GPU( \
"ResourceScatterNdMin", scatter_nd_op::UpdateOp::MIN); \
REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INT32_GPU( \
"ResourceScatterNdMax", scatter_nd_op::UpdateOp::MAX);
#define REGISTER_SCATTER_ND_ADD_SUB_CPU(type) \
REGISTER_SCATTER_ND_ADD_SUB(type, CPU);
#define REGISTER_SCATTER_ND_UPDATE_CPU(type) \
REGISTER_SCATTER_ND_UPDATE(type, CPU);
#define REGISTER_SCATTER_ND_MIN_MAX_CPU(type) \
REGISTER_SCATTER_ND_MIN_MAX(type, CPU);
#define REGISTER_SCATTER_ND_CPU(type) REGISTER_SCATTER_ND(type, CPU);
#define REGISTER_SCATTER_ND_GPU(type) REGISTER_SCATTER_ND(type, GPU);
TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_ADD_SUB_CPU);
TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_UPDATE_CPU);
TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_CPU);
TF_CALL_tstring(REGISTER_SCATTER_ND_CPU);
TF_CALL_tstring(REGISTER_SCATTER_ND_UPDATE_CPU);
TF_CALL_bool(REGISTER_SCATTER_ND_ADD_SUB_CPU);
TF_CALL_bool(REGISTER_SCATTER_ND_UPDATE_CPU);
TF_CALL_bool(REGISTER_SCATTER_ND_CPU);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_SCATTER_ND_MIN_MAX_CPU);
#define REGISTER_SCATTER_ND_TENSOR_UPDATE_TYPE_INDEX_TYPE(type, index_type, \
dev) \
REGISTER_KERNEL_BUILDER(Name("TensorScatterUpdate") \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("T") \
.TypeConstraint<index_type>("Tindices"), \
TensorScatterOp<dev##Device, type, index_type, \
scatter_nd_op::UpdateOp::ASSIGN>)
#define REGISTER_SCATTER_ND_TENSOR_UPDATE_INT32_GPU_INDEX_TYPE(index_type) \
REGISTER_KERNEL_BUILDER(Name("TensorScatterUpdate") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<int32>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("tensor") \
.HostMemory("indices") \
.HostMemory("updates") \
.HostMemory("output"), \
TensorScatterOp<CPUDevice, int32, index_type, \
scatter_nd_op::UpdateOp::ASSIGN>)
#define REGISTER_SCATTER_ND_TENSOR_ADD_TYPE_INDEX_TYPE(type, index_type, dev) \
REGISTER_KERNEL_BUILDER(Name("TensorScatterAdd") \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("T") \
.TypeConstraint<index_type>("Tindices"), \
TensorScatterOp<dev##Device, type, index_type, \
scatter_nd_op::UpdateOp::ADD>)
#define REGISTER_SCATTER_ND_TENSOR_ADD_INT32_GPU_INDEX_TYPE(index_type) \
REGISTER_KERNEL_BUILDER(Name("TensorScatterAdd") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<int32>("T") \
.TypeConstraint<index_type>("Tindices") \
.HostMemory("tensor") \
.HostMemory("indices") \
.HostMemory("updates") \
.HostMemory("output"), \
TensorScatterOp<CPUDevice, int32, index_type, \
scatter_nd_op::UpdateOp::ADD>)
#define REGISTER_SCATTER_ND_TENSOR_SUB_TYPE_INDEX_TYPE(type, index_type, dev) \
REGISTER_KERNEL_BUILDER(Name("TensorScatterSub") \ | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class ScatterNdUpdateOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
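// The fixture drives the ScatterNdUpdate kernel directly through OpsTestBase:
// MakeOp builds the NodeDef with a ref variable, indices, and updates, and
// each test checks the mutated ref input against an expected tensor.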
TEST_F(ScatterNdUpdateOpTest, Simple_TwoD32) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Simple_Two64) {
MakeOp(DT_FLOAT_REF, DT_INT64);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int64_t>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Simple_ZeroD) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {3});
AddInputFromArray<float>(TensorShape({1}), {101});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {0, 0, 0, 101, 0});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Simple_OneD) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {100, 101, 102});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, HigherRank) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({2, 3, 1}), {0, 4, 2, 1, 3, 6});
AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [99] does not index into shape [5,3]"))
<< s;
}
TEST_F(ScatterNdUpdateOpTest, Error_WrongDimsIndices) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({1, 3, 1}), {0, 4, 99});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"Dimensions [0,1) of indices[shape=[1,3,1]] = 1 must match dimensions "
"[0,1) of updates[shape=[3,3]] = 3"))
<< s;
}
TEST_F(ScatterNdUpdateOpTest, Error_MismatchedParamsAndUpdateDimensions) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(
TensorShape({3, 4}),
{100, 101, 102, 103, 777, 778, 779, 780, 10000, 10001, 10002, 10004});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"Dimensions [1,2) of input[shape=[5,3]] must match dimensions [1,2) of "
"updates[shape=[3,4]]"))
<< s;
}
TEST_F(ScatterNdUpdateOpTest, Error_MismatchedIndicesAndUpdateDimensions) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({2, 3}),
{100, 101, 102, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"Dimensions [0,1) of indices[shape=[3,1]] = 3 must match dimensions [0,1)"
" of updates[shape=[2,3]] = 2"))
<< s;
}
class ScatterNdOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdOpTest, Simple_OneD) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(ScatterNdOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [5] does not index into shape [5,1]"))
<< s;
}
class ScatterNdOpErrorOnBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "ERROR")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdOpErrorOnBadIndicesTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [5] does not index into shape [5,1]"))
<< s;
}
class ScatterNdOpIgnoreBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "IGNORE")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdOpIgnoreBadIndicesTest, DropOutOfRangeIndices) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class ScatterNdOpConstructionTest : public OpsTestBase {};
TEST_F(ScatterNdOpConstructionTest, Error_BadIndicesPolicyInvalid) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY")
.Finalize(node_def()));
EXPECT_NE(InitOp(), absl::OkStatus());
}
class ScatterNdUpdateBM : public ScatterNdUpdateOpTest {
public:
void TestBody() override {}
void MakeBenchmarkOp(const char* op, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", op)
.Input(FakeInput(DT_FLOAT_REF))
.Input(FakeInput(index_type))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
}
};
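// Benchmark helper: builds a [kRows, embedding_size] parameter tensor and
// kNumUpdates random scatter updates, then repeatedly runs the requested
// scatter op kernel.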
template <typename Index>
void BM_ScatterNdHelper(::testing::benchmark::State& state, int embedding_size,
const char* op) {
const int kRows = 10000000 / embedding_size;
std::vector<float> values;
values.reserve(kRows);
for (int i = 0; i < kRows * embedding_size; i++) {
values.push_back(i);
}
const int kNumUpdates = 1000;
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<Index> indices;
std::vector<float> updates;
for (int i = 0; i < kNumUpdates; i++) {
indices.push_back(rnd.Uniform(kRows));
for (int j = 0; j < embedding_size; j++) {
updates.push_back(i * 10 + j);
}
}
ScatterNdUpdateBM bm;
bm.MakeBenchmarkOp(op, DataTypeToEnum<Index>::v());
bm.AddInputFromArray<float>(TensorShape({kRows, embedding_size}), values);
bm.AddInputFromArray<Index>(TensorShape({kNumUpdates}), indices);
bm.AddInputFromArray<float>(TensorShape({kNumUpdates, embedding_size}),
updates);
for (auto i : state) {
Status s = bm.RunOpKernel();
}
state.SetItemsProcessed((static_cast<int64_t>(kNumUpdates) * embedding_size) *
state.iterations());
}
void BM_ScatterNdUpdateInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int32>(state, embedding_size, "ScatterNdUpdate");
}
void BM_ScatterNdUpdateInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int64_t>(state, embedding_size, "ScatterNdUpdate");
}
void BM_ScatterNdAddInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int32>(state, embedding_size, "ScatterNdAdd");
}
void BM_ScatterNdAddInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int64_t>(state, embedding_size, "ScatterNdAdd");
}
BENCHMARK(BM_ScatterNdUpdateInt32)
->Arg(1)
->Arg(10)
->Arg(64)
->Arg(256)
->Arg(1024);
BENCHMARK(BM_ScatterNdUpdateInt64)
->Arg(1)
->Arg(10)
->Arg(64)
->Arg(256)
->Arg(1024);
BENCHMARK(BM_ScatterNdAddInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterNdAddInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
}
} |
1,142 | cpp | tensorflow/tensorflow | broadcast | third_party/xla/xla/client/lib/broadcast.cc | third_party/xla/xla/tests/broadcast_test.cc | #ifndef XLA_CLIENT_LIB_BROADCAST_H_
#define XLA_CLIENT_LIB_BROADCAST_H_
#include "xla/client/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
absl::StatusOr<XlaOp> BroadcastTo(XlaOp input,
absl::Span<int64_t const> output_dims);
}
#endif
#include "xla/client/lib/broadcast.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
namespace xla {
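// Broadcasts `input` to `output_dims` with numpy-style alignment of trailing
// dimensions. Size-1 input dimensions are expanded directly; an output
// dimension that is a larger multiple of the input dimension is handled by
// splitting that dimension into the input extent and the repeat count,
// broadcasting with BroadcastInDim, and reshaping to the requested dimensions.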
absl::StatusOr<XlaOp> BroadcastTo(XlaOp input,
absl::Span<int64_t const> output_dims) {
XlaBuilder* builder = input.builder();
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
absl::Span<int64_t const> input_dims = input_shape.dimensions();
if (input_dims == output_dims) {
return input;
}
if (input_dims.size() > output_dims.size()) {
return tsl::errors::InvalidArgument(
"Input shape (", ShapeUtil::HumanString(input_shape),
") must have rank less than or equal to the output shape [",
absl::StrJoin(output_dims, ","), "]");
}
std::vector<int64_t> broadcast_dims;
std::vector<int64_t> broadcast_shape;
auto input_it = input_dims.rbegin();
for (auto output_it = output_dims.rbegin(); output_it != output_dims.rend();
++output_it) {
if (input_it != input_dims.rend()) {
if (!(*output_it == 0 && *input_it == 0) &&
!(*input_it != 0 && *output_it % *input_it == 0)) {
return tsl::errors::InvalidArgument(
"Invalid shape broadcast from ",
ShapeUtil::HumanString(input_shape), " to [",
absl::StrJoin(output_dims, ","), "]");
}
broadcast_dims.push_back(broadcast_shape.size());
if (*output_it == *input_it || *input_it == 1) {
broadcast_shape.push_back(*output_it);
} else if (*output_it != *input_it) {
broadcast_shape.push_back(*input_it);
broadcast_shape.push_back(*output_it / *input_it);
}
++input_it;
} else {
broadcast_shape.push_back(*output_it);
}
}
TF_RET_CHECK(input_it == input_dims.rend());
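  // The shape and dimension indices were collected from the minor end; flip
  // them back into normal dimension order before calling BroadcastInDim.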
absl::c_reverse(broadcast_dims);
int broadcast_shape_size = broadcast_shape.size();
for (int64_t& broadcast_dim : broadcast_dims) {
broadcast_dim = broadcast_shape_size - broadcast_dim - 1;
}
absl::c_reverse(broadcast_shape);
XlaOp output = BroadcastInDim(input, broadcast_shape, broadcast_dims);
if (broadcast_shape != output_dims) {
output = Reshape(output, output_dims);
}
return output;
}
} | #include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class BroadcastTest : public HloTestBase {};
XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {}), input, {}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(LiteralUtil::CreateR0<float>(42.0), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto element1 = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {3, 2}), input, {0}));
auto element2 = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3}), input, {1}));
builder.AddInstruction(HloInstruction::CreateTuple({element1, element2}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}),
LiteralSlice(result, {0}), error_spec_));
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}),
LiteralSlice(result, {1}), error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {0, 1}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {1, 0}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3, 2}), input, {0, 2}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
{{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}),
result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0, 2.0})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2, 3, 3}), input, {1}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(2, 2, 3, 3);
Array2D<float> pz({{1, 2}, {1, 2}});
expected.FillWithPZ(pz);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) {
auto builder = HloComputation::Builder(TestName());
std::vector<float> input_data(1025);
int64_t r1_size = input_data.size();
std::iota(input_data.begin(), input_data.end(), 0.0f);
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(input_data)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {3, 3, 3, r1_size}), input, {3}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(3, 3, 3, 1025);
Array2D<float> yx(3, r1_size);
for (int64_t y = 0; y < 3; ++y) {
for (int64_t x = 0; x < r1_size; ++x) {
yx(y, x) = input_data[x];
}
}
expected.FillWithYX(yx);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) {
auto builder = HloComputation::Builder(TestName());
Array4D<float> r4_array(32, 64, 7, 7);
r4_array.Fill(42.0);
std::vector<float> r1_array(64, 42.0);
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(r1_array)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {32, 64, 7, 7}), input, {1}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(LiteralUtil::CreateR4FromArray4D(r4_array),
result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {64, 64, 3, 3}), input, {}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
LOG(INFO) << hlo_module->ToString();
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(64, 64, 3, 3);
expected.Fill(1.0f);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) {
auto builder = HloComputation::Builder(TestName());
Array2D<float> to_broadcast({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2FromArray2D<float>(to_broadcast)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {3, 3, 2, 2}), input, {2, 3}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(3, 3, 2, 2);
expected.FillWithYX(to_broadcast);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) {
auto builder = HloComputation::Builder(TestName());
Array3D<float> input_vals(2, 3, 4);
input_vals.FillRandom(1.0);
Array4D<float> expected(2, 3, 4, 5);
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 4; ++k) {
for (int m = 0; m < 5; ++m) {
expected(i, j, k, m) = input_vals(i, j, k);
}
}
}
}
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR3FromArray3D<float>(input_vals)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3, 4, 5}), input, {0, 1, 2}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
}
} |
1,143 | cpp | tensorflow/tensorflow | random | third_party/xla/third_party/tsl/tsl/platform/random.cc | third_party/xla/third_party/tsl/tsl/platform/random_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_RANDOM_H_
#define TENSORFLOW_TSL_PLATFORM_RANDOM_H_
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
uint64 New64();
uint64 ThreadLocalNew64();
uint64 New64DefaultSeed();
}
}
#endif
#include "tsl/platform/random.h"
#include <memory>
#include <random>
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
std::mt19937_64* InitRngWithRandomSeed() {
std::random_device device("/dev/urandom");
return new std::mt19937_64(device());
}
std::mt19937_64 InitRngWithDefaultSeed() { return std::mt19937_64(); }
}
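// Draws from a single process-wide Mersenne Twister engine seeded from
// /dev/urandom; a mutex serializes access, so calls are thread-safe.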
uint64 New64() {
static std::mt19937_64* rng = InitRngWithRandomSeed();
static mutex mu(LINKER_INITIALIZED);
mutex_lock l(mu);
return (*rng)();
}
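// Same as New64, but each thread keeps its own engine, so no lock is needed.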
uint64 ThreadLocalNew64() {
static thread_local std::unique_ptr<std::mt19937_64> rng =
std::unique_ptr<std::mt19937_64>(InitRngWithRandomSeed());
return (*rng)();
}
uint64 New64DefaultSeed() {
static std::mt19937_64 rng = InitRngWithDefaultSeed();
static mutex mu(LINKER_INITIALIZED);
mutex_lock l(mu);
return rng();
}
}
} | #include "tsl/platform/random.h"
#include <set>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
TEST(New64Test, SanityCheck) {
std::set<uint64> values;
for (int i = 0; i < 1000000; i++) {
uint64 x = New64();
EXPECT_TRUE(values.insert(x).second) << "duplicate " << x;
}
}
}
}
} |
1,144 | cpp | tensorflow/tensorflow | scatter | third_party/xla/xla/service/gpu/fusions/legacy/scatter.cc | third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc | #ifndef XLA_SERVICE_GPU_FUSIONS_SCATTER_H_
#define XLA_SERVICE_GPU_FUSIONS_SCATTER_H_
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/ir_array.h"
namespace xla {
namespace gpu {
class ScatterFusion : public KernelFusionEmitterBase {
public:
explicit ScatterFusion(const HloFusionAnalysis& analysis);
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override {
return std::nullopt;
}
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
protected:
absl::Status EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const override;
private:
const HloFusionAnalysis& analysis_;
LaunchDimensionsConfig config_;
};
}
}
#endif
#include "xla/service/gpu/fusions/scatter.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/fusions/loop.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_nested.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/parallel_loop_emitter.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
ScatterFusion::ScatterFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis), config_(ComputeLoopFusionConfig(analysis)) {
CHECK_EQ(analysis.fusion_root_count(), 1);
CHECK_EQ(analysis.fusion_root(0).opcode(), HloOpcode::kScatter);
}
LaunchDimensions ScatterFusion::launch_dimensions() const {
const auto& updates_shape =
analysis_.fusion_root(0).instruction().operands().back()->shape();
return CalculateLaunchDimensions(updates_shape, analysis_.device_info());
}
absl::Status ScatterFusion::EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const {
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
FusedIrEmitter scatter_fused_emitter(elemental_emitter);
auto* fused_computation = fusion.fused_instructions_computation();
for (int i = 0; i < fused_computation->num_parameters(); i++) {
auto fused_operand = fused_computation->parameter_instruction(i);
scatter_fused_emitter.BindGenerator(
*fused_operand, [builder, &input = inputs[i],
fused_operand](llvm_ir::IrArray::Index index) {
return input.EmitReadArrayElement(index, builder,
fused_operand->name());
});
}
auto* root = fused_computation->root_instruction();
const xla::ScatterDimensionNumbers& scatter_dims =
Cast<HloScatterInstruction>(root)->scatter_dimension_numbers();
std::string name = llvm_ir::IrName(root);
const Shape& operand_shape = root->operand(0)->shape();
const Shape& scatter_indices_shape = root->operand(1)->shape();
const Shape& updates_shape = root->operand(2)->shape();
const HloComputation& update_computation = *root->called_computations()[0];
TF_ASSIGN_OR_RETURN(auto scatter_indices_gen,
scatter_fused_emitter.GetGenerator(*root->operand(1)));
TF_ASSIGN_OR_RETURN(auto updates_gen,
scatter_fused_emitter.GetGenerator(*root->operand(2)));
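  // Emitted once per element of the updates tensor: map the update index to a
  // window position in the operand, check that the whole window is in bounds,
  // and apply the update computation to the corresponding output element.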
auto loop_body_emitter =
[&](const llvm_ir::IrArray::Index& index) -> absl::Status {
std::vector<llvm::Value*> raw_window_multidim;
std::vector<llvm::Value*> input_scatter_multidim;
std::vector<int64_t> raw_window_bounds;
auto get_i64_array = [](absl::Span<const int64_t> container) {
return llvm::ArrayRef<int64_t>{container.data(),
static_cast<size_t>(container.size())};
};
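    // Split the update index into window coordinates (update_window_dims) and
    // scatter coordinates (all remaining dimensions).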
llvm::ArrayRef<int64_t> update_window_dims =
get_i64_array(scatter_dims.update_window_dims());
for (int64_t i = 0, e = index.size(); i != e; ++i) {
if (llvm::is_contained(update_window_dims, i)) {
raw_window_multidim.push_back(index[i]);
raw_window_bounds.push_back(updates_shape.dimensions(i));
} else {
input_scatter_multidim.push_back(index[i]);
}
}
DCHECK_EQ(raw_window_multidim.size(),
scatter_dims.update_window_dims_size());
int64_t raw_window_multidim_idx = 0;
llvm::SmallVector<llvm::Value*> input_window_multidim;
llvm::SmallVector<int64_t> input_window_bounds;
const int64_t rank = operand_shape.rank();
input_window_bounds.reserve(rank);
input_window_multidim.reserve(rank);
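    // Rebuild a full-rank window position: inserted_window_dims contribute a
    // degenerate size-1 dimension, every other dimension takes the next
    // window coordinate collected above.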
llvm::ArrayRef<int64_t> inserted_window_dims =
get_i64_array(scatter_dims.inserted_window_dims());
for (int64_t i = 0; i != rank; ++i) {
if (llvm::is_contained(inserted_window_dims, i)) {
input_window_bounds.push_back(1);
input_window_multidim.push_back(index.GetConstantWithIndexType(0));
} else {
input_window_bounds.push_back(
raw_window_bounds[raw_window_multidim_idx]);
input_window_multidim.push_back(
raw_window_multidim[raw_window_multidim_idx]);
++raw_window_multidim_idx;
}
}
DCHECK_EQ(input_window_multidim.size(), operand_shape.rank());
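    // When index_vector_dim equals the rank of scatter_indices, the indices
    // are treated as if they had a trailing dimension of size 1.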
Shape scatter_indices_shape_fixed = scatter_indices_shape;
if (scatter_dims.index_vector_dim() == scatter_indices_shape.rank()) {
scatter_indices_shape_fixed.add_dimensions(1);
scatter_indices_shape_fixed.mutable_layout()->add_minor_to_major(
scatter_dims.index_vector_dim());
}
std::vector<llvm::Value*> raw_scatter_index_multidim =
input_scatter_multidim;
raw_scatter_index_multidim.insert(
raw_scatter_index_multidim.begin() + scatter_dims.index_vector_dim(),
nullptr);
llvm::ArrayRef<int64_t> scatter_dims_to_operand_dims =
get_i64_array(scatter_dims.scatter_dims_to_operand_dims());
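    // For each indexed operand dimension, read the scatter index, offset the
    // window start by it, and fold a bounds check into is_in_bounds so
    // out-of-range updates are silently dropped.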
llvm::Value* is_in_bounds = builder->getTrue();
for (int64_t i = 0, e = scatter_dims_to_operand_dims.size(); i != e; ++i) {
raw_scatter_index_multidim[scatter_dims.index_vector_dim()] =
index.GetConstantWithIndexType(i);
llvm_ir::IrArray::Index raw_scatter_index_index(
raw_scatter_index_multidim, scatter_indices_shape_fixed,
index.GetType());
int64_t operand_dim = scatter_dims_to_operand_dims[i];
if (operand_dim > rank) {
return absl::OutOfRangeError(
"The provided scatter_dims_to_operand_dims was out of range.");
}
TF_ASSIGN_OR_RETURN(
llvm::Value* const loaded_scatter_index,
scatter_indices_gen(raw_scatter_index_index.SourceIndexOfReshape(
scatter_indices_shape_fixed, scatter_indices_shape, builder)));
llvm::Value* casted_scatter_index = builder->CreateIntCast(
loaded_scatter_index, index.GetType(),
ShapeUtil::ElementIsSigned(scatter_indices_shape));
llvm::Value* dim_offset = builder->CreateAdd(
input_window_multidim[operand_dim], casted_scatter_index);
input_window_multidim[operand_dim] = dim_offset;
int64_t max_index = operand_shape.dimensions(operand_dim) -
input_window_bounds[operand_dim] + 1;
is_in_bounds = builder->CreateAnd(
is_in_bounds,
builder->CreateICmpULT(casted_scatter_index,
index.GetConstantWithIndexType(max_index)));
}
llvm_ir::LlvmIfData if_window_in_bounds_data = llvm_ir::EmitIfThenElse(
is_in_bounds, "scatter.in_bounds", builder, false);
llvm_ir::SetToFirstInsertPoint(if_window_in_bounds_data.true_block,
builder);
llvm_ir::IrArray::Index input_window_index(
input_window_multidim, outputs.back().GetShape(), index.GetType());
llvm::Value* output_address =
outputs.back().EmitArrayElementAddress(input_window_index, builder);
llvm::Value* input_address = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(updates_shape.element_type(),
ir_emitter_context.llvm_module()),
"input_address", builder);
TF_ASSIGN_OR_RETURN(llvm::Value* const input_ir_value, updates_gen(index));
builder->CreateStore(input_ir_value, input_address);
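    // With unique indices the update computation can be applied directly;
    // otherwise it must run atomically, since several updates may target the
    // same output element.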
if (root->unique_indices()) {
return CallNestedComputation(
builder, ir_emitter_context, update_computation,
{output_address, input_address}, output_address);
}
return EmitAtomicOperationForNestedComputation(
builder, ir_emitter_context, update_computation, output_address,
input_address, outputs.back().GetElementLlvmType());
};
auto index_type =
GetIndexTypeForKernel(root, launch_dims.launch_bound(), builder);
return ParallelLoopEmitter(loop_body_emitter, updates_shape, launch_dims,
builder)
.EmitLoop(name, index_type);
}
std::optional<IndexingMap> ScatterFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const {
const auto* scatter =
DynCast<HloScatterInstruction>(&analysis_.fusion_hero(0).instruction());
int64_t scatter_operand_count = scatter->scatter_operand_count();
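  // No static mapping exists for the scatter operands themselves: the
  // positions they are read at depend on the runtime index values.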
if (hero_operand_index < scatter_operand_count) {
return std::nullopt;
}
Shape scatter_update_shape = scatter->scatter_updates().front()->shape();
IndexingMap scatter_update_map = GetDefaultThreadIdIndexingMap(
launch_dimensions(), config_.unroll_factor, scatter_update_shape, ctx);
if (hero_operand_index == scatter_operand_count) {
Shape scatter_indices_shape = scatter->scatter_indices()->shape();
CHECK_EQ(scatter_indices_shape.rank(), 2) << scatter->ToString();
IndexingMap updates_to_indices_map{
mlir::AffineMap::get(
scatter_update_shape.rank(), 1,
{mlir::getAffineDimExpr(0, ctx), mlir::getAffineSymbolExpr(0, ctx)},
ctx),
DimVarsFromTensorSizes(scatter_update_shape.dimensions()),
RangeVarsFromTensorSizes({scatter_indices_shape.dimensions(1)}),
{}};
auto scatter_indices_map = scatter_update_map * updates_to_indices_map;
scatter_indices_map.Simplify();
return scatter_indices_map;
}
return scatter_update_map;
}
}
} | #include <limits>
#include <vector>
#include "absl/strings/substitute.h"
#include "xla/array2d.h"
#include "xla/error_spec.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
namespace xla {
namespace {
class ScatterTest : public HloTestBase {
protected:
void RunTest(const std::string& hlo_text, Literal* operand,
Literal* scatter_indices, Literal* updates) {
RunTest(hlo_text, {operand, scatter_indices, updates});
}
void RunTest(const std::string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompare(std::move(module), args, std::nullopt));
}
};
XLA_TEST_F(ScatterTest, TensorFlowScatterV1_Update) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatterV1_WithFusedAdds) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 1});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatterV2_Update) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV2
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[3,2] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={0},
inserted_window_dims={1},
scatter_dims_to_operand_dims={1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 30}, {40, 60}, {70, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatterV2_InversePermutation) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV2
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
permutation = s32[3,4] parameter(0)
reshape = s32[3,4,1] reshape(permutation)
operand = s32[3,4] iota(), iota_dimension=1
updates = s32[3,4,1,1] iota(), iota_dimension=1
iota = s32[3,4,1] iota(), iota_dimension=0
indices = s32[3,4,2] concatenate(iota, reshape), dimensions={2}
ROOT scatter = s32[3,4] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={2,3},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=2
}
)";
Literal permutation = LiteralUtil::CreateR2<int32_t>(
{{1, 3, 2, 0}, {3, 0, 2, 1}, {2, 3, 1, 0}});
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
auto actual = ExecuteAndTransfer(std::move(module), {&permutation});
Literal expected = LiteralUtil::CreateR2<int32_t>(
{{3, 0, 2, 1}, {1, 3, 2, 0}, {3, 2, 0, 1}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, actual));
}
XLA_TEST_F(ScatterTest, SimpleR4) {
const char* hlo_text = R"(
HloModule SimpleR4
add_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(f32[] lhs, f32[] rhs)
}
ENTRY main {
operand = f32[1,2,2,1] parameter(0)
indices = s32[1,3] parameter(1)
updates = f32[1,2,2,1] parameter(2)
ROOT scatter = f32[1,2,2,1] scatter(operand, indices, updates),
to_apply=add_f32,
update_window_dims={1,2,3},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0, 2, 1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR4<float>({{{{0.f}, {0.f}}, {{0.f}, {0.f}}}});
Literal updates =
LiteralUtil::CreateR4<float>({{{{0.12}, {0.28}}, {{0.018}, {0.42}}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0, 0}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_Add) {
const std::string hlo_text = R"(
HloModule TensorFlowScatter_Add
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_Add_UniqueIndices) {
const std::string hlo_text = R"(
HloModule TensorFlowScatter_Add
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
unique_indices=true
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_Mul) {
const std::string hlo_text = R"(
HloModule TensorFlowScatter_Mul
mul_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT mul = s32[] multiply(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=mul_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_F32) {
const std::string hlo_text = R"(
HloModule TensorFlowScatter_F32
add_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(f32[] lhs, f32[] rhs)
}
ENTRY main {
operand = f32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f32[2,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, updates),
to_apply=add_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand = LiteralUtil::CreateR2<float>(
{{1.1, 2.2, 3.3}, {4.4, 5.5, 6.6}, {7.7, 8.8, 9.9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({2, 1});
Literal updates =
LiteralUtil::CreateR2<float>({{0.4, 1.1, 0.7}, {2.3, 3.1, 1.6}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_F16) {
const std::string hlo_text = R"(
HloModule TensorFlowScatter_F16
add_f16 (lhs: f16[], rhs: f16[]) -> f16[] {
lhs = f16[] parameter(0)
rhs = f16[] parameter(1)
ROOT add = f16[] add(f16[] lhs, f16[] rhs)
}
ENTRY main {
operand = f16[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f16[2,3] parameter(2)
ROOT scatter = f16[3,3] scatter(operand, indices, updates),
to_apply=add_f16,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Array2D<Eigen::half> operand_array(
{{1.1f, 2.2f, 3.3f}, {4.4f, 5.5f, 6.6f}, {7.7f, 8.8f, 9.9f}});
Literal operand(ShapeUtil::MakeShape(F16, {3, 3}));
operand.PopulateR2FromArray2D(operand_array);
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({2, 1});
Array2D<Eigen::half> updates_array({{0.4f, 1.1f, 0.7f}, {2.3f, 3.1f, 1.6f}});
Literal updates(ShapeUtil::MakeShape(F16, {2, 3}));
updates.PopulateR2FromArray2D(updates_array);
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_RepeatedIndices) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_MultipleBatchDims) {
const char* hlo_text = R"(
HloModule TensorFlowScatterMultipleBatchDims
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,3,2] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={1},
scatter_dims_to_operand_dims={1},
index_vector_dim=2
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}, {2, 1}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10, 30}, {40, 60}, {70, 90}}, {{5, 5}, {5, 5}, {5, 5}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatterNd) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNd
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-40, 40}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatterNdS64) {
constexpr char hlo_text[] = R"(
HloModule S64Scatter
update {
lhs = s64[] parameter(0)
ROOT rhs = s64[] parameter(1)
}
ENTRY main {
operand = s64[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s64[2,2] parameter(2)
ROOT scatter = s64[3,3,2] scatter(operand, indices, updates),
to_apply=update,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR3<int64_t>({{{-1, 1LL << 62}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6LL << 59}},
{{-7, 7}, {-8, 8LL << 49}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates =
LiteralUtil::CreateR2<int64_t>({{-10, 10LL << 46}, {-(4LL << 38), 40}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, TensorFlowScatterNd_NonDefaultIndexVectorDim) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNdNonDefaultIndexVectorDim
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-20, 20}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, DynamicUpdateSlice) {
const char* hlo_text = R"(
HloModule DynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[1,1] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
Literal updates = LiteralUtil::CreateR2<int32_t>({{10}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, BatchDynamicUpdateSlice) {
const char* hlo_text = R"(
HloModule BatchDynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,1,1] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{2, 1}, {1, 1}});
Literal updates = LiteralUtil::CreateR3<int32_t>({{{10}}, {{20}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, ZeroDimBounds) {
const char* hlo_text = R"(
HloModule TensorFlowScatter_ZeroDimBounds
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,0] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,0] parameter(2)
ROOT scatter = s32[3,0] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>({{}, {}, {}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates = LiteralUtil::CreateR2<int32_t>({{}, {}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, NoUpdateWindowDims) {
const std::string hlo_text = R"(
HloModule Scatter_NoUpdateWindowDims
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3] parameter(0)
indices = s32[2,2,1] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2});
Literal scatter_indices =
LiteralUtil::CreateR3<int32_t>({{{0}, {1}}, {{2}, {1}}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, OutOfBoundsIndex) {
const std::string hlo_text = R"(
HloModule BatchDynamicSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3]{1,0} parameter(0)
indices = s32[6,2]{1,0} parameter(1)
updates = s32[6,1,1]{2,1,0} parameter(2)
ROOT scatter = s32[3,3]{1,0} scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>(
{{2, 7}, {2, 1}, {1, 1}, {5, 1}, {2147483647, 1}, {1, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10}}, {{20}}, {{30}}, {{40}}, {{50}}, {{60}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, OutOfBoundsUnsignedIndex) {
const std::string hlo_text = R"(
HloModule BatchDynamicSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3]{1,0} parameter(0)
indices = u32[6,2]{1,0} parameter(1)
updates = s32[6,1,1]{2,1,0} parameter(2)
ROOT scatter = s32[3,3]{1,0} scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<uint32_t>(
{{2, 7}, {2, 1}, {1, 1}, {5, 1}, {2147483648u, 1}, {1, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10}}, {{20}}, {{30}}, {{40}}, {{50}}, {{60}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, U8Index) {
const std::string hlo_text = R"(
HloModule BatchDynamicSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[129,3]{1,0} parameter(0)
indices = u8[6,2]{1,0} parameter(1)
updates = s32[6,1,1]{2,1,0} parameter(2)
ROOT scatter = s32[129,3]{1,0} scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateRandomLiteral<S32>(ShapeUtil::MakeShape(S32, {129, 3}),
500, 100)
.value();
Literal scatter_indices = LiteralUtil::CreateR2<uint8_t>(
{{2, 7}, {2, 1}, {1, 1}, {5, 1}, {0x80, 1}, {1, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10}}, {{20}}, {{30}}, {{40}}, {{50}}, {{60}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, NegativeIndex) {
const std::string hlo_text = R"(
HloModule BatchDynamicSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3]{1,0} parameter(0)
indices = s32[6,2]{1,0} parameter(1)
updates = s32[6,1,1]{2,1,0} parameter(2)
ROOT scatter = s32[3,3]{1,0} scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices =
LiteralUtil::CreateR2<int32_t>({{2, 7},
{2, 1},
{1, 1},
{-500, 1},
{static_cast<int32_t>(-2147483648), 1},
{1, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10}}, {{20}}, {{30}}, {{40}}, {{50}}, {{60}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, OutOfBoundsUpdateWindow) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNd_OobUpdateWindow
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[1,2] parameter(1)
updates = s32[1,2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-40, 40}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, OneScalarIndex) {
const char* hlo_text = R"(
HloModule OneScalarIndex
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[2,3,2]{2,1,0} parameter(0)
index = s32[] parameter(1)
updates = s32[1,3,2]{2,1,0} parameter(2)
ROOT scatter = s32[2,3,2]{2,1,0} scatter(operand, index, updates),
to_apply=update_s32,
update_window_dims={0,1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=0
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}});
Literal scatter_indices = LiteralUtil::CreateR0<int32_t>(1);
Literal updates =
LiteralUtil::CreateR3<int32_t>({{{10, 20}, {30, 40}, {50, 60}}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, ScalarUpdate) {
const char* hlo_text = R"(
HloModule ScalarUpdate
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[4]{0} parameter(0)
index = s32[] parameter(1)
updates = s32[] parameter(2)
ROOT scatter = s32[4]{0} scatter(operand, index, updates),
to_apply=update_s32,
update_window_dims={},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=0
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4});
Literal scatter_indices = LiteralUtil::CreateR0<int32_t>(1);
Literal updates = LiteralUtil::CreateR0<int32_t>(25);
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, EmptyIndices) {
const std::string hlo_text = R"(
HloModule EmptyIndices
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3] parameter(0)
indices = s32[0] parameter(1)
updates = s32[0] parameter(2)
ROOT scatter = s32[3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({});
Literal updates = LiteralUtil::CreateR1<int32_t>({});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, ScatterIntoScalar) {
const char* hlo_text = R"(
HloModule ScatterIntoScalar
update_s32 {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
parameter.1 = s32[] parameter(0)
parameter.2 = s32[0]{0} parameter(1)
parameter.3 = s32[] parameter(2)
ROOT scatter = s32[] scatter(parameter.1, parameter.2, parameter.3),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR0<int32_t>(1);
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({});
Literal updates = LiteralUtil::CreateR0<int32_t>(2);
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
XLA_TEST_F(ScatterTest, DISABLED_ON_GPU(Multioutput)) {
if (IsMlirLoweringEnabled()) {
GTEST_SKIP() << "Variadic scatter not supported by MLIR";
}
constexpr char hlo_text[] = R"(
HloModule MultioutputScatter
update {
lhs0 = s32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = s32[] parameter(2)
rhs1 = f32[] parameter(3)
ROOT tuple = (s32[], f32[]) tuple(rhs0, rhs1)
}
ENTRY main {
operand0 = s32[3,3,2] parameter(0)
operand1 = f32[3,3,2] parameter(1)
indices = s32[2,2] parameter(2)
updates0 = s32[2,2] parameter(3)
updates1 = f32[2,2] parameter(4)
ROOT scatter = (s32[3,3,2], f32[3,3,2]) scatter(operand0, operand1, indices, updates0, updates1),
to_apply=update,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
Literal operand0 =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal operand1 =
LiteralUtil::CreateR3<float>({{{-2, 2}, {-3, 3}, {-4, 4}},
{{-5, 5}, {-6, 6}, {-7, 7}},
{{-8, 8}, {-9, 9}, {-10, 10}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates0 = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-40, 40}});
Literal updates1 = LiteralUtil::CreateR2<float>({{-11, 11}, {-41, 41}});
RunTest(hlo_text,
{&operand0, &operand1, &scatter_indices, &updates0, &updates1});
}
XLA_TEST_F(ScatterTest, TensorFlowScatter_Max_F32) {
const std::string hlo_text = R"(
HloModule TensorFlowScatter_Max_F32
max_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT max = f32[] maximum(f32[] lhs, f32[] rhs)
}
ENTRY main {
operand = f32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f32[2,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, updates),
to_apply=max_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
Literal operand = LiteralUtil::CreateR2<float>(
{{1.1, 2.2, 3.3}, {4.4, 5.5, 6.6}, {7.7, 8.8, 9.9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({2, 1});
Literal updates =
LiteralUtil::CreateR2<float>({{0.4, 1.1, 0.7}, {2.3, 3.1, 1.6}});
RunTest(hlo_text, &operand, &scatter_indices, &updates);
}
class ScatterEdgeCaseTestP
: public ScatterTest,
      public ::testing::WithParamInterface<absl::string_view> {};
XLA_TEST_P(ScatterEdgeCaseTestP, DoIt) {
using L = std::numeric_limits<float>;
std::vector<float> edge_cases = {
0.f,
-0.f,
-1.f,
1.f,
L::min(),
-L::min(),
L::max(),
L::lowest(),
L::epsilon(),
L::infinity(),
-L::infinity(),
L::quiet_NaN(),
-L::quiet_NaN(),
};
int n = edge_cases.size();
float init_value;
absl::string_view operation = GetParam();
if (operation == "minimum") {
init_value = L::infinity();
} else if (operation == "maximum") {
init_value = -L::infinity();
} else if (operation == "add") {
init_value = 0;
} else {
FAIL() << "Invalid operation " << operation;
}
const std::string hlo_text = absl::Substitute(R"(
HloModule test
max_f32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT max = maximum(lhs, rhs)
}
ENTRY main {
init = f32[$0, $0] broadcast(f32[] constant($2))
indices = s32[$1] parameter(0)
updates = f32[$1, $0] parameter(1)
ROOT scatter = f32[$0, $0] scatter(init, indices, updates),
to_apply=max_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)",
n, 2 * n, init_value);
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_enable_fast_min_max(false);
HloModuleConfig config;
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text, config));
1,145 | cpp | tensorflow/tensorflow | trt_convert_api | tensorflow/compiler/tf2tensorrt/trt_convert_api.cc | tensorflow/compiler/tf2tensorrt/trt_convert_api_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_TRT_CONVERT_API_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_TRT_CONVERT_API_H_
#include <climits>
#include <string>
#include <vector>
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/trt_parameters.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
struct SavedModelBundle;
namespace tensorrt {
struct TfTrtConversionParams {
#if IS_TRT_VERSION_GE(8, 4, 0, 0)
size_t max_workspace_size_bytes = LLONG_MAX - 512;
#else
size_t max_workspace_size_bytes = 1 << 30;
#endif
TrtPrecisionMode precision_mode = TrtPrecisionMode::FP32;
int minimum_segment_size = 3;
int max_cached_engines = 1;
bool use_calibration = true;
bool use_dynamic_shape = true;
ProfileStrategy profile_strategy = ProfileStrategy::kRange;
bool allow_build_at_runtime = true;
bool convert_to_static_engine = true;
};
StatusOr<GraphDef> ConvertAndBuild(
const GraphDef& frozen_graph_def, const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
const TfTrtConversionParams& conv_params);
StatusOr<GraphDef> ConvertAndBuild(
SavedModelBundle* bundle,
const std::string& signature_key = "serving_default",
const std::vector<std::vector<tensorflow::Tensor>>& inputs = {},
const TfTrtConversionParams& conversion_params = TfTrtConversionParams());
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include <iostream>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/cc/tools/freeze_saved_model.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace {
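// Provisions a single-machine Grappler cluster over the local CPUs and GPUs;
// ownership of the returned cluster passes to the caller.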
Status NewCluster(grappler::Cluster** cluster) {
int num_cpu_cores = grappler::GetNumAvailableLogicalCPUCores();
int num_gpus = grappler::GetNumAvailableGPUs();
int timeout_s = 60 * 10;
*cluster = new grappler::SingleMachine(timeout_s, num_cpu_cores, num_gpus);
(*cluster)->DisableDetailedStats(true);
(*cluster)->AllowSoftPlacement(true);
(*cluster)->SetNumWarmupSteps(10);
TF_RETURN_IF_ERROR((*cluster)->Provision());
return OkStatus();
}
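// Runs the Grappler meta-optimizer on the MetaGraphDef with the given feed
// and fetch nodes and writes the optimized graph to out_graph_def.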
Status RunGrappler(const MetaGraphDef& meta_graph_def,
const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const ConfigProto& config_proto, grappler::Cluster* cluster,
GraphDef* out_graph_def) {
grappler::ItemConfig item_config;
for (const string& name : input_names) {
item_config.feed_nodes.insert(name);
}
for (const string& name : output_names) {
item_config.fetch_nodes.insert(name);
}
std::unique_ptr<grappler::GrapplerItem> item =
grappler::GrapplerItemFromMetaGraphDef("tf_graph", meta_graph_def,
item_config);
if (!item) {
return tensorflow::errors::Internal(
"Failed to create grappler item from MetaGraphDef.");
}
tensorflow::DeviceBase* cpu_device = nullptr;
TF_RETURN_IF_ERROR(grappler::RunMetaOptimizer(
std::move(*item), config_proto, cpu_device, cluster, out_graph_def));
VLOG(2) << "Grappler finished\n";
return OkStatus();
}
Status ImportGraphDefToSession(Session* session, const GraphDef& graph_def,
const string& prefix) {
ImportGraphDefOptions opts;
opts.prefix = prefix;
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef(opts, graph_def, &graph, nullptr));
GraphDef new_graph_def;
graph.ToGraphDef(&new_graph_def);
TF_RETURN_IF_ERROR(session->Extend(new_graph_def));
return OkStatus();
}
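// Builds a RewriterConfig that runs the standard pre-passes followed by the
// TensorRTOptimizer, with the optimizer's parameter map filled in from the
// user-supplied conversion parameters.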
Status GetTrtRewriterConfig(const TfTrtConversionParams& params,
const GraphDef& frozen_graph_def,
RewriterConfig* opt_config) {
opt_config->set_meta_optimizer_iterations(tensorflow::RewriterConfig::ONE);
opt_config->set_min_graph_nodes(-1);
opt_config->set_remapping(RewriterConfig_Toggle::RewriterConfig_Toggle_OFF);
opt_config->set_experimental_disable_folding_quantization_emulation(
IS_TRT_VERSION_GE(8, 0, 0, 0));
opt_config->add_optimizers("function");
opt_config->add_optimizers("constfold");
opt_config->add_optimizers("layout");
opt_config->add_optimizers("constfold");
auto trt_optimizer = opt_config->add_custom_optimizers();
trt_optimizer->set_name("TensorRTOptimizer");
auto trt_parameter_map = trt_optimizer->mutable_parameter_map();
(*trt_parameter_map)["is_dynamic_op"].set_b(true);
(*trt_parameter_map)["minimum_segment_size"].set_i(
params.minimum_segment_size);
string prec_string;
TF_RETURN_IF_ERROR(
TrtPrecisionModeToName(params.precision_mode, &prec_string));
(*trt_parameter_map)["precision_mode"].set_s(prec_string);
(*trt_parameter_map)["max_batch_size"].set_i(1);
(*trt_parameter_map)["max_workspace_size_bytes"].set_i(
params.max_workspace_size_bytes);
(*trt_parameter_map)["max_cached_engines"].set_i(params.max_cached_engines);
(*trt_parameter_map)["use_calibration"].set_b(params.use_calibration);
(*trt_parameter_map)["profile_strategy"].set_s(
ProfileStrategyToName(params.profile_strategy));
(*trt_parameter_map)["use_implicit_batch"].set_b(!params.use_dynamic_shape);
(*trt_parameter_map)["_allow_build_at_runtime"].set_b(
params.allow_build_at_runtime);
return OkStatus();
}
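// Runs grappler with the TF-TRT rewriter config so that supported subgraphs
// are replaced by TRTEngineOp nodes in segmented_graph_def.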
Status RunTfTrt(const MetaGraphDef& meta_graph_def,
const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const RewriterConfig& rewriter_config,
GraphDef* segmented_graph_def) {
ConfigProto config_proto;
*config_proto.mutable_graph_options()->mutable_rewrite_options() =
rewriter_config;
VLOG(4) << "Setting up Grappler parameters\n" << config_proto.DebugString();
std::unique_ptr<grappler::Cluster> cluster;
grappler::Cluster* p_cluster;
mutex mu_cluster;
mutex_lock lock(mu_cluster);
TF_RETURN_IF_ERROR(NewCluster(&p_cluster));
cluster.reset(p_cluster);
TF_RETURN_IF_ERROR(RunGrappler(meta_graph_def, input_names, output_names,
config_proto, cluster.get(),
segmented_graph_def));
TF_RETURN_IF_ERROR(cluster->Shutdown());
return OkStatus();
}
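// Sets the _profile_generation_mode attribute on every TRTEngineOp node.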
Status SetProfileGenerationMode(GraphDef* graph_def, bool mode) {
VLOG(3) << "Setting _profile_generation_mode=" << mode;
std::string op{"TRTEngineOp"};
for (auto& node : *(graph_def->mutable_node())) {
if (!op.compare(node.op())) {
auto* attr = node.mutable_attr();
AttrValue profile_generation_mode;
profile_generation_mode.set_b(mode);
(*attr)["_profile_generation_mode"] = profile_generation_mode;
}
}
return OkStatus();
}
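// Runs one inference step, feeding input_tensors to input_names and fetching
// output_names; an optional prefix is prepended to all node names (used after
// the graph has been re-imported under that prefix).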
Status RunSession(Session* session, const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const std::vector<Tensor>& input_tensors,
string prefix = "") {
TRT_ENSURE(!input_names.empty());
TRT_ENSURE(!output_names.empty());
TRT_ENSURE(!input_tensors.empty());
std::vector<std::pair<std::string, tensorflow::Tensor>> input_pairs;
std::vector<std::string> prefixed_output_names;
auto prefixed_name = [](std::string prefix, std::string name) {
return !prefix.empty() ? absl::StrJoin({prefix, name}, "/") : name;
};
for (int i = 0; i < input_names.size(); i++) {
input_pairs.push_back(
{prefixed_name(prefix, input_names.at(i)), input_tensors.at(i)});
}
for (int i = 0; i < output_names.size(); i++) {
prefixed_output_names.push_back(prefixed_name(prefix, output_names.at(i)));
}
std::vector<tensorflow::Tensor> output_tensors;
for (int i = 0; i < output_names.size(); i++) {
output_tensors.push_back({});
}
VLOG(3) << "TF-TRT Build mode: running inference\n";
TF_RETURN_IF_ERROR(
session->Run(input_pairs, prefixed_output_names, {}, &output_tensors));
return OkStatus();
}
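// Builds the TRT engines by executing the segmented graph on the provided
// inputs. With dynamic shapes and multiple input sets, shapes are first
// collected with _profile_generation_mode enabled, then the graph is
// re-imported under the "TrtBuildStep" prefix and run once more to build the
// engines.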
Status Build(GraphDef& segmented_graph_def,
const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
Session* session, const TfTrtConversionParams params) {
VLOG(2) << "Building the model";
bool need_collect_profiles = params.use_dynamic_shape && inputs.size() > 1;
if (need_collect_profiles) {
TF_RETURN_IF_ERROR(SetProfileGenerationMode(&segmented_graph_def, true));
}
TF_RETURN_IF_ERROR(session->Create(segmented_graph_def));
string prefix = "";
if (need_collect_profiles) {
for (auto const& input : inputs) {
TF_RETURN_IF_ERROR(RunSession(session, input_names, output_names, input));
}
prefix = "TrtBuildStep";
TF_RETURN_IF_ERROR(SetProfileGenerationMode(&segmented_graph_def, false));
VLOG(3) << "Importing graph with _profile_generation_mode disabled";
TF_RETURN_IF_ERROR(
ImportGraphDefToSession(session, segmented_graph_def, prefix));
}
TF_RETURN_IF_ERROR(
RunSession(session, input_names, output_names, *inputs.begin(), prefix));
return OkStatus();
}
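// Returns the resource manager of the device the node is placed on,
// defaulting to GPU:0 when the node has no device assignment.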
Status GetResourceManager(const NodeDef& node, Session* session,
ResourceMgr** rm) {
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session->LocalDeviceManager(&device_mgr));
Device* device;
string device_name = node.device().empty()
? "/job:localhost/replica:0/task:0/device:GPU:0"
: node.device();
TF_RETURN_IF_ERROR(device_mgr->LookupDevice(device_name, &device));
*rm = device->resource_manager();
return OkStatus();
}
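// Looks up the TRTEngineCacheResource for the given TRTEngineOp node in the
// TF-TRT resource container; fails if the engine cache is empty.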
Status GetEngineCacheResource(const NodeDef& node, Session* session,
TRTEngineCacheResource** resource) {
ResourceMgr* rm;
TF_RETURN_IF_ERROR(GetResourceManager(node, session, &rm));
absl::string_view resource_name = node.name();
size_t last_slash = resource_name.find_last_of('/');
if (last_slash != absl::string_view::npos) {
resource_name.remove_prefix(last_slash + 1);
}
const std::string container(kTfTrtContainerName);
*resource = nullptr;
TF_RETURN_IF_ERROR(
rm->Lookup(container, std::string(resource_name), resource));
if (resource == nullptr || (*resource)->cache_.size() == 0) {
return errors::Internal("Engine cache not found for ", resource_name);
}
return OkStatus();
}
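// Serializes the cached CUDA engine of the given TRTEngineOp node; fails if
// more than one engine is cached.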
Status ReadSerializedEngine(
const NodeDef& node, Session* session,
TrtUniquePtrType<nvinfer1::IHostMemory>* engine_data) {
TRTEngineCacheResource* resource;
TF_RETURN_IF_ERROR(GetEngineCacheResource(node, session, &resource));
core::ScopedUnref unref_cache_res(resource);
if (resource->cache_.size() > 1) {
return errors::Internal(
"Multiple engines found, but we can only serialize one");
}
const std::unique_ptr<EngineContext>& engine =
resource->cache_.begin()->second;
if (!engine) {
return errors::Internal("Engine not found for ", node.name());
}
if (engine->GetCudaEngine()) {
engine_data->reset(engine->GetCudaEngine()->serialize());
} else {
LOG(WARNING) << "Engine cache contains nullptr";
}
return OkStatus();
}
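// Copies graph_def and, for each TRTEngineOp, sets static_engine=true and
// stores the serialized engine in the serialized_segment attribute.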
Status ConvertToStaticEngine(const GraphDef graph_def,
GraphDef* static_graph_def, Session* session) {
*static_graph_def = graph_def;
VLOG(1) << "Saving TRT engines as static engine";
std::string op{"TRTEngineOp"};
for (auto& node : *(static_graph_def->mutable_node())) {
if (!op.compare(node.op())) {
VLOG(2) << "Saving TRT engine for " << node.name()
<< ", device: " << node.device();
TrtUniquePtrType<nvinfer1::IHostMemory> engine_data;
TF_RETURN_IF_ERROR(ReadSerializedEngine(node, session, &engine_data));
auto* attr = node.mutable_attr();
AttrValue static_engine;
static_engine.set_b(true);
AttrValue engine_string;
if (engine_data) {
engine_string.set_s(engine_data->data(), engine_data->size());
}
(*attr)["static_engine"] = static_engine;
(*attr)["serialized_segment"] = engine_string;
}
}
return OkStatus();
}
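// Validates the conversion parameters against the number of provided input
// sets and warns about configurations that skip the build step.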
Status ValidateConversionParams(const TfTrtConversionParams& p, int n_inputs) {
if (p.precision_mode == TrtPrecisionMode::INT8 && p.use_calibration) {
return errors::InvalidArgument(
"Calibration not yet implemented through the C++ interface. Please use "
"our Python API for calibration.");
}
if (p.convert_to_static_engine && n_inputs == 0) {
return errors::InvalidArgument(
"TRT Engine needs to be built before we can convert it to static "
"engine. Please provide input data to build the model.");
}
if (!p.convert_to_static_engine && n_inputs >= 0) {
LOG(WARNING)
<< "Skipping build mode because we cannot save the "
"engines. Use convert_to_static_engines=true conversion "
"parameter to enable build mode and save the engines in the graph.";
}
if (!p.allow_build_at_runtime && n_inputs == 0) {
LOG(WARNING)
<< "TRT will not be used since allow_build_at_runtime is disabled and "
"no inputs are provided to build during conversion.";
}
return OkStatus();
}
tensorflow::SessionOptions GetSessionConfig() {
tensorflow::SessionOptions opts;
auto* rewriter_opts =
opts.config.mutable_graph_options()->mutable_rewrite_options();
rewriter_opts->set_experimental_disable_folding_quantization_emulation(true);
rewriter_opts->set_disable_meta_optimizer(true);
opts.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
return opts;
}
}
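// Converts a frozen GraphDef with TF-TRT and, when inputs are provided and
// convert_to_static_engine is set, builds the engines and embeds them into
// the returned graph. Illustrative usage (mirrors the accompanying unit
// test):
//   StatusOr<GraphDef> converted = ConvertAndBuild(
//       frozen_graph_def, {"input"}, {"output"}, {{input_tensor}}, params);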
StatusOr<GraphDef> ConvertAndBuild(
const GraphDef& frozen_graph_def, const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
const TfTrtConversionParams& conv_params) {
TF_RETURN_IF_ERROR(ValidateConversionParams(conv_params, inputs.size()));
MetaGraphDef meta_graph;
*meta_graph.mutable_graph_def() = frozen_graph_def;
RewriterConfig rewriter_config;
TF_RETURN_IF_ERROR(
GetTrtRewriterConfig(conv_params, frozen_graph_def, &rewriter_config));
GraphDef segmented_graph_def;
TF_RETURN_IF_ERROR(RunTfTrt(meta_graph, input_names, output_names,
rewriter_config, &segmented_graph_def));
GraphDef output;
if (!inputs.empty() && conv_params.convert_to_static_engine) {
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(GetSessionConfig()));
if (!session) {
return errors::Internal("Failed to create build session");
}
TF_RETURN_IF_ERROR(Build(segmented_graph_def, input_names, output_names,
inputs, session.get(), conv_params));
TF_RETURN_IF_ERROR(
ConvertToStaticEngine(segmented_graph_def, &output, session.get()));
} else {
output = segmented_graph_def;
}
VLOG(1) << "TF-TRT conversion finished";
return output;
}
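// Runs the grappler "function" optimizer to inline function calls in the
// graph.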
Status InlineFunctions(const MetaGraphDef& meta_graph_def,
GraphDef* out_graph_def) {
ConfigProto config_proto;
auto opt_config =
config_proto.mutable_graph_options()->mutable_rewrite_options();
opt_config->set_meta_optimizer_iterations(tensorflow::RewriterConfig::ONE);
opt_config->set_min_graph_nodes(-1);
opt_config->add_optimizers("function");
TF_RETURN_IF_ERROR(RunGrappler(meta_graph_def, {}, {}, config_proto, nullptr,
out_graph_def));
VLOG(2) << "Graph is inlined";
return OkStatus();
}
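// Freezes the SavedModel graph (variables become constants) and returns the
// resulting MetaGraphDef.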
Status FreezeGraph(SavedModelBundle& bundle, MetaGraphDef* frozen_meta_graph) {
std::unordered_set<std::string> inputs;
std::unordered_set<std::string> outputs;
GraphDef frozen_graph_def;
TF_RETURN_IF_ERROR(
FreezeSavedModel(bundle, &frozen_graph_def, &inputs, &outputs));
*frozen_meta_graph = bundle.meta_graph_def;
GraphDef* gdef = frozen_meta_graph->mutable_graph_def();
*gdef = frozen_graph_def;
VLOG(2) << "Graph frozen";
return OkStatus();
}
std::vector<std::string> GetNodeNames(
const google::protobuf::Map<std::string, tensorflow::TensorInfo>& signature) {
std::vector<std::string> names;
for (auto const& item : signature) {
absl::string_view name = item.second.name();
size_t last_colon = name.find_last_of(':');
if (last_colon != absl::string_view::npos) {
name.remove_suffix(name.size() - last_colon);
}
names.push_back(std::string(name));
}
return names;
}
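// SavedModel entry point: inlines functions, freezes the graph, then converts
// and builds using the input/output tensor names of the requested signature.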
StatusOr<GraphDef> ConvertAndBuild(
SavedModelBundle* bundle, const std::string& signature_key,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
const TfTrtConversionParams& conversion_params) {
GraphDef inlined_graph_def;
TF_RETURN_IF_ERROR(
InlineFunctions(bundle->meta_graph_def, &inlined_graph_def));
*bundle->meta_graph_def.mutable_graph_def() = inlined_graph_def;
MetaGraphDef frozen_meta_graph;
TF_RETURN_IF_ERROR(FreezeGraph(*bundle, &frozen_meta_graph));
auto signature_map = bundle->GetSignatures();
const tensorflow::SignatureDef& signature = signature_map[signature_key];
std::vector<std::string> input_names = GetNodeNames(signature.inputs());
std::vector<std::string> output_names = GetNodeNames(signature.outputs());
return ConvertAndBuild(frozen_meta_graph.graph_def(), input_names,
output_names, inputs, conversion_params);
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/state_ops.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace tensorrt {
struct TestParam {
TfTrtConversionParams conv_params;
std::vector<std::vector<int64>> input_shapes;
};
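// Parameterized over (conversion params + input shapes, use_variable,
// use_function). Builds a small Mul/Add model, converts it with TF-TRT and
// checks that the converted graph contains a single TRTEngineOp producing the
// same outputs as the original graph.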
class TrtConverterTest
: public ::testing::TestWithParam<std::tuple<TestParam, bool, bool>> {
protected:
TrtConverterTest() {
param_ = std::get<0>(GetParam());
use_variable_ = std::get<1>(GetParam());
use_function_ = std::get<2>(GetParam());
input_tensors_ = GetInputTensors();
}
GraphDef GetGraphDef(PartialTensorShape input_shape) {
Scope root = Scope::NewRootScope();
Output c;
c = ops::Const(root.WithOpName("my_const"), {{42.0f, 137.0f}});
Output v;
if (use_variable_) {
Output v_handle = ops::VarHandleOp(root.WithOpName("my_var"),
DataType::DT_FLOAT, {1, 2});
v = ops::ReadVariableOp(root.WithOpName("my_var/Read/ReadVariableOp"),
v_handle, DataType::DT_FLOAT);
auto v_init =
ops::AssignVariableOp(root.WithOpName("my_var/init"), v_handle, c);
} else {
v = c;
}
const auto attrs = ops::Placeholder::Shape(input_shape);
auto x = ops::Placeholder(root.WithOpName("input"), DT_FLOAT, attrs);
auto y = ops::Mul(root.WithOpName("my_mul"), x, v);
auto z = ops::Add(root.WithOpName("my_add"), x, y);
auto q = ops::Identity(root.WithOpName("output"), z);
GraphDef out;
TF_CHECK_OK(root.ToGraphDef(&out));
return out;
}
GraphDef GetGraphWithFunction(PartialTensorShape input_shape) {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
const Tensor kOne = test::AsScalar<float>(1.0f);
TensorShapeProto value_shape_proto;
kOne.shape().AsProto(&value_shape_proto);
TensorShapeProto input_shape_proto;
input_shape.AsProto(&input_shape_proto);
NodeDef value_node;
if (use_variable_) {
value_node =
NDef("my_value", "Identity", {"my_var:0"}, {{"T", DT_RESOURCE}});
} else {
value_node =
NDef("my_value", "Identity", {"my_const:0"}, {{"T", DT_FLOAT}});
}
GraphDef gdef = GDef(
{
NDef("input", "Placeholder", {},
{{"dtype", DT_FLOAT}, {"shape", input_shape_proto}}),
NDef("my_const", "Const", {},
{{"dtype", DT_FLOAT}, {"value", kOne}}),
value_node,
NDef("call", "StatefulPartitionedCall", {"input", "my_value"},
{{"Tin", DataTypeSlice{DT_FLOAT, use_variable_ ? DT_RESOURCE
: DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("f", {})}}),
NDef("output", "Identity", {"call:0"}, {{"T", DT_FLOAT}}),
},
{});
FunctionDef fdef;
if (use_variable_) {
*gdef.add_node() =
NDef("my_var", "VarHandleOp", {},
{{"dtype", DT_FLOAT}, {"shape", value_shape_proto}});
*gdef.add_node() = NDef("my_var/init", "AssignVariableOp",
{"my_var", "my_const"}, {{"dtype", DT_FLOAT}});
*gdef.add_node() = NDef("my_var/Read/ReadVariableOp", "ReadVariableOp",
{"my_var"}, {{"dtype", DT_FLOAT}});
fdef = FunctionDefHelper::Define(
"f",
{"x: float", "v: resource"},
{"q: float"},
{},
{{{"my_var/Read/ReadVariableOp"},
"ReadVariableOp",
{"v"},
{{"dtype", DT_FLOAT}}},
{{"my_mul"},
"Mul",
{"x", "my_var/Read/ReadVariableOp"},
{{"T", DT_FLOAT}}},
{{"my_add"}, "AddV2", {"x", "my_mul"}, {{"T", DT_FLOAT}}},
{{"q"}, "Identity", {"my_add"}, {{"T", DT_FLOAT}}}});
} else {
fdef = FunctionDefHelper::Define(
"f",
{"x: float", "v: float"},
{"q: float"},
{},
{{{"my_mul"}, "Mul", {"x", "v"}, {{"T", DT_FLOAT}}},
{{"my_add"}, "AddV2", {"x", "my_mul"}, {{"T", DT_FLOAT}}},
{{"q"}, "Identity", {"my_add"}, {{"T", DT_FLOAT}}}});
}
*gdef.mutable_library()->add_function() = fdef;
return gdef;
}
MetaGraphDef GetModel() {
PartialTensorShape shape({-1, 2});
MetaGraphDef out;
if (use_function_) {
*(out.mutable_graph_def()) = GetGraphWithFunction(shape);
} else {
*(out.mutable_graph_def()) = GetGraphDef(shape);
}
VLOG(2) << out.graph_def().DebugString();
TensorShapeProto shape_proto;
shape.AsProto(&shape_proto);
SignatureDef signature_def;
(*signature_def.mutable_inputs())["input"].set_name("input:0");
(*signature_def.mutable_inputs())["input"].set_dtype(DT_FLOAT);
*(*signature_def.mutable_inputs())["input"].mutable_tensor_shape() =
shape_proto;
(*signature_def.mutable_outputs())["output"].set_name("output:0");
(*signature_def.mutable_outputs())["output"].set_dtype(DT_FLOAT);
*(*signature_def.mutable_outputs())["output"].mutable_tensor_shape() =
shape_proto;
(*out.mutable_signature_def())["serving_default"] = signature_def;
VLOG(2) << signature_def.DebugString();
return out;
}
Status GetSavedModelBundle(SavedModelBundle* bundle) {
bundle->meta_graph_def = GetModel();
Session* session = nullptr;
TF_RETURN_IF_ERROR(NewSession(tensorflow::SessionOptions(), &session));
TF_RETURN_IF_ERROR(session->Create(bundle->meta_graph_def.graph_def()));
bundle->session.reset(session);
TF_RETURN_IF_ERROR(session->Run(/*inputs=*/{}, /*output_tensor_names=*/{},
    /*target_node_names=*/{"my_var/init"}, /*outputs=*/nullptr));
return OkStatus();
}
void CheckTrtNode(const GraphDef& converted_graph_def) {
int n_trt_ops = 0;
string op_name{"TRTEngineOp"};
for (const auto& node : converted_graph_def.node()) {
if (!op_name.compare(node.op())) {
n_trt_ops++;
const auto& attr = node.attr();
EXPECT_EQ(attr.at("static_engine").b(),
param_.conv_params.convert_to_static_engine);
if (param_.conv_params.convert_to_static_engine) {
VLOG(2) << "Found serialized segment with size "
<< attr.at("serialized_segment").s().size();
EXPECT_GT(attr.at("serialized_segment").s().size(), 0);
}
}
}
EXPECT_EQ(n_trt_ops, 1);
}
std::vector<std::vector<Tensor>> GetInputTensors() {
std::vector<std::vector<Tensor>> input_tensors;
for (const std::vector<int64>& shape : param_.input_shapes) {
Tensor tensor(DT_FLOAT, TensorShape(shape));
test::FillIota(&tensor, 1.0f);
input_tensors.push_back({tensor});
}
return input_tensors;
}
void RunAndCompareResults(Session* session,
const GraphDef& converted_graph_def) {
Session* p_session = nullptr;
TF_EXPECT_OK(NewSession(SessionOptions(), &p_session));
std::unique_ptr<tensorflow::Session> trt_session(p_session);
TF_EXPECT_OK(trt_session->Create(converted_graph_def));
for (const std::vector<Tensor>& input : input_tensors_) {
std::vector<Tensor> outputs;
TF_EXPECT_OK(
session->Run({{"input", input.at(0)}}, {"output"}, {}, &outputs));
std::cout << outputs.at(0).DebugString() << std::endl;
std::vector<Tensor> trt_outputs;
TF_EXPECT_OK(trt_session->Run({{"input", input.at(0)}}, {"output"}, {},
&trt_outputs));
std::cout << trt_outputs.at(0).DebugString() << std::endl;
ASSERT_EQ(outputs.size(), 1);
ASSERT_EQ(trt_outputs.size(), 1);
tensorflow::test::ExpectEqual(outputs[0], trt_outputs[0]);
}
}
void ConvertAndRunFrozenGraph() {
MetaGraphDef meta_graph_def = GetModel();
StatusOr<GraphDef> result = tensorrt::ConvertAndBuild(
meta_graph_def.graph_def(), {"input"}, {"output"}, input_tensors_,
param_.conv_params);
TF_ASSERT_OK(result.status());
const GraphDef& converted_graph_def = result.value();
CheckTrtNode(converted_graph_def);
Session* p_session = nullptr;
TF_EXPECT_OK(NewSession(SessionOptions(), &p_session));
std::unique_ptr<tensorflow::Session> session(p_session);
TF_EXPECT_OK(session->Create(meta_graph_def.graph_def()));
RunAndCompareResults(session.get(), converted_graph_def);
}
void ConvertAndRunSavedModel() {
SavedModelBundle bundle;
TF_CHECK_OK(GetSavedModelBundle(&bundle));
StatusOr<GraphDef> result = tensorrt::ConvertAndBuild(
&bundle, "serving_default", input_tensors_, param_.conv_params);
TF_ASSERT_OK(result.status());
const GraphDef& converted_graph_def = result.value();
CheckTrtNode(converted_graph_def);
RunAndCompareResults(bundle.GetSession(), converted_graph_def);
}
TestParam param_;
bool use_variable_;
bool use_function_;
std::vector<std::vector<Tensor>> input_tensors_;
};
INSTANTIATE_TEST_CASE_P(
TrtConverterTestInstantiation, TrtConverterTest,
::testing::Combine(
::testing::Values(
TestParam{TfTrtConversionParams{
    1 << 20,  // max_workspace_size_bytes
    TrtPrecisionMode::FP32,
    3,      // minimum_segment_size
    1,      // max_cached_engines
    false,  // use_calibration
    true,   // use_dynamic_shape
    ProfileStrategy::kOptimal,
    true,  // allow_build_at_runtime
    true   // convert_to_static_engine
},
{{1, 2}, {4, 2}}},
TestParam{TfTrtConversionParams{
    1 << 20,  // max_workspace_size_bytes
    TrtPrecisionMode::FP16,
    3,      // minimum_segment_size
    1,      // max_cached_engines
    false,  // use_calibration
    false,  // use_dynamic_shape
    ProfileStrategy::kRange,
    true,  // allow_build_at_runtime
    true   // convert_to_static_engine
},
{{1, 2}}},
TestParam{TfTrtConversionParams{
    1 << 20,  // max_workspace_size_bytes
    TrtPrecisionMode::FP32,
    3,      // minimum_segment_size
    1,      // max_cached_engines
    false,  // use_calibration
    true,   // use_dynamic_shape
    ProfileStrategy::kOptimal,
    true,   // allow_build_at_runtime
    false   // convert_to_static_engine
},
{{1, 2}, {4, 2}}},
TestParam{TfTrtConversionParams{
    1 << 20,  // max_workspace_size_bytes
    TrtPrecisionMode::FP16,
    3,      // minimum_segment_size
    2,      // max_cached_engines
    false,  // use_calibration
    false,  // use_dynamic_shape
    ProfileStrategy::kRange,
    true,   // allow_build_at_runtime
    false   // convert_to_static_engine
},
{{1, 2}, {4, 2}}}),
::testing::Values(false, true),
::testing::Values(false, true)));
TEST_P(TrtConverterTest, Basic) {
if (use_variable_) {
ConvertAndRunSavedModel();
} else {
ConvertAndRunFrozenGraph();
}
}
}
}
#endif |
1,146 | cpp | tensorflow/tensorflow | trt_testutils | tensorflow/compiler/tf2tensorrt/utils/trt_testutils.cc | tensorflow/compiler/tf2tensorrt/utils/trt_testutils_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_TESTUTILS_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_TESTUTILS_H_
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <algorithm>
#include <map>
#include <numeric>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_utils.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
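// Helpers for building NodeDefs (including Const nodes from a vector of
// values) in conversion tests.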
NodeDef MakeNodeDef(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs,
const std::map<std::string, AttrValue> attrs = {});
template <typename T>
NodeDef MakeConstNodeDef(const std::string& name, const std::vector<T>& vals,
const TensorShape& shape) {
Scope s = Scope::NewRootScope();
Tensor t = test::AsTensor<T>(vals, shape);
auto const_op = ops::Const(s.WithOpName(name), t);
return const_op.node()->def();
}
template <typename T>
NodeDef MakeConstNodeDef(const std::string& name, const std::vector<T>& vals) {
TensorShape shape;
const std::vector<int32> shape_dims = {static_cast<int32>(vals.size())};
TF_EXPECT_OK(TensorShapeUtils::MakeShape(shape_dims, &shape));
return MakeConstNodeDef(name, vals, shape);
}
nvinfer1::Dims CreateDims(const std::vector<int>& d);
::testing::Matcher<std::vector<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5,
bool nan_sensitive = false);
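// gMock matcher checking that an nvinfer1::Dims holds exactly the given
// dimension values.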
MATCHER_P(DimsAreArrayHelper, array_value,
absl::StrFormat("%s [%s]", negation ? "are" : "are not",
::testing::PrintToString(array_value))) {
if (arg.nbDims != array_value.size()) return false;
for (int i = 0; i < arg.nbDims; ++i) {
if (arg.d[i] != array_value[i]) {
return false;
}
}
return true;
}
using DimsAreArray = DimsAreArrayHelperMatcherP<std::vector<int>>;
MATCHER_P(LayerNamesAreArrayHelper, array_value,
absl::StrFormat("layer names %s [%s]", negation ? "are" : "are not",
::testing::PrintToString(array_value))) {
if (array_value.size() != arg->getNbLayers()) return false;
for (int i = 0; i < arg->getNbLayers(); ++i) {
if (arg->getLayer(i)->getName() == nullptr) {
return false;
}
}
return true;
}
using LayerNamesAreArray =
LayerNamesAreArrayHelperMatcherP<std::vector<std::string>>;
MATCHER(LayerNamesNonEmpty, "") {
for (int i = 0; i < arg->getNbLayers(); ++i) {
if (arg->getLayer(i)->getName() == nullptr) {
return false;
}
}
return true;
}
MATCHER_P2(ShapedWeightsHasDimsAndValuesHelper, dims_vec, expected_values, "") {
DimsAdapter dims(dims_vec);
if (arg.Shape() != dims) {
return false;
}
if (arg.count() != expected_values.size()) {
return false;
}
using T = typename decltype(expected_values)::value_type;
const T* actual_values = arg.template GetPointer<T>();
for (int i = 0; i < expected_values.size(); ++i) {
if (expected_values[i] != actual_values[i]) {
return false;
}
}
return true;
}
template <typename T>
using ShapedWeightsHasDimsAndValues =
ShapedWeightsHasDimsAndValuesHelperMatcherP2<std::vector<int>,
std::vector<T>>;
template <typename InCType, typename OutCType>
std::vector<OutCType> CastVector(
const gtl::ArraySlice<InCType>& vals) {
std::vector<OutCType> res(vals.size());
std::transform(vals.begin(), vals.end(), res.begin(),
[](const InCType in_val) -> OutCType {
return static_cast<OutCType>(in_val);
});
return res;
}
template <typename CType>
std::vector<CType> CreateVectorIota(int size, CType start_value = CType(0)) {
std::vector<CType> res(size);
std::iota(res.begin(), res.end(), start_value);
return res;
}
}
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <map>
#include <string>
#include <vector>
#include <gmock/gmock.h>
namespace tensorflow {
namespace tensorrt {
namespace convert {
::testing::Matcher<std::vector<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error, bool nan_sensitive) {
std::vector<::testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
if (nan_sensitive) {
matchers.emplace_back(::testing::NanSensitiveFloatNear(v, max_abs_error));
} else if (max_abs_error == 0) {
matchers.emplace_back(::testing::FloatEq(v));
} else {
EXPECT_GE(max_abs_error, 0);
matchers.emplace_back(::testing::FloatNear(v, max_abs_error));
}
}
return ::testing::ElementsAreArray(matchers);
}
nvinfer1::Dims CreateDims(const std::vector<int>& d) {
nvinfer1::Dims dims;
dims.nbDims = d.size();
for (int i = 0; i < d.size(); ++i) {
dims.d[i] = d[i];
}
return dims;
}
NodeDef MakeNodeDef(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs,
const std::map<std::string, AttrValue> attrs) {
NodeDef node_def;
node_def.set_name(name);
node_def.set_op(op);
for (const auto& input : inputs) {
node_def.add_input(input);
}
for (const auto& attr : attrs) {
(*node_def.mutable_attr())[attr.first] = attr.second;
}
return node_def;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Not;
TEST(TrtDimsMatcher, ParameterizedMatchers) {
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), DimsAreArray({1, 2, 3, 4}));
EXPECT_THAT(nvinfer1::Dims{}, Not(DimsAreArray({1, 2})));
std::vector<int> empty_dims;
EXPECT_THAT(nvinfer1::Dims{}, DimsAreArray(empty_dims));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(DimsAreArray({1, 2, 3, 5})));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(DimsAreArray({1, 2, 5})));
}
TEST(TrtDimsMatcher, EqualityMatcher) {
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Eq(nvinfer1::Dims4(1, 2, 3, 4)));
EXPECT_THAT(nvinfer1::Dims{}, Eq(nvinfer1::Dims()));
EXPECT_THAT(nvinfer1::Dims{}, Not(Eq(nvinfer1::DimsHW())));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4),
Not(Eq(nvinfer1::Dims4(1, 2, 3, 3))));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(Eq(nvinfer1::Dims2(1, 2))));
}
TEST(INetworkDefinitionMatchers, CorrectlyMatch) {
Logger& logger = *Logger::GetLogger();
TrtUniquePtrType<nvinfer1::IBuilder> builder(
nvinfer1::createInferBuilder(logger));
TrtUniquePtrType<nvinfer1::INetworkDefinition> network(
builder->createNetworkV2(0L));
EXPECT_THAT(network.get(), AllOf(Not(LayerNamesAreArray({"some layer"})),
LayerNamesNonEmpty()));
nvinfer1::Weights weights;
weights.type = nvinfer1::DataType::kFLOAT;
std::array<float, 1> vals;
weights.values = vals.data();
weights.count = 1;
auto input = network->addInput("input-tensor", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims3{1, 1, 1});
ASSERT_NE(input, nullptr);
const char* fc_layer_name = "my-fc-layer";
auto layer = network->addFullyConnected(*input, 1, weights, weights);
ASSERT_NE(layer, nullptr);
layer->setName(fc_layer_name);
EXPECT_THAT(network.get(),
AllOf(LayerNamesNonEmpty(), LayerNamesAreArray({fc_layer_name})));
layer = network->addFullyConnected(*input, 1, weights, weights);
EXPECT_THAT(network.get(), AllOf(LayerNamesNonEmpty(),
Not(LayerNamesAreArray({fc_layer_name}))));
}
}
}
}
#endif |
1,147 | cpp | tensorflow/tensorflow | trt_allocator | tensorflow/compiler/tf2tensorrt/utils/trt_allocator.cc | tensorflow/compiler/tf2tensorrt/utils/trt_allocator_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_
#include <unordered_map>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/mutex.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
#endif
namespace tensorflow {
namespace tensorrt {
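// Aligns ptr within the given space, in the spirit of std::align: on success
// ptr is advanced to the aligned address and space is reduced accordingly; on
// failure nullptr is returned and ptr/space are left unchanged.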
void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space);
}
}
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
class TRTBaseAllocator : public nvinfer1::IGpuAllocator {
public:
virtual ~TRTBaseAllocator() = default;
};
class TRTDeviceAllocator : public TRTBaseAllocator {
public:
TRTDeviceAllocator(Allocator* allocator);
virtual ~TRTDeviceAllocator() {
VLOG(1) << "Destroying allocator attached to " << allocator_->Name();
}
void* allocate(uint64_t size, uint64_t alignment,
uint32_t flags) noexcept override;
void free(void* memory) noexcept override;
private:
mutex mu_;
Allocator* allocator_;
std::unordered_map<void*, void*> mem_map_ TF_GUARDED_BY(mu_);
};
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/platform/logging.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#endif
namespace tensorflow {
namespace tensorrt {
void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space) {
QCHECK_GT(alignment, 0ul) << "alignment must be greater than 0.";
QCHECK_EQ(0, alignment & (alignment - 1)) << "Alignment must be power of 2.";
QCHECK_GT(size, 0ul) << "size must be greater than 0.";
QCHECK(ptr) << "ptr must not be nullptr.";
QCHECK_GT(space, 0ul) << "space must be greater than 0.";
const uintptr_t ptr_val = reinterpret_cast<uintptr_t>(ptr);
QCHECK_GE(ptr_val + space, ptr_val) << "Provided space overflows.";
if (size > space) return nullptr;
const uintptr_t aligned_ptr_val = ((ptr_val + alignment - 1) & -alignment);
if (aligned_ptr_val > ptr_val + space - size) return nullptr;
ptr = reinterpret_cast<void*>(aligned_ptr_val);
const uintptr_t diff = aligned_ptr_val - ptr_val;
space -= diff;
return ptr;
}
}
}
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
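// Allocates from the wrapped TF allocator with a forced 512-byte alignment,
// recording the mapping from the aligned pointer back to the original
// allocation so free() can release it.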
void* TRTDeviceAllocator::allocate(uint64_t size, uint64_t alignment,
uint32_t flags) noexcept {
if (size == 0) return nullptr;
alignment = 512;
assert((alignment & (alignment - 1)) == 0);
uint64_t total_size = size + alignment;
AllocationAttributes attributes;
attributes.retry_on_failure = false;
void* mem = allocator_->AllocateRaw(alignment, total_size, attributes);
if (!mem) return nullptr;
void* alloc_mem = mem;
QCHECK(Align(alignment, size, mem, total_size));
mutex_lock lock(mu_);
if (mem != alloc_mem) {
QCHECK(mem_map_.insert({mem, alloc_mem}).second);
}
VLOG(2) << "Allocated " << total_size << " bytes memory @" << alloc_mem
<< "; aligned to " << size << " bytes @" << mem << " with alignment "
<< alignment;
return mem;
}
TRTDeviceAllocator::TRTDeviceAllocator(Allocator* allocator)
: allocator_(allocator) {
VLOG(1) << "Using " << allocator->Name() << " allocator from TensorFlow";
}
void TRTDeviceAllocator::free(void* memory) noexcept {
mutex_lock lock(mu_);
VLOG(2) << "Deallocating @ " << memory;
if (memory) {
auto alloc_mem = mem_map_.find(memory);
if (alloc_mem != mem_map_.end()) {
memory = alloc_mem->second;
mem_map_.erase(alloc_mem->first);
}
allocator_->DeallocateRaw(memory);
}
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tensorrt {
bool RunTest(const uint64_t alignment, const uint64_t size,
const intptr_t orig_ptr_val, const uint64_t orig_space) {
void* const orig_ptr = reinterpret_cast<void*>(orig_ptr_val);
void* ptr = orig_ptr;
uint64_t space = orig_space;
void* result = Align(alignment, size, ptr, space);
if (result == nullptr) {
EXPECT_EQ(orig_ptr, ptr);
EXPECT_EQ(orig_space, space);
return false;
} else {
EXPECT_EQ(result, ptr);
const intptr_t ptr_val = reinterpret_cast<intptr_t>(ptr);
EXPECT_EQ(0, ptr_val % alignment);
EXPECT_GE(ptr_val, orig_ptr_val);
EXPECT_GE(space, size);
EXPECT_LE(space, orig_space);
EXPECT_EQ(ptr_val + space, orig_ptr_val + orig_space);
return true;
}
}
TEST(TRTAllocatorTest, Align) {
for (const uint64_t space :
{1ul, 2ul, 3ul, 4ul, 7ul, 8ul, 9ul, 10ul, 16ul, 32ul, 511ul, 512ul,
513ul, 700ul, 12345ul, 1ul << 32}) {
for (uint64_t alignment = 1; alignment <= space * 4; alignment *= 2) {
for (const uintptr_t ptr_val :
{static_cast<uint64_t>(1),
alignment == 1 ? static_cast<uint64_t>(1) : alignment - 1,
alignment, alignment + 1, alignment + (alignment / 2)}) {
if (ptr_val % alignment == 0) {
for (const uint64_t size :
{static_cast<uint64_t>(1),
space == 1 ? static_cast<uint64_t>(1) : space - 1, space,
space + 1}) {
EXPECT_EQ(space >= size, RunTest(alignment, size, ptr_val, space));
}
} else {
EXPECT_FALSE(RunTest(alignment, space, ptr_val, space));
const uint64_t diff = alignment - ptr_val % alignment;
if (space > diff) {
EXPECT_TRUE(
RunTest(alignment, space - diff, ptr_val + diff, space - diff));
for (const uint64_t size :
{static_cast<uint64_t>(1),
space - diff > 1 ? space - diff - 1
: static_cast<uint64_t>(1),
space - diff, space - diff + 1, space - 1}) {
EXPECT_EQ(space - diff >= size,
RunTest(alignment, size, ptr_val, space));
}
} else {
EXPECT_FALSE(RunTest(alignment, 1, ptr_val, space));
}
}
}
}
}
}
}
} |
1,148 | cpp | tensorflow/tensorflow | trt_lru_cache | tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.cc | tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_LRU_CACHE_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_LRU_CACHE_H_
#include <list>
#include <thread>
#include <unordered_map>
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_int8_calibrator.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/lib/core/errors.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
#endif
namespace tensorflow {
namespace tensorrt {
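// Simple least-recently-used cache: emplace() evicts the oldest entries when
// capacity would be exceeded, and at() marks an entry as most recently used.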
template <class Key, class Value, class HashFunction>
class LRUCache {
public:
typedef Value value_type;
typedef Key key_type;
typedef HashFunction hasher;
typedef typename std::unordered_map<key_type, value_type, hasher> map_type;
typedef typename map_type::iterator iterator;
typedef typename map_type::const_iterator const_iterator;
LRUCache() : capacity_(0) {}
explicit LRUCache(size_t capacity) : capacity_(capacity) {}
size_t capacity() const { return capacity_; }
void reserve(size_t capacity) {
capacity_ = capacity;
DiscardOld();
}
size_t size() const { return objects_.size(); }
size_t count(const key_type& key) const { return objects_.count(key); }
value_type& at(const key_type& key) { return Touch(key); }
const_iterator begin() const { return objects_.begin(); }
const_iterator end() const { return objects_.end(); }
iterator begin() { return objects_.begin(); }
iterator end() { return objects_.end(); }
template <typename... Args>
std::pair<iterator, bool> emplace(Args&&... args) {
DiscardOld(1);
std::pair<iterator, bool> result =
objects_.emplace(std::forward<Args>(args)...);
key_type key = result.first->first;
if (result.second) {
keys_.push_front(key);
} else {
TouchNoCheck(key);
}
return result;
}
private:
std::unordered_map<key_type, value_type, hasher> objects_;
std::list<key_type> keys_;
size_t capacity_;
value_type not_found_value_;
value_type& Touch(const key_type& key) {
value_type& value = objects_.at(key);
TouchNoCheck(key);
return value;
}
void TouchNoCheck(const key_type& key) {
auto rank = std::find(keys_.begin(), keys_.end(), key);
if (rank != keys_.begin()) {
keys_.erase(rank);
keys_.push_front(key);
}
}
void DiscardOld(size_t n = 0) {
DCHECK(capacity_ >= n) << "Insufficient capacity in cache (capacity = "
<< capacity_ << ", requested " << n << ")";
while (objects_.size() > (capacity_ - n)) {
key_type discard_key = keys_.back();
keys_.pop_back();
objects_.erase(discard_key);
}
}
};
#if GOOGLE_CUDA && GOOGLE_TENSORRT
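// Holds a built ICudaEngine together with its execution contexts and the
// device memory size the engine requires.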
struct EngineContext {
EngineContext() {}
EngineContext(TrtUniquePtrType<nvinfer1::ICudaEngine>&& cuda_engine,
ExecutionContext&& execution_context)
: cuda_engine_(std::move(cuda_engine)) {
execution_contexts.push_back(std::move(execution_context));
device_memory_size_ =
cuda_engine_ ? cuda_engine_->getDeviceMemorySize() : 0;
}
EngineContext(TrtUniquePtrType<nvinfer1::ICudaEngine>&& cuda_engine,
std::vector<ExecutionContext>&& execution_contexts)
: cuda_engine_(std::move(cuda_engine)),
execution_contexts(std::move(execution_contexts)) {
device_memory_size_ =
cuda_engine_ ? cuda_engine_->getDeviceMemorySize() : 0;
}
mutex mu;
nvinfer1::ICudaEngine* GetCudaEngine() { return cuda_engine_.get(); }
Status GetExecutionContext(int idx, nvinfer1::IExecutionContext** exec_ctx,
bool* has_device_memory)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu) {
if (idx >= execution_contexts.size()) {
return errors::Internal("Requested engine context with index ", idx,
                        ", but only ", execution_contexts.size(),
                        " contexts are present.");
}
*exec_ctx = execution_contexts[idx].get();
*has_device_memory = execution_contexts[idx].HasDeviceMemory();
return OkStatus();
}
int GetNumContexts() {
mutex_lock lock(mu);
return execution_contexts.size();
}
size_t GetDeviceMemorySize() { return device_memory_size_; }
private:
TrtUniquePtrType<nvinfer1::ICudaEngine> cuda_engine_;
public:
std::vector<ExecutionContext> execution_contexts TF_GUARDED_BY(mu);
private:
size_t device_memory_size_;
};
class CalibrationContext {
public:
string TerminateCalibration();
std::unordered_map<string, std::pair<void*, size_t>> device_buffers_;
std::vector<Tensor> device_tensors_;
std::unique_ptr<TRTInt8Calibrator> calibrator_;
TrtUniquePtrType<nvinfer1::IBuilder> builder_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
std::unique_ptr<std::thread> thr_;
private:
mutex mu_;
bool terminated_ TF_GUARDED_BY(mu_) = false;
std::string calibration_table_ TF_GUARDED_BY(mu_);
};
ABSL_CONST_INIT extern const absl::string_view kTfTrtContainerName;
class TRTEngineCacheResource : public ResourceBase {
public:
static Logger& GetLogger();
TRTEngineCacheResource(OpKernelContext* ctx, size_t capacity);
~TRTEngineCacheResource() override;
string DebugString() const override;
EngineContext* GetEngineContext(const std::vector<TensorShape>& input_shapes);
EngineContext* GetEngineContext(const int profile_id);
std::unique_ptr<TRTBaseAllocator> allocator_;
LRUCache<std::vector<TensorShape>, std::unique_ptr<EngineContext>,
VectorTensorShapeHasher>
cache_;
std::unique_ptr<CalibrationContext> calib_ctx_;
TrtShapeOptimizationProfile profiles_;
};
#endif
}
}
#endif
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include <sstream>
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/mutex.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
string CalibrationContext::TerminateCalibration() {
mutex_lock l(mu_);
if (terminated_) return calibration_table_;
TRTInt8Calibrator* raw_calibrator = calibrator_.get();
raw_calibrator->waitAndSetDone();
terminated_ = true;
thr_->join();
calibration_table_ = raw_calibrator->getCalibrationTableAsString();
return calibration_table_;
}
const absl::string_view kTfTrtContainerName = "TF-TRT";
Logger& TRTEngineCacheResource::GetLogger() {
static Logger* logger = new Logger();
return *logger;
}
TRTEngineCacheResource::TRTEngineCacheResource(OpKernelContext* ctx,
size_t capacity)
: cache_(capacity) {
auto device = ctx->device();
auto alloc = device->GetAllocator(AllocatorAttributes());
if (!alloc) {
LOG(ERROR) << "Can't find device allocator for gpu device "
<< device->name();
allocator_ = nullptr;
} else {
allocator_.reset(new TRTDeviceAllocator(alloc));
}
}
TRTEngineCacheResource::~TRTEngineCacheResource() {
VLOG(1) << "Destroying TRTEngineCacheResource...";
}
string TRTEngineCacheResource::DebugString() const {
std::stringstream oss;
using std::dec;
using std::endl;
using std::hex;
oss << "TRTEngineCacheResource: ";
oss << "TRTBaseAllocator = " << hex << allocator_.get() << dec << ", ";
oss << "LRUCache = " << hex << &cache_ << dec << endl;
oss << "Containing " << cache_.size() << " entries: " << endl;
for (const auto& item : cache_) {
mutex_lock lock(item.second->mu);
oss << TensorShapeUtils::ShapeListString(item.first) << ": " << hex
<< "ICudaEngine: " << item.second->GetCudaEngine() << ", "
<< "IExecutionContext: ";
absl::c_for_each(
item.second->execution_contexts,
[&](const ExecutionContext& ctx) { oss << ctx.get() << ","; });
oss << dec << endl;
}
return oss.str();
}
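// Returns the cached engine context whose shapes are compatible with
// input_shapes, preferring the smallest matching batch size; nullptr if no
// compatible engine is cached.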
EngineContext* TRTEngineCacheResource::GetEngineContext(
const std::vector<TensorShape>& input_shapes) {
EngineContext* engine_context = nullptr;
int64 min_matched_batch_size = kint64max;
for (const auto& pair : cache_) {
const std::vector<TensorShape>& cached_input_shapes = pair.first;
if (input_shapes.size() != cached_input_shapes.size()) {
LOG(ERROR) << "Input shape list size mismatch"
<< ", cached size: " << cached_input_shapes.size()
<< " vs. input size: " << input_shapes.size();
}
if (AreShapesCompatible(input_shapes, cached_input_shapes)) {
const int cached_batch_size = cached_input_shapes[0].dim_size(0);
if (min_matched_batch_size > cached_batch_size) {
min_matched_batch_size = cached_batch_size;
engine_context = pair.second.get();
}
}
}
return engine_context;
}
EngineContext* TRTEngineCacheResource::GetEngineContext(const int profile_id) {
if (profiles_.NeedProfiles() && profile_id >= profiles_.GetNumProfiles()) {
LOG(ERROR) << "Out of range: profile_id " << profile_id
<< " is larger than number of profiles "
<< profiles_.GetNumProfiles();
return nullptr;
}
if (cache_.size() > 1) {
LOG(ERROR) << "Cache is expected to have at most "
<< "1 engine in explicit batch mode where profiles are used.";
return nullptr;
}
if (cache_.size() == 0) {
return nullptr;
}
return cache_.begin()->second.get();
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tensorrt {
TEST(LRUCacheTest, Basic) {
LRUCache<int, int, std::hash<int>> cache;
cache.reserve(2);
cache.emplace(10, 100);
EXPECT_EQ(cache.size(), 1);
EXPECT_EQ(cache.count(10), 1);
EXPECT_EQ(cache.at(10), 100);
EXPECT_EQ(cache.count(100), 0);
cache.emplace(20, 200);
EXPECT_EQ(cache.size(), 2);
EXPECT_EQ(cache.count(10), 1);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.at(10), 100);
EXPECT_EQ(cache.at(20), 200);
EXPECT_EQ(cache.count(100), 0);
EXPECT_EQ(cache.count(200), 0);
cache.emplace(30, 300);
EXPECT_EQ(cache.count(10), 0);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.count(30), 1);
cache.at(20);
cache.emplace(40, 400);
EXPECT_EQ(cache.count(10), 0);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.count(30), 0);
EXPECT_EQ(cache.count(40), 1);
}
}
} |
1,149 | cpp | tensorflow/tensorflow | trt_shape_optimization_profiles | tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc | tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_SHAPE_OPTIMIZATION_PROFILES_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_SHAPE_OPTIMIZATION_PROFILES_H_
#include <list>
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/compiler/tf2tensorrt/common/datavec.h"
#include "tensorflow/compiler/tf2tensorrt/convert/trt_parameters.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_execution_context.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
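// Stores min/opt/max dimensions of a single optimization profile. For n
// engine inputs the vectors have 2*n entries: dims for input i at index i,
// and (for shape tensors) the shape values at index i + n.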
struct OptimizationProfileConfig {
std::vector<nvinfer1::Dims> min;
std::vector<nvinfer1::Dims> opt;
std::vector<nvinfer1::Dims> max;
string DebugString() const {
using absl::StrCat;
return StrCat("[min: ", tensorflow::tensorrt::DebugString(min),
", opt: : ", tensorflow::tensorrt::DebugString(opt),
", max: ", tensorflow::tensorrt::DebugString(max), "]");
}
Status SetDimensions(const nvinfer1::INetworkDefinition* network,
nvinfer1::IOptimizationProfile* profile,
const std::vector<bool>& input_mask) const {
int n_inputs_trt = network->getNbInputs();
int n_inputs_tf = opt.size() / 2;
if (input_mask.size() != n_inputs_tf) {
return errors::Internal("Incorrect input mask size: ", input_mask.size());
}
int n_mask_true = 0;
for (bool mask_val : input_mask) {
if (mask_val) {
n_mask_true++;
}
}
if (n_mask_true != n_inputs_trt) {
return errors::Internal(
"Number of true elements in input_mask (", n_mask_true,
") doesn't match expected TRT inputs (", n_inputs_trt, ")");
}
int j = 0;
for (int i = 0; i < n_inputs_tf; i++) {
if (input_mask[i]) {
const ITensorProxyPtr input = network->getInput(j);
const char* name = input->getName();
if (input->isShapeTensor()) {
int idx = i + n_inputs_tf;
VLOG(2) << "Setting shape values for " << name << ", "
<< ::tensorflow::tensorrt::DebugString(opt[idx]);
profile->setShapeValues(name, nvinfer1::OptProfileSelector::kMIN,
min[idx].d, min[idx].nbDims);
profile->setShapeValues(name, nvinfer1::OptProfileSelector::kOPT,
opt[idx].d, opt[idx].nbDims);
profile->setShapeValues(name, nvinfer1::OptProfileSelector::kMAX,
max[idx].d, max[idx].nbDims);
}
VLOG(2) << "Setting input dimensions for " << name << ", "
<< ::tensorflow::tensorrt::DebugString(opt[i]);
profile->setDimensions(name, nvinfer1::OptProfileSelector::kMIN,
min[i]);
profile->setDimensions(name, nvinfer1::OptProfileSelector::kOPT,
opt[i]);
profile->setDimensions(name, nvinfer1::OptProfileSelector::kMAX,
max[i]);
j++;
}
}
return OkStatus();
}
bool IncludesShapes(const std::vector<TensorShape>& shapes,
bool has_shape_tensor,
const std::vector<nvinfer1::Dims>& shape_values,
const std::vector<bool>& is_pruned_input,
const std::vector<bool>& is_shape_tensor) const {
if (min.size() != shapes.size() * 2 ||
(has_shape_tensor && min.size() != shape_values.size() * 2)) {
VLOG(2) << "Profile size mismatch min size " << min.size()
<< " vs input shapes size " << shapes.size() << " "
<< shape_values.size();
return false;
}
for (int i = 0; i < shapes.size(); i++) {
if (is_pruned_input[i]) {
continue;
}
auto current_shape = shapes[i];
if (min[i].nbDims != current_shape.dims()) {
return false;
}
for (int dim = 0; dim < current_shape.dims(); dim++) {
if ((min[i].d[dim] > current_shape.dim_size(dim)) ||
(max[i].d[dim] < current_shape.dim_size(dim))) {
return false;
}
}
}
if (has_shape_tensor) {
int offset = shapes.size();
for (int i = 0; i < shape_values.size(); i++) {
if (is_pruned_input[i] || !is_shape_tensor[i]) {
continue;
}
auto shape_val = shape_values[i];
if (min[i + offset].nbDims != shape_val.nbDims) {
return false;
}
for (int dim = 0; dim < shape_val.nbDims; dim++) {
if (min[i + offset].d[dim] > shape_val.d[dim] ||
max[i + offset].d[dim] < shape_val.d[dim]) {
return false;
}
}
}
}
return true;
}
};
class TrtShapeOptimizationProfile {
public:
TrtShapeOptimizationProfile() {}
void AddShape(const std::vector<TensorShape>& shapes) {
input_shapes_.push_back(shapes);
input_shape_values_.push_back(actual_shape_values_);
VLOG(1) << "Collected shape(s) " << DebugString(shapes) << " for profiles.";
}
void SetInputMask(const std::vector<bool>& input_mask) {
input_mask_ = input_mask;
}
Status CollectShapeValues(OpKernelContext* ctx);
Status CollectShapeValues(const DataVec& input);
void clear() { profiles_.clear(); }
int GetProfileNumber(const std::vector<TensorShape>& shapes);
Status ConfigureBuilder(nvinfer1::IBuilder* builder,
nvinfer1::IBuilderConfig* config,
const nvinfer1::INetworkDefinition* network);
Status CreateExecutionContexts(nvinfer1::ICudaEngine* engine,
std::vector<ExecutionContext>* exec_contexts);
Status SetInputShapeBinding(int input_index, int binding_index,
nvinfer1::ICudaEngine* cuda_engine,
nvinfer1::IExecutionContext* exec_context) const;
void InitProfiles(const std::vector<PartialTensorShape>& input_partial_shapes,
ProfileStrategy strategy);
void InitCalibProfile(const std::vector<TensorShape>& shapes);
int GetNumProfiles() const;
bool HasShape() const { return !input_shapes_.empty(); }
bool NeedProfiles() const { return need_profiles_; }
Status RestoreProfiles(const nvinfer1::ICudaEngine* engine,
int n_network_inputs);
bool HasShapeTensor() const { return has_shape_tensor_; }
void SetShapeTensorMask(const nvinfer1::INetworkDefinition* network);
bool IsStaticCompatible() {
return strategy_ == ProfileStrategy::kOptimal && profiles_.size() == 1
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
&& !HasShapeTensor()
#endif
;
}
private:
std::vector<std::vector<TensorShape>> input_shapes_;
std::vector<std::vector<nvinfer1::Dims>> input_shape_values_;
std::vector<nvinfer1::Dims> actual_shape_values_;
std::vector<OptimizationProfileConfig> profiles_;
OptimizationProfileConfig calib_profiles_;
std::vector<bool> input_mask_;
bool has_shape_tensor_ = true;
bool need_profiles_ = false;
std::vector<bool> is_shape_tensor_;
std::vector<bool> is_pruned_input_;
ProfileStrategy strategy_;
Status AddProfiles(nvinfer1::IBuilder* builder,
nvinfer1::IBuilderConfig* config,
const nvinfer1::INetworkDefinition* network);
void SetShapeTensorMask(const nvinfer1::ICudaEngine* engine, int n_inputs);
void SetShapeTensorMask(
const std::vector<PartialTensorShape>& input_partial_shapes);
Status SetPrunedMask(const nvinfer1::ICudaEngine* engine,
int n_network_inputs);
void ImplicitBatchModeCompatibleStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes);
void OptimalStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes);
Status RangeStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes);
};
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include <algorithm>
#include <functional>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
namespace tensorflow {
namespace tensorrt {
template <typename TensorShapeType>
std::vector<nvinfer1::Dims> GetDimVec(std::vector<TensorShapeType> shape_vec) {
std::vector<nvinfer1::Dims> dimvec(shape_vec.size());
absl::c_transform(shape_vec, dimvec.begin(), [](TensorShapeType shape) {
auto adap = DimsAdapter::Create(shape);
TF_CHECK_OK(adap.status());
return adap->AsTrtDims();
});
return dimvec;
}
void EnforceCompatibility(nvinfer1::Dims* prof_dims,
const PartialTensorShape& input_shape) {
for (int i = 0; i < input_shape.dims(); i++) {
if (input_shape.dim_size(i) != -1) {
prof_dims->d[i] = input_shape.dim_size(i);
}
}
}
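// Derives an implicit-batch-compatible profile from one collected shape
// vector: min is the shape with its batch dimension set to 1, opt and max are
// the shape itself.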
void SetImplicitBatchModeCompatibleProfile(
const std::vector<nvinfer1::Dims>& dimvec, std::vector<nvinfer1::Dims>* min,
std::vector<nvinfer1::Dims>* opt, std::vector<nvinfer1::Dims>* max) {
*min = dimvec;
for (auto& dim : *min) {
if (dim.d[0] != -1) dim.d[0] = 1;
}
*opt = dimvec;
*max = dimvec;
}
void TrtShapeOptimizationProfile::ImplicitBatchModeCompatibleStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
for (auto& shape_vec : collected_shapes) {
std::vector<nvinfer1::Dims> min, opt, max;
SetImplicitBatchModeCompatibleProfile(shape_vec, &min, &opt, &max);
VLOG(2) << "Initializing optimization profile config with min="
<< DebugString(min) << ", opt=max=" << DebugString(max);
OptimizationProfileConfig profConfig{min, opt, max};
profiles_.push_back(std::move(profConfig));
}
}
template <typename BinaryOperation>
Status ShapeProfileBinaryOp(std::vector<nvinfer1::Dims>* x,
const std::vector<nvinfer1::Dims>& y,
BinaryOperation op) {
if (x->size() != y.size())
return errors::InvalidArgument(
"Number of input tensors differ during profile creation");
for (int i = 0; i < x->size(); i++) {
if (x->at(i).nbDims != y[i].nbDims)
return errors::InvalidArgument(
    "Number of input dimensions differ during profile creation at dim ", i,
    ", values ", x->at(i).nbDims, " vs. ", y[i].nbDims);
for (int j = 0; j < x->at(i).nbDims; j++) {
x->at(i).d[j] = op(x->at(i).d[j], y[i].d[j]);
}
}
return OkStatus();
}
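// Range strategy: a single profile whose min/max are the element-wise
// minimum/maximum over all collected shapes (opt is set equal to max).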
Status TrtShapeOptimizationProfile::RangeStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
if (collected_shapes.empty()) return OkStatus();
std::vector<nvinfer1::Dims> min = collected_shapes[0];
std::vector<nvinfer1::Dims> max = min;
for (int i = 1; i < collected_shapes.size(); i++) {
TF_RETURN_IF_ERROR(
ShapeProfileBinaryOp(&min, collected_shapes[i],
[](int a, int b) { return std::min(a, b); }));
TF_RETURN_IF_ERROR(
ShapeProfileBinaryOp(&max, collected_shapes[i],
[](int a, int b) { return std::max(a, b); }));
}
VLOG(2) << "Initializing optimization profile config with min="
<< DebugString(min) << ", opt=max=" << DebugString(max);
OptimizationProfileConfig profConfig{min, max, max};
profiles_.push_back(std::move(profConfig));
return OkStatus();
}
void TrtShapeOptimizationProfile::OptimalStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
for (auto& shape_vec : collected_shapes) {
std::vector<nvinfer1::Dims> min = shape_vec;
std::vector<nvinfer1::Dims> opt = min;
std::vector<nvinfer1::Dims> max = min;
VLOG(2) << "Initializing optimization profile config with min=opt=max="
<< DebugString(min);
OptimizationProfileConfig profConfig{min, opt, max};
profiles_.push_back(std::move(profConfig));
}
}
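// Copies the values of shape-tensor inputs from device to host so they can
// later be used to select or create optimization profiles.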
Status TrtShapeOptimizationProfile::CollectShapeValues(OpKernelContext* ctx) {
tensorflow::profiler::TraceMe activity(
"TrtShapeOptimizationProfile::CollectShapeValues",
tensorflow::profiler::TraceMeLevel::kInfo);
cudaStream_t stream = reinterpret_cast<cudaStream_t>(CHECK_NOTNULL(
ctx->op_device_context()->stream()->platform_specific_handle().stream));
actual_shape_values_.resize(ctx->num_inputs());
if (is_shape_tensor_.empty()) {
is_shape_tensor_.resize(ctx->num_inputs());
for (int i = 0; i < ctx->num_inputs(); i++) {
is_shape_tensor_[i] = IsTrtShapeTensorCompatible(ctx->input(i));
}
}
int n_shape_val = 0;
for (int i = 0; i < ctx->num_inputs(); i++) {
if (is_shape_tensor_[i]) {
if (ctx->input_dtype(i) != DT_INT32) {
is_shape_tensor_[i] = false;
continue;
}
if (input_shape_values_.size() > 0 &&
input_shape_values_[0][i].nbDims != ctx->input(i).NumElements()) {
is_shape_tensor_[i] = false;
continue;
}
n_shape_val++;
const Tensor& input = ctx->input(i);
actual_shape_values_[i].nbDims = input.NumElements();
auto ret = cudaMemcpyAsync(
actual_shape_values_[i].d, input.flat<int32>().data(),
input.NumElements() * sizeof(int32), cudaMemcpyDeviceToHost, stream);
if (ret != 0) {
return errors::Internal("Could not copy shape tensor values");
}
VLOG(2) << "Input " << i << " is (probably) a shape tensor, n_values="
<< input.NumElements();
} else {
actual_shape_values_[i] = {0, {}};
}
}
if (n_shape_val > 0) {
cudaStreamSynchronize(stream);
}
return OkStatus();
}
Status TrtShapeOptimizationProfile::CollectShapeValues(const DataVec& input) {
actual_shape_values_.resize(input.size());
for (int i = 0; i < input.size(); i++) {
if (is_shape_tensor_[i]) {
if (!IsTrtShapeTensorCompatible(input[i].tensor)) {
return errors::Internal("Inconsistent shape tensor ", input[i].name,
", ", i);
}
int n_elements = input[i].tensor.NumElements();
actual_shape_values_[i].nbDims = n_elements;
std::copy(input[i].tensor.flat<int32>().data(),
input[i].tensor.flat<int32>().data() + n_elements,
actual_shape_values_[i].d);
VLOG(2) << "Collected tensor shape values "
<< DebugString(actual_shape_values_[i]);
} else {
actual_shape_values_[i] = {0, {}};
}
}
return OkStatus();
}
void FixShapeValueProfile(OptimizationProfileConfig* prof,
const std::vector<bool>& is_shape_tensor) {
int shape_value_offset = is_shape_tensor.size();
for (int i = 0; i < is_shape_tensor.size(); i++) {
if (is_shape_tensor[i] &&
std::equal(prof->min[shape_value_offset + i].d,
prof->min[shape_value_offset + i].d +
prof->min[shape_value_offset + i].nbDims,
prof->max[shape_value_offset + i].d)) {
prof->max[shape_value_offset + i].d[0]++;
VLOG(2) << "Adjusted profile for shape value tensor " << i << " "
<< DebugString(prof->max[shape_value_offset + i]);
} else {
VLOG(2) << i << " is not a shape tensor." << is_shape_tensor[i];
}
}
}
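// Returns true if `rhs` already appears in `values` (same rank and identical
// dimensions for every tensor); used to de-duplicate collected shapes.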
bool AlreadyCollected(const std::vector<std::vector<nvinfer1::Dims>>& values,
const std::vector<nvinfer1::Dims>& rhs) {
for (auto& lhs : values) {
bool ret = lhs.size() == rhs.size();
for (int i = 0; ret && i < lhs.size(); i++) {
ret &= lhs[i].nbDims == rhs[i].nbDims;
for (int j = 0; ret && j < lhs[i].nbDims; j++) {
ret &= (lhs[i].d[j] == rhs[i].d[j]);
}
}
if (ret) return true;
}
return false;
}
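// Converts the shapes (and shape-tensor values) recorded during profile
// generation into optimization profiles: identical shape vectors are
// de-duplicated and the requested ProfileStrategy decides how many profiles
// are created. Afterwards the shape-tensor mask is set and every profile is
// clamped to be compatible with the network's (partially known) input shapes.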
void TrtShapeOptimizationProfile::InitProfiles(
const std::vector<PartialTensorShape>& input_partial_shapes,
ProfileStrategy strategy) {
strategy_ = strategy;
if (input_shapes_.size() == 0) {
VLOG(1) << "Not creating profiles without input_shapes. "
"You have to enable profile generation mode first (build).";
return;
}
std::vector<std::vector<nvinfer1::Dims>> collected_shapes;
for (int i = 0; i < input_shapes_.size(); i++) {
auto shape_vec = input_shapes_[i];
VLOG(2) << "Initprofiles, processing shape " << i;
if (!shape_vec.empty()) {
for (int k = 0; k < input_shape_values_[i].size(); k++) {
if (!is_shape_tensor_[k])
input_shape_values_[i][k] = nvinfer1::Dims{0, {}};
}
std::vector<nvinfer1::Dims> dimvec = GetDimVec(shape_vec);
dimvec.insert(dimvec.end(), input_shape_values_[i].begin(),
input_shape_values_[i].end());
if (!AlreadyCollected(collected_shapes, dimvec)) {
collected_shapes.push_back(dimvec);
}
}
}
switch (strategy_) {
case ProfileStrategy::kImplicitBatchModeCompatible:
VLOG(1) << "Creating profiles with ImplicitBatchModeCompatible strategy";
ImplicitBatchModeCompatibleStrategy(collected_shapes);
break;
case ProfileStrategy::kRange:
VLOG(1) << "Creating profiles with Range strategy";
TF_CHECK_OK(RangeStrategy(collected_shapes));
break;
case ProfileStrategy::kRangeOptimal:
VLOG(1) << "Creating profiles with RangeOptimal strategy";
OptimalStrategy(collected_shapes);
TF_CHECK_OK(RangeStrategy(collected_shapes));
break;
case ProfileStrategy::kOptimal:
VLOG(1) << "Creating profiles with Optimal strategy";
OptimalStrategy(collected_shapes);
break;
}
SetShapeTensorMask(input_partial_shapes);
if (input_partial_shapes.size() > 0) {
for (OptimizationProfileConfig& prof : profiles_) {
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
FixShapeValueProfile(&prof, is_shape_tensor_);
#endif
for (int i = 0; i < input_partial_shapes.size(); i++) {
auto network_input = input_partial_shapes[i];
EnforceCompatibility(&prof.min[i], network_input);
EnforceCompatibility(&prof.opt[i], network_input);
EnforceCompatibility(&prof.max[i], network_input);
}
}
}
}
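// Creates a single min == opt == max calibration profile from the given
// shapes plus the currently collected shape-tensor values.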
void TrtShapeOptimizationProfile::InitCalibProfile(
const std::vector<TensorShape>& shapes) {
VLOG(1) << "Collected shape(s) " << DebugString(shapes) << " for "
<< " calibration profile.";
auto shape_vec = shapes;
if (!shape_vec.empty()) {
std::vector<nvinfer1::Dims> dimvec = GetDimVec(shape_vec);
dimvec.insert(dimvec.end(), actual_shape_values_.begin(),
actual_shape_values_.end());
VLOG(2) << "Initializing calibration optimization profile config with "
<< "min=opt=max " << DebugString(dimvec);
OptimizationProfileConfig profConfig{dimvec, dimvec, dimvec};
calib_profiles_ = std::move(profConfig);
} else {
VLOG(2) << "Failed to initialize calibration optimization profile.";
}
}
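// Registers the calibration profile (if one was initialized) and every
// optimization profile with the builder config, checking that the index
// TensorRT assigns matches the local profile index. It also refreshes the
// shape-tensor mask from the network definition.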
Status TrtShapeOptimizationProfile::AddProfiles(
nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config,
const nvinfer1::INetworkDefinition* network) {
if (!calib_profiles_.min.empty()) {
VLOG(2) << "Setting up calibration profiles";
auto* calibProfile = builder->createOptimizationProfile();
Status status =
calib_profiles_.SetDimensions(network, calibProfile, input_mask_);
if (!status.ok()) {
return status;
}
bool result = false;
if (calibProfile->isValid()) {
result = config->setCalibrationProfile(calibProfile);
} else {
VLOG(2) << "Calibration profile is not valid";
}
if (result) {
VLOG(2) << "Added calibration optimization profile "
<< calib_profiles_.DebugString() << " to builder config.";
} else {
VLOG(2) << "FAILED TO ADD PROFILE";
LOG(ERROR) << "Failed to add calibration optimization profile "
<< calib_profiles_.DebugString()
<< ". This usually happens when profile is invalid.";
}
}
for (int i = 0; i < profiles_.size(); i++) {
auto* optProfile = builder->createOptimizationProfile();
Status status =
profiles_[i].SetDimensions(network, optProfile, input_mask_);
if (!status.ok()) {
return status;
}
int idx = -1;
if (optProfile->isValid()) {
idx = config->addOptimizationProfile(optProfile);
}
if (idx >= 0) {
if (i != idx) {
return errors::Internal(
"Profile index of engine config is different from source profile "
"index: ",
i, " != ", idx);
}
VLOG(1) << "Added optimization profile " << profiles_[i].DebugString()
<< " with idx " << idx << " to builder config.";
} else {
LOG(ERROR) << "Failed to add optimization profile "
<< profiles_[i].DebugString()
<< ". This usually happens when profile is invalid.";
}
}
if (!profiles_.empty() && config->getNbOptimizationProfiles() == 0) {
return errors::Internal("Failure in adding an optimization profile.");
}
need_profiles_ = config->getNbOptimizationProfiles() > 0;
SetShapeTensorMask(network);
is_pruned_input_.resize(network->getNbInputs());
absl::c_fill(is_pruned_input_, false);
return OkStatus();
}
Status TrtShapeOptimizationProfile::ConfigureBuilder(
nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config,
const nvinfer1::INetworkDefinition* network) {
TF_RETURN_IF_ERROR(AddProfiles(builder, config, network));
return OkStatus();
}
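// Queries the engine's binding metadata to record which inputs are shape
// bindings, i.e. tensors whose values (not just their dimensions) feed shape
// calculations.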
void TrtShapeOptimizationProfile::SetShapeTensorMask(
const nvinfer1::ICudaEngine* engine, int n_inputs) {
is_shape_tensor_.resize(n_inputs, false);
for (int i = 0; i < n_inputs; i++) {
int binding_index;
Status status = GetTrtBindingIndex(i, 0, engine, &binding_index);
if (!status.ok()) {
continue;
}
is_shape_tensor_[i] = engine->isShapeBinding(binding_index);
if (is_shape_tensor_[i]) {
VLOG(2) << "Found | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <string.h>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
std::vector<TensorShape> DimVecToShapeVec(
std::vector<nvinfer1::Dims3> dimvec,
bool expand_with_empty_shape_values = false) {
std::vector<TensorShape> shapevec(dimvec.size());
for (int i = 0; i < dimvec.size(); i++) {
TensorShape shape;
TF_CHECK_OK(
TensorShapeUtils::MakeShape(dimvec[i].d, dimvec[i].nbDims, &shape));
shapevec[i] = shape;
}
if (expand_with_empty_shape_values) {
shapevec.resize(2 * dimvec.size());
}
return shapevec;
}
bool DimsContained(const nvinfer1::Dims& dim, const nvinfer1::Dims& min,
const nvinfer1::Dims& max) {
if (dim.nbDims != min.nbDims || dim.nbDims != max.nbDims) {
return false;
}
for (int i = 0; i < dim.nbDims; i++) {
if (dim.d[i] < min.d[i] || dim.d[i] > max.d[i]) {
return false;
}
}
return true;
}
bool DimsEqual(const nvinfer1::Dims& a, const nvinfer1::Dims& b) {
if (a.nbDims != b.nbDims) {
return false;
}
for (int i = 0; i < a.nbDims; i++) {
if (a.d[i] != b.d[i]) {
return false;
}
}
return true;
}
class TrtShapeOptimizationProfileTest
: public ::testing::TestWithParam<ProfileStrategy> {
protected:
TrtShapeOptimizationProfileTest() {
strategy_ = GetParam();
builder_ = TrtUniquePtrType<nvinfer1::IBuilder>(
nvinfer1::createInferBuilder(logger_));
network_ = TrtUniquePtrType<nvinfer1::INetworkDefinition>(
builder_->createNetworkV2(flags_));
builder_config_ = TrtUniquePtrType<nvinfer1::IBuilderConfig>(
builder_->createBuilderConfig());
builder_config_->setMaxWorkspaceSize(1 << 10);
}
void DefineNetwork(nvinfer1::INetworkDefinition* network,
nvinfer1::Dims3& dims) {
ITensorProxyPtr input1 =
network->addInput("input1", nvinfer1::DataType::kFLOAT, dims);
EXPECT_NE(nullptr, input1->trt_tensor());
ITensorProxyPtr input2 =
network->addInput("input2", nvinfer1::DataType::kFLOAT, dims);
EXPECT_NE(nullptr, input2->trt_tensor());
auto layer =
network->addElementWise(*input1->trt_tensor(), *input2->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
EXPECT_NE(nullptr, layer);
ITensorProxyPtr output = layer->getOutput(0);
output->setName("output");
network->markOutput(*output->trt_tensor());
}
void CheckProfile(const std::vector<nvinfer1::Dims3>& dimvec,
TrtShapeOptimizationProfile* profile, bool has_prof,
bool test_optimality) {
std::vector<TensorShape> shape_vec = DimVecToShapeVec(dimvec);
int idx = profile->GetProfileNumber(shape_vec);
ASSERT_EQ(idx >= 0, has_prof);
if (idx < 0) return;
int prof_idx = exec_contexts_[idx]->getOptimizationProfile();
ASSERT_GE(prof_idx, 0);
for (int j = 0; j < dimvec.size(); j++) {
nvinfer1::Dims min = engine->getProfileDimensions(
j, prof_idx, nvinfer1::OptProfileSelector::kMIN);
nvinfer1::Dims max = engine->getProfileDimensions(
j, prof_idx, nvinfer1::OptProfileSelector::kMAX);
nvinfer1::Dims opt = engine->getProfileDimensions(
j, prof_idx, nvinfer1::OptProfileSelector::kOPT);
EXPECT_TRUE(DimsContained(dimvec[j], min, max));
if (test_optimality) {
EXPECT_TRUE(DimsEqual(dimvec[j], opt));
}
}
}
Logger& logger_ = *Logger::GetLogger();
TrtUniquePtrType<nvinfer1::IBuilder> builder_;
TrtUniquePtrType<nvinfer1::INetworkDefinition> network_;
TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine;
std::vector<ExecutionContext> exec_contexts_;
const uint32_t flags_ =
1U << static_cast<int>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
ProfileStrategy strategy_;
};
INSTANTIATE_TEST_CASE_P(
OptProfilesTestInstantiation, TrtShapeOptimizationProfileTest,
::testing::Values(ProfileStrategy::kRange, ProfileStrategy::kOptimal,
ProfileStrategy::kRangeOptimal,
ProfileStrategy::kImplicitBatchModeCompatible));
TEST_P(TrtShapeOptimizationProfileTest, Static) {
if (strategy_ != ProfileStrategy::kRange) return;
nvinfer1::Dims3 dims(8, 8, 10);
DefineNetwork(network_.get(), dims);
TrtShapeOptimizationProfile profile;
TF_CHECK_OK(profile.ConfigureBuilder(builder_.get(), builder_config_.get(),
network_.get()));
engine = TrtUniquePtrType<nvinfer1::ICudaEngine>(
builder_->buildEngineWithConfig(*network_, *builder_config_));
EXPECT_NE(nullptr, engine);
TF_CHECK_OK(profile.CreateExecutionContexts(engine.get(), &exec_contexts_));
ASSERT_EQ(exec_contexts_.size(), 1);
EXPECT_NE(nullptr, exec_contexts_[0]);
std::vector<nvinfer1::Dims3> dim_vec(2, dims);
std::vector<TensorShape> shape_vec = DimVecToShapeVec(dim_vec);
EXPECT_EQ(0, profile.GetProfileNumber(shape_vec));
}
TEST_P(TrtShapeOptimizationProfileTest, Dynamic) {
nvinfer1::Dims3 dims(-1, -1, 10);
DefineNetwork(network_.get(), dims);
TrtShapeOptimizationProfile profile;
std::vector<bool> input_mask(2, true);
profile.SetInputMask(input_mask);
std::vector<std::vector<nvinfer1::Dims3>> input_profiles{
{nvinfer1::Dims3(2, 2, 10), nvinfer1::Dims3(2, 2, 10)},
{nvinfer1::Dims3(3, 3, 10), nvinfer1::Dims3(3, 3, 10)},
{nvinfer1::Dims3(16, 16, 10), nvinfer1::Dims3(16, 16, 10)},
};
std::vector<nvinfer1::Dims3> unseen_shapes{nvinfer1::Dims3(5, 5, 10),
nvinfer1::Dims3(9, 9, 10)};
for (auto dim_vec : input_profiles) {
std::vector<TensorShape> shape_vec = DimVecToShapeVec(dim_vec, true);
profile.AddShape(shape_vec);
}
std::vector<PartialTensorShape> input_partial_shapes;
TF_CHECK_OK(GetNetworkInputShapes(network_.get(), &input_partial_shapes));
profile.InitProfiles(input_partial_shapes, strategy_);
TF_CHECK_OK(profile.ConfigureBuilder(builder_.get(), builder_config_.get(),
network_.get()));
engine = TrtUniquePtrType<nvinfer1::ICudaEngine>(
builder_->buildEngineWithConfig(*network_.get(), *builder_config_.get()));
ASSERT_NE(nullptr, engine);
TF_CHECK_OK(profile.CreateExecutionContexts(engine.get(), &exec_contexts_));
int n_profiles_exp;
switch (strategy_) {
case (ProfileStrategy::kImplicitBatchModeCompatible):
case (ProfileStrategy::kOptimal):
n_profiles_exp = input_profiles.size();
break;
case (ProfileStrategy::kRange):
n_profiles_exp = 1;
break;
case (ProfileStrategy::kRangeOptimal):
n_profiles_exp = 1 + input_profiles.size();
break;
}
EXPECT_EQ(exec_contexts_.size(), n_profiles_exp);
profile.SetShapeTensorMask(network_.get());
EXPECT_EQ(profile.HasShapeTensor(), false);
for (auto dimvec : input_profiles) {
bool test_optimal_prof = strategy_ == ProfileStrategy::kOptimal ||
strategy_ == ProfileStrategy::kRangeOptimal;
CheckProfile(dimvec, &profile, true, test_optimal_prof);
}
bool has_prof = (strategy_ == ProfileStrategy::kRange ||
strategy_ == ProfileStrategy::kRangeOptimal);
CheckProfile(unseen_shapes, &profile, has_prof, false);
}
}
}
#endif |
1,150 | cpp | tensorflow/tensorflow | logger_registry | tensorflow/compiler/tf2tensorrt/convert/logger_registry.cc | tensorflow/compiler/tf2tensorrt/convert/logger_registry_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_LOGGER_REGISTRY_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_LOGGER_REGISTRY_H_
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
class LoggerRegistry {
public:
virtual Status Register(const string& name, nvinfer1::ILogger* logger) = 0;
virtual nvinfer1::ILogger* LookUp(const string& name) = 0;
virtual ~LoggerRegistry() {}
};
LoggerRegistry* GetLoggerRegistry();
class RegisterLogger {
public:
RegisterLogger(const string& name, nvinfer1::ILogger* logger) {
TF_CHECK_OK(GetLoggerRegistry()->Register(name, logger));
}
};
#define REGISTER_TENSORRT_LOGGER(name, logger) \
REGISTER_TENSORRT_LOGGER_UNIQ_HELPER(__COUNTER__, name, logger)
#define REGISTER_TENSORRT_LOGGER_UNIQ_HELPER(ctr, name, logger) \
REGISTER_TENSORRT_LOGGER_UNIQ(ctr, name, logger)
#define REGISTER_TENSORRT_LOGGER_UNIQ(ctr, name, logger) \
static ::tensorflow::tensorrt::RegisterLogger register_trt_logger##ctr \
TF_ATTRIBUTE_UNUSED = \
::tensorflow::tensorrt::RegisterLogger(name, logger)
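// Usage sketch (names below are illustrative, not part of this header):
//   static MyTrtLogger my_logger;  // some nvinfer1::ILogger subclass
//   REGISTER_TENSORRT_LOGGER("my_logger", &my_logger);
// registers the logger during static initialization; it can later be fetched
// with GetLoggerRegistry()->LookUp("my_logger").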
}
}
#endif
#endif
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include <unordered_map>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace tensorrt {
class LoggerRegistryImpl : public LoggerRegistry {
Status Register(const string& name, nvinfer1::ILogger* logger) override {
mutex_lock lock(mu_);
if (!registry_.emplace(name, std::unique_ptr<nvinfer1::ILogger>(logger))
.second) {
return errors::AlreadyExists("Logger ", name, " already registered");
}
return OkStatus();
}
nvinfer1::ILogger* LookUp(const string& name) override {
mutex_lock lock(mu_);
const auto found = registry_.find(name);
if (found == registry_.end()) {
return nullptr;
}
return found->second.get();
}
private:
mutable mutex mu_;
mutable std::unordered_map<string, std::unique_ptr<nvinfer1::ILogger>>
registry_ TF_GUARDED_BY(mu_);
};
LoggerRegistry* GetLoggerRegistry() {
static LoggerRegistryImpl* registry = new LoggerRegistryImpl;
return registry;
}
}
}
#endif | #include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
class TestLogger : public nvinfer1::ILogger {
void log(nvinfer1::ILogger::Severity severity, const char* msg) override {}
};
TestLogger test_logger;
REGISTER_TENSORRT_LOGGER("test_logger", &test_logger);
TEST(LoggerRegistryTest, RegistersCorrectly) {
auto registered_logger = GetLoggerRegistry()->LookUp("test_logger");
EXPECT_THAT(registered_logger, Eq(&test_logger));
}
} |
1,151 | cpp | tensorflow/tensorflow | op_converter_registry | tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.cc | tensorflow/compiler/tf2tensorrt/convert/op_converter_registry_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OP_CONVERTER_REGISTRY_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OP_CONVERTER_REGISTRY_H_
#include <initializer_list>
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <array>
#include <type_traits>
#include <vector>
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class OpConverterRegistry {
public:
OpConverterRegistry();
~OpConverterRegistry() = default;
InitOnStartupMarker Register(const string& name, const int priority,
OpConverter converter);
InitOnStartupMarker Register(const std::initializer_list<std::string>& names,
const int priority, OpConverter converter) {
for (const auto& name : names) {
Register(name, priority, converter);
}
return {};
}
template <typename T,
typename std::enable_if<std::is_convertible<
typename T::value_type, std::string>::value>::type* = nullptr>
InitOnStartupMarker Register(const T& names, const int priority,
OpConverter converter) {
for (const auto& name : names) {
Register(name, priority, converter);
}
return {};
}
void Clear(const std::string& name);
StatusOr<OpConverter> LookUp(const string& name);
std::vector<std::string> ListRegisteredOps() const;
private:
class Impl;
std::unique_ptr<Impl> impl_;
};
OpConverterRegistry* GetOpConverterRegistry();
class RegisterOpConverter {
public:
RegisterOpConverter(const string& name, const int priority,
OpConverter converter) {
GetOpConverterRegistry()->Register(name, priority, converter);
}
};
constexpr int kDefaultConverterPriority = 1;
}
}
#define REGISTER_TRT_OP_CONVERTER_IMPL(ctr, func, priority, ...) \
static ::tensorflow::InitOnStartupMarker const \
register_trt_op_converter##ctr TF_ATTRIBUTE_UNUSED = \
TF_INIT_ON_STARTUP_IF(true) \
<< tensorrt::convert::GetOpConverterRegistry()->Register( \
__VA_ARGS__, priority, func)
#define REGISTER_TRT_OP_CONVERTER(func, priority, ...) \
TF_NEW_ID_FOR_INIT(REGISTER_TRT_OP_CONVERTER_IMPL, func, priority, \
__VA_ARGS__)
#define REGISTER_DEFAULT_TRT_OP_CONVERTER(func, ...) \
REGISTER_TRT_OP_CONVERTER( \
func, tensorrt::convert::kDefaultConverterPriority, __VA_ARGS__)
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include <set>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/env_var.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace convert {
struct OpConverterRegistration {
OpConverter converter;
int priority;
};
class OpConverterRegistry::Impl {
public:
~Impl() = default;
InitOnStartupMarker Register(const string& name, const int priority,
OpConverter converter) {
mutex_lock lock(mu_);
auto item = registry_.find(name);
if (item != registry_.end()) {
const int existing_priority = item->second.priority;
if (priority <= existing_priority) {
LOG(WARNING) << absl::StrCat(
"Ignoring TF->TRT ", name, " op converter with priority ",
existing_priority, " due to another converter with priority ",
priority);
return {};
} else {
LOG(WARNING) << absl::StrCat(
"Overwriting TF->TRT ", name, " op converter with priority ",
existing_priority, " using another converter with priority ",
priority);
registry_.erase(item);
}
}
registry_.insert({name, OpConverterRegistration{converter, priority}});
return {};
}
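  // LookUp first consults the TF_TRT_OP_FAKELIST environment variable (a
  // comma-separated list of op names); ops listed there are redirected to the
  // registered "FakeOp" converter, which deliberately makes engine building
  // fail and is only intended for graph-segmentation experiments. Otherwise
  // the converter registered for `name` is returned, or NotFound if none.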
StatusOr<OpConverter> LookUp(string name) {
static const absl::flat_hash_set<string> tftrt_op_fakelist = [] {
string tftrt_op_fakelist_str;
TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_OP_FAKELIST",
"",
&tftrt_op_fakelist_str));
absl::flat_hash_set<string> tftrt_op_fakelist{};
for (const auto& x : str_util::Split(tftrt_op_fakelist_str, ",")) {
tftrt_op_fakelist.insert(x);
}
tftrt_op_fakelist.rehash(0);
return tftrt_op_fakelist;
}();
if (tftrt_op_fakelist.contains(name)) {
LOG_FIRST_N(INFO, 2) << "Emulating OP Converter: `" << name << "`. It "
<< "will cause TRT engine building to fail. This "
<< "feature is only intended to be used for "
<< "TF-TRT graph segmentation experiments. This "
<< "feature is controlled using: "
<< "`TF_TRT_OP_FAKELIST=OpName1,OpName2`.";
mutex_lock lock(mu_);
return registry_.find("FakeOp")->second.converter;
}
mutex_lock lock(mu_);
auto found = registry_.find(name);
if (found != registry_.end()) {
return found->second.converter;
}
return errors::NotFound("No converter for op ", name);
}
void Clear(const std::string& name) {
mutex_lock lock(mu_);
auto itr = registry_.find(name);
if (itr == registry_.end()) {
return;
}
registry_.erase(itr);
}
std::vector<std::string> ListRegisteredOps() const {
mutex_lock lock(mu_);
std::vector<std::string> result;
result.reserve(registry_.size());
for (const auto& item : registry_) {
result.push_back(item.first);
}
return result;
}
private:
mutable mutex mu_;
mutable std::unordered_map<std::string, OpConverterRegistration> registry_
TF_GUARDED_BY(mu_);
};
OpConverterRegistry::OpConverterRegistry() : impl_(std::make_unique<Impl>()) {}
StatusOr<OpConverter> OpConverterRegistry::LookUp(const string& name) {
return impl_->LookUp(name);
}
InitOnStartupMarker OpConverterRegistry::Register(const string& name,
const int priority,
OpConverter converter) {
return impl_->Register(name, priority, converter);
}
std::vector<std::string> OpConverterRegistry::ListRegisteredOps() const {
return impl_->ListRegisteredOps();
}
void OpConverterRegistry::Clear(const std::string& name) { impl_->Clear(name); }
OpConverterRegistry* GetOpConverterRegistry() {
static OpConverterRegistry* registry = new OpConverterRegistry();
return registry;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
TEST(TestOpConverterRegistry, TestOpConverterRegistry) {
bool flag{false};
auto set_true_func = [&flag](const OpConverterParams*) -> Status {
flag = true;
return OkStatus();
};
auto set_false_func = [&flag](const OpConverterParams*) -> Status {
flag = false;
return OkStatus();
};
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority,
set_true_func);
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority - 1,
set_false_func);
auto func = GetOpConverterRegistry()->LookUp("FakeFunc");
EXPECT_TRUE(func.ok());
EXPECT_TRUE(((*func)(nullptr)).ok());
EXPECT_TRUE(flag);
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority + 1,
set_false_func);
func = GetOpConverterRegistry()->LookUp("FakeFunc");
EXPECT_TRUE(func.ok());
EXPECT_TRUE((*func)(nullptr).ok());
EXPECT_FALSE(flag);
GetOpConverterRegistry()->Clear("FakeFunc");
EXPECT_FALSE(GetOpConverterRegistry()->LookUp("FakeFunc").ok());
}
}
}
}
#endif |
1,152 | cpp | tensorflow/tensorflow | algorithm_selector | tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.cc | tensorflow/compiler/tf2tensorrt/convert/algorithm_selector_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_ALGORITHM_SELECTOR_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_ALGORITHM_SELECTOR_H_
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <array>
#include <memory>
#include <set>
#include "absl/types/optional.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class AlgorithmSelectorImpl {
public:
using TRTVersion = std::array<int, 4>;
using ImplementationID = int64_t;
using TacticID = int64_t;
static constexpr TRTVersion CompileTimeTRTVersion() {
return TRTVersion{NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH,
NV_TENSORRT_BUILD};
}
explicit AlgorithmSelectorImpl(
const TRTVersion& version = CompileTimeTRTVersion())
: version_(version) {}
bool IsShuffleLayer(ImplementationID id) const;
bool IsBannedTactic(TacticID id) const;
bool AllowShuffleAlgorithm(TacticID tactic, nvinfer1::DataType input_dtype,
nvinfer1::TensorFormat input_format) const;
bool IsTrtVersionGE(const TRTVersion& version) const;
bool IsAlgorithmSelectorRequired() const;
static std::set<TacticID> GetBannedTRT72TuringTactics();
private:
TRTVersion version_;
};
class TftrtAlgorithmSelector : public nvinfer1::IAlgorithmSelector {
private:
using TacticID = AlgorithmSelectorImpl::TacticID;
std::optional<int32_t> fixed_algorithm_idx_;
AlgorithmSelectorImpl selector_;
public:
TftrtAlgorithmSelector();
static std::optional<int64_t> GetFixedAlgorithmID();
bool AlgorithmPolicy(const nvinfer1::IAlgorithmContext& context,
const nvinfer1::IAlgorithm& alg) const;
int32_t selectAlgorithms(const nvinfer1::IAlgorithmContext& algoContext,
const nvinfer1::IAlgorithm* const* algoChoices,
int32_t nbChoices,
int32_t* selection) noexcept override;
void reportAlgorithms(const nvinfer1::IAlgorithmContext* const* algoContexts,
const nvinfer1::IAlgorithm* const* algoChoices,
int32_t nbAlgorithms) noexcept override;
bool IsRequired() const {
return selector_.IsAlgorithmSelectorRequired() ||
fixed_algorithm_idx_ != std::nullopt;
}
};
std::unique_ptr<TftrtAlgorithmSelector> MaybeCreateAlgorithmSelector();
}
}
}
#endif
#endif
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include <utility>
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/core/util/env_var.h"
#include "third_party/tensorrt/NvInfer.h"
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
#define ALGORITHM_IO_INFO_BY_IDX(alg, idx) *(alg).getAlgorithmIOInfoByIndex(idx)
#else
#define ALGORITHM_IO_INFO_BY_IDX(alg, idx) (alg).getAlgorithmIOInfo(idx)
#endif
namespace nvinfer1 {
std::ostream& operator<<(std::ostream& os,
const nvinfer1::IAlgorithmContext& ctx) {
os << "AlgorithmContext(name=" << ctx.getName()
<< ",nbInputs=" << ctx.getNbInputs() << ",nbOutputs=" << ctx.getNbOutputs()
<< ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const nvinfer1::IAlgorithm& alg) {
const nvinfer1::IAlgorithmVariant& variant = alg.getAlgorithmVariant();
os << "Algorithm(" << "variant.implementation=" << variant.getImplementation()
<< ",variant.tactic=" << variant.getTactic()
<< ",timingMSec=" << alg.getTimingMSec()
<< ",workspaceSize=" << alg.getWorkspaceSize() << ")";
return os;
}
std::ostream& operator<<(std::ostream& os,
const nvinfer1::IAlgorithmIOInfo& info) {
os << "IOTensor(format=" << info.getTensorFormat()
<< ",dtype=" << info.getDataType() << ",strides=" << info.getStrides()
<< ")";
return os;
}
}
namespace tensorflow {
namespace tensorrt {
namespace convert {
bool operator>=(const AlgorithmSelectorImpl::TRTVersion& lhs,
const AlgorithmSelectorImpl::TRTVersion& rhs) {
if (lhs[0] > rhs[0]) return true;
if (lhs[0] == rhs[0] && lhs[1] > rhs[1]) return true;
if (lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] > rhs[2]) return true;
if (lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] == rhs[2] &&
lhs[3] >= rhs[3]) {
return true;
}
return false;
}
bool AlgorithmSelectorImpl::IsTrtVersionGE(const TRTVersion& version) const {
return version_ >= version;
}
bool AlgorithmSelectorImpl::IsShuffleLayer(ImplementationID id) const {
if (IsTrtVersionGE({8, 2, 0, 0})) {
return id == 0x80000000 + 13;
}
if (IsTrtVersionGE({8, 0, 0, 0})) {
return id == 0x80000000 + 14;
}
if (IsTrtVersionGE({7, 2, 0, 0})) {
return id == 0x80000000 + 16;
}
return id == 18;
}
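// Tactic IDs rejected on TensorRT 7.2 (the Turing-specific ban list);
// IsBannedTactic below applies it only when 7.2 <= TRT version < 8.0.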
std::set<AlgorithmSelectorImpl::TacticID>
AlgorithmSelectorImpl::GetBannedTRT72TuringTactics() {
static const std::set<TacticID> banned_turing_72{
-5927686925093575778,
-3848538574386518527,
-959009792490796596};
return banned_turing_72;
}
bool AlgorithmSelectorImpl::IsBannedTactic(TacticID id) const {
if (IsTrtVersionGE({7, 2, 0, 0}) && !IsTrtVersionGE({8, 0, 0, 0})) {
auto banned_turing_72 = GetBannedTRT72TuringTactics();
return banned_turing_72.find(id) != banned_turing_72.end();
}
return false;
}
bool AlgorithmSelectorImpl::AllowShuffleAlgorithm(
TacticID tactic, nvinfer1::DataType input_dtype,
nvinfer1::TensorFormat input_format) const {
if (IsTrtVersionGE({8, 0, 0, 0}) && !IsTrtVersionGE({8, 0, 3, 0})) {
return !(input_format == nvinfer1::TensorFormat::kLINEAR &&
input_dtype == nvinfer1::DataType::kINT8);
}
if (IsTrtVersionGE({7, 2, 0, 0}) && !IsTrtVersionGE({8, 0, 0, 0})) {
return !(input_format == nvinfer1::TensorFormat::kCHW32 &&
input_dtype == nvinfer1::DataType::kFLOAT);
}
return true;
}
bool AlgorithmSelectorImpl::IsAlgorithmSelectorRequired() const {
if (IsTrtVersionGE({7, 2, 0, 0}) && !IsTrtVersionGE({8, 0, 0, 0})) {
return true;
}
if (IsTrtVersionGE({8, 0, 0, 0}) && !IsTrtVersionGE({8, 0, 3, 0})) {
return true;
}
return false;
}
namespace {
string FormatAlgorithmList(const nvinfer1::IAlgorithmContext& ctx,
absl::Span<const nvinfer1::IAlgorithm* const> algs) {
return absl::StrFormat(
"%s:\n\t%s", absl::FormatStreamed(ctx),
absl::StrJoin(
algs, "\n\t",
[&ctx](std::string* out, const nvinfer1::IAlgorithm* const alg) {
absl::StrAppendFormat(out, "%s", absl::FormatStreamed(*alg));
for (int i = 0; i < ctx.getNbInputs() + ctx.getNbOutputs(); i++) {
absl::StrAppendFormat(
out, "\n\t\t%s",
absl::FormatStreamed(ALGORITHM_IO_INFO_BY_IDX(*alg, i)));
}
}));
}
}
TftrtAlgorithmSelector::TftrtAlgorithmSelector()
: fixed_algorithm_idx_(GetFixedAlgorithmID()),
selector_(AlgorithmSelectorImpl::CompileTimeTRTVersion()) {}
std::optional<int64_t> TftrtAlgorithmSelector::GetFixedAlgorithmID() {
int64_t trt_algorithm_idx = 0;
constexpr auto null_idx =
std::numeric_limits<decltype(trt_algorithm_idx)>::min();
Status status = tensorflow::ReadInt64FromEnvVar("TF_TRT_FIXED_ALGORITHM_ID",
null_idx,
&trt_algorithm_idx);
if (!status.ok()) {
LOG(ERROR) << status;
return std::nullopt;
}
if (trt_algorithm_idx != null_idx) {
return std::max(static_cast<int32_t>(trt_algorithm_idx), 0);
}
return std::nullopt;
}
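// Rejects banned tactics; for shuffle-layer implementations it additionally
// checks whether the algorithm's first IO dtype/format combination is allowed
// for the active TensorRT version. All other algorithms are accepted.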
bool TftrtAlgorithmSelector::AlgorithmPolicy(
const nvinfer1::IAlgorithmContext& context,
const nvinfer1::IAlgorithm& alg) const {
const nvinfer1::IAlgorithmVariant& variant = alg.getAlgorithmVariant();
TacticID tactic_id = variant.getTactic();
if (selector_.IsBannedTactic(tactic_id)) {
return false;
}
if (selector_.IsShuffleLayer(variant.getImplementation())) {
return selector_.AllowShuffleAlgorithm(
tactic_id, alg.getAlgorithmIOInfo(0).getDataType(),
alg.getAlgorithmIOInfo(0).getTensorFormat());
}
return true;
}
int32_t TftrtAlgorithmSelector::selectAlgorithms(
const nvinfer1::IAlgorithmContext& algoContext,
const nvinfer1::IAlgorithm* const* algoChoices, int32_t nbChoices,
int32_t* selection) noexcept {
if (fixed_algorithm_idx_) {
LOG(WARNING) << "Forcing TRT algorithm selection to: ID = "
<< *fixed_algorithm_idx_;
selection[0] = std::min(*fixed_algorithm_idx_, nbChoices - 1);
return 1;
}
int num_selections = 0;
VLOG(1) << "Algorithm selection choices: "
<< FormatAlgorithmList(algoContext,
absl::MakeSpan(algoChoices, nbChoices));
for (int i = 0; i < nbChoices; i++) {
const nvinfer1::IAlgorithm& alg = *algoChoices[i];
if (!AlgorithmPolicy(algoContext, alg)) {
LOG(WARNING) << absl::StrFormat("Rejecting Algorithm: %s ",
absl::FormatStreamed(alg));
continue;
}
selection[num_selections++] = i;
}
return num_selections;
}
void TftrtAlgorithmSelector::reportAlgorithms(
const nvinfer1::IAlgorithmContext* const* algoContexts,
const nvinfer1::IAlgorithm* const* algoChoices,
int32_t nbAlgorithms) noexcept {
if (VLOG_IS_ON(1)) {
string selection_msg = "Algorithms selected:\n";
for (int i = 0; i < nbAlgorithms; i++) {
absl::StrAppend(&selection_msg,
FormatAlgorithmList(*algoContexts[i],
absl::MakeSpan(algoChoices + i, 1)));
}
VLOG(1) << selection_msg;
}
}
std::unique_ptr<TftrtAlgorithmSelector> MaybeCreateAlgorithmSelector() {
auto selector = std::make_unique<TftrtAlgorithmSelector>();
if (selector->IsRequired()) {
return selector;
}
return nullptr;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include <memory>
#include <gtest/gtest.h>
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
TEST(TestAlgorithmSelector, TensorRT7_1) {
AlgorithmSelectorImpl sel71({7, 1, 3, 4});
ASSERT_FALSE(sel71.IsAlgorithmSelectorRequired());
}
TEST(TestAlgorithmSelector, TensorRT7_2) {
AlgorithmSelectorImpl sel72({7, 2, 0, 0});
ASSERT_TRUE(sel72.IsAlgorithmSelectorRequired());
auto turing_tactics = AlgorithmSelectorImpl::GetBannedTRT72TuringTactics();
for (auto id : turing_tactics) {
EXPECT_TRUE(sel72.IsBannedTactic(id));
}
EXPECT_FALSE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kFLOAT,
nvinfer1::TensorFormat::kCHW32));
EXPECT_TRUE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kHALF,
nvinfer1::TensorFormat::kCHW32));
EXPECT_TRUE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT32,
nvinfer1::TensorFormat::kCHW32));
EXPECT_TRUE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kFLOAT,
nvinfer1::TensorFormat::kCHW16));
}
TEST(TestAlgorithmSelector, TensorRT8_0) {
AlgorithmSelectorImpl sel80({8, 0, 1, 6});
ASSERT_TRUE(sel80.IsAlgorithmSelectorRequired());
auto turing_tactics = AlgorithmSelectorImpl::GetBannedTRT72TuringTactics();
for (auto id : turing_tactics) {
EXPECT_FALSE(sel80.IsBannedTactic(id));
}
EXPECT_FALSE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT8,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kHALF,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT32,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kFLOAT,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT8,
nvinfer1::TensorFormat::kCHW16));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT8,
nvinfer1::TensorFormat::kCHW32));
}
TEST(TestAlgorithmSelector, TensorRT8_2) {
AlgorithmSelectorImpl sel({8, 2, 0, 0});
ASSERT_FALSE(sel.IsAlgorithmSelectorRequired());
}
}
}
}
#endif |
1,153 | cpp | tensorflow/tensorflow | convert_nodes | tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc | tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_NODES_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_NODES_H_
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/weights.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_int8_calibrator.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_tensor_proxy.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/lib/core/status.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::tsl::StatusOr;
struct EngineConnection {
EngineConnection(const string& outside, int out_id, int out_port,
const string& inside, int in_id, int in_port,
bool input_edge, int port)
: outside_node_name(outside),
outside_id(out_id),
outside_port(out_port),
inside_node_name(inside),
inside_id(in_id),
inside_port(in_port),
is_input_edge(input_edge),
port_number(port) {}
EngineConnection(const string& outside, int out_id, const string& inside,
int in_id, bool input_edge)
: outside_node_name(outside),
outside_id(out_id),
outside_port(Graph::kControlSlot),
inside_node_name(inside),
inside_id(in_id),
inside_port(Graph::kControlSlot),
is_input_edge(input_edge),
port_number(Graph::kControlSlot) {}
bool is_control_edge() const { return port_number == Graph::kControlSlot; }
const string outside_node_name;
const int outside_id;
const int outside_port;
PartialTensorShape outside_shape;
const string inside_node_name;
const int inside_id;
const int inside_port;
PartialTensorShape inside_shape;
DataType connection_type;
const bool is_input_edge;
const int port_number;
};
struct EngineInfo {
EngineInfo()
: engine_type(EngineType::TRTStatic),
max_workspace_size_bytes(0),
max_batch_size(std::nullopt),
maximum_cached_engines(0),
precision_mode(TrtPrecisionMode::FP32),
use_calibration(true),
allow_build_at_runtime(true),
use_explicit_precision(false) {}
string engine_name;
string device;
GraphDef segment_graph_def;
std::vector<EngineConnection> connections;
enum class EngineType { TRTStatic = 0, TRTDynamic = 1 };
EngineType engine_type;
int64 max_workspace_size_bytes;
std::optional<int> max_batch_size;
int maximum_cached_engines;
TrtPrecisionMode precision_mode;
bool use_calibration;
bool allow_build_at_runtime;
bool use_explicit_precision;
};
Status ConvertSegmentToGraphDef(
const Graph* graph, const grappler::GraphProperties& graph_properties,
const std::vector<const Node*>& subgraph_nodes, EngineInfo* engine_info);
Status ConvertGraphDefToEngine(
const GraphDef& gdef, OpKernelContext* ctx, TrtPrecisionMode precision_mode,
int max_batch_size, size_t max_workspace_size_bytes,
const std::vector<PartialTensorShape>& input_shapes,
nvinfer1::ILogger* logger, nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator,
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine, bool use_calibration,
const bool use_implicit_batch, bool* convert_successfully,
TrtShapeOptimizationProfile* profiles, absl::string_view engine_name,
bool use_explicit_precision,
tensorflow::grappler::Cluster* cluster = nullptr,
const string& device = "");
class OutputEdgeValidator {
public:
bool operator()(const Edge* out_edge) const;
};
class TrtNodeValidator {
public:
TrtNodeValidator(const grappler::GraphProperties& graph_properties,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision);
Status IsTensorRTCandidate(const Node* node);
static const std::set<string>* quantize_ops;
StatusOr<OpConverter> GetValidator(const std::string& op);
private:
Status ConvertConstToWeights(const NodeDef& const_node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
TRT_TensorOrWeights* output);
Status ConvertVariableToWeights(
const NodeDef& const_node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
TRT_TensorOrWeights* output);
Status ConvertToTensorOrWeights(const NodeDef& node_def, int output_port,
TRT_TensorOrWeights* tensor_or_weights);
TrtWeightStore weight_store_;
const grappler::GraphProperties& graph_properties_;
const TrtPrecisionMode precision_mode_;
const bool use_calibration_;
const bool use_implicit_batch_;
const bool use_explicit_precision_;
friend class ValidatorTest;
friend class OpConverterTest;
};
class Converter {
public:
struct EngineOutputInfo {
string source_tensor_name;
string dest_node_name;
nvinfer1::DataType trt_dtype;
};
static StatusOr<std::unique_ptr<Converter>> Create(
TrtPrecisionMode precision_mode, bool use_calibration,
nvinfer1::ILogger* trt_logger, const bool use_implicit_batch,
absl::string_view engine_name, bool use_explicit_precision = false,
OpKernelContext* ctx = nullptr);
Status ConvertNode(const NodeDef& node_def);
Status AddInputTensor(const string& name, nvinfer1::DataType dtype,
const nvinfer1::Dims& dims, int batch_size);
Status AddInputResource(const string& name, const ResourceHandle& resource);
Status RenameAndMarkOutputTensors(
const std::vector<EngineOutputInfo>& output_tensors);
Status BuildCudaEngine(TrtUniquePtrType<nvinfer1::ICudaEngine>* engine,
int max_batch_size, size_t max_workspace_size_bytes,
nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator,
TrtShapeOptimizationProfile* profiles);
nvinfer1::INetworkDefinition* network() { return trt_network_.get(); }
TrtPrecisionMode precision_mode() const { return precision_mode_; }
OpKernelContext* context() { return ctx_; }
bool use_calibration() const { return use_calibration_; }
bool use_implicit_batch() const { return use_implicit_batch_; }
void ProvideQuantizationRange(ITensorProxyPtr* tensor, float min_range,
float max_range);
void MaybeApplyQuantizationRanges();
Status TransposeTensor(ITensorProxyPtr input_tensor,
const std::vector<int>& order_with_batch_dim,
ITensorProxyPtr* output_tensor,
const NodeDef& node_def,
absl::string_view sub_op_name = "");
Status DynamicReshape(ITensorProxyPtr input,
std::vector<std::pair<int, int>> slices,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::vector<int> size_for_added_dims = {},
std::optional<int> op_instance = std::nullopt);
Status DynamicExpandDims(ITensorProxyPtr input, const nvinfer1::Dims& dims,
int axis, const OpConverterParams* params,
ITensorProxyPtr* output,
std::optional<int> op_instance = std::nullopt);
Status SqueezeTensor(ITensorProxyPtr input, std::vector<int>* input_dims,
const OpConverterParams* params, ITensorProxyPtr* output,
std::optional<int> op_instance = std::nullopt);
ITensorProxyPtr CreateConstantLayer(const TRT_ShapedWeights& weights,
const nvinfer1::Dims& dims);
Status GetWeightRange(const TRT_ShapedWeights& weights, float* out_min,
float* out_max) const;
void SetLayerName(nvinfer1::ILayer* layer, const NodeDef& node_def,
absl::string_view sub_op_name = "",
std::optional<int> sub_op_instance = std::nullopt,
std::optional<std::string> origin_node_name = std::nullopt);
void SetLayerName(nvinfer1::ILayer* layer, absl::string_view main_op_name,
absl::string_view sub_op_name,
std::optional<int> sub_op_instance = std::nullopt);
std::unordered_map<string, TRT_TensorOrWeights>& TensorsMap() {
return trt_tensors_;
}
bool UseExplicitPrecision() const { return use_explicit_precision_; }
private:
Converter(TrtPrecisionMode precision_mode, bool use_calibration,
nvinfer1::ILogger* trt_logger, const bool use_implicit_batch,
absl::string_view engine_name, bool use_explicit_precision,
OpKernelContext* ctx);
Status Init(nvinfer1::ILogger* trt_logger);
Status MaybeUpdateBatchSize(int batch_size);
Status AddTensorOrWeights(const string& name, TRT_TensorOrWeights input);
Status GetTensorOrWeights(const string& name, TRT_TensorOrWeights* output);
Status GetInputs(const NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const;
std::unordered_map<string, TRT_TensorOrWeights> trt_tensors_;
TrtUniquePtrType<nvinfer1::IBuilder> trt_builder_;
TrtUniquePtrType<nvinfer1::INetworkDefinition> trt_network_;
TrtWeightStore weight_store_;
OpKernelContext* ctx_;
std::unordered_map<ITensorProxyPtr*, float> quantization_ranges_proxy_;
std::unordered_map<nvinfer1::ITensor*, float> quantization_ranges_;
const TrtPrecisionMode precision_mode_;
const bool use_calibration_;
const bool use_implicit_batch_;
int batch_size_ = -1;
int next_constant_layer_id_ = 0;
absl::string_view engine_name_;
bool use_explicit_precision_;
friend class ConverterTest;
friend class OpConverterTest;
};
Status TfTensorToTrtWeights(const Tensor& tensor, TrtWeightStore* weight_store,
TRT_ShapedWeights* weights);
Status PrepareTensorForShape(
Converter* converter, const TRT_TensorOrWeights& input,
const DimsAdapter& dims, const bool validation_only,
ITensorProxyPtr* tensor, const NodeDef& node_def,
std::optional<int> op_instance = std::nullopt,
std::optional<std::string> origin_node_name = std::nullopt);
Status GetTrtBroadcastShape(const TRT_TensorOrWeights& operand_l,
const TRT_TensorOrWeights& operand_r,
const bool check_feasibility,
const bool use_implicit_batch,
nvinfer1::Dims* operand_l_new_dims,
nvinfer1::Dims* operand_r_new_dims);
template <typename T>
using OperationMap = std::unordered_map<std::string, T>;
using UnaryOperationMapType = OperationMap<nvinfer1::UnaryOperation>;
const UnaryOperationMapType* UnaryOperationMap();
const UnaryOperationMapType* UnaryBooleanOperationMap();
using ActivationTypeMapType = OperationMap<nvinfer1::ActivationType>;
const ActivationTypeMapType* ActivationTypeMap();
using BinaryOperationMapType = OperationMap<nvinfer1::ElementWiseOperation>;
const BinaryOperationMapType* BinaryOperationMap();
const BinaryOperationMapType* BinaryBooleanOperationMap();
template <typename T>
absl::InlinedVector<std::string, 10> GetOperationNames(const T& set) {
absl::InlinedVector<std::string, 10> result;
absl::c_transform(set, std::back_inserter(result),
[](const auto x) { return x.first; });
return result;
}
StatusOr<ITensorProxyPtr> ConvertMatMulImpl(const OpConverterParams* params,
TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b,
bool transpose_a, bool transpose_b);
Status ApplyBroadcast(std::unique_ptr<TRT_TensorOrWeights>& operand,
const DimsAdapter& broadcasted_dims,
const OpConverterParams* params,
std::optional<int> op_instance);
std::string convert_range_error_msg(float start, float limit, float delta);
std::string convert_range_expected_msg(const NodeDef& node_def);
std::string bool_weight_error_msg(const NodeDef& node_def);
std::string unexpected_type_error_msg(nvinfer1::DataType type_being_checked,
nvinfer1::DataType type_expected,
const NodeDef& node_def, int idx = 0);
std::string then_else_dtypes_error_msg(nvinfer1::DataType type_then,
nvinfer1::DataType type_else,
const NodeDef& node_def);
std::string input_shapes_error_msg(const nvinfer1::Dims& shape1,
const nvinfer1::Dims& shape2,
const NodeDef& node,
bool then_vs_else = false);
std::string batch_size_error(absl::string_view name, absl::string_view comment);
inline bool find_name(const string& name, const std::vector<string> names) {
return std::find(names.begin(), names.end(), name) != names.end();
}
Status check_type(nvinfer1::DataType type_being_checked,
nvinfer1::DataType type_expected, const NodeDef& node_def,
int idx = 0);
}
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <bitset>
#include <cmath>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/slice_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/timing_cache.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_experimental_features.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/strided_slice_op.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
#include "third_party/tensorrt/NvInferPlugin.h"
#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
#define TFTRT_CHECK_INPUT_SIZE(size, exp_size, node_def) \
if ((size) != (exp_size)) { \
TFTRT_ERROR(errors::InvalidArgument, node_def.op(), " got ", (size), \
" inputs but expected ", (exp_size)); \
}
#define MAX_KERNEL_DIMS_PRODUCT(x) (int64_t(std::pow(100000.0F, (x) * 0.5F)))
namespace tensorflow {
namespace tensorrt {
namespace convert {
using absl::StrAppend;
using absl::StrCat;
namespace {
#define ADD_LAYER(layer_name) \
case nvinfer1::LayerType::k##layer_name: \
return #layer_name;
const char* LayerTypeToString(nvinfer1::LayerType layer_type) {
switch (layer_type) {
ADD_LAYER(CONVOLUTION)
ADD_LAYER(FULLY_CONNECTED)
ADD_LAYER(ACTIVATION)
ADD_LAYER(POOLING)
ADD_LAYER(LRN)
ADD_LAYER(SCALE)
ADD_LAYER(SOFTMAX)
ADD_LAYER(DECONVOLUTION)
ADD_LAYER(CONCATENATION)
ADD_LAYER(ELEMENTWISE)
ADD_LAYER(PLUGIN)
ADD_LAYER(UNARY)
ADD_LAYER(PADDING)
ADD_LAYER(SHUFFLE)
ADD_LAYER(REDUCE)
ADD_LAYER(TOPK)
ADD_LAYER(GATHER)
#if IS_TRT_VERSION_GE(8, 5, 0, 0)
ADD_LAYER(GRID_SAMPLE)
#endif
ADD_LAYER(MATRIX_MULTIPLY)
ADD_LAYER(RAGGED_SOFTMAX)
ADD_LAYER(CONSTANT)
ADD_LAYER(RNN_V2)
ADD_LAYER(IDENTITY)
ADD_LAYER(PLUGIN_V2)
ADD_LAYER(SLICE)
ADD_LAYER(SHAPE)
ADD_LAYER(PARAMETRIC_RELU)
ADD_LAYER(RESIZE)
ADD_LAYER(TRIP_LIMIT)
ADD_LAYER(RECURRENCE)
ADD_LAYER(ITERATOR)
ADD_LAYER(LOOP_OUTPUT)
ADD_LAYER(SELECT)
ADD_LAYER(FILL)
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
ADD_LAYER(QUANTIZE)
ADD_LAYER(DEQUANTIZE)
#endif
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_LAYER(CONDITION)
ADD_LAYER(CONDITIONAL_INPUT)
ADD_LAYER(CONDITIONAL_OUTPUT)
ADD_LAYER(SCATTER)
ADD_LAYER(EINSUM)
ADD_LAYER(ASSERTION)
#endif
#if IS_TRT_VERSION_GE(8, 5, 0, 0)
ADD_LAYER(ONE_HOT)
ADD_LAYER(NON_ZERO)
ADD_LAYER(NMS)
#endif
#if IS_TRT_VERSION_GE(8, 6, 0, 0)
ADD_LAYER(REVERSE_SEQUENCE)
#endif
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
ADD_LAYER(RNN)
#endif
default:
return "UNKNOWN_LAYER";
}
}
#undef ADD_LAYER
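// Names a TRT layer "<engine_name>/<tf_node_name>:<TRT layer type>" so that
// layers in a built engine can be traced back to the originating TF node.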
void SetLayerNameHelper(nvinfer1::ILayer* layer, absl::string_view engine_name,
absl::string_view tf_name) {
const char* trt_name = LayerTypeToString(layer->getType());
layer->setName(
absl::StrCat(engine_name, "/", tf_name, ":", trt_name).c_str());
}
std::string GetLayerNameSuffix(absl::string_view sub_op_name,
std::optional<int> sub_op_instance) {
std::string op_suffix(sub_op_name);
if (sub_op_instance.has_value()) {
op_suffix =
absl::StrCat(op_suffix, "_", std::to_string(sub_op_instance.value()));
}
return op_suffix;
}
}
bool IsEngineInput(absl::string_view name) {
return absl::StartsWith(name, IONamePrefixes::kInputPHName);
}
bool IsEngineOutput(absl::string_view name) {
return absl::StartsWith(na | #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <cmath>
#include <functional>
#include <iterator>
#include <memory>
#include <numeric>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "absl/time/civil_time.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/base/call_once.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2tensorrt/common/datavec.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
enum class TrtTestMode {
kImplicitBatch = 0,
kExplicitBatch = 1,
kDynamicShape = 2
};
string DebugString(const TrtTestMode mode) {
switch (mode) {
case TrtTestMode::kImplicitBatch:
return "kImplicitBatch";
case TrtTestMode::kExplicitBatch:
return "kExplicitBatch";
case TrtTestMode::kDynamicShape:
return "kDynamicShape";
default:
return "Invalid TrtTestMode";
}
}
namespace convert {
using absl::StrCat;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::Matcher;
using ::testing::PrintToString;
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
constexpr std::array<TrtTestMode, 3> ValidTrtModes = {
TrtTestMode::kImplicitBatch, TrtTestMode::kExplicitBatch,
TrtTestMode::kDynamicShape};
bool TrtShapedWeightsEquals(const TRT_ShapedWeights& lhs,
const TRT_ShapedWeights& rhs) {
return lhs.Shape() == rhs.Shape() && lhs.TrtDType() == rhs.TrtDType() &&
lhs.GetPointer<int8>() == rhs.GetPointer<int8>();
}
template <typename T>
void ValidateWeights(const TRT_ShapedWeights& weights,
const std::vector<int>& expected_dims,
const std::vector<T>& expected_value) {
EXPECT_EQ(weights.Shape(), DimsAdapter(expected_dims));
ASSERT_EQ(expected_value.size(), weights.count()) << weights.DebugString();
const T* actual_values = weights.GetPointer<T>();
for (int i = 0; i < expected_value.size(); ++i) {
EXPECT_EQ(expected_value[i], actual_values[i]);
}
}
TEST(TRT_ShapedWeights_Test, Basic) {
{
TRT_ShapedWeights weights;
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_EQ(nullptr, trt_weights.values);
EXPECT_EQ(0, trt_weights.count);
EXPECT_EQ(nullptr, ptr->GetPointer<int8>());
EXPECT_EQ(0, ptr->count());
EXPECT_EQ(0, ptr->size_bytes());
}
}
{
TRT_ShapedWeights weights(nvinfer1::DataType::kFLOAT);
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_EQ(nullptr, trt_weights.values);
EXPECT_EQ(0, trt_weights.count);
EXPECT_EQ(nullptr, ptr->GetPointer<int8>());
EXPECT_EQ(0, ptr->count());
EXPECT_EQ(0, ptr->size_bytes());
}
}
{
TrtWeightStore store;
TRT_ShapedWeights weights =
store.GetTempWeights(nvinfer1::DataType::kFLOAT, CreateDims({2, 5}))
.value();
TRT_ShapedWeights copy(weights);
    for (auto ptr : {&weights, &copy}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_NE(nullptr, trt_weights.values);
EXPECT_EQ(10, trt_weights.count);
EXPECT_EQ(trt_weights.values, ptr->GetPointer<int8>());
EXPECT_EQ(10, ptr->count());
EXPECT_EQ(40, ptr->size_bytes());
}
EXPECT_EQ(weights.GetPointer<int8>(), copy.GetPointer<int8>());
}
}
TEST(TRT_TensorOrWeights_Test, Basic) {
{
TRT_TensorOrWeights tw;
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
    for (auto ptr : {&tw, &copy, &assigned}) {
EXPECT_EQ(false, ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
EXPECT_EQ(-1, ptr->batch_size());
}
}
{
nvinfer1::Dims dims;
dims.nbDims = 1;
dims.d[0] = 1;
ITensorProxyPtr itensor(dims);
TRT_TensorOrWeights tw(itensor);
TRT_TensorOrWeights tw1(itensor, 1);
for (auto original_ptr : {&tw, &tw1}) {
TRT_TensorOrWeights copy(*original_ptr);
TRT_TensorOrWeights assigned;
assigned = *original_ptr;
      for (auto ptr : {original_ptr, &copy, &assigned}) {
ASSERT_TRUE(ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
if (original_ptr == &tw) {
EXPECT_EQ(-1, ptr->batch_size());
} else {
EXPECT_EQ(1, ptr->batch_size());
}
EXPECT_EQ(itensor->simple_tensor(), ptr->tensor()->simple_tensor());
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray({1}));
}
}
}
{
nvinfer1::Dims dims;
dims.nbDims = 1;
dims.d[0] = 1;
TRT_TensorOrWeights tw(nvinfer1::DataType::kFLOAT, dims, 1);
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
    for (auto ptr : {&tw, &copy, &assigned}) {
ASSERT_TRUE(ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
EXPECT_EQ(1, ptr->batch_size());
EXPECT_NE(nullptr, ptr->tensor()->simple_tensor());
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray({1}));
}
}
{
TRT_ShapedWeights weights;
TRT_TensorOrWeights tw(weights);
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
    for (auto ptr : {&tw, &copy, &assigned}) {
EXPECT_EQ(false, ptr->is_tensor());
EXPECT_EQ(true, ptr->is_weights());
EXPECT_TRUE(TrtShapedWeightsEquals(weights, ptr->weights()));
std::vector<int> empty_dims;
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray(empty_dims));
}
}
}
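// Fixture that infers static graph properties for a Scope and runs
// TrtNodeValidator::ConvertToTensorOrWeights on a single node output.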
class ValidatorTest : public ::testing::Test {
public:
ValidatorTest() {}
Status ConvertToTensorOrWeights(const Scope& scope, const Node* node,
int output_port,
TRT_TensorOrWeights* tensor_or_weights) {
grappler::GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
    TrtNodeValidator validator(graph_properties, TrtPrecisionMode::FP32,
                               /*use_calibration=*/false,
                               /*use_implicit_batch=*/true,
                               /*use_explicit_precision=*/false);
return validator.ConvertToTensorOrWeights(node->def(), output_port,
tensor_or_weights);
}
};
TEST_F(ValidatorTest, ConvertToTensorOrWeights) {
{
Scope s = Scope::NewRootScope();
auto node =
ops::Const(s.WithOpName("my_const"), {1.0f, 2.0f}, TensorShape({2}));
TRT_TensorOrWeights output;
EXPECT_THAT(ConvertToTensorOrWeights(s, node.op().node(),
0, &output),
IsOk());
ValidateWeights<float>(output.weights(), {2}, {1.0, 2.0});
}
auto convert_to_tensor_or_weights = [this](const std::vector<int64_t>& dims,
TRT_TensorOrWeights* output) {
Scope s = Scope::NewRootScope();
const auto attrs = ops::Placeholder::Shape(PartialTensorShape{dims});
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT, attrs);
auto add = ops::Add(s.WithOpName("add"), feed, feed);
return this->ConvertToTensorOrWeights(s, add.operation.node(),
0, output);
};
{
TRT_TensorOrWeights output;
EXPECT_THAT(
convert_to_tensor_or_weights(
std::vector<int64_t>(nvinfer1::Dims::MAX_DIMS + 2, 1), &output),
StatusIs(absl::StatusCode::kOutOfRange,
HasSubstr("Input tensor rank is greater than 9")));
}
{
TRT_TensorOrWeights output;
EXPECT_THAT(convert_to_tensor_or_weights({}, &output),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Scalar input tensor is not supported since "
"the first dimension "
"is treated as batch dimension by TRT")));
}
for (const int32 non_batch_dim : {-1, 2}) {
const int32 batch_size = 12;
TRT_TensorOrWeights output;
EXPECT_THAT(
convert_to_tensor_or_weights({batch_size, non_batch_dim}, &output),
IsOk());
ASSERT_TRUE(output.is_tensor());
EXPECT_EQ(batch_size, output.batch_size());
EXPECT_NE(nullptr, output.tensor()->simple_tensor());
EXPECT_THAT(output.GetTrtDims(), DimsAreArray({non_batch_dim}));
}
}
TEST_F(ValidatorTest, IsTensorRTCandidate_Basics) {
Scope s = Scope::NewRootScope();
auto input =
ops::Const(s.WithOpName("const"), {1.0f, 2.0f}, TensorShape({2}));
auto add = ops::Add(s.WithOpName("add"), input, input);
const Node* add_node = add.operation.node();
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
  TrtNodeValidator validator(graph_properties, TrtPrecisionMode::FP32,
                             /*use_calibration=*/false,
                             /*use_implicit_batch=*/true,
                             /*use_explicit_precision=*/false);
bool start_conversion = false;
bool should_fail = false;
auto op_converter = [&start_conversion, &should_fail](
const OpConverterParams* params) -> Status {
if (should_fail) return errors::InvalidArgument("");
if (!params->validation_only) start_conversion = true;
return OkStatus();
};
auto original_op_converter = GetOpConverterRegistry()->LookUp("Add");
ASSERT_TRUE(original_op_converter.ok());
GetOpConverterRegistry()->Clear("Add");
EXPECT_THAT(validator.IsTensorRTCandidate(add_node),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Op type Add is not supported.")));
GetOpConverterRegistry()->Register("Add", kDefaultConverterPriority + 1,
op_converter);
TF_EXPECT_OK(validator.IsTensorRTCandidate(add_node));
EXPECT_EQ(false, start_conversion);
should_fail = true;
EXPECT_THAT(validator.IsTensorRTCandidate(add_node),
StatusIs(absl::StatusCode::kInvalidArgument));
GetOpConverterRegistry()->Clear("Add");
GetOpConverterRegistry()->Register("Add", kDefaultConverterPriority,
*original_op_converter);
}
TEST(TrtNodeValidator, IsTensorRTCandidate) {
const std::vector<int32> input_shape_array{2, 2};
TensorShape input_shape;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(input_shape_array, &input_shape));
Scope s = Scope::NewRootScope();
ops::Placeholder::Attrs feed_attrs;
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(input_shape_array, &feed_attrs.shape_));
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT, feed_attrs);
auto const_1 = ops::Const(s.WithOpName("const_1"), 1.0f, input_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), feed, const_1);
ops::MatMul::Attrs matmul_attrs;
matmul_attrs.transpose_a_ = true;
auto incompatible_matmul = ops::MatMul(s.WithOpName("incompatible_matmul"),
feed, const_1, matmul_attrs);
auto unsupported_op = ops::Erfc(s.WithOpName("sin"), feed);
auto incompatible_feed = ops::Placeholder(s.WithOpName("feed"), DT_DOUBLE);
auto const_2 = ops::Const(s.WithOpName("const_2"), 1.0, input_shape);
auto matmul_with_incompatible_input =
ops::MatMul(s.WithOpName("matmul_with_incompatible_input"),
incompatible_feed, const_2);
auto quantize_attrs = ops::FakeQuantWithMinMaxArgs::Min(-6.0f).Max(6.0f);
auto quantize = ops::FakeQuantWithMinMaxArgs(s.WithOpName("quantize"), feed,
quantize_attrs);
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
Tensor feed_tensor(DT_FLOAT, input_shape);
item.feed.push_back(std::make_pair("feed", feed_tensor));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
for (const TrtPrecisionMode precision_mode :
{TrtPrecisionMode::FP32, TrtPrecisionMode::INT8}) {
    TrtNodeValidator validator(graph_properties, precision_mode,
                               /*use_calibration=*/false,
                               /*use_implicit_batch=*/true,
                               /*use_explicit_precision=*/false);
TF_EXPECT_OK(validator.IsTensorRTCandidate(matmul.operation.node()));
EXPECT_THAT(
validator.IsTensorRTCandidate(incompatible_matmul.operation.node()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("MatMul with 2D tensors requires explicit batch "
"mode, or that tensor A "
"is not transposed and B is a constant tensor.")));
EXPECT_THAT(validator.IsTensorRTCandidate(unsupported_op.operation.node()),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Op type Erfc is not supported")));
EXPECT_THAT(validator.IsTensorRTCandidate(
matmul_with_incompatible_input.operation.node()),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to convert at least one input to a "
"TRT_TensorOrWeights:")));
if (precision_mode == TrtPrecisionMode::INT8) {
TF_EXPECT_OK(validator.IsTensorRTCandidate(quantize.operation.node()));
} else {
EXPECT_THAT(
validator.IsTensorRTCandidate(quantize.operation.node()),
StatusIs(
absl::StatusCode::kUnimplemented,
HasSubstr("Op type FakeQuantWithMinMaxArgs is not supported")));
}
}
}
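// Fixture exposing Converter internals (batch size, weight store, input map,
// quantization ranges) so individual conversion steps can be tested directly.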
class ConverterTest : public ::testing::Test {
public:
ConverterTest() { Reset(); }
void Reset() {
GetOpConverterRegistry()->Clear("MyOp");
GetOpConverterRegistry()->Clear("DummyOp");
    converter_ =
        std::move(Converter::Create(TrtPrecisionMode::FP32,
                                    /*use_calibration=*/false, &logger_,
                                    /*use_implicit_batch=*/true,
                                    /*engine_name=*/"TRTEngineOp_000_000",
                                    /*use_explicit_precision=*/false)
                      .value());
weight_store_ = &converter_->weight_store_;
}
Status MaybeUpdateBatchSize(int batch_size) {
return converter_->MaybeUpdateBatchSize(batch_size);
}
Status AddTensorOrWeights(const string& name, TRT_TensorOrWeights input) {
return converter_->AddTensorOrWeights(name, input);
}
Status GetTensorOrWeights(const string& name, TRT_TensorOrWeights* output) {
return converter_->GetTensorOrWeights(name, output);
}
Status GetInputs(const NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const {
return converter_->GetInputs(node_def, inputs);
}
Status GetWeightRange(const TRT_ShapedWeights& weights, float* out_min,
float* out_max) const {
return converter_->GetWeightRange(weights, out_min, out_max);
}
int batch_size() const { return converter_->batch_size_; }
std::unordered_map<ITensorProxyPtr*, float>& quantization_ranges_proxy() {
return converter_->quantization_ranges_proxy_;
}
std::unordered_map<nvinfer1::ITensor*, float>& quantization_ranges() {
return converter_->quantization_ranges_;
}
private:
Logger& logger_ = *Logger::GetLogger();
protected:
std::unique_ptr<Converter> converter_;
TrtWeightStore* weight_store_;
};
TEST_F(ConverterTest, ConvertNode) {
ITensorProxyPtr output_tensors[2];
auto op_converter =
[&output_tensors](const OpConverterParams* params) -> Status {
nvinfer1::Dims dims = params->inputs[0].tensor()->getDimensions();
for (int i = 0; i < 2; ++i) {
dims.d[0] += 1;
output_tensors[i]->setDimensions(dims);
params->outputs->push_back(TRT_TensorOrWeights(output_tensors[i]));
}
return OkStatus();
};
NodeDef node_def = MakeNodeDef("my_op", "MyOp", {"my_input"});
TF_ASSERT_OK(converter_->AddInputTensor(
"my_input", nvinfer1::DataType::kFLOAT, CreateDims({123}), 1));
EXPECT_THAT(converter_->ConvertNode(node_def),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("No converter for op MyOp")));
GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
op_converter);
TF_ASSERT_OK(converter_->ConvertNode(node_def));
TRT_TensorOrWeights actual_output_1;
TF_EXPECT_OK(GetTensorOrWeights("my_op", &actual_output_1));
EXPECT_EQ(output_tensors[0]->simple_tensor(),
actual_output_1.tensor()->simple_tensor());
EXPECT_EQ(124, actual_output_1.tensor()->getDimensions().d[0]);
TRT_TensorOrWeights actual_output_2;
TF_EXPECT_OK(GetTensorOrWeights("my_op:1", &actual_output_2));
EXPECT_EQ(output_tensors[1]->simple_tensor(),
actual_output_2.tensor()->simple_tensor());
EXPECT_EQ(125, actual_output_2.tensor()->getDimensions().d[0]);
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, AddAndGetInputs) {
NodeDef node_def;
node_def.add_input("^control_input");
node_def.add_input("input");
node_def.add_input("input:0");
node_def.add_input("input:1");
node_def.add_input("weird_input:2:3:4:0");
TF_EXPECT_OK(converter_->AddInputTensor("input", nvinfer1::DataType::kFLOAT,
CreateDims({1}), 1));
TF_EXPECT_OK(converter_->AddInputTensor("input:1", nvinfer1::DataType::kINT32,
CreateDims({2, 3}), 1));
TF_EXPECT_OK(converter_->AddInputTensor(
"weird_input:2:3:4", nvinfer1::DataType::kHALF, CreateDims({5, 3}), 1));
std::vector<TRT_TensorOrWeights> inputs;
TF_EXPECT_OK(GetInputs(node_def, &inputs));
EXPECT_EQ(4, inputs.size());
EXPECT_EQ(inputs[0].tensor()->trt_tensor(), inputs[1].tensor()->trt_tensor());
EXPECT_EQ(nvinfer1::DataType::kFLOAT, inputs[0].tensor()->getType());
EXPECT_EQ(nvinfer1::DataType::kINT32, inputs[2].tensor()->getType());
EXPECT_EQ(nvinfer1::DataType::kHALF, inputs[3].tensor()->getType());
EXPECT_THAT(inputs[0].tensor()->getDimensions(), DimsAreArray({1}));
EXPECT_THAT(inputs[2].tensor()->getDimensions(), DimsAreArray({2, 3}));
EXPECT_THAT(inputs[3].tensor()->getDimensions(), DimsAreArray({5, 3}));
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, RenameAndMarkOutputTensors) {
std::vector<ITensorProxyPtr> output_tensors;
auto op_converter =
[&output_tensors](const OpConverterParams* params) -> Status {
nvinfer1::Permutation perm;
perm.order[0] = 1;
perm.order[1] = 0;
for (int i = 0; i < 2; ++i) {
ITensorProxyPtr input_tensor = params->inputs[0].tensor();
nvinfer1::IShuffleLayer* layer =
params->converter->network()->addShuffle(*input_tensor->trt_tensor());
layer->setFirstTranspose(perm);
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->emplace_back(output_tensor);
output_tensors.push_back(output_tensor);
}
TRT_ShapedWeights output_weights(nvinfer1::DataType::kFLOAT);
params->outputs->emplace_back(output_weights);
return OkStatus();
};
GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
op_converter);
NodeDef node_def = MakeNodeDef("my_op", "MyOp", {"my_input"});
TF_EXPECT_OK(converter_->AddInputTensor(
"my_input", nvinfer1::DataType::kFLOAT, CreateDims({1, 2}), 1));
TF_EXPECT_OK(converter_->ConvertNode(node_def));
EXPECT_THAT(
converter_->RenameAndMarkOutputTensors({{"my_op:2", "my_output"}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output my_op:2 is weights not tensor")));
TF_EXPECT_OK(converter_->RenameAndMarkOutputTensors(
{{"my_op", "my_output"}, {"my_op:1", "my_output_1"}}));
EXPECT_EQ(2, output_tensors.size());
for (auto output_tensor : output_tensors) {
EXPECT_THAT(output_tensor->getDimensions(), DimsAreArray({2, 1}));
}
EXPECT_EQ("my_output", string(output_tensors[0]->getName()));
EXPECT_EQ("my_output_1", string(output_tensors[1]->getName()));
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, TransposeTensor) {
ITensorProxyPtr input_tensor = converter_->network()->addInput(
"", nvinfer1::DataType::kFLOAT, CreateDims({2, 3, 5}));
ITensorProxyPtr output_tensor = nullptr;
NodeDef dummy_node_def = MakeNodeDef("dummy_op", "DummyOp", {});
EXPECT_THAT(converter_->TransposeTensor(input_tensor, {0, 1}, &output_tensor,
dummy_node_def, "sub1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Rank of perm for transpose does not match "
"with that of the input")));
EXPECT_THAT(
converter_->TransposeTensor(input_tensor, {1, 0, 2, 3}, &output_tensor,
dummy_node_def, "sub2"),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Transpose at batch dimension is not supported.")));
TF_EXPECT_OK(converter_->TransposeTensor(
input_tensor, {0, 3, 1, 2}, &output_tensor, dummy_node_def, "sub3"));
EXPECT_THAT(output_tensor->getDimensions(), DimsAreArray({5, 2, 3}));
EXPECT_THAT(
converter_->network(),
LayerNamesAreArray({"TRTEngineOp_000_000/dummy_op-sub3:SHUFFLE"}));
}
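// Runs PrepareTensorForShape on either a tensor or a weights input, in both
// validation-only and conversion mode, and checks the resulting dimensions or
// the expected error.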
void TestPrepareTensorForShape(
const std::vector<int>& input_dims, const std::vector<int>& reshape_dims,
const std::vector<int>& expected_tensor_dims, bool input_is_tensor,
Converter* converter, TrtWeightStore* weight_store,
absl::StatusCode expected_code = absl::StatusCode::kOk,
const char* expected_error_msg_substr = nullptr) {
TRT_TensorOrWeights input;
if (input_is_tensor) {
input = TRT_TensorOrWeights(converter->network()->addInput(
"", nvinfer1::DataType::kFLOAT, CreateDims(input_dims)));
} else {
input = TRT_TensorOrWeights(
weight_store
->GetTempWeights(nvinfer1::DataType::kFLOAT, CreateDims(input_dims))
.value());
}
ITensorProxyPtr output_tensor = nullptr;
NodeDef dummy_node_def = MakeNodeDef("dummy_op", "DummyOp", {});
for (bool validation_only : {false, true}) {
const Status status =
PrepareTensorForShape(converter, input, DimsAdapter(reshape_dims),
validation_only, &output_tensor, dummy_node_def);
if (expected_code == absl::StatusCode::kOk) {
TF_EXPECT_OK(status);
if (validation_only) {
EXPECT_EQ(nullptr, *output_tensor);
} else {
EXPECT_THAT(output_tensor->getDimensions(),
DimsAreArray(expected_tensor_dims));
}
} else {
EXPECT_THAT(status, StatusIs(expected_code,
HasSubstr(expected_error_msg_substr)));
}
}
}
TEST_F(ConverterTest, PrepareTensorForShape) {
for (bool input_is_tensor : {true, false}) {
Reset();
TestPrepareTensorForShape({2, 3, 5}, {2, 3, 6}, {}, input_is_tensor,
converter_.get(), weight_store_,
absl::StatusCode::kInvalidArgument,
"Incompatible shapes");
Reset();
TestPrepareTensorForShape({2, 3, 5}, {10, 3}, {10, 3}, input_is_tensor,
converter_.get(), weight_store_);
Reset();
TestPrepareTensorForShape({1, 1}, {}, {}, input_is_tensor, converter_.get(),
weight_store_);
}
Reset();
  TestPrepareTensorForShape({}, {1, 1}, {1, 1}, /*input_is_tensor=*/true,
                            converter_.get(), weight_store_);
  Reset();
  TestPrepareTensorForShape({2, 3, 5}, {-1, 2}, {15, 2},
                            /*input_is_tensor=*/true, converter_.get(),
                            weight_store_);
  Reset();
  TestPrepareTensorForShape({2, 3, 5}, {-1, 2}, {15, 2},
                            /*input_is_tensor=*/false, converter_.get(),
                            weight_store_, absl::StatusCode::kInvalidArgument,
                            "Shape is not fully defined");
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, MaybeUpdateBatchSize) {
EXPECT_EQ(-1, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(-1));
EXPECT_EQ(-1, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
EXPECT_EQ(123, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
EXPECT_EQ(123, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(-1));
EXPECT_EQ(123, batch_size());
EXPECT_THAT(
MaybeUpdateBatchSize(124),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(
"Provided batch size does not match converter batch size")));
}
TEST_F(ConverterTest, AddAndGetTensorOrWeights) {
ITensorProxyPtr simple_tensor;
TRT_TensorOrWeights tensor(simple_tensor);
EXPECT_EQ(-1, tensor.batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
TF_EXPECT_OK(AddTensorOrWeights("my_tensor", tensor));
TRT_TensorOrWeights added_tensor;
TF_EXPECT_OK(GetTensorOrWeights("my_tensor", &added_tensor));
EXPECT_EQ(123, added_tensor.batch_size());
EXPECT_THAT(AddTensorOrWeights("my_tensor", tensor),
StatusIs(absl::StatusCode::kAlreadyExists,
HasSubstr("tensor/weights my_tensor already exist")));
}
template <typename T>
void TestGetWeightRange(ConverterTest* test, TrtWeightStore* weight_store) {
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(DataTy |
1,154 | cpp | tensorflow/tensorflow | convert_graph | tensorflow/compiler/tf2tensorrt/convert/convert_graph.cc | tensorflow/compiler/tf2tensorrt/convert/convert_graph_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_
#include <vector>
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/trt_optimization_pass.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace convert {
Status ConvertGraph(const TRTOptimizationPass::ConversionParams& params,
grappler::GrapplerItem& grappler_item,
const std::vector<string>& input_output_names,
grappler::Cluster* cluster, GraphDef* output);
std::pair<int, Allocator*> GetDeviceAndAllocator(
const grappler::Cluster* cluster, const EngineInfo& engine);
Status RegisterGraphToFunctionLibrary(const GraphDef& segment_graph_def,
Graph* graph, const string& engine_name);
Status CreateStaticEngine(const TRTOptimizationPass::ConversionParams& params,
const EngineInfo& info, int max_batch_size,
const std::vector<PartialTensorShape>& input_shapes,
TrtShapeOptimizationProfile* profile,
string* segment_string, grappler::Cluster* cluster);
}
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/convert/convert_graph.h"
#include <fstream>
#include <list>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using absl::StrAppend;
using absl::StrCat;
using ::tensorflow::tensorrt::segment::ClusterProperty;
using ::tensorflow::tensorrt::segment::NodePtrCompare;
using ::tensorflow::tensorrt::segment::Segment;
namespace {
Status BuildNodeMap(const Graph& graph,
std::unordered_map<string, Node*>* node_map) {
for (auto* node : graph.op_nodes()) {
if (!node_map->insert({node->name(), node}).second) {
return errors::AlreadyExists("Node name is not unique in graph: " +
node->name());
}
}
return OkStatus();
}
EngineInfo::EngineType GetEngineType(
const TRTOptimizationPass::ConversionParams& params) {
return (params.is_dynamic_op || params.use_calibration)
? EngineInfo::EngineType::TRTDynamic
: EngineInfo::EngineType::TRTStatic;
}
bool AllowDynamicNonBatchDimension(
const TRTOptimizationPass::ConversionParams& params) {
return !params.use_implicit_batch ||
GetEngineType(params) == EngineInfo::EngineType::TRTDynamic;
}
struct EdgePtrCompare {
bool operator()(const Edge* lhs, const Edge* rhs) const {
return lhs->id() < rhs->id();
}
};
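// Scans TF GPU ids 0..99 and returns the first one that maps to a CUDA
// platform device id, or (-1, -1) if no TF GPU is found.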
std::pair<TfDeviceId, PlatformDeviceId> GetFirstValidDeviceId() {
for (int tf_device_id_value = 0; tf_device_id_value < 100;
++tf_device_id_value) {
TfDeviceId tf_device_id(tf_device_id_value);
PlatformDeviceId platform_device_id;
Status s =
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id);
if (s.ok()) {
VLOG(1) << "Found TF GPU " << tf_device_id.value() << " at cuda device "
<< platform_device_id.value();
return std::make_pair(tf_device_id, platform_device_id);
}
}
LOG(ERROR) << "Could not find any TF GPUs";
return std::make_pair(TfDeviceId(-1), PlatformDeviceId(-1));
}
bool ShallKeepControlEdgeFrom(const Node* input_node) {
if (!input_node) {
LOG(ERROR) << "Node pointer is null, this should not happen";
return false;
}
return input_node->type_string() != "Const";
}
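// Walks the segment's nodes in topological order, records the data and control
// connections that cross the segment boundary, converts the segment to a
// GraphDef, and assigns a device for the future engine.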
Status GetEngineInfo(const Graph* g,
const grappler::GraphProperties& graph_properties,
const Segment& segment,
const std::vector<Node*>& reverse_topo_order,
EngineInfo* info) {
std::vector<const Node*> subgraph_nodes;
std::set<const Node*> added_const_nodes;
const ClusterProperty& segment_property = segment.property;
const std::set<const Node*, NodePtrCompare>& segment_nodes = segment.nodes;
const DeviceNameUtils::ParsedName segment_device =
segment_property.DeviceName();
info->max_batch_size = segment_property.BatchSize().GetOptionalMaxBatchSize();
std::unordered_map<string, int> input_to_engine_port, output_to_engine_port;
for (auto it = reverse_topo_order.rbegin(); it != reverse_topo_order.rend();
++it) {
const Node* node = *it;
if (segment_nodes.count(node) == 0) continue;
subgraph_nodes.push_back(node);
const int node_id = node->id();
const string& node_name = node->name();
std::vector<const Edge*> in_edges(node->in_edges().begin(),
node->in_edges().end());
std::sort(in_edges.begin(), in_edges.end(), EdgePtrCompare());
for (const auto edge : in_edges) {
auto input_node = edge->src();
if (input_node->IsSource() || segment_nodes.count(input_node)) {
continue;
}
if (edge->IsControlEdge()) {
if (ShallKeepControlEdgeFrom(input_node)) {
info->connections.emplace_back(input_node->name(), input_node->id(),
node_name, node_id,
true);
}
} else if (input_node->type_string() == "Const") {
if (!added_const_nodes.insert(input_node).second) {
continue;
}
VLOG(1) << "Adding const node " << input_node->name();
} else {
int port = Graph::kControlSlot - 1;
const string s = StrCat(input_node->name(), ":", edge->src_output());
VLOG(1) << "Input edge = " << s;
if (input_to_engine_port.count(s)) {
port = input_to_engine_port.at(s);
} else {
port = input_to_engine_port.size();
input_to_engine_port.insert({s, port});
}
info->connections.emplace_back(
input_node->name(), input_node->id(), edge->src_output(), node_name,
node_id, edge->dst_input(), true, port);
}
}
std::vector<const Edge*> out_edges(node->out_edges().begin(),
node->out_edges().end());
std::sort(out_edges.begin(), out_edges.end(), EdgePtrCompare());
for (const auto edge : out_edges) {
auto output_node = edge->dst();
if (output_node->IsSink() || segment_nodes.count(output_node)) {
continue;
}
if (edge->IsControlEdge()) {
if (ShallKeepControlEdgeFrom(node)) {
info->connections.emplace_back(output_node->name(), output_node->id(),
node_name, node_id,
false);
}
} else {
int port = Graph::kControlSlot - 1;
const string s = StrCat(node_name, ":", edge->src_output());
VLOG(1) << "Output edge = " << s;
if (output_to_engine_port.count(s)) {
port = output_to_engine_port.at(s);
} else {
port = output_to_engine_port.size();
output_to_engine_port.insert({s, port});
}
info->connections.emplace_back(
output_node->name(), output_node->id(), edge->dst_input(),
node_name, node_id, edge->src_output(), false, port);
}
}
}
subgraph_nodes.insert(subgraph_nodes.begin(), added_const_nodes.begin(),
added_const_nodes.end());
TF_RETURN_IF_ERROR(
ConvertSegmentToGraphDef(g, graph_properties, subgraph_nodes, info));
VLOG(1) << "Converted TensorRT candidate segment '" << info->engine_name
<< "' to a GraphDef";
if (segment_device.has_type) {
if (segment_device.type != "GPU") {
return errors::Internal(
"segment device is not GPU: ",
DeviceNameUtils::ParsedNameToString(segment_device));
}
info->device = DeviceNameUtils::ParsedNameToString(segment_device);
} else {
TfDeviceId tf_device_id;
PlatformDeviceId platform_device_id;
std::tie(tf_device_id, platform_device_id) = GetFirstValidDeviceId();
if (tf_device_id.value() >= 0) {
DeviceNameUtils::ParsedName parsed_name;
parsed_name.type = "GPU";
parsed_name.has_type = true;
parsed_name.id = tf_device_id.value();
parsed_name.has_id = true;
info->device = DeviceNameUtils::ParsedNameToString(parsed_name);
} else {
VLOG(1) << "No device is assigned to the segment. A device will be "
"assigned during graph execution (inference).";
}
}
return OkStatus();
}
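// Redirects an edge endpoint that now lives inside another engine: searches
// every engine except `my_engine_id` for a connection whose inside node/port
// matches, then rewrites `node` and `port` to that engine node and its port.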
void UpdateToEngineNode(const std::vector<EngineInfo>& infos,
const size_t my_engine_id,
const std::vector<Node*>& engine_nodes,
const bool is_input_edge, const string& node_name,
Node** node, int* port) {
for (size_t t = 0; t < infos.size(); ++t) {
if (t == my_engine_id) {
continue;
}
const auto& info = infos.at(t);
for (const auto& eng_conn : info.connections) {
if (is_input_edge == eng_conn.is_input_edge) continue;
if (eng_conn.inside_node_name == node_name &&
eng_conn.inside_port == *port) {
*node = CHECK_NOTNULL(engine_nodes[t]);
QCHECK_EQ(info.engine_name, (**node).name())
<< "Engine name mismatch: " << info.engine_name << " vs "
<< (**node).name();
*port = eng_conn.port_number;
return;
}
}
}
LOG(FATAL) << "Node " << node_name << " not found in any engine.";
}
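// Grows the shape bookkeeping vectors so `port_number` is addressable and
// returns `conn_shape` converted to a TensorShapeProto.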
tensorflow::TensorShapeProto ComputeTRTNodeIOShape(
std::vector<PartialTensorShape>& partial_tensorshape_vect,
std::vector<tensorflow::TensorShapeProto>& shape_proto_vect,
const PartialTensorShape& conn_shape, int port_number) {
tensorflow::TensorShapeProto tmp_shape_proto;
conn_shape.AsProto(&tmp_shape_proto);
if (partial_tensorshape_vect.size() <= port_number) {
shape_proto_vect.resize(port_number + 1);
partial_tensorshape_vect.resize(port_number + 1);
}
return tmp_shape_proto;
}
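// Builds the TRTEngineOp node for infos[pos]: collects boundary connections and
// shapes, optionally pre-builds a serialized static engine, adds the node to
// `graph`, and rewires the surrounding data and control edges to it.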
Status CreateTRTNode(const TRTOptimizationPass::ConversionParams& params,
const std::vector<EngineInfo>& infos, int pos,
int default_max_batch_size, Graph* graph,
std::vector<Node*>* engine_nodes,
grappler::Cluster* cluster) {
const auto& info = infos.at(pos);
std::vector<tensorflow::TensorShapeProto> input_shape_protos;
std::vector<tensorflow::TensorShapeProto> output_shape_protos;
std::vector<PartialTensorShape> input_shapes;
std::vector<PartialTensorShape> output_shapes;
std::vector<NodeDefBuilder::NodeOut> inputs;
std::vector<Node*> input_nodes;
std::vector<Node*> control_input_nodes;
std::unordered_set<string> control_input_names;
std::vector<DataType> out_types;
VLOG(1) << "Processing " << info.engine_name;
for (const auto& conn : info.connections) {
if (conn.is_control_edge()) {
if (!conn.is_input_edge) continue;
Node* input_node = graph->FindNodeId(conn.outside_id);
int port = Graph::kControlSlot;
if (!input_node) {
UpdateToEngineNode(infos, pos, *engine_nodes, true,
conn.outside_node_name, &input_node, &port);
QCHECK_EQ(Graph::kControlSlot, port);
}
if (!control_input_names.insert(input_node->name()).second) {
continue;
}
control_input_nodes.push_back(input_node);
VLOG(1) << "Engine Control Input " << input_node->name() << " -> "
<< info.engine_name;
} else {
if (!conn.is_input_edge) {
tensorflow::TensorShapeProto out_shape = ComputeTRTNodeIOShape(
output_shapes,
output_shape_protos,
conn.inside_shape,
conn.port_number);
output_shape_protos.at(conn.port_number) = out_shape;
output_shapes.at(conn.port_number) = conn.inside_shape;
if (out_types.size() <= conn.port_number) {
out_types.resize(conn.port_number + 1);
}
out_types.at(conn.port_number) = conn.connection_type;
VLOG(2) << "Collected output shape "
<< output_shape_protos.at(conn.port_number).DebugString();
} else {
tensorflow::TensorShapeProto in_shape = ComputeTRTNodeIOShape(
input_shapes,
input_shape_protos,
conn.outside_shape,
conn.port_number);
input_shape_protos.at(conn.port_number) = in_shape;
input_shapes.at(conn.port_number) = conn.outside_shape;
if (params.use_implicit_batch &&
info.engine_type == EngineInfo::EngineType::TRTStatic) {
for (int i = 1; i < conn.outside_shape.dims(); i++) {
if (conn.outside_shape.dim_size(i) <= 0) {
return errors::Internal(
"Not fully defined input shape when in static mode which "
"should have been excluded by the segmenter. ");
}
}
}
Node* input_node = graph->FindNodeId(conn.outside_id);
int port = conn.outside_port;
if (!input_node) {
UpdateToEngineNode(infos, pos, *engine_nodes, true,
conn.outside_node_name, &input_node, &port);
}
if (std::find_if(
std::begin(inputs), std::end(inputs),
[input_node, &port](const NodeDefBuilder::NodeOut& inp) {
return inp.node == input_node->name() && inp.index == port;
}) == std::end(inputs)) {
inputs.emplace_back(input_node->name(), port, conn.connection_type);
input_nodes.push_back(CHECK_NOTNULL(input_node));
VLOG(1) << "Engine Input " << input_node->name() << ":" << port
<< " -> " << info.engine_name << ":" << inputs.size() - 1;
}
}
}
}
if (inputs.empty()) {
return errors::Internal(
"Segment has no inputs (possible constfold failure)");
}
string segment_string;
int max_batch_size = info.max_batch_size.has_value()
? info.max_batch_size.value()
: default_max_batch_size;
if (info.engine_type == EngineInfo::EngineType::TRTStatic) {
    TF_RETURN_IF_ERROR(CreateStaticEngine(params, info, max_batch_size,
                                          input_shapes, /*profile=*/nullptr,
                                          &segment_string, cluster));
}
string prec_string;
TF_RETURN_IF_ERROR(TrtPrecisionModeToName(info.precision_mode, &prec_string));
NodeDefBuilder node_builder(info.engine_name, "TRTEngineOp");
if (!info.device.empty()) node_builder.Device(info.device);
if (VLOG_IS_ON(1)) {
string ins = StrCat(info.engine_name, " inputs= ");
for (const auto& ii : inputs) {
StrAppend(&ins, ii.node, ":", ii.index, " ");
}
VLOG(1) << ins;
}
node_builder.Input(inputs);
for (const string& c : control_input_names) {
node_builder.ControlInput(c);
}
NodeDef trt_node;
NameAttrList function;
function.set_name(StrCat(info.engine_name, "_native_segment"));
node_builder.Attr("input_shapes", input_shape_protos)
.Attr("output_shapes", output_shape_protos)
.Attr("static_engine",
info.engine_type == EngineInfo::EngineType::TRTStatic)
.Attr("segment_func", function)
.Attr("serialized_segment", segment_string)
.Attr("calibration_data", "")
.Attr("max_cached_engines_count", info.maximum_cached_engines)
.Attr("workspace_size_bytes", info.max_workspace_size_bytes)
.Attr("max_batch_size", max_batch_size)
.Attr("precision_mode", prec_string)
.Attr("use_calibration", info.use_calibration)
.Attr("_use_implicit_batch", params.use_implicit_batch)
.Attr("use_explicit_precision", params.use_explicit_precision)
.Attr("_allow_build_at_runtime", info.allow_build_at_runtime)
.Attr("OutT", out_types);
if (!params.use_implicit_batch) {
node_builder.Attr("profile_strategy",
ProfileStrategyToName(params.profile_strategy));
}
Status status = node_builder.Finalize(&trt_node);
if (!status.ok()) {
LOG(ERROR) << "Node construction failed with" << status;
return status;
}
VLOG(1) << "Adding TRTEngine " << info.engine_name << " to graph";
TF_ASSIGN_OR_RETURN(Node * engine_node, graph->AddNode(trt_node));
(*engine_nodes)[pos] = engine_node;
for (const auto in : control_input_nodes) {
VLOG(1) << "Connecting control edge from " << in->name() << " to "
<< engine_node->name();
graph->AddControlEdge(in, engine_node);
}
VLOG(1) << "input_nodes size = " << input_nodes.size();
for (int i = 0; i < input_nodes.size(); ++i) {
Node* n = CHECK_NOTNULL(input_nodes[i]);
const auto& in = inputs[i];
VLOG(1) << "Connecting data edge from " << n->name() << ":" << in.index
<< " to " << engine_node->name() << ":" << i;
graph->AddEdge(n, in.index, engine_node, i);
}
for (auto& conn : info.connections) {
if (conn.is_input_edge) {
continue;
}
Node* output_node = graph->FindNodeId(conn.outside_id);
int port = conn.outside_port;
if (!output_node) {
UpdateToEngineNode(infos, pos, *engine_nodes, false,
conn.outside_node_name, &output_node, &port);
}
if (conn.is_control_edge()) {
VLOG(1) << "Updating control edge from " << engine_node->name() << " to "
<< output_node->name();
QCHECK_EQ(Graph::kControlSlot, port);
graph->AddControlEdge(engine_node, output_node);
} else {
VLOG(1) << "Updating data edge from " << engine_node->name() << ":"
<< conn.port_number << " to " << output_node->name() << ":"
<< port;
TF_CHECK_OK(
graph->UpdateEdge(engine_node, conn.port_number, output_node, port));
}
}
return OkStatus();
}
int64 GetNextGraphSequenceNumber() {
static std::atomic<int64_t> graph_sequence_num;
return graph_sequence_num++;
}
constexpr char kCastInputTypeAttrName[] = "SrcT";
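// If `node_def` is a Cast to DT_FLOAT from a non-half type, splits it into
// Cast(src -> DT_HALF) followed by Cast(DT_HALF -> DT_FLOAT) by adding a
// "<name>_split" node and rewiring the original Cast's input and SrcT attr.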
Status MaybeRewriteCastToFp32(GraphDef* graph_def, NodeDef* node_def) {
if (node_def->op() != "Cast") {
return OkStatus();
}
DataTypeVector input_types;
DataTypeVector output_types;
TF_RETURN_IF_ERROR(
graph_transforms::GetInOutTypes(*node_def, &input_types, &output_types));
if (input_types.size() != 1 || output_types.size() != 1) {
return errors::Internal("Bad cast operation");
}
if (input_types[0] == DT_HALF || output_types[0] != DT_FLOAT) {
return OkStatus();
}
VLOG(2) << "Rewriting cast to FP32 " << node_def->DebugString();
NodeDef* castToFp16 = graph_def->add_node();
for (auto attr_value : node_def->attr()) {
(*castToFp16->mutable_attr())[attr_value.first] = attr_value.second;
}
castToFp16->set_name(node_def->name() + "_split");
castToFp16->set_op("Cast");
castToFp16->set_device(node_def->device());
castToFp16->add_input(node_def->input(0));
(*castToFp16->mutable_attr())[kCastOutputTypeAttrName].set_type(DT_HALF);
node_def->set_input(0, castToFp16->name() + ":0");
(*node_def->mutable_attr())[kCastInputTypeAttrName].set_type(DT_HALF);
VLOG(2) << castToFp16->DebugString();
VLOG(2) << node_def->DebugString();
return OkStatus();
}
}
Status RegisterGraphToFunctionLibrary(const GraphDef& segment_graph_def,
Graph* graph, const string& engine_name) {
Graph segment_graph(graph->flib_def());
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(GraphConstructorOptions(),
segment_graph_def, &segment_graph));
FunctionDefLibrary library;
auto segment_func = library.add_function();
TF_RETURN_IF_ERROR(GraphToFunctionDef(
segment_graph, StrCat(engine_name, "_native_segment"), segment_func));
if (VLOG_IS_ON(7)) {
VLOG(7) << engine_name << " Function_Def ";
VLOG(7) << segment_func->DebugString();
}
VLOG(1) << "Adding funcdef " << segment_func->signature().name()
<< " to graphlib";
TF_RETURN_IF_ERROR(graph->AddFunctionLibrary(library));
return OkStatus();
}
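// Picks the CUDA device id and allocator for `engine`: with no cluster, device
// set, or engine device name it uses the first valid TF GPU; otherwise it looks
// the engine's device up in the cluster's device set, returning (-1, nullptr)
// if nothing matches.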
std::pair<int, Allocator*> GetDeviceAndAllocator(
const grappler::Cluster* cluster, const EngineInfo& engine) {
int cuda_device_id = -1;
Allocator* dev_allocator = nullptr;
if (cluster == nullptr || cluster->GetDeviceSet() == nullptr ||
engine.device.empty()) {
TfDeviceId tf_device_id;
PlatformDeviceId platform_device_id;
std::tie(tf_device_id, platform_device_id) = GetFirstValidDeviceId();
cuda_device_id = platform_device_id.value();
if (cuda_device_id >= 0) {
GPUOptions gpu_options;
dev_allocator = GPUProcessState::singleton()->GetGPUAllocator(
gpu_options, tf_device_id, 1, {});
}
return std::make_pair(cuda_device_id, dev_allocator);
}
auto device_set = cluster->GetDeviceSet();
std::vector<Device*> devices;
DeviceNameUtils::ParsedName parsed_name;
if (DeviceNameUtils::ParseFullName(engine.device, &parsed_name) &&
parsed_name.has_id) {
device_set->FindMatchingDevices(parsed_name, &devices);
}
if (!devices.empty()) {
if (devices.size() > 1) {
string msg = "Found multiple matching devices using name '";
StrAppend(&msg, engine.device, "': ");
for (auto d : devices) StrAppend(&msg, d->name(), ", ");
StrAppend(&msg, ". Will get the allocator from first one.");
LOG_WARNING_WITH_PREFIX << msg;
}
AllocatorAttributes alloc_attr;
cuda_device_id = devices[0]->tensorflow_accelerator_device_info()->gpu_id;
dev_allocator = devices[0]->GetAllocator(alloc_attr);
VLOG(1) << "Using allocator " << dev_allocator->Name()
<< " and cuda_device_id " << cuda_device_id;
} else {
LOG_WARNING_WITH_PREFIX << "Cluster is set but device '" << engine.device
<< "' is not found in the cluster";
}
return std::make_pair(cuda_device_id, dev_allocator);
}
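// Builds a static TensorRT engine for `info` with the given max batch size and
// input shapes (allocator chosen via GetDeviceAndAllocator) and returns its
// serialized form in `segment_string`.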
Status CreateStaticEngine(const TRTOptimizationPass::ConversionParams& params,
const EngineInfo& info, int max_batch_size,
const std::vector<PartialTensorShape>& input_shapes,
TrtShapeOptimizationProfile* profile,
string* segment_string, grappler::Cluster* cluster) {
std::pair<int, Allocator*> device_allocator =
GetDeviceAndAllocator(cluster, info);
int cuda_device_id = 0;
std::unique_ptr<TRTBaseAllocator> trt_allocator;
if (device_allocator.first >= 0) {
cuda_device_id = device_allocator.first;
trt_allocator.reset(new TRTDeviceAllocator(device_allocator.second));
} else {
LOG_WARNING_WITH_PREFIX << "Can't identify the | #include "tensorflow/compiler/tf2tensorrt/convert/convert_graph.h"
#include <regex>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace convert {
class FakeCluster : public grappler::Cluster {
public:
FakeCluster() : Cluster(0) {}
void SetDeviceSet(const DeviceSet* device_set) { device_set_ = device_set; }
const DeviceSet* GetDeviceSet() const override { return device_set_; }
string type() const override { return ""; }
Status Provision() override { return OkStatus(); }
Status Initialize(const grappler::GrapplerItem& item) override {
return OkStatus();
}
Status Run(const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* metadata) override {
return OkStatus();
}
private:
const DeviceSet* device_set_ = nullptr;
};
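// Exercises device/allocator selection with no cluster, a cluster without a
// device set, a populated device set, and an engine device that does not
// exist.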
TEST(GetDeviceAndAllocatorTest, GetDeviceAndAllocator) {
TRTOptimizationPass::ConversionParams params;
EngineInfo engine_info;
{
auto result = GetDeviceAndAllocator(nullptr, engine_info);
EXPECT_EQ(-1, result.first);
EXPECT_EQ(nullptr, result.second);
}
SessionOptions options;
ConfigProto* config = &options.config;
GPUOptions* gpu_options = config->mutable_gpu_options();
auto virtual_devices =
gpu_options->mutable_experimental()->add_virtual_devices();
virtual_devices->add_memory_limit_mb(200);
virtual_devices->add_memory_limit_mb(200);
std::unique_ptr<Session> session(NewSession(options));
{
auto result = GetDeviceAndAllocator(nullptr, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_0_bfc", result.second->Name());
}
FakeCluster cluster;
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_0_bfc", result.second->Name());
}
DeviceSet device_set;
const DeviceMgr* device_mgr = nullptr;
TF_ASSERT_OK(session->LocalDeviceManager(&device_mgr));
for (auto d : device_mgr->ListDevices()) {
device_set.AddDevice(d);
}
cluster.SetDeviceSet(&device_set);
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_0_bfc", result.second->Name());
}
engine_info.device = "/GPU:1";
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_1_bfc", result.second->Name());
}
engine_info.device = "/GPU:3";
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(-1, result.first);
EXPECT_EQ(nullptr, result.second);
}
}
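// Fixture that runs ConvertGraph on a Scope with "output" as the only fetch
// and a small workspace, writing the rewritten graph into `output_graph_def`.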
class ConvertGraphTest : public ::testing::Test {
public:
Status RunConvertGraph(Scope s, GraphDef* output_graph_def,
int maximum_batch_size = 1000) {
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
const std::vector<string> input_output_names{"output"};
TRTOptimizationPass::ConversionParams params;
params.max_batch_size = maximum_batch_size;
params.max_workspace_size_bytes = 8 << 20;
params.minimum_segment_size = 1;
params.use_calibration = false;
params.trt_logger_name = "DefaultLogger";
return ConvertGraph(params, item, input_output_names, nullptr,
output_graph_def);
}
};
TEST_F(ConvertGraphTest, DirectlyConnectedEngines) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 1}));
auto segment_root_1 = ops::Identity(s.WithOpName("segment_root_b"), input);
auto add1 = ops::Add(s.WithOpName("add1"), segment_root_1, segment_root_1);
auto incompatible =
ops::Reshape(s.WithOpName("reshape1"), add1, Input({1, 2}));
incompatible =
ops::Reshape(s.WithOpName("reshape2"), incompatible, Input({2, 1}));
auto add2 = ops::Add(s.WithOpName("add2"), incompatible, add1);
auto segment_root_2 = ops::Identity(s.WithOpName("segment_root_a"), add1);
auto add3 = ops::Add(s.WithOpName("add3"), add2, segment_root_2);
ops::Identity(s.WithOpName("output"), add3);
GraphDef output_graph_def;
TF_EXPECT_OK(RunConvertGraph(s, &output_graph_def));
auto remove_graph_sequence_number = [](std::string node_name) {
const std::regex pattern("TRTEngineOp_[0-9]+_");
return std::regex_replace(node_name, pattern, "TRTEngineOp_");
};
int num_trt_ops = 0;
for (const NodeDef& node : output_graph_def.node()) {
std::string node_name = node.name();
if (node.op() != "TRTEngineOp") continue;
node_name = remove_graph_sequence_number(node_name);
if (node_name == "TRTEngineOp_001") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("input", node.input(0));
++num_trt_ops;
} else if (node_name == "TRTEngineOp_000") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("TRTEngineOp_001", remove_graph_sequence_number(node.input(0)));
EXPECT_EQ("reshape2", node.input(1));
++num_trt_ops;
}
}
EXPECT_EQ(2, num_trt_ops);
}
}
}
}
#endif |
1,155 | cpp | tensorflow/tensorflow | log_softmax | tensorflow/compiler/tf2tensorrt/convert/ops/log_softmax.cc | tensorflow/lite/kernels/log_softmax_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOG_SOFTMAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOG_SOFTMAX_H_
#include <algorithm>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
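// Quantized int8 LogSoftmax: per row, accumulates sum(exp(x - max)) in Q12
// fixed point, takes its log, and writes (x - max) - log_sum rescaled around
// the fixed output zero point of 127, clamping everything below the cutoff to
// the int8 minimum.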
inline void LogSoftmax(int32_t input_multiplier, int32_t input_shift,
int32_t reverse_multiplier, int32_t reverse_shift,
int32_t diff_min, int32_t outer_size, int32_t depth,
const int8* input_data, int8* output_data) {
static constexpr int8_t kMinInt8 = std::numeric_limits<int8_t>::min();
static constexpr int8_t kMaxInt8 = std::numeric_limits<int8_t>::max();
static constexpr int32_t kMinInt32 = std::numeric_limits<int32_t>::min();
static constexpr int32_t kOutputZeroPoint = 127;
static constexpr int kInputIntegerBits = 5;
static constexpr int kAccumulationIntegerBits = 12;
static constexpr int kOutputIntegerBits = 4;
using F5 = gemmlowp::FixedPoint<int32, kInputIntegerBits>;
using F12 = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
for (int outer_index = 0; outer_index < outer_size; ++outer_index) {
int8 max_in_row = kMinInt8;
for (int inner_index = 0; inner_index < depth; ++inner_index) {
max_in_row =
std::max(max_in_row, input_data[outer_index * depth + inner_index]);
}
F12 sum_of_exps_in_q12 = F12::FromRaw(0);
for (int inner_index = 0; inner_index < depth; ++inner_index) {
int32_t input_diff =
static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
max_in_row;
if (input_diff >= diff_min) {
const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
input_diff, input_multiplier, input_shift);
sum_of_exps_in_q12 =
sum_of_exps_in_q12 +
gemmlowp::Rescale<kAccumulationIntegerBits>(
exp_on_negative_values(F5::FromRaw(input_diff_in_q5)));
}
}
const int32_t log_sum_of_exps_in_q5 =
log_x_for_x_greater_than_or_equal_to_1<kInputIntegerBits>(
sum_of_exps_in_q12)
.raw();
const int32_t shifted_log_sum_of_exps_in_q5 =
log_sum_of_exps_in_q5 + kMinInt32;
const int32_t adjusted_diff_min = std::max(
diff_min - 1,
MultiplyByQuantizedMultiplier(shifted_log_sum_of_exps_in_q5,
reverse_multiplier, -reverse_shift));
for (int inner_index = 0; inner_index < depth; ++inner_index) {
int32_t input_diff =
static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
max_in_row;
if (input_diff > adjusted_diff_min) {
const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
input_diff, input_multiplier, input_shift);
int32_t output_in_q27 =
gemmlowp::RoundingDivideByPOT(
(input_diff_in_q5 - log_sum_of_exps_in_q5),
31 - kInputIntegerBits - kOutputIntegerBits) +
kOutputZeroPoint;
output_in_q27 =
std::max(std::min(output_in_q27, static_cast<int32_t>(kMaxInt8)),
static_cast<int32_t>(kMinInt8));
output_data[outer_index * depth + inner_index] =
static_cast<int8_t>(output_in_q27);
} else {
output_data[outer_index * depth + inner_index] = kMinInt8;
}
}
}
}
}
}
#endif
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
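// Converts TF LogSoftmax to TensorRT as x - log(sum(exp(x))) over the last
// dimension, built from exp -> reduce-sum -> log -> elementwise subtract.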
class ConvertLogSoftmax : public OpConverterBase<ConvertLogSoftmax> {
public:
explicit ConvertLogSoftmax(const OpConverterParams *params)
: OpConverterBase<ConvertLogSoftmax>(params) {}
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return std::array<InputArgSpec, 1>{
InputArgSpec::Create("logits", TrtInputArg::kTensor)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
if (!num_trt_dims && params.use_implicit_batch) {
return errors::InvalidArgument(
"TensorRT LogSoftmax cannot apply on the batch dimension");
}
return OkStatus();
}
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &node_def = params.node_def;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
nvinfer1::IUnaryLayer *exp = params.converter->network()->addUnary(
*logits_tensor->trt_tensor(), nvinfer1::UnaryOperation::kEXP);
TFTRT_RETURN_ERROR_IF_NULLPTR(exp, node_def.name());
params.converter->SetLayerName(exp, node_def, "exp");
    nvinfer1::IReduceLayer *reduced_sum =
        params.converter->network()->addReduce(
            *exp->getOutput(0), nvinfer1::ReduceOperation::kSUM,
            (1 << (num_trt_dims - 1)),  // Reduce over the last dimension only.
            /*keepDimensions=*/true);
params.converter->SetLayerName(reduced_sum, node_def, "reduced_sum");
nvinfer1::IUnaryLayer *log_reduced_sum =
params.converter->network()->addUnary(*reduced_sum->getOutput(0),
nvinfer1::UnaryOperation::kLOG);
TFTRT_RETURN_ERROR_IF_NULLPTR(log_reduced_sum, node_def.name());
params.converter->SetLayerName(log_reduced_sum, node_def,
"log_reduced_sum");
nvinfer1::IElementWiseLayer *sub =
params.converter->network()->addElementWise(
*logits_tensor->trt_tensor(), *log_reduced_sum->getOutput(0),
nvinfer1::ElementWiseOperation::kSUB);
TFTRT_RETURN_ERROR_IF_NULLPTR(sub, node_def.name());
params.converter->SetLayerName(sub, node_def, "sub");
params.outputs->push_back(TRT_TensorOrWeights(sub->getOutput(0)));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertLogSoftmax>(),
"LogSoftmax");
}
}
}
#endif | #include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
class LogSoftmaxOpModel : public SingleOpModel {
public:
LogSoftmaxOpModel(int batches, int size)
: batches_(batches), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOG_SOFTMAX, BuiltinOptions_LogSoftmaxOptions,
CreateLogSoftmaxOptions(builder_).Union());
BuildInterpreter({{batches_, input_size_}});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
int batches_;
int input_size_;
};
TEST(LogSoftmaxOpTest, SimpleTest) {
LogSoftmaxOpModel m(2, 5);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-4.45191431, -3.45191431, -2.45191431, -1.45191443, -0.4519144,
-0.4519144, -1.45191443, -2.45191431, -3.45191431, -4.45191431},
1e-6)));
}
TEST(LogSoftmaxOpTest, CompareWithTFmini) {
const int batch_size = 2;
const int input_size = 5;
static float input_buffer[] = {
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
};
LogSoftmaxOpModel m(batch_size, input_size);
m.SetInput(0, input_buffer, input_buffer + input_size * batch_size);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::unique_ptr<float[]> output_buffer(new float[input_size * batch_size]);
auto input_shape = RuntimeShape({batch_size, 1, 1, input_size});
SoftmaxParams params;
tflite::reference_ops::LogSoftmax(params, input_shape, input_buffer,
input_shape, output_buffer.get());
std::vector<float> expected;
expected.insert(expected.end(), output_buffer.get(),
output_buffer.get() + input_size * batch_size);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(expected, 1e-6)));
}
}
} |
1,156 | cpp | tensorflow/tensorflow | quantization_ops | tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.cc | tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OPS_QUANTIZATION_OPS_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OPS_QUANTIZATION_OPS_H_
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
constexpr std::array<const char*, 4> kQuantizationOpNames = {
"QuantizeAndDequantizeV2",
"QuantizeAndDequantizeV3",
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxArgs",
};
constexpr std::array<const char*, 1> kExplicitQuantizationOpNames = {
"QuantizeAndDequantizeV2",
};
template <typename T, size_t N>
struct QuantizationScales {
std::array<T, N> quantize_scale;
std::array<T, N> dequantize_scale;
};
using UniformQuantizationScales = QuantizationScales<float, 1>;
template <size_t ChannelDimSize>
using PerChannelQuantizationScales = QuantizationScales<float, ChannelDimSize>;
template <typename T, size_t N>
std::ostream& operator<<(std::ostream& os,
const QuantizationScales<T, N>& scales) {
os << absl::StrFormat("QuantizationScales[quantize={%s},dequantize={%s}]",
absl::StrJoin(scales.quantize_scale, ","),
absl::StrJoin(scales.dequantize_scale, ","));
return os;
}
bool IsQuantizeAndDequantizeOp(const Node*);
}
}
}
#endif
#endif
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "absl/strings/str_format.h"
#include "tensorflow/cc/ops
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/weights.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
bool IsQuantizeAndDequantizeOp(const Node* node) {
return absl::c_find(kQuantizationOpNames, node->def().op()) !=
kQuantizationOpNames.end();
}
namespace {
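// Derives quantize/dequantize scales for `num_bits` from [*min_range,
// *max_range]: the side that yields the smaller quantize scale wins and the
// opposite range bound is adjusted to stay representable.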
template <typename T>
QuantizationScales<T, 1> ComputeQuantizationRange(bool signed_input,
int num_bits,
bool narrow_range,
T* min_range, T* max_range) {
const int64_t min_quantized =
signed_input ? narrow_range ? -(1ULL << (num_bits - 1)) + 1
: -(1ULL << (num_bits - 1))
: 0;
const int64_t max_quantized =
signed_input ? (1ULL << (num_bits - 1)) - 1 : (1ULL << num_bits) - 1;
const T scale_from_min_side = (min_quantized * *min_range > 0)
? min_quantized / *min_range
: std::numeric_limits<T>::max();
const T scale_from_max_side = (max_quantized * *max_range > 0)
? max_quantized / *max_range
: std::numeric_limits<T>::max();
QuantizationScales<T, 1> scales;
if (scale_from_min_side < scale_from_max_side) {
scales.quantize_scale[0] = scale_from_min_side;
scales.dequantize_scale[0] = *min_range / min_quantized;
*max_range = max_quantized * scales.dequantize_scale[0];
} else {
scales.quantize_scale[0] = scale_from_max_side;
scales.dequantize_scale[0] = *max_range / max_quantized;
*min_range = min_quantized * scales.dequantize_scale[0];
}
return scales;
}
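// Returns the ITensor feeding the explicit Q/DQ layers; weight inputs are
// first materialized as a TensorRT constant layer.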
StatusOr<nvinfer1::ITensor*> ExlicitQDQInputToTensor(
TRTNetworkBuilder* builder, const OpConverterParams* params,
const TRT_TensorOrWeights& input) {
if (input.is_tensor()) {
return input.tensor()->trt_tensor();
}
if (!IS_TRT_VERSION_GE(8, 0, 0, 0) && input.weights().count() > 1) {
LOG(WARNING) << absl::StrCat(
"QDQ per-channel for weights not "
"implemented, assuming uniform scaling");
}
TRT_ShapedWeights trt_weights = input.weights();
StatusOr<nvinfer1::IConstantLayer*> weights_const =
builder->WeightsToConstant(trt_weights.GetTrtWeights(),
trt_weights.Shape());
TRT_ENSURE_PTR_OK(weights_const);
params->converter->SetLayerName(*weights_const, params->node_def, "const");
nvinfer1::ITensor* qdq_input = (*weights_const)->getOutput(0);
std::string name = absl::StrCat((*weights_const)->getName(), "_output");
qdq_input->setName(name.c_str());
return qdq_input;
}
}
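// Per-op traits: input argument specs, attributes, validation and conversion
// logic for each supported quantization op, specialized below.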
template <typename T>
struct QDQOpSpec {};
template <>
struct QDQOpSpec<ops::QuantizeAndDequantizeV2> {
static constexpr std::array<InputArgSpec, 3> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("input_min", TrtInputArg::kWeight),
InputArgSpec::Create("input_max", TrtInputArg::kWeight),
};
}
struct Attrs {
float min_range;
float max_range;
bool narrow_range;
std::string round_mode;
UniformQuantizationScales scales;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "round_mode", &args->round_mode));
if (args->round_mode != "HALF_TO_EVEN") {
LOG(WARNING) << node_def.op() << ": " << node_def.name()
<< " has round_mode=" << args->round_mode
<< ", but for TensorRT conversion, "
"round_mode=HALF_TO_EVEN is recommended.";
}
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "narrow_range", &args->narrow_range));
if (args->narrow_range) {
LOG(WARNING) << node_def.op() << ": " << node_def.name()
<< " has narrow_range=true, but for TensorRT conversion, "
"narrow_range=false is recommended.";
}
args->min_range = inputs.at(1).weights().template GetPointer<float>()[0];
args->max_range = inputs.at(2).weights().template GetPointer<float>()[0];
const int num_bits = 8;
args->scales = ComputeQuantizationRange<float>(
true, num_bits, args->narrow_range, &args->min_range,
&args->max_range);
TRT_ENSURE(args->scales.dequantize_scale[0] != 0);
TRT_ENSURE(args->scales.quantize_scale[0] != 0);
return OkStatus();
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
const auto& node_def = params->node_def;
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
StatusOr<nvinfer1::ITensor*> qdq_input =
ExlicitQDQInputToTensor(&*builder, params, params->inputs.at(0));
TRT_ENSURE_PTR_OK(qdq_input);
const int required_dims = params->use_implicit_batch ? 3 : 4;
const nvinfer1::Dims idims = (*qdq_input)->getDimensions();
nvinfer1::Dims intermediate_dims = idims;
TRT_ENSURE(idims.nbDims > 0);
if (idims.nbDims < required_dims) {
const int nb_extra_dims = required_dims - idims.nbDims;
intermediate_dims.nbDims = required_dims;
std::vector<int> ones(nb_extra_dims, 1);
TRT_ENSURE(ones.size() == nb_extra_dims && nb_extra_dims > 0);
if (!params->use_implicit_batch) {
intermediate_dims.d[0] = idims.d[0];
std::copy(ones.begin(), ones.end(), intermediate_dims.d + 1);
std::copy_n(idims.d + 1, idims.nbDims - 1,
intermediate_dims.d + ones.size() + 1);
} else {
std::copy(ones.begin(), ones.end(), intermediate_dims.d);
std::copy_n(idims.d, idims.nbDims, intermediate_dims.d + ones.size());
}
LOG(WARNING) << absl::StrCat(
node_def.name(), ":", node_def.op(), ": tensor ",
(*qdq_input)->getName(), " has shape ", DebugString(idims),
" but TRT scale layer requires at least 3 dims excluding batch dim, "
"trying to recover by inserting 1's to create shape ",
DebugString(intermediate_dims));
StatusOr<nvinfer1::IShuffleLayer*> reshape =
builder->Reshape(*qdq_input, intermediate_dims);
TRT_ENSURE_PTR_OK(reshape);
*qdq_input = (*reshape)->getOutput(0);
}
VLOG(1) << "[ExplicitPrecision]" << node_def.op() << ": " << node_def.name()
<< " computed scales: " << args.scales << " from min/max ranges "
<< args.min_range << "/" << args.max_range;
StatusOr<nvinfer1::ILayer*> qdq =
builder->UniformQuantizeDequantizeExplicit(
*qdq_input, args.scales.quantize_scale[0],
args.scales.dequantize_scale[0], node_def.name());
TRT_ENSURE_PTR_OK(qdq);
ITensorProxyPtr final_output = (*qdq)->getOutput(0);
if (idims.nbDims != intermediate_dims.nbDims) {
StatusOr<nvinfer1::IShuffleLayer*> undo_reshape =
builder->Reshape(*qdq_input, idims);
TRT_ENSURE_PTR_OK(undo_reshape);
final_output = (*undo_reshape)->getOutput(0);
}
params->outputs->push_back(final_output);
return OkStatus();
}
};
template <>
struct QDQOpSpec<ops::QuantizeAndDequantizeV3> {
static constexpr std::array<InputArgSpec, 4> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("min", TrtInputArg::kWeight),
InputArgSpec::Create("max", TrtInputArg::kWeight),
InputArgSpec::Create("num_bits", TrtInputArg::kWeight),
};
}
using Attrs = QDQOpSpec<ops::QuantizeAndDequantizeV2>::Attrs;
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return QDQOpSpec<
ops::QuantizeAndDequantizeV2>::ValidateQDQForExplicitPrecision(inputs,
node_def,
args);
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return QDQOpSpec<ops::QuantizeAndDequantizeV2>::ConvertExplicit(params,
args);
}
};
template <>
struct QDQOpSpec<ops::FakeQuantWithMinMaxVars> {
static constexpr std::array<InputArgSpec, 3> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("min", TrtInputArg::kWeight),
InputArgSpec::Create("max", TrtInputArg::kWeight),
};
}
struct Attrs {
int num_bits;
bool narrow_range;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return errors::Unimplemented("");
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return errors::Unimplemented("");
}
};
template <>
struct QDQOpSpec<ops::FakeQuantWithMinMaxArgs> {
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
};
}
struct Attrs {
float min;
float max;
int num_bits;
bool narrow_range;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return errors::Unimplemented("");
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return errors::Unimplemented("");
}
};
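// Non-explicit-precision path: reads the quantization range from attributes or
// weight inputs and registers it with the converter instead of emitting
// explicit quantize/dequantize layers; the op itself becomes a pass-through.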
Status ConvertDynamicRangeMode(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
float min_range = 0.0f;
float max_range = 0.0f;
const auto& op_name = node_def.op();
if (op_name == "FakeQuantWithMinMaxArgs") {
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "min", &min_range));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "max", &max_range));
} else if (op_name == "FakeQuantWithMinMaxVars" ||
op_name == "QuantizeAndDequantizeV2" ||
op_name == "QuantizeAndDequantizeV3") {
auto get_weights_value = [&inputs](int index) {
const auto* raw_weights = inputs.at(index).weights().GetPointer<float>();
return raw_weights[0];
};
min_range = get_weights_value(1);
max_range = get_weights_value(2);
} else {
return errors::InvalidArgument("Unknown quantization op ", op_name, ", at ",
node_def.name());
}
if (params->validation_only) {
return OkStatus();
}
ITensorProxyPtr input0 = inputs.at(0).tensor();
params->converter->ProvideQuantizationRange(&input0, min_range, max_range);
params->outputs->push_back(inputs.at(0));
return OkStatus();
}
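// Converter for the quantization ops above; dispatches to explicit-precision
// QDQ conversion or dynamic-range INT8 mode based on the converter settings.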
template <typename TFOpType>
class ConvertQDQ : public OpConverterBase<ConvertQDQ<TFOpType>> {
public:
explicit ConvertQDQ(const OpConverterParams* params)
: OpConverterBase<ConvertQDQ<TFOpType>>(params) {}
static constexpr auto InputSpec() { return QDQOpSpec<TFOpType>::InputSpec(); }
static constexpr const char* NodeDefDataTypeAttributeName() { return ""; }
Status ValidateDynamicRangeINT8Mode() {
if (this->params_->validation_only) {
return ConvertDynamicRangeMode(this->params_);
}
return OkStatus();
}
Status Validate() {
if (!this->params_->use_explicit_precision) {
return ValidateDynamicRangeINT8Mode();
}
return OpSpec::ValidateQDQForExplicitPrecision(
this->params_->inputs, this->params_->node_def, &attrs_);
}
Status Convert() {
if (!this->params_->use_explicit_precision) {
return ConvertDynamicRangeMode(this->params_);
}
return OpSpec::ConvertExplicit(this->params_, attrs_);
}
using OpSpec = QDQOpSpec<TFOpType>;
using OpSpecAttrs = typename QDQOpSpec<TFOpType>::Attrs;
OpSpecAttrs attrs_;
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::QuantizeAndDequantizeV2>>(),
"QuantizeAndDequantizeV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::QuantizeAndDequantizeV3>>(),
"QuantizeAndDequantizeV3");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::FakeQuantWithMinMaxVars>>(),
"FakeQuantWithMinMaxVars");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::FakeQuantWithMinMaxArgs>>(),
"FakeQuantWithMinMaxArgs");
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/linalg_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/compiler/jit/shape_inference.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
namespace tensorflow {
namespace tensorrt {
namespace convert {
namespace ops = ::tensorflow::ops;
using ::tensorflow::testing::StatusIs;
namespace {
enum class ConvEpilogueType {
kNone,
kReLU,
kBatchNorm,
kReLUBatchnorm,
kBatchnormReLU
};
std::ostream& operator<<(std::ostream& os, ConvEpilogueType epilogue) {
switch (epilogue) {
case ConvEpilogueType::kNone:
return os << "None";
case ConvEpilogueType::kReLU:
return os << "ReLU only";
case ConvEpilogueType::kBatchNorm:
return os << "BatchNorm Only";
case ConvEpilogueType::kReLUBatchnorm:
return os << "ReLU+Batchnorm";
case ConvEpilogueType::kBatchnormReLU:
return os << "BatchNorm+ReLU";
}
}
std::string DebugString(ConvEpilogueType epilogue) {
std::stringstream ss;
ss << epilogue;
return ss.str();
}
ops::Placeholder AddInput(Scope scope, int input_idx,
const std::string data_format,
std::array<int, 3> size_chw = {1, 3, 3}) {
PartialTensorShape input_shape;
if (data_format == "NCHW") {
input_shape =
PartialTensorShape({1, size_chw[0], size_chw[1], size_chw[2]});
} else if (data_format == "NHWC") {
input_shape =
PartialTensorShape({1, size_chw[1], size_chw[2], size_chw[0]});
} else if (data_format == "NHW") {
input_shape = PartialTensorShape({1, size_chw[1], size_chw[2]});
} else {
LOG(FATAL) << "Unknown input shape type " << data_format;
}
auto input_attrs = ops::Placeholder::Attrs().Shape(input_shape);
return ops::Placeholder(scope.WithOpName(absl::StrCat("input_", input_idx)),
DT_FLOAT, input_attrs);
}
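// Inserts a QuantizeAndDequantizeV2 node with a fixed [-1, 1] range around
// `input`.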
Output AddQDQV2(Scope scope, Input input) {
auto input_min =
ops::Const<float>(scope.WithOpName("in_min"), -1.0f, TensorShape{});
auto input_max =
ops::Const<float>(scope.WithOpName("in_max"), 1.0f, TensorShape{});
return ops::QuantizeAndDequantizeV2(scope.WithOpName("qdq"), input, input_min,
input_max);
}
Output AddOutput(Scope scope, Output input, int idx, bool add_qdq) {
Output out = input;
if (add_qdq) {
out = AddQDQV2(scope, input);
}
return ops::Identity(scope.WithOpName(StrCat("output_", idx)), out);
}
Output AddConv2D(Scope scope, Input input, int in_channels, int out_channels,
std::array<int, 2> filter_size = {1, 1},
std::array<int, 2> stride = {1, 1},
const std::string& data_format = "NCHW", bool with_bias = true,
ConvEpilogueType epilogue = ConvEpilogueType::kBatchnormReLU,
bool qdq_on_output = false) {
auto weights_const = ops::Const(
scope.WithOpName("weights"), 1.0f,
TensorShape({filter_size[0], filter_size[1], in_channels, out_channels}));
auto conv_input =
!qdq_on_output ? AddQDQV2(scope.WithOpName("qdq_input"), input) : input;
Output result = ops::Conv2D(
scope.WithOpName("conv2d"), conv_input, AddQDQV2(scope, weights_const),
{1, 1, 1, 1},
"SAME", ops::Conv2D::Attrs().DataFormat(data_format));
if (with_bias) {
auto bias_const = ops::Const(scope.WithOpName("bias_weights"), 1.0f,
TensorShape({
out_channels,
}));
result = ops::BiasAdd(scope.WithOpName("bias"), result, bias_const,
ops::BiasAdd::Attrs().DataFormat(data_format));
}
auto add_bn = [scope, data_format](Input input,
const int channels) -> Output {
TensorShape constant_shape = TensorShape({channels});
auto bn_scale =
ops::Const(scope.WithOpName("bn_scale"), 1.0f, constant_shape);
auto bn_offset =
ops::Const(scope.WithOpName("bn_offset"), 1.0f, constant_shape);
auto bn_mean =
ops::Const(scope.WithOpName("bn_mean"), 0.1f, TensorShape({channels}));
auto bn_var =
ops::Const(scope.WithOpName("bn_var"), 1.0f, TensorShape({channels}));
Input conv_bn_input = IS_TRT_VERSION_GE(8, 0, 1, 0)
? input
: AddQDQV2(scope.WithOpName("qdq_input"), input);
return ops::FusedBatchNormV3(
scope.WithOpName("bn"), conv_bn_input, bn_scale, bn_offset,
bn_mean, bn_var,
ops::FusedBatchNormV3::Attrs().IsTraining(false).DataFormat(
data_format))
.y;
};
switch (epilogue) {
case ConvEpilogueType::kBatchNorm: {
result = add_bn(result, out_channels);
break;
}
case ConvEpilogueType::kReLU: {
result = ops::Relu(scope.WithOpName("relu"), result);
break;
}
case ConvEpilogueType::kReLUBatchnorm: {
result = ops::Relu(scope.WithOpName("relu"), result);
result = add_bn(result, out_channels);
break;
}
case ConvEpilogueType::kBatchnormReLU: {
result = add_bn(result, out_channels);
result = ops::Relu(scope.WithOpName("relu"), result);
break;
}
case ConvEpilogueType::kNone:
break;
}
if (qdq_on_output) {
result = AddQDQV2(scope.WithOpName("qdq_out"), result);
}
return result;
}
ops::BatchMatMulV2 AddMatMul(Scope scope, const std::string& name,
Input input) {
auto input_qdq = AddQDQV2(scope, input);
auto weights_const =
ops::Const(scope.WithOpName(name + "_weights"),
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f},
TensorShape({3, 3}));
auto weights_qdq = AddQDQV2(scope.WithOpName("weights_qdq"), weights_const);
return ops::BatchMatMulV2(scope.WithOpName(name), input_qdq, weights_qdq);
}
}
struct QDQTestOptions {
bool conv_has_bias{true};
std::string data_format{"NCHW"};
bool qdq_on_output{false};
bool final_qdq{true};
ConvEpilogueType conv_epilogue;
TfTrtConversionParams conversion_params{};
};
std::ostream& operator<<(std::ostream& os, const QDQTestOptions opts) {
return os << absl::StrCat(
"QDQTestOptions(conv_has_bias=",
static_cast<int>(opts.conv_has_bias),
", qdq_on_output=", static_cast<int>(opts.qdq_on_output),
", data_format=", opts.data_format,
", conv_epilogue=", DebugString(opts.conv_epilogue),
", final_qdq=", opts.final_qdq, ")");
}
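// Enumerates combinations of data format, bias, QDQ placement, final QDQ and
// conv epilogue; batch-norm epilogues are skipped for NHWC inputs.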
std::vector<QDQTestOptions> EnumerateQDQTestOptions() {
std::vector<QDQTestOptions> result;
for (const absl::string_view data_format : {"NCHW", "NHWC"}) {
for (auto use_bias : {true, false}) {
for (auto qdq_on_output : {false, true}) {
for (auto final_qdq : {true, false}) {
for (auto conv_epilogue :
{ConvEpilogueType::kReLU, ConvEpilogueType::kNone,
ConvEpilogueType::kBatchnormReLU}) {
if (data_format == "NHWC" &&
(conv_epilogue == ConvEpilogueType::kBatchnormReLU ||
conv_epilogue == ConvEpilogueType::kBatchNorm ||
conv_epilogue == ConvEpilogueType::kBatchnormReLU)) {
continue;
}
QDQTestOptions opts{};
opts.conv_has_bias = use_bias;
opts.data_format = data_format;
opts.qdq_on_output = qdq_on_output;
opts.final_qdq = final_qdq;
opts.conv_epilogue = conv_epilogue;
result.push_back(opts);
}
}
}
}
}
return result;
}
class QDQExplicitTest : public ::testing::Test,
public ::testing::WithParamInterface<QDQTestOptions> {
public:
static StatusOr<PartialTensorShape> GetShape(const std::string& name,
const GraphShapeInfo& shapes) {
TRT_ENSURE(shapes.find(name) != shapes.end());
TRT_ENSURE(shapes.at(name).size() == 1);
return shapes.at(name)[0].shape;
}
StatusOr<MetaGraphDef> GetModel(const GraphDef& graph_def,
const std::vector<const NodeDef*>& inputs,
const std::vector<const NodeDef*>& outputs,
const GraphShapeInfo& shapes) {
TRT_ENSURE(!inputs.empty());
TRT_ENSURE(!outputs.empty());
MetaGraphDef out;
out.mutable_graph_def()->CopyFrom(graph_def);
SignatureDef signature_def;
auto& mutable_inputs = *signature_def.mutable_inputs();
for (int i = 0; i < inputs.size(); i++) {
std::string input_name = inputs[i]->name();
auto& input = mutable_inputs[input_name];
input.set_name(input_name);
input.set_dtype(DT_FLOAT);
TRT_ENSURE(shapes.find(input_name) != shapes.end());
TRT_ENSURE(shapes.at(input_name).size() == 1);
PartialTensorShape input_shape = shapes.at(input_name)[0].shape;
input_shape.AsProto(input.mutable_tensor_shape());
}
auto& mutable_outputs = *signature_def.mutable_outputs();
for (int i = 0; i < outputs.size(); i++) {
std::string output_name = outputs[i]->name();
auto& output = mutable_outputs[output_name];
output.set_name(output_name);
output.set_dtype(DT_FLOAT);
TRT_ENSURE(shapes.find(output_name) != shapes.end());
TRT_ENSURE(shapes.at(output_name).size() == 1);
PartialTensorShape output_shape = shapes.at(output_name)[0].shape;
output_shape.AsProto(output.mutable_tensor_shape());
}
(*out.mutable_signature_def())["serving_default"] = signature_def;
return out;
}
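// Expects the converted graph to contain exactly one static TRTEngineOp with a
// non-empty serialized engine.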
static Status CheckTrtNode(const GraphDef& converted_graph_def) {
int n_trt_ops = 0;
string op_name{"TRTEngineOp"};
for (const auto& node : converted_graph_def.node()) {
if (op_name == node.op()) {
n_trt_ops++;
const auto& attr = node.attr();
TRT_ENSURE(attr.at("static_engine").b());
VLOG(2) << "Found serialized segment with size "
<< attr.at("serialized_segment").s().size();
TRT_ENSURE(!attr.at("serialized_segment").s().empty());
}
}
TRT_ENSURE(n_trt_ops == 1);
return OkStatus();
}
Status ConvertAndRun(Scope* scope) {
std::vector<const NodeDef*> inputs;
std::vector<const NodeDef*> outputs;
GraphDef gdef;
TF_RETURN_IF_ERROR(scope->ToGraphDef(&gdef));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_RETURN_IF_ERROR(scope->ToGraph(graph.get()));
GraphShapeInfo shape_info;
TF_RETURN_IF_ERROR(InferShapes(graph.get(), {},
nullptr, &shape_info));
for (const NodeDef& node : gdef.node()) {
if (absl::StartsWith(node.name(), "input_")) {
inputs.push_back(&node);
} else if (absl::StartsWith(node.name(), "output_")) {
outputs.push_back(&node);
}
}
StatusOr<MetaGraphDef> meta_graph_def =
GetModel(gdef, inputs, outputs, shape_info);
TRT_ENSURE_OK(meta_graph_def);
std::vector<Tensor> input_tensors;
std::vector<std::string> input_names;
for (const auto& input : inputs) {
input_names.push_back(input->name());
StatusOr<PartialTensorShape> input_shape =
GetShape(input->name(), shape_info);
TRT_ENSURE_OK(input_shape);
TensorShape shape;
input_shape->AsTensorShape(&shape);
Tensor tensor(DT_FLOAT, shape);
test::FillIota(&tensor, 1.0f);
input_tensors.push_back(tensor);
}
std::vector<std::string> output_names;
for (const auto& output : outputs) {
output_names.push_back(output->name());
}
TfTrtConversionParams conversion_params;
conversion_params.allow_build_at_runtime = true;
conversion_params.precision_mode = TrtPrecisionMode::INT8;
conversion_params.use_calibration = false;
conversion_params.convert_to_static_engine = true;
TRT_ENSURE(input_names.size() == input_tensors.size());
StatusOr<GraphDef> converted_gdef = tensorrt::ConvertAndBuild(
meta_graph_def->graph_def(), input_names, output_names, {input_tensors},
conversion_params);
TRT_ENSURE_OK(converted_gdef);
return CheckTrtNode(*converted_gdef);
}
protected:
TfTrtConversionParams params_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
};
class TestQDQSuite : public QDQExplicitTest {};
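// Expected-outcome helpers: TRT7 conversions are expected to fail when QDQ is
// placed on outputs or the final QDQ is missing, while TRT8 rejects batch-norm
// epilogues in NHWC.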
#define EXPECT_QDQ_ON_OUTPUT_FAILURE(params, scope) \
if ((params).qdq_on_output) { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::INTERNAL)); \
return; \
}
#define EXPECT_NO_FINAL_QDQ_FAILURE(params, scope) \
if (!(params).final_qdq) { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::INTERNAL)); \
return; \
}
#define EXPECT_BUILD_OK(scope) TF_EXPECT_OK(ConvertAndRun(&(scope)))
#define POLICY_TRT7(params, scope) \
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) { \
EXPECT_QDQ_ON_OUTPUT_FAILURE(params, scope); \
EXPECT_NO_FINAL_QDQ_FAILURE(params, scope); \
EXPECT_BUILD_OK(scope); \
}
#define POLICY_TRT8(params, scope) \
if (IS_TRT_VERSION_GE(8, 0, 0, 0)) { \
if (((params).conv_epilogue == ConvEpilogueType::kBatchNorm || \
(params).conv_epilogue == ConvEpilogueType::kBatchnormReLU || \
(params).conv_epilogue == ConvEpilogueType::kReLUBatchnorm) && \
(params).data_format == "NHWC") { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::UNIMPLEMENTED)); \
return; \
} \
EXPECT_BUILD_OK(scope); \
}
#define SKIP_TRT7(x) \
if (!IS_TRT_VERSION_GE(8, 0, 0, 0) && (x)) { \
GTEST_SKIP(); \
}
TEST_P(TestQDQSuite, TestConv2DBasic) {
SKIP_TRT7(GetParam().qdq_on_output);
SKIP_TRT7(GetParam().data_format != "NCHW");
SKIP_TRT7(!GetParam().final_qdq);
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format, {3, 28, 28});
Output out = input;
const int num_conv = 1;
std::array<int, 2> in_channels = {3, 16};
std::array<int, 2> out_channels = {16, 32};
for (int i = 0; i < num_conv; i++) {
out = AddConv2D(scope.WithOpName(absl::StrCat("conv_", i)), out,
in_channels[i], out_channels[i], {3, 3},
{1, 1}, GetParam().data_format,
GetParam().conv_has_bias, GetParam().conv_epilogue,
GetParam().qdq_on_output);
}
out = AddOutput(scope, out, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, TestMatMulBasic) {
if (GetParam().data_format != "NCHW" || !GetParam().conv_has_bias ||
GetParam().qdq_on_output ||
GetParam().conv_epilogue != ConvEpilogueType::kReLU) {
GTEST_SKIP();
}
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, "NHW");
auto matmul_op = AddMatMul(scope, "matmul", input);
auto out = AddOutput(scope, matmul_op, 0, GetParam().final_qdq);
TF_EXPECT_OK(ConvertAndRun(&scope));
}
TEST_P(TestQDQSuite, AddBothBranchesQDQConvSingleInput) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input1 = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input1, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto conv2 =
AddConv2D(scope, input1, 3, 16, {3, 3},
{1, 1}, GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto add =
ops::Add(scope.WithOpName("add"),
!GetParam().qdq_on_output ? AddQDQV2(scope, conv1) : conv1,
!GetParam().qdq_on_output ? AddQDQV2(scope, conv2) : conv2);
auto conv3 =
AddConv2D(scope.WithOpName("conv3"), conv2, 16, 16, {1, 1}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto out =
AddOutput(scope.WithOpName("output"), conv3, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, AddBothBranchesQDQMultipleInput) {
SKIP_TRT7(true);
Scope scope = Scope::NewRootScope();
auto input1 = AddInput(scope, 0, GetParam().data_format);
auto input2 = AddInput(scope, 1, GetParam().data_format);
auto add =
ops::Add(scope.WithOpName("add"),
!GetParam().qdq_on_output ? AddQDQV2(scope, input1) : input1,
!GetParam().qdq_on_output ? AddQDQV2(scope, input2) : input2);
auto output = AddOutput(scope, add, 0, true);
TF_EXPECT_OK(ConvertAndRun(&scope));
}
TEST_P(TestQDQSuite, TestConvMaxpool) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
ops::MaxPool maxpool =
ops::MaxPool(scope.WithOpName("maxpool"),
AddQDQV2(scope.WithOpName("mp_qdq_in"), conv1), {1, 1, 1, 1},
{1, 1, 1, 1}, "SAME",
ops::MaxPool::Attrs().DataFormat(GetParam().data_format));
auto output =
AddOutput(scope.WithOpName("output"), maxpool, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, TestConvMaxpoolConv) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
ops::MaxPool maxpool =
ops::MaxPool(scope.WithOpName("maxpool"),
AddQDQV2(scope.WithOpName("mp_qdq_in"), conv1), {1, 1, 1, 1},
{1, 1, 1, 1}, "SAME",
ops::MaxPool::Attrs().DataFormat(GetParam().data_format));
auto conv2 = AddConv2D(scope, maxpool, 16, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto output =
AddOutput(scope.WithOpName("out"), conv2, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
INSTANTIATE_TEST_SUITE_P(TestQDQSuiteInst, TestQDQSuite,
::testing::ValuesIn(EnumerateQDQTestOptions()));
}
}
}
#endif
#endif |
1,157 | cpp | tensorflow/tensorflow | segment | tensorflow/compiler/tf2tensorrt/segment/segment.cc | tensorflow/compiler/tf2tensorrt/segment/segment_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_SEGMENT_SEGMENT_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_SEGMENT_SEGMENT_H_
#include <set>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2tensorrt/segment/union_find.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
constexpr char kTftrtOpMaxBatchSizeAttr[] = "_tftrt_op_max_batch_size";
struct SegmentOptions {
int minimum_segment_size = 2;
bool use_implicit_batch = true;
std::optional<int> maximum_batch_size = std::nullopt;
bool allow_dynamic_non_batch_dim = false;
std::set<string> exclude_node_list;
};
struct NodePtrCompare {
bool operator()(const Node* lhs, const Node* rhs) const {
return lhs->name() < rhs->name();
}
};
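// A candidate TensorRT segment: the nodes it contains (ordered by name) and
// the cluster property (batch size and device) they share.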
struct Segment {
Segment() {}
Segment(const ClusterProperty& property,
const std::set<const Node*, NodePtrCompare>& nodes)
: property(property), nodes(nodes) {}
ClusterProperty property;
std::set<const Node*, NodePtrCompare> nodes;
};
using SegmentVector = std::vector<Segment>;
Status SegmentGraph(const Graph* tf_graph,
const grappler::GraphProperties* graph_properties,
const std::function<Status(const Node*)>& candidate_fn,
const std::function<bool(const Edge*)>& input_candidate_fn,
const std::function<bool(const Edge*)>& output_candidate_fn,
const SegmentOptions& options, SegmentVector* segments);
}
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include <algorithm>
#include <fstream>
#include <map>
#include <numeric>
#include <queue>
#include <tuple>
#include <unordered_map>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/util/env_var.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace {
using absl::StrAppend;
using absl::StrAppendFormat;
using absl::StrCat;
using absl::StrJoin;
class SimpleNode;
class SimpleGraph;
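// Lightweight mutable mirror of the TF Graph used during segmentation so that
// edges can be contracted without modifying the original graph.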
class SimpleEdge {
public:
SimpleEdge(int id, SimpleNode* src, int src_port, SimpleNode* dst,
int dst_port, bool is_control = false)
: id_(id),
src_(src),
src_port_(src_port),
dst_(dst),
dst_port_(dst_port),
control_(is_control) {}
~SimpleEdge() {}
SimpleNode* src() const { return src_; }
SimpleNode* dst() const { return dst_; }
int src_output() const { return src_port_; }
int dst_input() const { return dst_port_; }
int id() const { return id_; }
bool IsControlEdge() const { return control_; }
private:
int id_;
SimpleNode* src_;
int src_port_;
SimpleNode* dst_;
int dst_port_;
bool control_;
};
class SimpleNode {
public:
SimpleNode(const Node* node, const int id);
const std::vector<SimpleEdge*>& in_edges() const { return in_edges_; }
const std::vector<SimpleEdge*>& out_edges() const { return out_edges_; }
std::vector<SimpleNode*> in_nodes() const {
std::vector<SimpleNode*> res;
res.reserve(in_edges_.size());
for (const auto e : in_edges_) {
if (e) res.push_back(e->src());
}
return res;
}
std::vector<SimpleNode*> out_nodes() const {
std::vector<SimpleNode*> res;
res.reserve(out_edges_.size());
for (const auto e : out_edges_) {
if (e) res.push_back(e->dst());
}
return res;
}
const string& name() const { return node_->name(); }
const Node* tf_node() const { return node_; }
int id() const { return id_; }
private:
const Node* node_;
std::vector<SimpleEdge*> in_edges_;
std::vector<SimpleEdge*> out_edges_;
int id_;
friend class SimpleGraph;
};
class SimpleGraph {
public:
explicit SimpleGraph(const Graph* g);
~SimpleGraph();
void AddControlEdge(SimpleNode* src, SimpleNode* dst);
void AddEdge(SimpleNode* src, int out_port, SimpleNode* dst, int in_port);
void RemoveEdge(const SimpleEdge*);
SimpleNode* FindNodeId(int node_id) {
if (node_id < 0 || node_id >= static_cast<int>(nodes_.size())) {
return nullptr;
}
return nodes_[node_id];
}
int num_node_ids() const { return nodes_.size(); }
const SimpleNode* source_node() const { return nodes_[Graph::kSourceId]; }
const SimpleNode* sink_node() const { return nodes_[Graph::kSinkId]; }
private:
const Graph* g_;
std::vector<SimpleNode*> nodes_;
std::vector<SimpleEdge*> edges_;
std::set<int> free_edge_ids_;
std::set<int> free_node_ids_;
};
SimpleNode::SimpleNode(const Node* node, const int id) : node_(node), id_(id) {
if (node_) {
in_edges_.reserve(node_->in_edges().size());
out_edges_.reserve(node_->out_edges().size());
}
}
SimpleGraph::SimpleGraph(const Graph* g) : g_(g) {
int n_nodes = g_->num_node_ids();
nodes_.resize(n_nodes, nullptr);
nodes_[g->kSourceId] = new SimpleNode(g->source_node(), g->kSourceId);
nodes_[g->kSinkId] = new SimpleNode(g->sink_node(), g->kSinkId);
int n_edges = g->num_edge_ids();
edges_.resize(n_edges, nullptr);
for (int i = 2; i < n_nodes; i++) {
const auto n = g->FindNodeId(i);
if (n) {
nodes_[i] = new SimpleNode(n, i);
} else {
free_node_ids_.insert(i);
}
}
for (int i = 0; i < n_edges; i++) {
const auto e = g->FindEdgeId(i);
if (e) {
const auto tfsrc = e->src();
const auto tfdst = e->dst();
bool is_control = e->IsControlEdge();
auto src = nodes_[tfsrc->id()];
auto dst = nodes_[tfdst->id()];
auto edge = new SimpleEdge(i, src, e->src_output(), dst, e->dst_input(),
is_control);
edges_[i] = edge;
src->out_edges_.push_back(edge);
dst->in_edges_.push_back(edge);
} else {
free_edge_ids_.insert(i);
}
}
}
void SimpleGraph::AddEdge(SimpleNode* src, int out_port, SimpleNode* dst,
int in_port) {
int i = edges_.size();
if (!free_edge_ids_.empty()) {
auto it = free_edge_ids_.begin();
i = *it;
free_edge_ids_.erase(it);
} else {
edges_.push_back(nullptr);
}
bool is_control = (out_port == Graph::kControlSlot);
is_control |= (in_port == Graph::kControlSlot);
auto edge = new SimpleEdge(i, src, out_port, dst, in_port, is_control);
edges_[i] = edge;
src->out_edges_.push_back(edge);
dst->in_edges_.push_back(edge);
}
void SimpleGraph::AddControlEdge(SimpleNode* src, SimpleNode* dst) {
AddEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
void SimpleGraph::RemoveEdge(const SimpleEdge* edge) {
auto src = edge->src();
auto dst = edge->dst();
for (auto it = src->out_edges_.begin(); it != src->out_edges_.end(); ++it) {
if (*it == edge) {
src->out_edges_.erase(it);
break;
}
}
for (auto it = dst->in_edges_.begin(); it != dst->in_edges_.end(); ++it) {
if (*it == edge) {
dst->in_edges_.erase(it);
break;
}
}
}
SimpleGraph::~SimpleGraph() {
for (auto x : nodes_) delete x;
for (auto x : edges_) delete x;
}
struct SimpleEdgePtrCompare {
bool operator()(const SimpleEdge* lhs, const SimpleEdge* rhs) const {
return lhs->id() < rhs->id();
}
};
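// Depth-first search that visits neighbors in node-name order so segmentation
// is deterministic; `reverse` follows incoming edges, and the enter/leave
// callbacks can stop the traversal by returning false.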
void StableDFS(const SimpleGraph& g, bool reverse,
const std::vector<const SimpleNode*>& start,
const std::function<bool(const SimpleNode*)>& enter,
const std::function<bool(const SimpleNode*)>& leave) {
struct Work {
const SimpleNode* node;
bool leave;
};
std::vector<Work> stack(start.size());
for (int i = 0; i < start.size(); ++i) {
stack[i] = Work{start[i], false};
}
auto get_nodes = [reverse](const SimpleNode* n) {
return reverse ? n->in_nodes() : n->out_nodes();
};
std::vector<bool> visited(g.num_node_ids(), false);
while (!stack.empty()) {
Work w = stack.back();
stack.pop_back();
auto n = w.node;
if (w.leave) {
if (leave && !leave(n)) return;
continue;
}
if (visited[n->id()]) continue;
visited[n->id()] = true;
if (enter && !enter(n)) return;
if (leave) stack.push_back(Work{n, true});
auto nodes = get_nodes(n);
std::vector<const SimpleNode*> nodes_sorted(nodes.begin(), nodes.end());
std::sort(nodes_sorted.begin(), nodes_sorted.end(),
[](const SimpleNode* lhs, const SimpleNode* rhs) {
return lhs->name() < rhs->name();
});
for (const SimpleNode* node : nodes_sorted) {
if (!visited[node->id()]) {
stack.push_back(Work{node, false});
}
}
}
}
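// The edge src->dst may be contracted only if no cycle would result, i.e. a
// reverse DFS from dst's other inputs must not reach src.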
bool CanContractEdge(const SimpleEdge* edge,
const std::unique_ptr<SimpleGraph>& graph) {
const auto src = edge->src();
const auto dst = edge->dst();
std::vector<const SimpleNode*> dfs_start_nodes;
for (const SimpleNode* node : dst->in_nodes()) {
if (node != src) {
dfs_start_nodes.push_back(node);
}
}
bool has_cycle = false;
StableDFS(*graph, true, dfs_start_nodes, nullptr,
[&has_cycle, src](const SimpleNode* n) {
if (n == src) {
has_cycle = true;
return false;
}
return true;
});
return !has_cycle;
}
string TensorPropertiesToString(const OpInfo::TensorProperties& prop) {
string s = StrCat(DataTypeString(prop.dtype()), ": ");
StrAppend(&s, "[");
if (prop.shape().unknown_rank()) {
StrAppend(&s, "?");
} else {
StrAppend(&s, StrJoin(prop.shape().dim(), ",",
[](string* out, const TensorShapeProto_Dim& d) {
StrAppendFormat(out, "%d", d.size());
}));
}
StrAppend(&s, "]");
return s;
}
string TensorPropertiesToString(
const std::vector<OpInfo::TensorProperties>& properties) {
return StrJoin(properties, "; ",
[](string* out, const OpInfo::TensorProperties& prop) {
StrAppend(out, TensorPropertiesToString(prop));
});
}
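// Selects the input shape that determines the implicit batch dimension: the
// highest-rank shape wins, ties prefer a dynamic or larger leading dimension,
// and std::nullopt is returned when a dynamic leading dimension conflicts with
// a larger static one.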
std::optional<const TensorShapeProto*> FindLeadingShape(
absl::Span<const OpInfo::TensorProperties> properties) {
DCHECK(!properties.empty());
const TensorShapeProto* result;
int max_batch_dim_value;
auto choose_shape_with_higher_rank = [&](const TensorShapeProto* s) {
result = s;
max_batch_dim_value = s->dim_size() < 1 ? 1 : s->dim(0).size();
};
DCHECK(!properties[0].shape().unknown_rank());
choose_shape_with_higher_rank(&properties[0].shape());
for (const OpInfo::TensorProperties& p : properties.subspan(1)) {
DCHECK(!p.shape().unknown_rank());
if (p.shape().dim_size() < result->dim_size()) continue;
if (p.shape().dim_size() > result->dim_size()) {
choose_shape_with_higher_rank(&p.shape());
continue;
}
if (result->dim_size() < 1) continue;
if (p.shape().dim(0).size() < 0 || result->dim(0).size() < 0) {
if (p.shape().dim(0).size() < 0 && result->dim(0).size() >= 0) {
result = &p.shape();
} else {
max_batch_dim_value =
std::max<int>(max_batch_dim_value, p.shape().dim(0).size());
}
continue;
}
if (p.shape().dim(0).size() > result->dim(0).size()) {
result = &p.shape();
max_batch_dim_value = result->dim(0).size();
}
}
if (result->dim_size() > 0 && result->dim(0).size() < 0) {
if (max_batch_dim_value <= 1) {
return result;
} else {
return std::nullopt;
}
}
return result;
}
absl::Span<const OpInfo::TensorProperties> GetInputsToDeterminateBatchSize(
const Node* node, const std::vector<OpInfo::TensorProperties>& all_inputs) {
static std::set<string> broadcast_supporting_ops = {
"Add",
"AddV2",
"Mul",
"Sub",
"Div",
"FloorDiv",
"RealDiv",
"Minimum",
"Maximum",
"Pow",
"BiasAdd",
"SquaredDifference",
"BatchMatMul",
"BatchMatMulV2",
};
const string& op = node->def().op();
if (op == "Conv2DBackpropInput" || op == "Conv3DBackpropInputV2") {
DCHECK_EQ(all_inputs.size(), 3);
return absl::MakeSpan(all_inputs).subspan(2, 1);
}
if (broadcast_supporting_ops.count(op)) {
return absl::MakeSpan(all_inputs);
}
return absl::MakeSpan(all_inputs).subspan(0, 1);
}
bool OperationCanBeTranslatedToImplicitBatch(
const grappler::GraphProperties* graph_properties, const Node* node) {
VLOG(3) << "process node " << node->name();
if (node->num_inputs() == 0) return true;
if (!graph_properties || !graph_properties->HasInputProperties(node->name()))
return false;
VLOG(3) << "input shapes "
<< TensorPropertiesToString(
graph_properties->GetInputProperties(node->name()));
const std::vector<OpInfo::TensorProperties>& all_input_properties =
graph_properties->GetInputProperties(node->name());
absl::Span<const OpInfo::TensorProperties> input_properties =
GetInputsToDeterminateBatchSize(node, all_input_properties);
if (absl::c_any_of(input_properties, [](const OpInfo::TensorProperties& p) {
return p.shape().unknown_rank();
})) {
return false;
}
std::optional<const TensorShapeProto*> leading_shape =
FindLeadingShape(input_properties);
return leading_shape.has_value() && leading_shape.value()->dim_size() >= 2;
}
bool HasDynamicNonBatchDimension(const OpInfo::TensorProperties& prop) {
const TensorShapeProto& shape = prop.shape();
if (shape.unknown_rank()) return true;
if (shape.dim_size() == 0) return false;
for (int i = 1; i < shape.dim_size(); ++i) {
if (shape.dim(i).size() <= -1) {
return true;
}
}
return false;
}
bool OperationHasDynamicNonBatchDimension(
const grappler::GraphProperties* graph_properties, const Node* node) {
VLOG(3) << "process node " << node->name();
if (node->num_inputs() == 0 || node->num_outputs() == 0) return false;
if (!graph_properties->HasOutputProperties(node->name())) return true;
VLOG(3) << "output shapes "
<< TensorPropertiesToString(
graph_properties->GetOutputProperties(node->name()));
return HasDynamicNonBatchDimension(
graph_properties->GetOutputProperties(node->name()).at(0));
}
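// Merges dst into src: dst's remaining input and output edges are rewired onto
// src (edges touching the source/sink nodes become control edges) and all of
// dst's edges are queued for removal.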
void ContractEdge(SimpleEdge* edge, SimpleGraph* graph,
std::vector<const SimpleEdge*>* remove_edges) {
auto src = edge->src();
auto dst = edge->dst();
std::vector<const SimpleEdge*> in_edges(dst->in_edges().begin(),
dst->in_edges().end());
for (const SimpleEdge* in_edge : in_edges) {
if (in_edge->IsControlEdge()) {
if (in_edge->src() != src) {
SimpleEdge* e = const_cast<SimpleEdge*>(in_edge);
graph->AddControlEdge(e->src(), src);
}
} else {
if (in_edge->src() != src) {
SimpleEdge* e = const_cast<SimpleEdge*>(in_edge);
if (e->src() == graph->source_node()) {
graph->AddEdge(e->src(), e->src_output(), src, Graph::kControlSlot);
} else {
graph->AddEdge(e->src(), e->src_output(), src, 0 );
}
}
}
}
std::vector<const SimpleEdge*> out_edges(dst->out_edges().begin(),
dst->out_edges().end());
for (const SimpleEdge* out_edge : out_edges) {
if (out_edge->IsControlEdge()) {
SimpleEdge* e = const_cast<SimpleEdge*>(out_edge);
graph->AddControlEdge(src, e->dst());
} else {
SimpleEdge* e = const_cast<SimpleEdge*>(out_edge);
if (e->dst() == graph->sink_node()) {
VLOG(1) << " edge to sink node " << src->name() << " -> "
<< e->dst()->name();
graph->AddEdge(src, Graph::kControlSlot, e->dst(), e->dst_input());
} else {
graph->AddEdge(src, 0 , e->dst(), e->dst_input());
}
}
}
for (const auto& in_edge : dst->in_edges()) {
remove_edges->push_back(in_edge);
}
for (const auto& out_edge : dst->out_edges()) {
remove_edges->push_back(out_edge);
}
}
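// In implicit batch mode, derives a node's batch size constraint from the
// leading dimension of its inputs and, if present, the
// _tftrt_op_max_batch_size attribute.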
ClusterBatchSize GetClusterBatchSizeForNode(
const grappler::GraphProperties* graph_properties, const Node* node,
bool use_implicit_batch) {
ClusterBatchSize cluster_batch_size;
if (!use_implicit_batch || !node || node->num_inputs() == 0) {
return cluster_batch_size;
}
const NodeDef& node_def = node->def();
if (node_def.attr().count(kTftrtOpMaxBatchSizeAttr)) {
cluster_batch_size.SetMaxBatchSize(
node_def.attr().at(kTftrtOpMaxBatchSizeAttr).i());
}
if (!graph_properties ||
!graph_properties->HasInputProperties(node->name())) {
VLOG(3) << "doesn't have input property";
return cluster_batch_size;
}
const std::vector<OpInfo::TensorProperties>& input_properties =
graph_properties->GetInputProperties(node->name());
std::optional<const TensorShapeProto*> optional_leading_shape =
FindLeadingShape(GetInputsToDeterminateBatchSize(node, input_properties));
DCHECK(optional_leading_shape.has_value());
const TensorShapeProto* leading_shape = optional_leading_shape.value();
DCHECK(!leading_shape->unknown_rank() && leading_shape->dim_size() >= 2);
VLOG(3) << "set batch size as " << leading_shape->dim(0).size();
return cluster_batch_size.SetBatchSize(leading_shape->dim(0).size());
}
void AddSegmentForNode(const grappler::GraphProperties* graph_properties,
std::vector<UnionFind<SimpleNode*>>* segments,
SimpleNode* node,
const DeviceNameUtils::ParsedName& device_name,
bool use_implicit_batch) {
tensorflow::profiler::TraceMe activity(
"AddSegmentForNode", tensorflow::profiler::TraceMeLevel::kInfo);
ClusterProperty property(
GetClusterBatchSizeForNode(graph_properties,
node == nullptr ? nullptr : node->tf_node(),
use_implicit_batch),
device_name);
segments->emplace_back(node, std::move(property));
}
}
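// Writes one "<op><sep><reason><sep><count>" row per non-converted (op,
// reason) pair to `filename`.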
Status ExportNonConversionReportToCSV(
string filename,
std::map<string, std::map<string, int>>& nonconverted_ops_map,
string sep = "|") {
tensorflow::profiler::TraceMe activity(
"ExportNonConversionReportToCSV",
tensorflow::profiler::TraceMeLevel::kInfo);
std::unique_ptr<WritableFile> csv_file;
auto open_status = Env::Default()->NewWritableFile(filename, &csv_file);
if (!open_status.ok()) {
return errors::Internal("Failed to open output file: `", filename, "`");
}
LOG(WARNING) << "TF-TRT Non-Conversion Report saved at: `" << filename << "`";
std::ostringstream sstream;
sstream << "OP Name" << sep << "Reason" << sep << "Count" << std::endl;
for (auto& op_details : nonconverted_ops_map) {
auto op_name = op_details.first;
auto op_data = op_details.second;
for (auto& reject_data : op_data) {
auto reason = reject_data.first;
auto count = reject_data.second;
sstream << op_name << sep << reason << sep << count << std::endl;
}
}
auto append_status = csv_file->Append(sstream.str());
if (!append_status.ok()) {
return errors::Internal("Error writing to output file `", filename, "`.");
}
auto close_status = csv_file->Close();
if (!close_status.ok()) {
return errors::Internal("Error closing the file `", filename,
"`. The file might be corrupted.");
}
return OkStatus();
}
string GenerateNonConversionReport(
std::map<string, std::map<string, int>>& nonconverted_ops_map) {
tensorflow::profiler::TraceMe activity(
"GenerateNonConversionReport", tensorflow::profiler::TraceMeLevel::kInfo);
string detailed_report_var;
TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_SHOW_DETAILED_REPORT",
"", &detailed_report_var));
bool show_detailed_conversion_report = false;
if (detailed_report_var != "") {
if (detailed_report_var.find_first_not_of("-0123456789") != string::npos) {
const Status status = ExportNonConversionReportToCSV(
detailed_report_var, nonconverted_ops_map);
if (!status.ok()) {
LOG(ERROR) << "Problem encountered while generating the TF-TRT "
<< "Non-Conversion Report in CSV Format:\n"
<< status.message();
}
show_detailed_conversion_report = true;
} else if (std::stoi(detailed_report_var) >= 1) {
show_detailed_conversion_report = true;
}
}
string unsupported_op_report =
StrCat("\n\n", string(80, '#'), "\n",
"TensorRT unsupported/non-converted OP Report:");
int total_nonconverted_ops{0};
using ReasonCounterVector = std::vector<std::pair<string, int>>;
using NotConvertedOPTuple = std::tuple<string, int, ReasonCounterVector>;
std::vector<NotConvertedOPTuple> nonconverted_ops_vec;
for (auto& nonconverted_op_data : nonconverted_ops_map) {
int total_nonconverted_op{0};
ReasonCounterVector reason_occurances_vect;
auto op_name = nonconverted_op_data.first;
auto op_data = nonconverted_op_data.second;
for (auto& notconversion_reason_data : op_data) {
auto reason_count = notconversion_reason_data.second;
total_nonconverted_op += reason_count;
reason_occurances_vect.push_back(notconversion_reason_data);
} | #include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace test {
class SegmentTest : public ::testing::Test {
protected:
std::function<Status(const Node*)> MakeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Node* node) -> Status {
if (node_names.find(node->name()) != node_names.end()) {
return OkStatus();
}
return errors::NotFound("Not a user specified candidate");
};
}
std::function<bool(const Edge*)> MakeInputEdgeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Edge* in_edge) -> bool {
return node_names.find(in_edge->dst()->name()) != node_names.end();
};
}
std::function<bool(const Edge*)> MakeOutputEdgeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Edge* out_edge) -> bool {
return node_names.find(out_edge->src()->name()) != node_names.end();
};
}
void RunTest(const Graph* graph,
const grappler::GraphProperties* graph_properties,
const std::set<string>& candidates,
const std::set<string>& input_candidates,
const std::set<string>& output_candidates,
const std::vector<std::set<string>>& expected_segments) {
SegmentVector segments;
TF_EXPECT_OK(SegmentGraph(graph, graph_properties,
MakeCandidateFn(candidates),
MakeInputEdgeCandidateFn(input_candidates),
MakeOutputEdgeCandidateFn(output_candidates),
segment_options_, &segments));
ValidateSegment(segments, expected_segments);
}
void RunTest(const Graph* graph, const std::set<string>& candidates,
const std::set<string>& input_candidates,
const std::set<string>& output_candidates,
const std::vector<std::set<string>>& expected_segments) {
RunTest(graph, nullptr, candidates, input_candidates, output_candidates,
expected_segments);
}
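// Verifies that each produced segment contains exactly the expected node
// names.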
void ValidateSegment(const SegmentVector& segments,
const std::vector<std::set<string>>& expected_segments) {
EXPECT_EQ(expected_segments.size(), segments.size());
for (int i = 0; i < segments.size(); ++i) {
std::set<string> segment_node_names;
for (const Node* node : segments[i].nodes) {
segment_node_names.insert(node->name());
}
const auto& expected = expected_segments[i];
for (const auto& name : expected) {
EXPECT_TRUE(segment_node_names.count(name))
<< "Segment " << i << " is missing expected node: " << name;
}
if (segment_node_names.size() == expected.size()) continue;
for (const auto& name : segment_node_names) {
EXPECT_TRUE(expected.count(name))
<< "Unexpected node found in segment " << i << ": " << name;
}
}
}
void DisableImplicitBatchMode() {
segment_options_.use_implicit_batch = false;
segment_options_.allow_dynamic_non_batch_dim = true;
}
void EnableImplicitBatchModeForStaticEngine(int maximum_batch_size = 1000) {
segment_options_.use_implicit_batch = true;
segment_options_.maximum_batch_size = maximum_batch_size;
segment_options_.allow_dynamic_non_batch_dim = false;
}
SegmentOptions segment_options_;
};
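// Returns a copy of `lhs` with `rhs` removed; CHECK-fails if it was absent.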
std::set<string> operator-(const std::set<string>& lhs, const string& rhs) {
std::set<string> result = lhs;
CHECK(result.erase(rhs));
return result;
}
TEST_F(SegmentTest, Empty) {
Scope s = Scope::NewRootScope();
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
DisableImplicitBatchMode();
RunTest(&g, {}, {}, {}, {});
}
TEST_F(SegmentTest, Simple) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
DisableImplicitBatchMode();
RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
auto without_add1 = all_adds - "add1";
RunTest(&g, without_add1, without_add1, without_add1, {without_add1});
auto without_add2 = all_adds - "add2";
RunTest(&g, without_add1, without_add2, without_add1, {{"add3", "add4"}});
RunTest(&g, all_adds, without_add2, all_adds, {all_adds});
RunTest(&g, all_adds, without_add1, all_adds, {without_add1});
auto without_add3 = all_adds - "add3";
RunTest(&g, all_adds, all_adds, without_add3, {all_adds});
}
TEST_F(SegmentTest, WithDeviceAssignments) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
DisableImplicitBatchMode();
{
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
}
{
add1.node()->set_assigned_device_name("/device:CPU:0");
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {all_adds - "add1"});
add1.node()->set_assigned_device_name("");
}
{
constexpr char kGpu0[] = "/device:GPU:0";
add0.node()->set_assigned_device_name(kGpu0);
add1.node()->set_assigned_device_name(kGpu0);
add2.node()->set_assigned_device_name(kGpu0);
constexpr char kGpu1[] = "/device:GPU:1";
add3.node()->set_assigned_device_name(kGpu1);
add4.node()->set_assigned_device_name(kGpu1);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {{"add0", "add1", "add2"}});
}
{
constexpr char kGpuAny[] = "/device:GPU:*";
add3.node()->set_assigned_device_name(kGpuAny);
add4.node()->set_assigned_device_name(kGpuAny);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
}
}
TEST_F(SegmentTest, AvoidCycle) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> without_add2 = {"add0", "add1", "add3", "add4"};
DisableImplicitBatchMode();
RunTest(&g, without_add2, without_add2, without_add2, {});
}
TEST_F(SegmentTest, Multiple) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add7 = ops::Add(s.WithOpName("add7"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add5 = ops::Add(s.WithOpName("add5"), add2, add7);
auto add8 = ops::Add(s.WithOpName("add8"), add7, add7);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add5);
auto add6 = ops::Add(s.WithOpName("add6"), add5, add8);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4",
"add5", "add6", "add7", "add8"};
auto without_add5 = all_adds - "add5";
DisableImplicitBatchMode();
RunTest(&g, without_add5, without_add5, without_add5,
{{"add0", "add1", "add2", "add3"}, {"add6", "add8"}});
auto without_add8 = all_adds - "add8";
auto without_add6 = all_adds - "add6";
RunTest(&g, without_add8, without_add6, all_adds, {{"add3", "add4"}});
auto without_add3 = all_adds - "add3";
auto without_add0 = all_adds - "add0";
RunTest(&g, without_add3, all_adds, without_add0, {{"add1", "add7", "add8"}});
}
TEST_F(SegmentTest, BigIfElse) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), add0, add0);
auto add2 = ops::Add(s.WithOpName("add2"), add1, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add2, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add0, add0);
auto add5 = ops::Add(s.WithOpName("add5"), add4, add4);
auto add6 = ops::Add(s.WithOpName("add6"), add5, add5);
auto add7 = ops::Add(s.WithOpName("add7"), add3, add6);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_adds = {"add0", "add1", "add2", "add3",
"add4", "add5", "add6", "add7"};
DisableImplicitBatchMode();
RunTest(&g, all_adds - "add2", all_adds, all_adds,
{{"add0", "add1"}, {"add3", "add4", "add5", "add6", "add7"}});
}
TEST_F(SegmentTest, IdentityOps) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto identity0 = ops::Identity(s.WithOpName("identity0"), feed);
auto identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
auto identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
auto identity3 = ops::Identity(s.WithOpName("identity3"), identity2);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_identities = {"identity0", "identity1",
"identity2", "identity3"};
DisableImplicitBatchMode();
RunTest(&g, all_identities, all_identities, all_identities, {});
}
TEST_F(SegmentTest, ExcludeAddWithDynamicNonBatchDimension) {
Scope s = Scope::NewRootScope();
auto feed_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2, 3}));
auto feed_1_shape = ops::Placeholder::Shape(PartialTensorShape({-1, -1, 3}));
auto const_val = ops::Const<float>(s, {1.0}, {});
auto feed_0 =
ops::Placeholder(s.WithOpName("feed-1"), DT_FLOAT, feed_0_shape);
auto feed_1 =
ops::Placeholder(s.WithOpName("feed-2"), DT_FLOAT, feed_1_shape);
auto add_0 = ops::Add(s.WithOpName("add-0"), feed_0, const_val);
auto add_1 = ops::Add(s.WithOpName("add-1"), add_0, feed_0);
auto add_2 = ops::Add(s.WithOpName("add-2"), const_val, feed_1);
grappler::GrapplerItem item;
item.fetch.push_back("add-2");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"add-0", "add-1", "add-2"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{all_nodes - "add-2"});
}
TEST_F(SegmentTest, ExcludeReshapeWithDynamicNonBatchDimensionInOutput) {
Scope s = Scope::NewRootScope();
auto feed_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2, 3}));
auto const_val = ops::Const<float>(s, {1.0}, {});
auto feed_0 =
ops::Placeholder(s.WithOpName("feed-1"), DT_FLOAT, feed_0_shape);
auto add_0 = ops::Add(s.WithOpName("add-0"), feed_0, const_val);
auto reshape = ops::Reshape(s.WithOpName("reshape"), add_0, Input({6, -1}));
auto add_1 = ops::Add(s.WithOpName("add-1"), reshape, const_val);
grappler::GrapplerItem item;
item.fetch.push_back("add-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"add-0", "reshape", "add-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
TEST_F(SegmentTest, RankOneCannotUseImplicitBatch) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(TensorShape({3}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({3}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-scalar"), 1.0f, {});
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, const_val);
auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, const_val);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
item.fetch.push_back("output-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-scalar", "output-0", "output-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
TEST_F(SegmentTest, TwoChainsDiffBatchSizes) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(TensorShape({2, 3}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({5, 3}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-scalar"), 1.0f, {});
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, const_val);
auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, const_val);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
item.fetch.push_back("output-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-scalar", "output-0", "output-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{{"output-0", "const-scalar"}});
EnableImplicitBatchModeForStaticEngine(1);
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{{"output-0", "const-scalar"}});
}
TEST_F(SegmentTest, SameRankImplicitBroadcastingStaticBatchSize) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(TensorShape({2, 3, 1}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({1, 3, 4}));
auto input_2_shape = ops::Placeholder::Shape(TensorShape({2, 3, 4}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto input_2 =
ops::Placeholder(s.WithOpName("input-2"), DT_FLOAT, input_2_shape);
auto multiple = ops::Mul(s.WithOpName("multiple"), input_2, input_2);
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, multiple);
auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, multiple);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
item.fetch.push_back("output-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"multiple", "output-0", "output-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{all_nodes});
}
TEST_F(SegmentTest, SameRankImplicitBroadcastingDynamicBatchSize) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({1, 2}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-val"), 1.0f, {1, 1});
auto add_0 = ops::Add(s.WithOpName("add-0"), input_0, const_val);
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, add_0);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-val", "add-0", "output-0"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{{"const-val", "add-0", "output-0"}});
}
TEST_F(SegmentTest, IncompatibleBatchSizes) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({2, 2}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-val"), 1.0f, {2, 2});
auto add_0 = ops::Add(s.WithOpName("add-0"), input_0, const_val);
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, add_0);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-val", "add-0", "output-0"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
}
}
}
}
#endif |
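The segment tests above build expressions such as all_adds - "add5"; the operator- helper they rely on is defined earlier in segment_test.cc and is not visible in this excerpt. A minimal sketch of such a helper follows; the exact signature and erase behaviour are assumptions for illustration, not the verbatim TensorFlow implementation.

#include <set>
#include <string>

// Returns a copy of `lhs` with the single element `rhs` removed, mirroring
// the `all_adds - "add5"` style expressions used in the tests above.
// (Hypothetical helper; the real test file may CHECK that the element exists.)
std::set<std::string> operator-(const std::set<std::string>& lhs,
                                const std::string& rhs) {
  std::set<std::string> result = lhs;
  result.erase(rhs);  // Erasing a missing element is simply a no-op here.
  return result;
}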
1,158 | cpp | tensorflow/tensorflow | register_common_dialects | tensorflow/compiler/mlir/register_common_dialects.cc | tensorflow/compiler/mlir/register_common_dialects_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_REGISTER_COMMON_DIALECTS_H_
#define TENSORFLOW_COMPILER_MLIR_REGISTER_COMMON_DIALECTS_H_
#include "mlir/IR/DialectRegistry.h"
namespace mlir {
void RegisterCommonToolingDialects(mlir::DialectRegistry& registry);
};
#endif
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllExtensions.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mlprogram_util.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.h"
#include "xla/mlir/framework/ir/xla_framework.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
namespace mlir {
void RegisterCommonToolingDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::registerAllDialects(registry);
mlir::registerAllExtensions(registry);
mlir::stablehlo::registerAllDialects(registry);
registry.insert<mlir::TFL::TensorFlowLiteDialect>();
registry.insert<mlir::kernel_gen::tf_framework::TFFrameworkDialect>();
registry.insert<mlir::quant::QuantizationDialect>();
registry.insert<mlir::quantfork::QuantizationForkDialect>();
registry.insert<mlir::shape::ShapeDialect>();
registry.insert<mlir::tensor::TensorDialect>();
registry.insert<mlir::tosa::TosaDialect>();
registry.insert<mlir::xla_framework::XLAFrameworkDialect,
mlir::TF::TensorFlowDialect, mlir::tf_type::TFTypeDialect>();
}
}; | #include "tensorflow/compiler/mlir/register_common_dialects.h"
#include <gtest/gtest.h>
#include "mlir/IR/DialectRegistry.h"
namespace mlir {
namespace {
TEST(RegisterCommonDialectsTest, DoesntCrash) {
mlir::DialectRegistry registry;
mlir::RegisterCommonToolingDialects(registry);
EXPECT_FALSE(registry.getDialectNames().empty());
}
}
} |
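A typical consumer of RegisterCommonToolingDialects builds a DialectRegistry, registers the tooling dialects, and hands the registry to an MLIRContext. The sketch below illustrates that pattern; the helper name MakeToolingContext and the eager loadAllAvailableDialects call are assumptions for this example, not part of the API above.

#include <memory>

#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"

// Builds an MLIRContext with every common tooling dialect pre-registered.
// Dialects are normally loaded lazily on first use; loading them eagerly here
// is just a convenience for tools that parse arbitrary input IR.
std::unique_ptr<mlir::MLIRContext> MakeToolingContext() {
  mlir::DialectRegistry registry;
  mlir::RegisterCommonToolingDialects(registry);
  auto context = std::make_unique<mlir::MLIRContext>(registry);
  context->loadAllAvailableDialects();
  return context;
}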
1,159 | cpp | tensorflow/tensorflow | mlir_graph_optimization_pass | tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc | tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_
#define TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
namespace tensorflow {
enum class MlirOptimizationPassState { Disabled, Enabled, FallbackEnabled };
class MlirOptimizationPass {
public:
virtual ~MlirOptimizationPass() = default;
virtual llvm::StringRef name() const = 0;
virtual MlirOptimizationPassState GetPassState(
const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library) const = 0;
virtual Status Run(const std::string& function_name,
const ConfigProto& config_proto, mlir::ModuleOp module,
const Graph& graph,
const FunctionLibraryDefinition& function_library) = 0;
};
class MlirOptimizationPassRegistry {
public:
struct PassRegistration {
int priority;
std::unique_ptr<MlirOptimizationPass> pass;
};
struct PriorityComparator {
bool operator()(const PassRegistration& x,
const PassRegistration& y) const {
return x.priority < y.priority;
}
};
using Passes = std::set<PassRegistration, PriorityComparator>;
static MlirOptimizationPassRegistry& Global();
void Add(int priority, std::unique_ptr<MlirOptimizationPass> pass) {
auto inserted = passes_.insert({priority, std::move(pass)});
CHECK(inserted.second)
<< "Pass priority must be unique. "
<< "Previously registered pass with the same priority: "
<< inserted.first->pass->name().str();
}
void ClearPasses() { passes_.clear(); }
const Passes& passes() const { return passes_; }
private:
Passes passes_;
};
class MlirFunctionOptimizationPass : public FunctionOptimizationPass {
public:
explicit MlirFunctionOptimizationPass(
const MlirOptimizationPassRegistry* registry =
&MlirOptimizationPassRegistry::Global())
: registry_(registry) {}
Status Run(const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) override;
private:
const MlirOptimizationPassRegistry* registry_;
};
class MlirV1CompatOptimizationPass {
public:
virtual ~MlirV1CompatOptimizationPass() = default;
virtual llvm::StringRef name() const = 0;
virtual MlirOptimizationPassState GetPassState(
const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library) const = 0;
virtual Status Run(const GraphOptimizationPassOptions& options,
mlir::ModuleOp module) = 0;
};
class MlirV1CompatOptimizationPassRegistry {
public:
static MlirV1CompatOptimizationPassRegistry& Global();
void Add(std::unique_ptr<MlirV1CompatOptimizationPass> pass) {
CHECK(pass_ == nullptr) << "Only a single pass can be registered";
pass_ = std::move(pass);
}
MlirV1CompatOptimizationPass* pass() const {
return pass_ ? pass_.get() : nullptr;
}
void ClearPass() { pass_.reset(); }
private:
std::unique_ptr<MlirV1CompatOptimizationPass> pass_{};
};
class MlirV1CompatGraphOptimizationPass : public GraphOptimizationPass {
public:
explicit MlirV1CompatGraphOptimizationPass(
const MlirV1CompatOptimizationPassRegistry* registry =
&MlirV1CompatOptimizationPassRegistry::Global())
: registry_(registry) {}
Status Run(const GraphOptimizationPassOptions& options) override;
private:
const MlirV1CompatOptimizationPassRegistry* registry_;
};
namespace mlir_pass_registration {
class MlirOptimizationPassRegistration {
public:
explicit MlirOptimizationPassRegistration(
int priority, std::unique_ptr<MlirOptimizationPass> pass) {
MlirOptimizationPassRegistry::Global().Add(priority, std::move(pass));
}
};
class MlirV1CompatOptimizationPassRegistration {
public:
explicit MlirV1CompatOptimizationPassRegistration(
std::unique_ptr<MlirV1CompatOptimizationPass> pass) {
MlirV1CompatOptimizationPassRegistry::Global().Add(std::move(pass));
}
};
}
}
#endif
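Passes are normally hooked into the registries declared above through a file-scope static registration object. The sketch below shows one way a hypothetical pass could do this; the class name ExampleNoOpPass, the priority value 99, and the FallbackEnabled policy are illustrative assumptions rather than an existing TensorFlow pass.

#include <memory>

#include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"

namespace tensorflow {
namespace {

// Hypothetical pass that opts in for every graph and leaves the module as-is.
class ExampleNoOpPass : public MlirOptimizationPass {
 public:
  llvm::StringRef name() const override { return "example-no-op-pass"; }

  MlirOptimizationPassState GetPassState(
      const DeviceSet* device_set, const ConfigProto& config_proto,
      const Graph& graph,
      const FunctionLibraryDefinition& function_library) const override {
    // FallbackEnabled means a failure in Run only logs a warning and the
    // original graph is kept (see MlirFunctionOptimizationPass::Run below).
    return MlirOptimizationPassState::FallbackEnabled;
  }

  Status Run(const std::string& function_name, const ConfigProto& config_proto,
             mlir::ModuleOp module, const Graph& graph,
             const FunctionLibraryDefinition& function_library) override {
    return absl::OkStatus();  // No-op: the module is left unchanged.
  }
};

// Priority 99 is an arbitrary choice; priorities must be unique per registry.
static mlir_pass_registration::MlirOptimizationPassRegistration
    example_no_op_registration(99, std::make_unique<ExampleNoOpPass>());

}  // namespace
}  // namespace tensorflow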
#include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "absl/container/flat_hash_set.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_os_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/OperationSupport.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
auto* mlir_function_pass_fallback_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_function_pass_fallback_count",
"Track success/failure of MLIR pass runs when fallback used",
"status");
auto* mlir_graph_optimization_pass_fallback_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count",
"Track success/failure of MLIR graph optimization pass runs when fallback "
"used",
"status");
auto* mlir_function_pass_graph_conversion_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_function_pass_graph_conversion_count",
"Track success/failure of Graph to MLIR conversions in function "
"optimization pass",
"status");
constexpr char kSuccess[] = "kSuccess";
constexpr char kFailure[] = "kFailure";
static inline absl::string_view StringRefToView(llvm::StringRef ref) {
return {ref.data(), ref.size()};
}
static void DumpModule(mlir::ModuleOp module, std::string file_prefix) {
std::string prefix = GetDumpDirFromEnvVar();
if (prefix.empty()) return;
auto* env = tensorflow::Env::Default();
auto status = env->RecursivelyCreateDir(prefix);
if (!status.ok()) {
LOG(WARNING) << "cannot create directory '" << prefix
<< "': " << status.message();
return;
}
prefix += "/" + file_prefix;
if (!tensorflow::Env::Default()->CreateUniqueFileName(&prefix, ".mlir")) {
LOG(WARNING) << "cannot create unique filename, won't dump MLIR module.";
return;
}
std::unique_ptr<WritableFile> file_writer;
status = env->NewWritableFile(prefix, &file_writer);
if (!status.ok()) {
LOG(WARNING) << "cannot open file '" << prefix << "': " << status.message();
return;
}
std::string txt_module;
{
llvm::raw_string_ostream os(txt_module);
module.print(os);
}
status = file_writer->Append(txt_module);
if (!status.ok()) {
LOG(WARNING) << "error writing to file '" << prefix
<< "': " << status.message();
return;
}
(void)file_writer->Close();
VLOG(1) << "Dumped MLIR module to " << prefix;
}
MlirOptimizationPassRegistry& MlirOptimizationPassRegistry::Global() {
static auto* global = new MlirOptimizationPassRegistry();
return *global;
}
static void RegisterDialects(mlir::DialectRegistry& registry) {
registry.insert<mlir::arith::ArithDialect,
mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect,
mlir::shape::ShapeDialect,
mlir::tf_device::TensorFlowDeviceDialect,
mlir::tf_executor::TensorFlowExecutorDialect>();
mlir::func::registerAllExtensions(registry);
}
Status MlirFunctionOptimizationPass::Run(
const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) {
MlirOptimizationPassState overall_state = MlirOptimizationPassState::Disabled;
std::vector<MlirOptimizationPassState> per_pass_state;
per_pass_state.reserve(registry_->passes().size());
int num_passes_enabled = 0, num_passes_disabled = 0,
num_passes_fallback_enabled = 0;
for (const auto& pass_registration : registry_->passes()) {
MlirOptimizationPassState pass_state = pass_registration.pass->GetPassState(
&device_set, config_proto, **graph, *flib_def);
per_pass_state.push_back(pass_state);
switch (pass_state) {
case MlirOptimizationPassState::FallbackEnabled: {
if (overall_state != MlirOptimizationPassState::Enabled)
overall_state = MlirOptimizationPassState::FallbackEnabled;
++num_passes_fallback_enabled;
break;
}
case MlirOptimizationPassState::Enabled: {
overall_state = MlirOptimizationPassState::Enabled;
++num_passes_enabled;
break;
}
case MlirOptimizationPassState::Disabled: {
++num_passes_disabled;
break;
}
}
}
if (overall_state == MlirOptimizationPassState::Disabled) {
if (VLOG_IS_ON(1)) {
LOG_FIRST_N(INFO, 1)
<< "None of the MLIR Optimization Passes are enabled "
<< "(registered " << registry_->passes().size() << ")";
}
return absl::OkStatus();
}
if (VLOG_IS_ON(1)) {
LOG_FIRST_N(INFO, 1) << "MLIR Graph Optimization Passes."
<< " Enabled: " << num_passes_enabled
<< ", Disabled: " << num_passes_disabled
<< ", FallbackEnabled: " << num_passes_fallback_enabled
<< ", Total: " << registry_->passes().size();
}
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
RegisterDialects(registry);
mlir::MLIRContext context(registry);
GraphImportConfig import_config;
import_config.graph_as_function = true;
import_config.control_outputs = *control_ret_node_names;
import_config.upgrade_legacy = true;
import_config.enable_shape_inference = false;
import_config.xla_compile_device_type =
function_options.xla_compile_device_type;
import_config.enable_soft_placement = function_options.allow_soft_placement;
static const char* kTfMlirCategory = "TfMlir";
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kTfMlirCategory, "convert_graph_to_mlir"});
auto module_ref_status = ConvertGraphToMlir(**graph, debug_info, *flib_def,
import_config, &context);
mlir_function_pass_graph_conversion_count
->GetCell(absl::StatusCodeToString(module_ref_status.status().code()))
->IncrementBy(1);
timings.ReportAndStop();
if (!module_ref_status.ok()) {
if (overall_state == MlirOptimizationPassState::Enabled) {
return module_ref_status.status();
}
LOG(WARNING) << "Failed to convert graph to MLIR: "
<< module_ref_status.status()
<< " , continuing without MlirOptimizationPass because "
"fallback enabled.";
return absl::OkStatus();
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
std::move(module_ref_status.value());
AddDevicesToOp(*module_ref, &device_set);
int per_pass_state_index = 0;
bool is_module_updated = false;
for (auto& pass_registration : registry_->passes()) {
llvm::StringRef name = pass_registration.pass->name();
if (DEBUG_DATA_DUMPER()->ShouldDump(function_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
function_name, kDebugGroupMain,
llvm::formatv("mlir_{0}_before", name)),
*module_ref, llvm::StringRef(), nullptr);
}
Status pass_status = absl::OkStatus();
auto pass_state = per_pass_state[per_pass_state_index++];
if (pass_state == MlirOptimizationPassState::Enabled) {
VLOG(2) << "Run MLIR graph optimization pass: " << StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
timings.Reset({kTfMlirCategory, name.str()});
pass_status = pass_registration.pass->Run(
function_name, config_proto, *module_ref, **graph, *flib_def);
timings.ReportAndStop();
if (pass_status.ok()) {
VLOG(2) << "Finished MLIR graph optimization pass: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
is_module_updated = true;
}
} else if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
VLOG(2) << "Run MLIR graph optimization pass with fallback: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
auto module_ref_clone = module_ref->clone();
timings.Reset({kTfMlirCategory, name.str() + "_fallback"});
pass_status = pass_registration.pass->Run(
function_name, config_proto, module_ref_clone, **graph, *flib_def);
timings.ReportAndStop();
if (pass_status.ok()) {
VLOG(2) << "Finished MLIR graph optimization pass with fallback: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
module_ref = module_ref_clone;
is_module_updated = true;
} else {
module_ref_clone->destroy();
}
} else {
VLOG(2) << "MLIR graph optimization pass: " << StringRefToView(name)
<< " is disabled and will not be run.";
}
if (!pass_status.ok()) {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
LOG(WARNING) << StringRefToView(name)
<< " pass failed, continuing without the pass because the "
"pass has fallback enabled";
mlir_function_pass_fallback_count->GetCell(kFailure)->IncrementBy(1);
} else if (pass_state == MlirOptimizationPassState::Enabled) {
return pass_status;
}
} else {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
mlir_function_pass_fallback_count->GetCell(kSuccess)->IncrementBy(1);
}
}
if (DEBUG_DATA_DUMPER()->ShouldDump(function_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
::tensorflow::DumpMlirOpToFile(DEBUG_DATA_DUMPER()->GetDumpFilename(
function_name, kDebugGroupMain,
llvm::formatv("mlir_{0}_after", name)),
*module_ref, llvm::StringRef(), nullptr);
}
}
if (!is_module_updated) {
VLOG(2) << "MLIR module is not updated. Using the original graph. "
<< "Do not convert mlir module back to graph";
return absl::OkStatus();
}
GraphExportConfig export_config;
absl::flat_hash_set<Node*> control_ret_nodes;
timings.Reset({kTfMlirCategory, "convert_mlir_to_graph"});
Status status = tensorflow::tf2xla::v2::ConvertMlirToGraph(
*module_ref, export_config, graph, flib_def, &control_ret_nodes);
if (!status.ok()) {
errors::AppendToMessage(&status,
"Error converting MLIR module back to graph");
return status;
}
timings.ReportAndStop();
control_ret_node_names->clear();
control_ret_node_names->reserve(control_ret_nodes.size());
for (const auto* node : control_ret_nodes)
control_ret_node_names->push_back(node->name());
*control_rets_updated = true;
return absl::OkStatus();
}
MlirV1CompatOptimizationPassRegistry&
MlirV1CompatOptimizationPassRegistry::Global() {
static auto* global = new MlirV1CompatOptimizationPassRegistry();
return *global;
}
Status MlirV1CompatGraphOptimizationPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.is_function_graph || !registry_->pass()) return absl::OkStatus();
auto pass = registry_->pass();
auto pass_state =
pass->GetPassState(options.device_set, options.session_options->config,
**options.graph, *options.flib_def);
if (pass_state == MlirOptimizationPassState::Disabled) {
LOG_FIRST_N(INFO, 1) << "MLIR V1 optimization pass is not enabled";
return absl::OkStatus();
}
LOG_FIRST_N(INFO, 1) << "Running MLIR Graph Optimization V1 Compat Pass";
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
RegisterDialects(registry);
mlir::MLIRContext context(registry);
GraphImportConfig import_config;
import_config.upgrade_legacy = true;
import_config.restrict_functionalization_to_compiled_nodes = true;
auto module_ref_status = ConvertGraphToMlir(
**options.graph, debug_info, *options.flib_def, import_config, &context);
if (!module_ref_status.ok()) {
if (pass_state == MlirOptimizationPassState::Enabled) {
return module_ref_status.status();
}
LOG(WARNING) << "Failed to convert graph to MLIR: "
<< module_ref_status.status()
<< " , continuing without MlirOptimizationPass because "
"fallback enabled.";
return absl::OkStatus();
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
std::move(module_ref_status.value());
AddDevicesToOp(*module_ref, options.device_set);
auto module_ref_clone = module_ref->clone();
llvm::StringRef name = pass->name();
VLOG(2) << "Run MLIR V1 graph optimization pass: " << StringRefToView(name);
if (VLOG_IS_ON(1)) {
DumpModule(*module_ref, llvm::formatv("mlir_{0}_before_", name));
}
Status pass_status = pass->Run(options, *module_ref);
bool is_module_updated = !mlir::OperationEquivalence::isEquivalentTo(
module_ref_clone, *module_ref,
mlir::OperationEquivalence::Flags::IgnoreLocations);
module_ref_clone->destroy();
if (!pass_status.ok()) {
if (pass_state == MlirOptimizationPassState::Enabled) return pass_status;
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
LOG(WARNING) << StringRefToView(name)
<< " pass failed, continuing without the pass because the "
"pass has fallback enabled";
mlir_graph_optimization_pass_fallback_count->GetCell(kFailure)
->IncrementBy(1);
return absl::OkStatus();
}
} else {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
mlir_graph_optimization_pass_fallback_count->GetCell(kSuccess)
->IncrementBy(1);
}
}
if (VLOG_IS_ON(1)) {
DumpModule(*module_ref, llvm::formatv("mlir_{0}_after_", name));
}
if (!is_module_updated) {
VLOG(2) << "MLIR module is not updated. Using the original graph. "
<< "Do not convert mlir module back to graph";
return absl::OkStatus();
}
GraphExportConfig export_config;
absl::flat_hash_set<Node*> control_ret_nodes;
TF_RETURN_WITH_CONTEXT_IF_ERROR(tensorflow::tf2xla::v2::ConvertMlirToGraph(
*module_ref, export_config, options.graph,
options.flib_def, &control_ret_nodes),
"Error converting MLIR module back to graph");
return absl::OkStatus();
}
} | #include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/Builders.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::Test;
constexpr char kOk[] = "OK";
constexpr char kInvalidArgument[] = "INVALID_ARGUMENT";
constexpr char kSuccess[] = "kSuccess";
constexpr char kFailure[] = "kFailure";
class MockMlirOptimizationPass : public MlirOptimizationPass {
public:
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
MOCK_METHOD(Status, Run,
(const std::string& function_name,
const ConfigProto& config_proto, mlir::ModuleOp module,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(override));
};
class MockMlirV1CompatOptimizationPass : public MlirV1CompatOptimizationPass {
public:
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
MOCK_METHOD(Status, Run,
(const GraphOptimizationPassOptions& options,
mlir::ModuleOp module),
(override));
};
class ModifyMlirModulePass : public MlirOptimizationPass {
public:
explicit ModifyMlirModulePass(Status run_status) : run_status_(run_status) {}
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
Status Run(const std::string& function_name, const ConfigProto& config_proto,
mlir::ModuleOp module, const Graph& graph,
const FunctionLibraryDefinition& function_library) override {
mlir::Builder b(module.getContext());
auto producer = b.getNamedAttr("producer", b.getI32IntegerAttr(0));
auto min_consumer = b.getNamedAttr("min_consumer", b.getI32IntegerAttr(0));
auto bad_consumers =
b.getNamedAttr("bad_consumers", b.getI32ArrayAttr({1, 2, 3, 4}));
module->setAttr("tf.versions",
b.getDictionaryAttr(llvm::ArrayRef<mlir::NamedAttribute>(
{producer, min_consumer, bad_consumers})));
return run_status_;
}
Status run_status_;
};
FunctionDef XTimesTwo() {
const Tensor kTwo = test::AsScalar<int64>(2);
return FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
}
class MlirGraphOptimizationPassTest : public Test {
public:
void Init(Status pass_run_result,
const std::vector<MlirOptimizationPassState>& pass_states) {
graph_ = std::make_unique<Graph>(OpRegistry::Global());
int pass_priority = 0;
for (const MlirOptimizationPassState& pass_state : pass_states) {
auto optimization_pass =
std::make_unique<NiceMock<MockMlirOptimizationPass>>();
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
ON_CALL(*optimization_pass, Run(_, _, _, _, _))
.WillByDefault(Return(pass_run_result));
MlirOptimizationPassRegistry::Global().Add(pass_priority++,
std::move(optimization_pass));
pass_result_expected_[pass_state][pass_run_result.ok()]++;
}
flib_ = std::make_unique<FunctionLibraryDefinition>(graph_->flib_def());
}
void AddModuleModificationPass(MlirOptimizationPassState pass_state,
Status run_status) {
auto optimization_pass =
std::make_unique<NiceMock<ModifyMlirModulePass>>(run_status);
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
MlirOptimizationPassRegistry::Global().Add(10,
std::move(optimization_pass));
pass_result_expected_[pass_state][run_status.ok()]++;
}
void TearDown() override {
MlirOptimizationPassRegistry::Global().ClearPasses();
}
void verifyGraph(const GraphDef& original_graph_def, bool changed = false) {
#if defined(PLATFORM_GOOGLE)
GraphDef resulted_graph_def;
graph_->ToGraphDef(&resulted_graph_def);
if (changed)
EXPECT_THAT(resulted_graph_def,
Not(::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def))));
else
EXPECT_THAT(resulted_graph_def,
::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def)));
#endif
}
void verifyCounters() {
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kSuccess),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[true]);
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kFailure),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 1);
}
ConfigProto config_proto_;
FunctionOptimizationPass::FunctionOptions function_options_;
MlirFunctionOptimizationPass function_optimization_pass_;
DeviceSet device_set_;
std::unique_ptr<Graph> graph_;
std::unique_ptr<FunctionLibraryDefinition> flib_;
std::vector<std::string> control_ret_node_names_;
bool control_rets_updated_{false};
monitoring::testing::CellReader<int64_t> mlir_function_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_graph_optimization_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_function_pass_graph_conversion_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_graph_conversion_count");
std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
pass_result_expected_;
};
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsNoFallback) {
Init(Status(absl::StatusCode::kAborted, "aborted"),
{MlirOptimizationPassState::Enabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
Status(absl::StatusCode::kAborted, "aborted"));
verifyGraph(original_graph_def);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsDisabledFallback) {
Init(Status(absl::StatusCode::kAborted, "aborted"),
{MlirOptimizationPassState::Disabled,
MlirOptimizationPassState::FallbackEnabled});
FunctionDefLibrary flib;
*flib.add_function() = XTimesTwo();
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
graph_ = std::make_unique<Graph>(flib_def);
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
Status(absl::StatusCode::kAborted, "aborted"));
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
verifyGraph(original_graph_def);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassDoesNotFailFallback) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
absl::OkStatus());
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
verifyGraph(original_graph_def, true);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, GraphDoesntConvertUpdatesCounter) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
graph_ = std::make_unique<Graph>(OpRegistry::Global());
control_ret_node_names_.push_back("foo");
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
absl::OkStatus());
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 0);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kInvalidArgument),
1);
}
TEST(MlirOptimizationPassRegistry, RegisterPassesWithTheSamePriorityFails) {
MlirOptimizationPassRegistry::Global().Add(
0, std::make_unique<NiceMock<MockMlirOptimizationPass>>());
EXPECT_DEATH(MlirOptimizationPassRegistry::Global().Add(
0, std::make_unique<NiceMock<MockMlirOptimizationPass>>()),
"Pass priority must be unique.");
}
TEST(MlirV1CompatOptimizationPassRegistry, RegisterMultiplePassesFails) {
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>());
EXPECT_DEATH(
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>()),
"Only a single pass can be registered");
}
class MlirGraphOptimizationV1PassTest : public Test {
public:
void Init(Status pass_run_result,
const std::vector<MlirOptimizationPassState>& pass_states) {
graph_ = std::make_unique<Graph>(OpRegistry::Global());
MlirV1CompatOptimizationPassRegistry::Global().ClearPass();
for (const MlirOptimizationPassState& pass_state : pass_states) {
auto optimization_pass =
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>();
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
ON_CALL(*optimization_pass, Run(_, _))
.WillByDefault(Return(pass_run_result));
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::move(optimization_pass));
pass_result_expected_[pass_state][pass_run_result.ok()]++;
}
flib_ = std::make_unique<FunctionLibraryDefinition>(graph_->flib_def());
InitGraphOptions();
}
void verifyGraph(const GraphDef& original_graph_def, bool changed = false) {
#if defined(PLATFORM_GOOGLE)
GraphDef resulted_graph_def;
graph_->ToGraphDef(&resulted_graph_def);
if (changed)
EXPECT_THAT(resulted_graph_def,
Not(::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def))));
else
EXPECT_THAT(resulted_graph_def,
::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def)));
#endif
}
void InitGraphOptions() {
session_options_.config = config_proto_;
graph_optimization_pass_options_.device_set = &device_set_;
graph_optimization_pass_options_.session_options = &session_options_;
graph_optimization_pass_options_.graph = &graph_;
graph_optimization_pass_options_.flib_def = flib_.get();
}
void verifyCounters() {
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kSuccess),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kFailure),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 0);
}
void TearDown() override {
MlirV1CompatOptimizationPassRegistry::Global().ClearPass();
}
ConfigProto config_proto_;
FunctionOptimizationPass::FunctionOptions function_options_;
MlirV1CompatGraphOptimizationPass function_optimization_pass_;
DeviceSet device_set_;
std::unique_ptr<Graph> graph_;
std::unique_ptr<FunctionLibraryDefinition> flib_;
std::vector<std::string> control_ret_node_names_;
bool control_rets_updated_{false};
SessionOptions session_options_;
tensorflow::GraphOptimizationPassOptions graph_optimization_pass_options_;
std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
pass_result_expected_;
monitoring::testing::CellReader<int64_t> mlir_function_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_graph_optimization_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_function_pass_graph_conversion_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_graph_conversion_count");
};
TEST_F(MlirGraphOptimizationV1PassTest, OptimizationPassDoesNotFailFallback) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
EXPECT_EQ(function_optimization_pass_.Run(graph_optimization_pass_options_),
absl::OkStatus());
verifyGraph(original_graph_def, false);
verifyCounters();
}
} |
1,160 | cpp | tensorflow/tensorflow | size_utils | tensorflow/compiler/mlir/lite/utils/size_utils.cc | tensorflow/compiler/mlir/lite/utils/size_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_UTILS_SIZE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_UTILS_SIZE_UTILS_H_
#include <cstdint>
namespace mlir {
namespace TFL {
int32_t ConvertToTfliteSize(int64_t size);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/utils/size_utils.h"
#include <cstdint>
#include "mlir/IR/BuiltinTypes.h"
namespace mlir {
namespace TFL {
int32_t ConvertToTfliteSize(int64_t size) {
return mlir::ShapedType::isDynamic(size) ? -1 : static_cast<int32_t>(size);
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/size_utils.h"
#include "mlir/IR/BuiltinTypes.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
namespace {
TEST(SizeUtilTest, TestConvertsSize) {
ASSERT_EQ(ConvertToTfliteSize(1), 1);
ASSERT_EQ(ConvertToTfliteSize(-1), -1);
ASSERT_EQ(ConvertToTfliteSize(mlir::ShapedType::kDynamic), -1);
}
}
}
} |
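As a small usage illustration of ConvertToTfliteSize, a ranked MLIR shape can be mapped to TFLite's int32 convention (dynamic dimensions become -1) as sketched below; the helper name ToTfliteShape is an assumption for this example, and the input type is assumed to be ranked.

#include <cstdint>
#include <vector>

#include "mlir/IR/BuiltinTypes.h"
#include "tensorflow/compiler/mlir/lite/utils/size_utils.h"

// Maps every dimension of a ranked shaped type to TFLite's int32 sizes,
// turning mlir::ShapedType::kDynamic into -1 via ConvertToTfliteSize.
std::vector<int32_t> ToTfliteShape(mlir::ShapedType type) {
  std::vector<int32_t> shape;
  shape.reserve(type.getRank());
  for (int64_t dim : type.getShape()) {
    shape.push_back(mlir::TFL::ConvertToTfliteSize(dim));
  }
  return shape;
}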
1,161 | cpp | tensorflow/tensorflow | tftext_utils | tensorflow/compiler/mlir/lite/utils/tftext_utils.cc | tensorflow/compiler/mlir/lite/utils/tftext_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_UTILS_TFTEXT_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_UTILS_TFTEXT_UTILS_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/core/framework/op.h"
namespace mlir {
namespace TFL {
LogicalResult ConvertTFTextAPI(mlir::func::FuncOp func, llvm::StringRef api,
mlir::TF::FuncAttr attr);
bool IsTFTextRegistered(const tensorflow::OpRegistry* op_registry);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/utils/tftext_utils.h"
#include <optional>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace TFL {
namespace {
constexpr char kNgrams[] = "tftext:Ngrams";
constexpr char kWhitespaceTokenizer[] = "tftext:WhitespaceTokenizer";
constexpr char kCustomSgnnProjection[] = "tftext:custom:SgnnProjection";
constexpr char kTFImplements[] = "tf._implements";
using mlir::TF::FuncAttr;
using mlir::TF::StringType;
inline ConstBytesAttr CustomOption(OpBuilder* builder,
const std::string& content) {
return ConstBytesAttr::get(builder->getContext(),
StringRef(content.data(), content.size()));
}
inline TensorType GetInputType(func::FuncOp func, int idx) {
return mlir::dyn_cast_or_null<TensorType>(
func.getFunctionType().getInput(idx));
}
inline TensorType GetResultType(func::FuncOp func, int idx) {
return mlir::dyn_cast_or_null<TensorType>(
func.getFunctionType().getResult(idx));
}
inline bool RankEquals(const TensorType& type, int rank) {
return type && type.hasRank() && type.getRank() == rank;
}
LogicalResult VerifyWhitespaceTokenizer(func::FuncOp func) {
auto input_type = GetInputType(func, 0);
if (!input_type || !mlir::isa<StringType>(input_type.getElementType()) ||
!input_type.hasRank()) {
return func.emitError() << "Input should be a string tensor";
}
const std::vector<int> kValidNumOfOutput = {1, 2, 3};
if (input_type.getRank() >= kValidNumOfOutput.size()) {
return func.emitError()
<< "Unrecognized input rank: " << input_type.getRank();
}
if (func.getNumResults() != kValidNumOfOutput[input_type.getRank()]) {
return func.emitError()
<< "Expect " << kValidNumOfOutput[input_type.getRank()]
<< "output(s) when input has rank " << input_type.getRank();
}
auto value_type = GetResultType(func, 0);
if (!RankEquals(value_type, 1) ||
!mlir::isa<StringType>(value_type.getElementType())) {
return func.emitError() << "1st output should be string tensor";
}
if (func.getNumResults() > 1) {
auto offset_type = GetResultType(func, 1);
if (!RankEquals(offset_type, 1) ||
!offset_type.getElementType().isInteger(64)) {
return func.emitError() << "2nd output should be int64 tensor";
}
}
if (func.getNumResults() > 2) {
auto offset_type = GetResultType(func, 2);
if (!RankEquals(offset_type, 1) ||
!offset_type.getElementType().isInteger(64)) {
return func.emitError() << "3rd output should be int64 tensor";
}
}
return success();
}
LogicalResult ConvertWhitespaceTokenizer(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
OpBuilder builder(func.getBody());
std::string empty_option_buffer;
auto op = builder.create<CustomOp>(
func.getLoc(), func.getFunctionType().getResults(), func.getArguments(),
api, CustomOption(&builder, empty_option_buffer));
builder.create<func::ReturnOp>(func.getLoc(), op.getResults());
return success();
}
LogicalResult VerifyNgrams(func::FuncOp func) {
constexpr int kValues = 0;
constexpr int kRowSplits = 1;
if (func.getFunctionType().getInputs().size() !=
func.getFunctionType().getResults().size()) {
return func.emitError() << "Mismatched number of inputs and outputs.";
}
int row_splits = func.getFunctionType().getInputs().size() - kRowSplits;
if (row_splits == 0) {
auto input_values = GetInputType(func, kValues);
if (!input_values ||
!mlir::isa<StringType>(input_values.getElementType())) {
return func.emitError()
<< "Input " << kValues << " should be a string tensor";
}
auto output_values = GetResultType(func, kValues);
if (!output_values ||
!mlir::isa<StringType>(output_values.getElementType())) {
return func.emitError()
<< "Output " << kValues << " should be a string tensor";
}
if (input_values.hasRank() && output_values.hasRank() &&
input_values.getRank() != output_values.getRank()) {
return func.emitError() << "Input " << kValues << " and output "
<< kValues << " should have the same rank";
}
} else {
auto input_values = GetInputType(func, kValues);
if (!RankEquals(input_values, 1) ||
!mlir::isa<StringType>(input_values.getElementType())) {
return func.emitError()
<< "Input " << kValues << " should be a 1D string tensor";
}
auto output_values = GetResultType(func, kValues);
if (!RankEquals(output_values, 1) ||
!mlir::isa<StringType>(output_values.getElementType())) {
return func.emitError()
<< "Output " << kValues << " should be a 1D string tensor";
}
for (int i = 0; i < row_splits; ++i) {
const int row_index = i + kRowSplits;
auto input_row_splits = GetInputType(func, row_index);
if (!RankEquals(input_row_splits, 1) ||
!input_row_splits.getElementType().isInteger(64)) {
return func.emitError()
<< "Input " << row_index << " should be a 1D int64 tensor";
}
auto output_row_splits = GetResultType(func, row_index);
if (!RankEquals(output_row_splits, 1) ||
!output_row_splits.getElementType().isInteger(64)) {
return func.emitError()
<< "Output " << row_index << " should be a 1D int64 tensor";
}
}
}
return success();
}
LogicalResult CreateNgramsCustomOption(func::FuncOp func, DictionaryAttr attrs,
std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
auto width = mlir::dyn_cast_or_null<IntegerAttr>(attrs.get("width"));
if (!width) {
return func.emitError() << "'width' attribute is not set or not an integer";
}
fbb.Int("width", width.getInt());
auto string_separator =
mlir::dyn_cast_or_null<StringAttr>(attrs.get("string_separator"));
if (!string_separator) {
return func.emitError()
<< "'string_separator' attribute is not set or not a string";
}
std::string string_separator_str(string_separator.getValue().data(),
string_separator.getValue().size());
fbb.String("string_separator", string_separator_str);
auto axis = mlir::dyn_cast_or_null<IntegerAttr>(attrs.get("axis"));
if (!axis) {
return func.emitError() << "'axis' attribute is not set or not an integer";
}
fbb.Int("axis", axis.getInt());
auto reduction_type =
mlir::dyn_cast_or_null<StringAttr>(attrs.get("reduction_type"));
if (!reduction_type) {
return func.emitError()
<< "'reduction_type' attribute is not set or not a string";
}
std::string reduction_type_str(reduction_type.getValue().data(),
reduction_type.getValue().size());
fbb.String("reduction_type", reduction_type_str);
fbb.EndMap(start_map);
fbb.Finish();
custom_option_buffer.assign(fbb.GetBuffer().begin(), fbb.GetBuffer().end());
return success();
}
LogicalResult ConvertNgrams(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
OpBuilder builder(func.getBody());
std::string custom_option_buffer;
if (failed(CreateNgramsCustomOption(func, attr.getAttrs(),
custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func.getLoc(), func.getFunctionType().getResults(), func.getArguments(),
api, CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func.getLoc(), op.getResults());
return success();
}
LogicalResult VerifySgnnProjection(func::FuncOp func, FuncAttr attr) {
if (func.getFunctionType().getNumInputs() != 2 ||
func.getFunctionType().getNumResults() != 1) {
return func.emitError() << "Mismatched number of inputs and outputs.";
}
auto values_type = GetInputType(func, 0);
if (!values_type || !mlir::isa<StringType>(values_type.getElementType())) {
return func.emitError() << "First input should be a string tensor";
}
auto row_splits_type = GetInputType(func, 1);
if (!row_splits_type ||
!mlir::isa<IntegerType>(row_splits_type.getElementType())) {
return func.emitError() << "Second input should be an integer tensor";
}
auto hash_seed =
mlir::dyn_cast_or_null<ArrayAttr>(attr.getAttrs().get("hash_seed"));
if (!hash_seed) {
return func.emitError()
<< "'hash_seed' attribute is not set or not an array";
}
auto output_type = GetResultType(func, 0);
if (!output_type || !mlir::isa<FloatType>(output_type.getElementType()) ||
!RankEquals(output_type, 2)) {
return func.emitError() << "Output should be a 2D float tensor.";
}
if (output_type.getDimSize(1) != hash_seed.size()) {
return func.emitError()
<< "Output 2nd dimension should be the num of hash seeds.";
}
auto buckets =
mlir::dyn_cast_or_null<IntegerAttr>(attr.getAttrs().get("buckets"));
if (!buckets) {
return func.emitError() << "'buckets' attribute is not set or not int";
}
return success();
}
LogicalResult CreateSgnnProjectionCustomOption(
func::FuncOp func, DictionaryAttr attrs,
std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
auto hash_seed = mlir::dyn_cast_or_null<ArrayAttr>(attrs.get("hash_seed"));
auto vector_start = fbb.StartVector("hash_seed");
for (int i = 0; i < hash_seed.size(); i++) {
fbb.Add(static_cast<int32_t>(
mlir::dyn_cast<IntegerAttr>(*(hash_seed.getValue().data() + i))
.getInt()));
}
fbb.EndVector(vector_start, true, false);
auto buckets = mlir::dyn_cast_or_null<IntegerAttr>(attrs.get("buckets"));
fbb.Int("buckets", buckets.getInt());
fbb.EndMap(start_map);
fbb.Finish();
custom_option_buffer.assign(fbb.GetBuffer().begin(), fbb.GetBuffer().end());
return success();
}
LogicalResult ConvertSgnnProjection(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
OpBuilder builder(func.getBody());
std::string custom_option_buffer;
if (failed(CreateSgnnProjectionCustomOption(func, attr.getAttrs(),
custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func.getLoc(), func.getFunctionType().getResults(), func.getArguments(),
api, CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func.getLoc(), op.getResults());
return success();
}
}
LogicalResult ConvertTFTextAPI(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
if (api.str() == kWhitespaceTokenizer) {
if (succeeded(VerifyWhitespaceTokenizer(func))) {
return ConvertWhitespaceTokenizer(func, api, attr);
}
} else if (api.str() == kNgrams) {
if (succeeded(VerifyNgrams(func))) {
return ConvertNgrams(func, api, attr);
}
} else if (api.str() == kCustomSgnnProjection) {
if (succeeded(VerifySgnnProjection(func, attr))) {
return ConvertSgnnProjection(func, api, attr);
}
}
return failure();
}
bool IsTFTextRegistered(const tensorflow::OpRegistry* op_registry) {
const std::vector<std::string> kTFTextOps = {
"WhitespaceTokenizeWithOffsets",
};
for (const auto& iter : kTFTextOps) {
    if (op_registry->LookUp(iter)) {
return true;
}
}
return false;
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/tftext_utils.h"
#include <memory>
#include <string>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
using tensorflow::OpRegistrationData;
using tensorflow::OpRegistry;
using tensorflow::Status;
namespace {
void Register(const std::string& op_name, OpRegistry* registry) {
registry->Register([op_name](OpRegistrationData* op_reg_data) -> Status {
op_reg_data->op_def.set_name(op_name);
return absl::OkStatus();
});
}
}
TEST(TfTextUtilsTest, TestTfTextRegistered) {
std::unique_ptr<OpRegistry> registry(new OpRegistry);
Register("WhitespaceTokenizeWithOffsets", registry.get());
EXPECT_TRUE(IsTFTextRegistered(registry.get()));
}
TEST(TfTextUtilsTest, TestTfTextNotRegistered) {
std::unique_ptr<OpRegistry> registry(new OpRegistry);
Register("Test", registry.get());
EXPECT_FALSE(IsTFTextRegistered(registry.get()));
}
}
} |
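For context on the flexbuffers payload written by CreateNgramsCustomOption above, a consumer-side decode could look roughly like the sketch below; this is an illustrative assumption about how a TFLite custom-op kernel might read the options, not code from the TF Text kernels.

#include <cstddef>
#include <cstdint>
#include <string>

#include "flatbuffers/flexbuffers.h"

struct NgramsOptions {
  int width = 0;
  std::string string_separator;
  int axis = 0;
  std::string reduction_type;
};

// Decodes the map written by CreateNgramsCustomOption: keys "width",
// "string_separator", "axis" and "reduction_type".
NgramsOptions ParseNgramsOptions(const uint8_t* buffer, size_t length) {
  NgramsOptions options;
  const flexbuffers::Map m = flexbuffers::GetRoot(buffer, length).AsMap();
  options.width = m["width"].AsInt32();
  options.string_separator = m["string_separator"].AsString().str();
  options.axis = m["axis"].AsInt32();
  options.reduction_type = m["reduction_type"].AsString().str();
  return options;
}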
1,162 | cpp | tensorflow/tensorflow | perception_ops_utils | tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc | tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_UTILS_PERCEPTION_OPS_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_UTILS_PERCEPTION_OPS_UTILS_H_
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
namespace mlir {
namespace TFL {
class ConvertMaxUnpoolingFunc {
public:
explicit ConvertMaxUnpoolingFunc(func::FuncOp func, mlir::TF::FuncAttr attr)
: func_(func), attr_(attr) {}
LogicalResult RewriteFunc();
LogicalResult VerifySignature();
private:
LogicalResult CreateCustomOptions(std::string& custom_option_buffer);
func::FuncOp func_;
mlir::TF::FuncAttr attr_;
};
class ConvertDenseImageWarpFunc {
public:
explicit ConvertDenseImageWarpFunc(func::FuncOp func) : func_(func) {}
LogicalResult RewriteFunc();
LogicalResult VerifySignature();
private:
func::FuncOp func_;
};
}
}
#endif
#include "tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h"
#include <string>
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
namespace mlir {
namespace TFL {
namespace {
constexpr char kTFImplements[] = "tf._implements";
constexpr char kMaxUnpooling[] = "MaxUnpooling2D";
constexpr char kImageWarping[] = "DenseImageWarp";
inline ConstBytesAttr CustomOption(OpBuilder* builder,
const std::string& content) {
return ConstBytesAttr::get(builder->getContext(),
StringRef(content.data(), content.size()));
}
inline LogicalResult HasIntegerArrayWithSize(func::FuncOp* func,
const DictionaryAttr& attrs,
const std::string& attr_name,
int N) {
ArrayAttr array_attr =
mlir::dyn_cast_or_null<ArrayAttr>(attrs.get(attr_name));
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitWarning()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " must be set and has size of " << N;
}
for (Attribute integer_attr : array_attr.getValue()) {
IntegerAttr value = mlir::dyn_cast<IntegerAttr>(integer_attr);
if (!value) {
return func->emitWarning()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " does not contain integer values";
}
}
return success();
}
inline LogicalResult GetIntegerArraySafe(
func::FuncOp* func, const DictionaryAttr& attrs,
const std::string& attr_name, llvm::SmallVectorImpl<int32_t>* results,
int N) {
ArrayAttr array_attr =
mlir::dyn_cast_or_null<ArrayAttr>(attrs.get(attr_name));
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitError()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " must be set and has size of " << N;
}
results->reserve(N);
for (Attribute integer_attr : array_attr.getValue()) {
IntegerAttr value = mlir::dyn_cast<IntegerAttr>(integer_attr);
if (!value) {
return func->emitError()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " does not contain integer values";
}
results->push_back(value.getInt());
}
return success();
}
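// Note the split above: HasIntegerArrayWithSize only checks the attribute and
// emits a warning (it backs VerifySignature below), while GetIntegerArraySafe
// additionally copies the N integers into `results` and emits a hard error
// (it backs CreateCustomOptions, where a malformed attribute must abort the
// rewrite).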
}
LogicalResult ConvertMaxUnpoolingFunc::RewriteFunc() {
func_.eraseBody();
func_.addEntryBlock();
func_->setAttr(kTFImplements,
StringAttr::get(func_.getContext(), kMaxUnpooling));
OpBuilder builder(func_.getBody());
std::string custom_option_buffer;
if (failed(CreateCustomOptions(custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func_.getLoc(), func_.getFunctionType().getResults(),
func_.getArguments(), kMaxUnpooling,
CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func_.getLoc(), op.getResults());
return success();
}
LogicalResult ConvertMaxUnpoolingFunc::VerifySignature() {
if (func_.getNumArguments() != 2) {
return func_.emitWarning()
<< "Invalid number of arguments to " << kMaxUnpooling << ": "
<< func_.getNumArguments();
}
if (func_.getFunctionType().getNumResults() != 1) {
return func_.emitWarning()
<< "Invalid number of results from " << kMaxUnpooling << ": "
<< func_.getFunctionType().getNumResults();
}
auto attrs = attr_.getAttrs();
if (failed(HasIntegerArrayWithSize(&func_, attrs, "pool_size", 2))) {
return failure();
}
if (failed(HasIntegerArrayWithSize(&func_, attrs, "strides", 2))) {
return failure();
}
auto padding = mlir::dyn_cast_or_null<StringAttr>(attrs.get("padding"));
if (!padding) {
return func_.emitWarning() << "'padding' attribute for " << kMaxUnpooling
<< " is not set or not a string";
}
if (padding.getValue() != "VALID" && padding.getValue() != "SAME") {
return func_.emitWarning()
<< "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'";
}
return success();
}
LogicalResult ConvertMaxUnpoolingFunc::CreateCustomOptions(
std::string& custom_option_buffer) {
auto attrs = attr_.getAttrs();
TfLitePoolParams pool_params;
llvm::SmallVector<int32_t, 2> pool_size;
if (failed(GetIntegerArraySafe(&func_, attrs, "pool_size", &pool_size, 2))) {
return failure();
}
pool_params.filter_height = pool_size[0];
pool_params.filter_width = pool_size[1];
llvm::SmallVector<int32_t, 2> strides;
if (failed(GetIntegerArraySafe(&func_, attrs, "strides", &strides, 2))) {
return failure();
}
pool_params.stride_height = strides[0];
pool_params.stride_width = strides[1];
auto padding = mlir::dyn_cast_or_null<StringAttr>(attrs.get("padding"));
if (!padding) {
return func_.emitError() << "'padding' attribute for " << kMaxUnpooling
<< " is not set or not a string";
}
if (padding.getValue() == "VALID") {
pool_params.padding = kTfLitePaddingValid;
} else if (padding.getValue() == "SAME") {
pool_params.padding = kTfLitePaddingSame;
} else {
return func_.emitError()
<< "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'";
}
pool_params.activation = kTfLiteActNone;
pool_params.computed.padding = TfLitePaddingValues{0, 0, 0, 0};
#if FLATBUFFERS_LITTLEENDIAN == 0
int32_t* p = reinterpret_cast<int32_t*>(&pool_params);
for (size_t i = 0; i < sizeof(TfLitePoolParams) / 4; i++, p++)
*p = flatbuffers::EndianSwap(*p);
#endif
custom_option_buffer.assign(reinterpret_cast<char*>(&pool_params),
sizeof(TfLitePoolParams));
return success();
}
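// The custom option for MaxUnpooling2D is simply the raw bytes of
// TfLitePoolParams (filter size, strides, padding, activation); the
// FLATBUFFERS_LITTLEENDIAN guard byte-swaps the 32-bit fields on big-endian
// hosts so the serialized buffer is always little-endian.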
LogicalResult ConvertDenseImageWarpFunc::RewriteFunc() {
func_.eraseBody();
func_.addEntryBlock();
func_->setAttr(kTFImplements,
StringAttr::get(func_.getContext(), kImageWarping));
OpBuilder builder(func_.getBody());
auto op = builder.create<CustomOp>(func_.getLoc(),
func_.getFunctionType().getResults(),
func_.getArguments(), kImageWarping,
CustomOption(&builder, ""));
builder.create<func::ReturnOp>(func_.getLoc(), op.getResults());
return success();
}
LogicalResult ConvertDenseImageWarpFunc::VerifySignature() {
if (func_.getNumArguments() != 2) {
return func_.emitWarning()
<< "Invalid number of arguments to " << kImageWarping << ": "
<< func_.getNumArguments();
}
if (func_.getFunctionType().getNumResults() != 1) {
return func_.emitWarning()
<< "Invalid number of results from " << kImageWarping << ": "
<< func_.getFunctionType().getNumResults();
}
auto image_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getInput(0));
if (!image_type || !image_type.getElementType().isF32() ||
image_type.getRank() != 4) {
return func_.emitWarning() << "Image should be a 4D float tensor";
}
auto flow_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getInput(1));
if (!flow_type || !flow_type.getElementType().isF32() ||
flow_type.getRank() != 4) {
return func_.emitWarning() << "Flow should be a 4D float tensor";
}
auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getResult(0));
if (!output_type || !output_type.getElementType().isF32() ||
output_type.getRank() != 4) {
return func_.emitWarning() << "Output should be a 4D float tensor";
}
return success();
}
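// In short, DenseImageWarp is only accepted with the signature
// (4-D f32 image, 4-D f32 flow) -> 4-D f32 output; any other rank or element
// type is rejected with a warning.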
}
} | #include "tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
namespace {
template <int NInput, int NOutput>
func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<mlir::Type, NInput>& input_types,
const SmallVector<mlir::Type, NOutput>& output_types) {
auto func_type = builder->getFunctionType(input_types, output_types);
auto func = func::FuncOp::create(
mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
func_type, {});
func.addEntryBlock();
mlir::StringAttr attr_value = builder->getStringAttr("MaxUnpooling2D");
func->setAttr("tf._implements", attr_value);
return func;
}
func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<int64_t, 4>& input_shape,
const SmallVector<int64_t, 4>& output_shape) {
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto indices_type = RankedTensorType::get(input_shape, builder->getI64Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
SmallVector<mlir::Type, 2> input_types{input_type, indices_type};
SmallVector<mlir::Type, 1> output_types{output_type};
return createMaxUnpoolingFunc<2, 1>(builder, input_types, output_types);
}
template <int N>
ArrayAttr createInt32Array(mlir::Builder* builder, mlir::MLIRContext* context,
const SmallVector<int32_t, N>& values) {
SmallVector<Attribute, N> ret;
for (int32_t value : values) {
ret.push_back(builder->getI32IntegerAttr(value));
}
return ArrayAttr::get(context, ret);
}
template <int N>
ArrayAttr createInt64Array(mlir::Builder* builder, mlir::MLIRContext* context,
const SmallVector<int64_t, N>& values) {
SmallVector<Attribute, N> ret;
for (int64_t value : values) {
ret.push_back(builder->getI64IntegerAttr(value));
}
return ArrayAttr::get(context, ret);
}
mlir::TF::FuncAttr createMaxUnpoolingAttr(mlir::MLIRContext* context,
const std::string& padding,
const ArrayAttr& pool_size,
const ArrayAttr& strides) {
SmallVector<::mlir::NamedAttribute, 3> fields;
auto padding_id = ::mlir::StringAttr::get(context, "padding");
fields.emplace_back(padding_id, StringAttr::get(context, padding));
auto pool_size_id = ::mlir::StringAttr::get(context, "pool_size");
fields.emplace_back(pool_size_id, pool_size);
auto strides_id = ::mlir::StringAttr::get(context, "strides");
fields.emplace_back(strides_id, strides);
DictionaryAttr dict = DictionaryAttr::get(context, fields);
return TF::FuncAttr::get(context, "MaxUnpooling2D", dict);
}
}
class PerceptionUtilsTest : public ::testing::Test {
protected:
PerceptionUtilsTest() {}
void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
context_->loadDialect<mlir::arith::ArithDialect, mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect, TensorFlowLiteDialect>();
builder_ = std::make_unique<mlir::Builder>(context_.get());
fused_max_unpooling_func_ =
createMaxUnpoolingFunc(builder_.get(), {2, 4, 4, 2}, {2, 2, 2, 2});
func_attr_ = createMaxUnpoolingAttr(
context_.get(), "SAME",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
}
void TearDown() override {
fused_max_unpooling_func_.erase();
builder_.reset();
}
func::FuncOp fused_max_unpooling_func_;
mlir::TF::FuncAttr func_attr_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
TEST_F(PerceptionUtilsTest, VerifySignatureValid) {
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr_);
EXPECT_FALSE(failed(convert.VerifySignature()));
}
TEST_F(PerceptionUtilsTest, VerifySignatureInvalid) {
auto input_type = RankedTensorType::get({1, 2, 2, 1}, builder_->getF32Type());
auto output_type =
RankedTensorType::get({1, 2, 1, 1}, builder_->getF32Type());
SmallVector<mlir::Type, 1> input_types{input_type};
SmallVector<mlir::Type, 1> output_types{output_type};
auto max_unpooling_func =
createMaxUnpoolingFunc<1, 1>(builder_.get(), input_types, output_types);
mlir::TFL::ConvertMaxUnpoolingFunc convert(max_unpooling_func, func_attr_);
EXPECT_TRUE(failed(convert.VerifySignature()));
max_unpooling_func->erase();
}
TEST_F(PerceptionUtilsTest, RewriteValid) {
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr_);
EXPECT_FALSE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongPadding) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "INVALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongFilter) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "VALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongStrides) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "VALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2, 0}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
}
} |
1,163 | cpp | tensorflow/tensorflow | convert_type | tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc | tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_CONVERT_TYPE_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_CONVERT_TYPE_H_
#include "mlir/IR/Builders.h"
#include "mlir/IR/Types.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
using tsl::StatusOr;
Status ConvertDataType(DataType dtype, mlir::Builder builder, mlir::Type* type);
Status ConvertScalarTypeToDataType(mlir::Type type, DataType* dtype);
Status ConvertToDataType(mlir::Type type, DataType* dtype);
void ConvertToMlirShape(const TensorShape& input_shape,
llvm::SmallVectorImpl<int64_t>* shape);
Status ConvertToMlirShape(const TensorShapeProto& input_shape,
llvm::SmallVectorImpl<int64_t>* shape);
absl::StatusOr<mlir::Type> ConvertToMlirTensorType(
const TensorShapeProto& shape, DataType dtype, mlir::Builder* builder);
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include <limits>
#include "absl/strings/str_cat.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
using mlir::Builder;
using mlir::ShapedType;
using mlir::Type;
Status ConvertDataType(DataType dtype, Builder builder, Type* type) {
switch (dtype) {
case DT_HALF:
*type = builder.getF16Type();
return absl::OkStatus();
case DT_FLOAT:
*type = builder.getF32Type();
return absl::OkStatus();
case DT_DOUBLE:
*type = builder.getF64Type();
return absl::OkStatus();
case DT_BOOL:
*type = builder.getIntegerType(1);
return absl::OkStatus();
case DT_INT8:
*type = builder.getIntegerType(8);
return absl::OkStatus();
case DT_INT16:
*type = builder.getIntegerType(16);
return absl::OkStatus();
case DT_INT32:
*type = builder.getIntegerType(32);
return absl::OkStatus();
case DT_INT64:
*type = builder.getIntegerType(64);
return absl::OkStatus();
case DT_UINT8:
*type = builder.getIntegerType(8, false);
return absl::OkStatus();
case DT_UINT16:
*type = builder.getIntegerType(16, false);
return absl::OkStatus();
case DT_UINT32:
*type = builder.getIntegerType(32, false);
return absl::OkStatus();
case DT_UINT64:
*type = builder.getIntegerType(64, false);
return absl::OkStatus();
case DT_BFLOAT16:
*type = builder.getBF16Type();
return absl::OkStatus();
case DT_COMPLEX64:
*type = mlir::ComplexType::get(builder.getF32Type());
return absl::OkStatus();
case DT_COMPLEX128:
*type = mlir::ComplexType::get(builder.getF64Type());
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E4M3FN:
*type = builder.getFloat8E4M3FNType();
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E5M2:
*type = builder.getFloat8E5M2Type();
return absl::OkStatus();
case DT_INT4:
*type = builder.getIntegerType(4, true);
return absl::OkStatus();
case DT_UINT4:
*type = builder.getIntegerType(4, false);
return absl::OkStatus();
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
case DT_##enumerant: \
*type = builder.getType<mlir::tf_type::tftype##Type>(); \
return OkStatus();
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.def"
default:
return errors::Unimplemented(absl::StrCat(
"Converting DataType '", DataTypeString(dtype), "' to MLIR Type"));
}
}
Status ConvertScalarTypeToDataType(Type type, DataType* dtype) {
if (type.isF16()) {
*dtype = DT_HALF;
return absl::OkStatus();
} else if (type.isF32()) {
*dtype = DT_FLOAT;
return absl::OkStatus();
} else if (type.isF64()) {
*dtype = DT_DOUBLE;
return absl::OkStatus();
} else if (type.isBF16()) {
*dtype = DT_BFLOAT16;
return absl::OkStatus();
} else if (type.isFloat8E4M3FN()) {
*dtype = DT_FLOAT8_E4M3FN;
return absl::OkStatus();
} else if (type.isFloat8E5M2()) {
*dtype = DT_FLOAT8_E5M2;
return absl::OkStatus();
} else if (auto itype = mlir::dyn_cast<mlir::IntegerType>(type)) {
switch (itype.getWidth()) {
case 1:
*dtype = DT_BOOL;
return absl::OkStatus();
case 4:
*dtype = itype.isUnsigned() ? DT_UINT4 : DT_INT4;
return absl::OkStatus();
case 8:
*dtype = itype.isUnsigned() ? DT_UINT8 : DT_INT8;
return absl::OkStatus();
case 16:
*dtype = itype.isUnsigned() ? DT_UINT16 : DT_INT16;
return absl::OkStatus();
case 32:
*dtype = itype.isUnsigned() ? DT_UINT32 : DT_INT32;
return absl::OkStatus();
case 64:
*dtype = itype.isUnsigned() ? DT_UINT64 : DT_INT64;
return absl::OkStatus();
default:
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
} else if (auto complex_type = mlir::dyn_cast<mlir::ComplexType>(type)) {
auto etype = complex_type.getElementType();
if (etype.isF32()) {
*dtype = DT_COMPLEX64;
return absl::OkStatus();
} else if (etype.isF64()) {
*dtype = DT_COMPLEX128;
return absl::OkStatus();
}
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (type.isa<mlir::tf_type::tftype##Type>()) { \
*dtype = DT_##enumerant; \
return OkStatus(); \
}
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.def"
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
Status ConvertToDataType(Type type, DataType* dtype) {
if (auto stype = mlir::dyn_cast<ShapedType>(type)) {
TF_RETURN_IF_ERROR(
ConvertScalarTypeToDataType(stype.getElementType(), dtype));
} else {
TF_RETURN_IF_ERROR(ConvertScalarTypeToDataType(type, dtype));
}
return absl::OkStatus();
}
void ConvertToMlirShape(const TensorShape& input_shape,
llvm::SmallVectorImpl<int64_t>* shape) {
shape->reserve(input_shape.dims());
for (const auto& d : input_shape) {
shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size);
}
}
Status ConvertToMlirShape(const TensorShapeProto& input_shape,
llvm::SmallVectorImpl<int64_t>* shape) {
shape->reserve(input_shape.dim_size());
auto& dims = input_shape.dim();
for (auto& d : dims) {
if (d.size() > std::numeric_limits<int64_t>::max()) {
return errors::InvalidArgument("Shape element overflows");
}
shape->push_back(d.size() == kTFDynamicSize ? ShapedType::kDynamic
: d.size());
}
return absl::OkStatus();
}
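// A proto dimension of kTFDynamicSize (-1) is mapped to ShapedType::kDynamic,
// so, for example, a {-1, 27, -1} shape with DT_BFLOAT16 ends up printing as
// tensor<?x27x?xbf16> (see the unit test below).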
absl::StatusOr<mlir::Type> ConvertToMlirTensorType(
const TensorShapeProto& shape, DataType dtype, mlir::Builder* builder) {
mlir::Type element_type;
TF_RETURN_IF_ERROR(ConvertDataType(dtype, *builder, &element_type));
if (shape.unknown_rank()) {
return mlir::UnrankedTensorType::get(element_type);
}
llvm::SmallVector<int64_t, 4> shape_dims;
TF_RETURN_IF_ERROR(ConvertToMlirShape(shape, &shape_dims));
return GetTypeFromTFTensorShape(shape_dims, element_type);
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include <string>
#include <vector>
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
std::string ConvertToMlirString(const std::vector<int64_t>& dims,
bool unknown_rank, DataType dtype) {
TensorShapeProto shape;
shape.set_unknown_rank(unknown_rank);
for (int64_t dim : dims) {
shape.add_dim()->set_size(dim);
}
mlir::MLIRContext context;
mlir::Builder b(&context);
auto status_or = ConvertToMlirTensorType(shape, dtype, &b);
std::string buf;
llvm::raw_string_ostream os(buf);
status_or.value().print(os);
return os.str();
}
TEST(MlirConvertType, ConvertToMlirTensorType) {
EXPECT_EQ("tensor<4x8x16xi32>",
ConvertToMlirString({4, 8, 16}, false,
DataType::DT_INT32));
EXPECT_EQ("tensor<?x27x?xbf16>",
ConvertToMlirString({-1, 27, -1}, false,
DataType::DT_BFLOAT16));
EXPECT_EQ("tensor<*xf32>",
ConvertToMlirString({}, true, DataType::DT_FLOAT));
}
}
} |
1,164 | cpp | tensorflow/tensorflow | error_collector_inst | tensorflow/compiler/mlir/lite/metrics/error_collector_inst.cc | tensorflow/compiler/mlir/lite/metrics/error_collector_inst_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_METRICS_ERROR_COLLECTOR_INST_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_METRICS_ERROR_COLLECTOR_INST_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/PassInstrumentation.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
#include "tensorflow/lite/python/metrics/converter_error_data.pb.h"
namespace mlir {
namespace TFL {
class ErrorCollectorInstrumentation : public PassInstrumentation {
using ConverterErrorData = tflite::metrics::ConverterErrorData;
using ErrorCode = ConverterErrorData::ErrorCode;
public:
explicit ErrorCollectorInstrumentation(MLIRContext *context);
private:
void runBeforePass(Pass *pass, Operation *module) override;
void runAfterPass(Pass *pass, Operation *module) override;
void runAfterPassFailed(Pass *pass, Operation *module) override;
std::unique_ptr<ScopedDiagnosticHandler> handler_;
std::unordered_map<Location, std::string, LocationHash> loc_to_name_;
std::string common_error_message_;
std::string pass_name_;
ErrorCollector *error_collector_;
};
constexpr char kErrorCodePrefix[] = "Error code: ";
inline InFlightDiagnostic AttachErrorCode(InFlightDiagnostic &&diag,
int error_code) {
using tflite::metrics::ConverterErrorData;
diag.attachNote() << kErrorCodePrefix
<< ConverterErrorData::ErrorCode_Name(error_code);
return std::move(diag);
}
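// A typical use, sketched from the unit test further below (the message text
// is illustrative):
//   AttachErrorCode(op->emitError() << "needs flex",
//                   tflite::metrics::ConverterErrorData::ERROR_NEEDS_FLEX_OPS);
// The attached note ("Error code: ...") is parsed back by
// ErrorCollectorInstrumentation when the diagnostic is handled.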
}
}
#endif
#include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
namespace mlir {
namespace TFL {
namespace {
inline std::string extract_pass_name(const std::string &signature) {
const std::vector<std::string> &v = absl::StrSplit(signature, "::");
return v.back();
}
inline std::string extract_op_name_from_error_message(
const std::string &error_message) {
int end_pos = error_message.find("' op");
if ((absl::StartsWith(error_message, "'tf.") ||
absl::StartsWith(error_message, "'tfl.")) &&
end_pos != std::string::npos) {
return error_message.substr(1, end_pos - 1);
}
return "";
}
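// Example (illustrative): for a diagnostic such as
//   "'tf.Const' op requires a value attribute"
// this returns "tf.Const"; messages that do not start with 'tf. / 'tfl. or
// that lack the "' op" marker yield an empty string.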
const int kMaxAcceptedNoteSize = 1024;
}
ErrorCollectorInstrumentation::ErrorCollectorInstrumentation(
MLIRContext *context)
: error_collector_(ErrorCollector::GetErrorCollector()) {
handler_ = std::make_unique<ScopedDiagnosticHandler>(
context, [this](Diagnostic &diag) {
if (diag.getSeverity() == DiagnosticSeverity::Error) {
Location loc = diag.getLocation();
std::string error_message = diag.str();
std::string op_name, error_code;
if (loc_to_name_.count(loc)) {
op_name = loc_to_name_[loc];
} else {
op_name = extract_op_name_from_error_message(diag.str());
}
for (const auto ¬e : diag.getNotes()) {
const std::string note_str = note.str();
if (absl::StartsWith(note_str, kErrorCodePrefix)) {
error_code = note_str.substr(sizeof(kErrorCodePrefix) - 1);
}
error_message += "\n";
if (note_str.size() <= kMaxAcceptedNoteSize) {
error_message += note_str;
} else {
error_message += note_str.substr(0, kMaxAcceptedNoteSize);
error_message += "...";
}
}
ErrorCode error_code_enum = ConverterErrorData::UNKNOWN;
bool has_valid_error_code =
ConverterErrorData::ErrorCode_Parse(error_code, &error_code_enum);
if (!op_name.empty() || has_valid_error_code) {
error_collector_->ReportError(NewConverterErrorData(
pass_name_, error_message, error_code_enum, op_name, loc));
} else {
common_error_message_ += diag.str();
common_error_message_ += "\n";
}
}
return failure();
});
}
void ErrorCollectorInstrumentation::runBeforePass(Pass *pass,
Operation *module) {
auto collectOps = [this](Operation *op) {
const auto &op_name = op->getName().getStringRef().str();
if (absl::StartsWith(op_name, "tf.") || absl::StartsWith(op_name, "tfl.")) {
loc_to_name_.emplace(op->getLoc(), op_name);
}
};
for (auto ®ion : module->getRegions()) {
region.walk(collectOps);
}
pass_name_ = extract_pass_name(pass->getName().str());
error_collector_->Clear();
}
void ErrorCollectorInstrumentation::runAfterPass(Pass *pass,
Operation *module) {
loc_to_name_.clear();
pass_name_.clear();
common_error_message_.clear();
error_collector_->Clear();
}
void ErrorCollectorInstrumentation::runAfterPassFailed(Pass *pass,
Operation *module) {
if (error_collector_->CollectedErrors().empty() &&
!common_error_message_.empty()) {
error_collector_->ReportError(NewConverterErrorData(
pass_name_, common_error_message_, ConverterErrorData::UNKNOWN,
"", module->getLoc()));
}
loc_to_name_.clear();
pass_name_.clear();
common_error_message_.clear();
}
}
} | #include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include <cstddef>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/lite/python/metrics/converter_error_data.pb.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFL {
namespace {
using tsl::StatusOr;
class MockSuccessPass
: public PassWrapper<MockSuccessPass, OperationPass<ModuleOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MockSuccessPass)
explicit MockSuccessPass() = default;
private:
void runOnOperation() override {
getOperation().walk([](Operation* nestedOp) {
nestedOp->emitError()
<< "Error at " << nestedOp->getName().getStringRef().str() << " op";
});
};
};
class MockFailurePass
: public PassWrapper<MockFailurePass, OperationPass<ModuleOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MockFailurePass)
explicit MockFailurePass() = default;
private:
void runOnOperation() override {
getOperation().walk([](Operation* nestedOp) {
if (nestedOp->getName().getStringRef().str().rfind("tf.") !=
std::string::npos) {
AttachErrorCode(
nestedOp->emitError()
<< "Failed at " << nestedOp->getName().getStringRef().str()
<< " op",
tflite::metrics::ConverterErrorData::ERROR_NEEDS_FLEX_OPS);
}
});
signalPassFailure();
};
};
absl::StatusOr<OwningOpRef<mlir::ModuleOp>> LoadModule(
MLIRContext* context, const std::string& file_name) {
std::string error_message;
auto file = openInputFile(file_name, &error_message);
if (!file) {
return tensorflow::errors::InvalidArgument("fail to open input file");
}
llvm::SourceMgr source_mgr;
source_mgr.AddNewSourceBuffer(std::move(file), llvm::SMLoc());
return OwningOpRef<mlir::ModuleOp>(
parseSourceFile<mlir::ModuleOp>(source_mgr, context));
}
TEST(ErrorCollectorTest, TestSuccessPass) {
std::string input_file = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/lite/metrics/testdata/strided_slice.mlir");
MLIRContext context;
context.getOrLoadDialect<mlir::func::FuncDialect>();
context.getOrLoadDialect<TF::TensorFlowDialect>();
context.enableMultithreading();
auto module = LoadModule(&context, input_file);
EXPECT_EQ(module.ok(), true);
PassManager pm(module.value().get()->getName(),
OpPassManager::Nesting::Implicit);
pm.addPass(std::make_unique<MockSuccessPass>());
pm.addInstrumentation(
std::make_unique<ErrorCollectorInstrumentation>(&context));
EXPECT_EQ(succeeded(pm.run(module.value().get())), true);
auto collected_errors =
ErrorCollector::GetErrorCollector()->CollectedErrors();
EXPECT_EQ(collected_errors.size(), 0);
}
TEST(ErrorCollectorTest, TestFailurePass) {
using tflite::metrics::ConverterErrorData;
MLIRContext context;
context.getOrLoadDialect<mlir::func::FuncDialect>();
context.getOrLoadDialect<TF::TensorFlowDialect>();
const std::string input_file =
"tensorflow/compiler/mlir/lite/metrics/testdata/strided_slice.mlir";
auto input_file_id = StringAttr::get(&context, input_file);
context.enableMultithreading();
auto module =
LoadModule(&context, tensorflow::GetDataDependencyFilepath(input_file));
EXPECT_EQ(module.ok(), true);
PassManager pm(module.value().get()->getName(),
OpPassManager::Nesting::Implicit);
pm.addPass(std::make_unique<MockSuccessPass>());
pm.addPass(std::make_unique<MockFailurePass>());
pm.addInstrumentation(
std::make_unique<ErrorCollectorInstrumentation>(&context));
EXPECT_EQ(succeeded(pm.run(module.value().get())), false);
auto collected_errors =
ErrorCollector::GetErrorCollector()->CollectedErrors();
EXPECT_EQ(collected_errors.size(), 3);
EXPECT_EQ(collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.Const op\nsee current operation: %0 = "
"\"tf.Const\"() <{value = dense<1> : tensor<4xi32>}> : () -> "
"tensor<4xi32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.Const",
mlir::FileLineColLoc::get(input_file_id, 2, 9))),
1);
EXPECT_EQ(collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.Const op\nsee current operation: %1 = "
"\"tf.Const\"() <{value = dense<0> : tensor<4xi32>}> : () -> "
"tensor<4xi32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.Const",
mlir::FileLineColLoc::get(input_file_id, 2, 9))),
1);
EXPECT_EQ(
collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.StridedSlice op\nsee current operation: %2 = "
"\"tf.StridedSlice\"(%arg0, %1, %1, %0) <{begin_mask = 11 : "
"i64, ellipsis_mask = 0 : i64, end_mask = 11 : i64, new_axis_mask = "
"4 : i64, shrink_axis_mask = 0 : i64}> {device = \"\"} : "
"(tensor<*xf32>, tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) "
"-> tensor<*xf32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.StridedSlice",
mlir::FileLineColLoc::get(input_file_id, 4, 10))),
1);
std::vector<std::string> locations;
for (const auto& error : collected_errors) {
EXPECT_TRUE(error.has_location());
locations.push_back(error.location().DebugString());
}
EXPECT_THAT(locations, Each(testing::HasSubstr("CALLSITELOC")));
EXPECT_THAT(locations, Each(testing::HasSubstr(input_file)));
EXPECT_THAT(locations, Contains(testing::HasSubstr("line: 2")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("column: 9")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("line: 4")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("column: 10")));
}
}
}
} |
1,165 | cpp | tensorflow/tensorflow | execution_metadata_exporter | tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc | tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_EXECUTION_METADATA_EXPORTER_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_EXECUTION_METADATA_EXPORTER_H_
#include <optional>
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
namespace tflite {
std::optional<std::string> ExportRuntimeMetadata(mlir::ModuleOp module);
}
#endif
#include "tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.h"
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/string.h"
#include "flatbuffers/vector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/runtime_metadata_generated.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace tflite {
namespace {
bool IsConst(mlir::Operation* op) {
return llvm::isa<mlir::arith::ConstantOp, mlir::TF::ConstOp,
mlir::TFL::ConstOp, mlir::TFL::QConstOp>(op);
}
bool IsOpSupported(mlir::Operation* op, const std::string& hardware) {
auto* device_hardware = mlir::TFL::tac::GetTargetHardware(hardware);
if (device_hardware == nullptr) return false;
return device_hardware->IsOpSupported(op);
}
bool HasValidHardwareTarget(mlir::Operation* op) {
return IsOpSupported(op, "CPU");
}
std::optional<std::string> GetDeviceName(mlir::Operation* op) {
if (IsConst(op)) return std::nullopt;
if (llvm::isa<mlir::func::ReturnOp, mlir::quantfork::StatisticsOp>(op))
return std::nullopt;
if (!HasValidHardwareTarget(op)) return std::nullopt;
auto device = op->getAttrOfType<mlir::StringAttr>(mlir::TFL::tac::kDevice);
if (device == nullptr) return std::nullopt;
llvm::StringRef device_name_str = device.getValue();
return device_name_str.str();
}
std::optional<std::vector<float>> GetPerDeviceCosts(
const std::map<std::string, uint8_t>& hardware_map, mlir::Operation* op) {
auto device_costs_attr =
op->getAttrOfType<mlir::DictionaryAttr>("per_device_costs");
if (device_costs_attr == nullptr) return std::nullopt;
std::vector<float> device_costs(hardware_map.size(), -1.f);
for (const auto& kv : hardware_map) {
auto cost_attr = device_costs_attr.getNamed(kv.first);
if (!cost_attr.has_value()) return std::nullopt;
float cost = mlir::dyn_cast_or_null<mlir::FloatAttr>(cost_attr->getValue())
.getValueAsDouble();
device_costs[kv.second] = cost;
}
return device_costs;
}
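// Returns the op's costs as a dense vector indexed by the ids assigned in
// hardware_map (e.g. with {"GPU": 0, "CPU": 1}, an attribute
// {CPU = 5.0, GPU = 1.0} becomes [1.0, 5.0]); a missing attribute or a missing
// per-hardware entry yields nullopt.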
flatbuffers::Offset<SubgraphMetadata> CreateSubgraphMetadata(
const std::map<std::string, uint8_t>& hardware_map, mlir::Region* Region,
flatbuffers::FlatBufferBuilder* builder) {
auto& block = Region->front();
int index = 0;
std::vector<flatbuffers::Offset<tflite::OpMetadata>> ops;
for (auto& inst : block) {
if (IsConst(&inst)) continue;
if (llvm::isa<mlir::func::ReturnOp, mlir::quantfork::StatisticsOp>(&inst))
continue;
auto device_name = GetDeviceName(&inst);
if (device_name.has_value()) {
auto per_device_cost = GetPerDeviceCosts(hardware_map, &inst);
flatbuffers::Offset<flatbuffers::Vector<float>> per_device_cost_offset;
if (per_device_cost.has_value()) {
per_device_cost_offset = builder->CreateVector(*per_device_cost);
}
OpMetadataBuilder op_builder(*builder);
op_builder.add_index(index);
uint8_t hardware = hardware_map.at(*device_name);
op_builder.add_hardware(hardware);
if (per_device_cost.has_value()) {
op_builder.add_op_costs(per_device_cost_offset);
}
ops.push_back(op_builder.Finish());
}
index++;
}
return CreateSubgraphMetadata(*builder, builder->CreateVector(ops));
}
flatbuffers::Offset<tflite::HardwareMetadata>
CreateHardwareMetadataAndPopulateLookupTable(
std::vector<mlir::func::FuncOp>* funcs,
flatbuffers::FlatBufferBuilder* builder,
std::map<std::string, uint8_t>* hardware_names) {
uint8_t index = 0;
for (auto& func : *funcs) {
func.walk([&hardware_names, &index](mlir::Operation* op) {
auto device_name = GetDeviceName(op);
if (!device_name.has_value()) return;
auto iter = hardware_names->find(*device_name);
if (iter == hardware_names->end()) {
hardware_names->insert({*device_name, index++});
}
});
}
std::vector<flatbuffers::Offset<flatbuffers::String>> hardwares;
for (const auto& kv : *hardware_names) {
hardwares.push_back(builder->CreateString(kv.first));
}
return CreateHardwareMetadata(*builder, builder->CreateVector(hardwares));
}
}
std::optional<std::string> ExportRuntimeMetadata(mlir::ModuleOp module) {
mlir::func::FuncOp main_fn = module.lookupSymbol<mlir::func::FuncOp>("main");
if (!main_fn) return std::string("");
flatbuffers::FlatBufferBuilder fb_builder;
std::vector<mlir::func::FuncOp> funcs;
funcs.push_back(main_fn);
module.walk([&](mlir::func::FuncOp fn) {
if (fn != main_fn) {
funcs.push_back(fn);
}
});
std::map<std::string, uint8_t> hardware_map;
flatbuffers::Offset<tflite::HardwareMetadata> hardware_metadata_offset =
CreateHardwareMetadataAndPopulateLookupTable(&funcs, &fb_builder,
&hardware_map);
std::vector<flatbuffers::Offset<SubgraphMetadata>> subgraphs_metadata;
subgraphs_metadata.reserve(funcs.size());
for (auto& func : funcs) {
subgraphs_metadata.push_back(
CreateSubgraphMetadata(hardware_map, &func.getBody(), &fb_builder));
}
auto runtime_metadata =
CreateRuntimeMetadata(fb_builder, hardware_metadata_offset,
fb_builder.CreateVector(subgraphs_metadata));
fb_builder.Finish(runtime_metadata);
return std::string(
reinterpret_cast<const char*>(fb_builder.GetBufferPointer()),
fb_builder.GetSize());
}
} | #include "tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/string.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/runtime_metadata_generated.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
namespace tflite {
std::string CreateRuntimeMetadata() {
flatbuffers::FlatBufferBuilder fb_builder;
std::vector<flatbuffers::Offset<flatbuffers::String>> device_names = {
fb_builder.CreateString("GPU"), fb_builder.CreateString("CPU")};
const auto hardwares =
CreateHardwareMetadata(fb_builder, fb_builder.CreateVector(device_names));
const auto ops = {
CreateOpMetadata(fb_builder, 0, 0,
fb_builder.CreateVector(std::vector<float>({1.0, 5.0}))),
CreateOpMetadata(fb_builder, 1, 0,
fb_builder.CreateVector(std::vector<float>({1.0, 5.0}))),
CreateOpMetadata(fb_builder, 2, 0,
fb_builder.CreateVector(std::vector<float>({1.0, 5.0}))),
CreateOpMetadata(
fb_builder, 3, 1,
fb_builder.CreateVector(std::vector<float>({-1.0, 2.0}))),
};
const auto subgraphs = {CreateSubgraphMetadata(
fb_builder, fb_builder.CreateVector(ops.begin(), ops.size()))};
const auto metadata = CreateRuntimeMetadata(
fb_builder, hardwares,
fb_builder.CreateVector(subgraphs.begin(), subgraphs.size()));
fb_builder.Finish(metadata);
return std::string(
reinterpret_cast<const char*>(fb_builder.GetBufferPointer()),
fb_builder.GetSize());
}
void Verify(const RuntimeMetadata* result, const RuntimeMetadata* expected) {
EXPECT_EQ(result->subgraph_metadata()->size(),
expected->subgraph_metadata()->size());
for (int i = 0; i < result->subgraph_metadata()->size(); ++i) {
auto result_subgraph_metadata =
result->subgraph_metadata()->GetAs<SubgraphMetadata>(i);
auto expected_subgraph_metadata =
expected->subgraph_metadata()->GetAs<SubgraphMetadata>(i);
if (expected_subgraph_metadata->op_metadata() == nullptr &&
result_subgraph_metadata->op_metadata() == nullptr) {
return;
}
ASSERT_EQ(expected_subgraph_metadata->op_metadata()->size(),
result_subgraph_metadata->op_metadata()->size());
for (int j = 0; j < expected_subgraph_metadata->op_metadata()->size();
++j) {
auto result_op_metadata =
result_subgraph_metadata->op_metadata()->GetAs<OpMetadata>(j);
auto expected_op_metadata =
expected_subgraph_metadata->op_metadata()->GetAs<OpMetadata>(j);
EXPECT_EQ(result_op_metadata->index(), expected_op_metadata->index());
EXPECT_EQ(result_op_metadata->hardware(),
expected_op_metadata->hardware());
EXPECT_EQ(result_op_metadata->op_costs()->size(),
expected_op_metadata->op_costs()->size());
for (int i = 0; i < result_op_metadata->op_costs()->size(); ++i) {
EXPECT_FLOAT_EQ(result_op_metadata->op_costs()->Get(i),
expected_op_metadata->op_costs()->Get(i));
}
}
}
}
TEST(ExporterTest, Valid) {
const std::string kMLIR = R"(
func.func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> tensor<2x1xf32> {
%0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
%1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
%2 = "tfl.add"(%arg0, %arg3) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
%3 = "tfl.pack"(%1, %2) {axis = 0 : i32, per_device_costs = {CPU = 2.0 : f32, GPU = -1.0 : f32}, values_count = 2 : i32, tac.device = "CPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
func.return %3 : tensor<2x1xf32>
})";
const std::string kExpectedFB = CreateRuntimeMetadata();
mlir::DialectRegistry registry;
registry.insert<mlir::TFL::TensorFlowLiteDialect, mlir::arith::ArithDialect,
mlir::func::FuncDialect>();
mlir::MLIRContext context(registry);
auto module = mlir::OwningOpRef<mlir::ModuleOp>(
mlir::parseSourceString<mlir::ModuleOp>(kMLIR, &context));
auto module_op = module.get();
auto serialized_result_fb = ExportRuntimeMetadata(module_op);
const auto* result = GetRuntimeMetadata(serialized_result_fb.value().c_str());
const auto* expected = GetRuntimeMetadata(kExpectedFB.c_str());
ASSERT_TRUE(result != nullptr);
ASSERT_TRUE(result->subgraph_metadata() != nullptr);
ASSERT_TRUE(expected->subgraph_metadata() != nullptr);
Verify(result, expected);
}
} |
1,166 | cpp | tensorflow/tensorflow | rematerializer | tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc | tensorflow/compiler/mlir/lite/experimental/remat/rematerializer_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_REMAT_REMATERIALIZER_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_REMAT_REMATERIALIZER_H_
#include <algorithm>
#include <cinttypes>
#include <tuple>
#include <vector>
namespace mlir {
namespace TFL {
class Rematerializer {
public:
Rematerializer() = default;
virtual ~Rematerializer() = default;
using SizeT = int64_t;
using MemProfile = std::vector<SizeT>;
struct MemSpec {
int op_index;
SizeT size;
explicit MemSpec(int op_index = 0, SizeT size = 0)
: op_index(op_index), size(size) {}
};
static bool BySize(const MemSpec& a, const MemSpec& b) {
return std::tie(a.size, a.op_index) < std::tie(b.size, b.op_index);
}
static bool ByOpIndex(const MemSpec& a, const MemSpec& b) {
return std::tie(a.op_index, a.size) < std::tie(b.op_index, b.size);
}
struct RematSpec {
int begin;
int end;
int insert;
};
MemSpec GetPeakMemory(const RematSpec& remat = {}) const;
MemProfile GetMemProfile(const RematSpec& remat = {}) const;
void RunGreedyAlgorithm(int max_cost, int max_block_length,
SizeT min_savings);
virtual void ApplyRemat(const RematSpec& remat) {}
protected:
void Remat(const RematSpec& remat);
int AddTensor(SizeT size);
int AddOperation(bool is_stateful);
void AddUse(int ioperation, int itensor);
void DelUse(int ioperation, int itensor);
private:
std::tuple<SizeT, RematSpec> FindBestRemat(SizeT min_savings, int begin_len,
int end_len) const;
SizeT MaxSavings(int begin, int end, int peak_loc) const;
int FindBestRematPoint(int begin, int end, int peak_loc) const;
struct Tensor {
SizeT size;
std::vector<int> operations;
int first_use() const { return *operations.begin(); }
int last_use() const { return *operations.rbegin(); }
};
struct Operation {
bool is_stateful = false;
std::vector<int> tensors;
SizeT alloc = 0;
SizeT dealloc = 0;
};
std::vector<MemSpec> GetDeltas(const RematSpec& remat) const;
template <class Mapper>
void MapMem(const Mapper& mapper, const RematSpec& remat) const {
const auto deltas = GetDeltas(remat);
const auto len = (remat.end - remat.begin);
auto idelta = deltas.begin();
for (MemSpec m; m.op_index < operations_.size() + len; ++m.op_index) {
const bool patch =
(m.op_index >= remat.insert) && (m.op_index < remat.insert + len);
const int shift = (m.op_index >= remat.insert + len) ? len : 0;
m.size += patch ? 0 : operations_[m.op_index - shift].alloc;
for (; idelta != deltas.end() && idelta->op_index == m.op_index;
++idelta) {
m.size += idelta->size;
}
mapper(m);
m.size -= patch ? 0 : operations_[m.op_index - shift].dealloc;
}
}
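  // MapMem simulates the memory profile of the schedule with `remat` applied,
  // without mutating any state: it walks the would-be operation sequence, adds
  // each original operation's alloc (positions inside the cloned block are
  // covered entirely by the deltas), folds in the corrections from GetDeltas,
  // hands the running total to `mapper`, and then subtracts the dealloc.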
std::vector<Operation> operations_;
std::vector<Tensor> tensors_;
};
}
}
#endif
#include "tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h"
#include <algorithm>
#include <map>
#include <tuple>
#include <utility>
#include <vector>
namespace mlir {
namespace TFL {
namespace {
std::tuple<std::vector<int>::iterator, bool> Find(const int item,
std::vector<int>& items) {
const auto iter = std::lower_bound(items.begin(), items.end(), item);
return std::make_tuple(iter, iter != items.end() && *iter == item);
}
void Insert(const int item, std::vector<int>& items) {
const auto [iter, found] = Find(item, items);
if (!found) items.insert(iter, item);
}
void Erase(const int item, std::vector<int>& items) {
const auto [iter, found] = Find(item, items);
if (found) items.erase(iter);
}
}
int Rematerializer::AddOperation(const bool is_stateful) {
operations_.emplace_back();
operations_.back().is_stateful = is_stateful;
return operations_.size() - 1;
}
int Rematerializer::AddTensor(const SizeT size) {
tensors_.emplace_back();
tensors_.back().size = size;
return tensors_.size() - 1;
}
void Rematerializer::DelUse(const int ioperation, const int itensor) {
auto& tensor = tensors_[itensor];
auto& operation = operations_[ioperation];
const auto& size = tensor.size;
const bool was_first_use =
(!tensor.operations.empty() && ioperation == tensor.first_use());
const bool was_last_use =
(!tensor.operations.empty() && ioperation == tensor.last_use());
Erase(ioperation, tensor.operations);
Erase(itensor, operation.tensors);
if (was_first_use) {
operation.alloc -= size;
if (!was_last_use) {
operations_[tensor.first_use()].alloc += size;
}
}
if (was_last_use) {
operation.dealloc -= size;
if (!was_first_use) {
operations_[tensor.last_use()].dealloc += size;
}
}
}
void Rematerializer::AddUse(const int ioperation, const int itensor) {
auto& tensor = tensors_[itensor];
auto& operation = operations_[ioperation];
const auto& size = tensor.size;
const bool will_be_first_use =
tensor.operations.empty() || ioperation < tensor.first_use();
const bool will_be_last_use =
tensor.operations.empty() || ioperation > tensor.last_use();
if (will_be_first_use) {
operation.alloc += size;
if (!will_be_last_use) {
operations_[tensor.first_use()].alloc -= size;
}
}
if (will_be_last_use) {
operation.dealloc += size;
if (!will_be_first_use) {
operations_[tensor.last_use()].dealloc -= size;
}
}
Insert(ioperation, tensor.operations);
Insert(itensor, operation.tensors);
}
Rematerializer::SizeT Rematerializer::MaxSavings(const int begin, const int end,
const int peak_loc) const {
SizeT max_savings = 0;
for (int ioperation = begin; ioperation != end; ++ioperation) {
for (const int itensor : operations_[ioperation].tensors) {
if (const Tensor& tensor = tensors_[itensor];
tensor.first_use() == ioperation &&
tensor.last_use() > peak_loc ) {
max_savings += tensor.size;
}
}
}
return max_savings;
}
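// MaxSavings is an optimistic upper bound: the combined size of all tensors
// that are first defined by an operation in [begin, end) and are still alive
// after peak_loc, i.e. the most the peak could shrink if those definitions
// were rematerialized past the peak.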
std::tuple<Rematerializer::SizeT, Rematerializer::RematSpec>
Rematerializer::FindBestRemat(const SizeT min_savings, const int begin_len,
const int end_len) const {
const auto peak = GetPeakMemory();
SizeT best_peak_mem = peak.size;
RematSpec best_remat = {};
for (int len = begin_len; len < end_len; ++len) {
std::vector<std::tuple<SizeT, int, int>> pre_screen;
for (int begin = 0, end = begin + len; end <= peak.op_index;
++begin, ++end) {
if (!std::any_of(operations_.begin() + begin, operations_.begin() + end,
[](const Operation& s) { return s.is_stateful; })) {
if (const auto max_savings = MaxSavings(begin, end, peak.op_index);
max_savings >= min_savings) {
pre_screen.emplace_back(max_savings, begin, end);
}
}
}
std::sort(pre_screen.begin(), pre_screen.end());
for (; !pre_screen.empty(); pre_screen.pop_back()) {
const auto& [max_savings, begin, end] = pre_screen.back();
const auto insert_before = FindBestRematPoint(begin, end, peak.op_index);
if (insert_before == operations_.size()) {
continue;
}
const RematSpec this_remat = {begin, end, insert_before};
if (const auto new_peak = GetPeakMemory(this_remat);
new_peak.size < best_peak_mem &&
peak.size >= new_peak.size + min_savings) {
best_peak_mem = new_peak.size;
best_remat = this_remat;
}
if (peak.size >= max_savings + best_peak_mem) {
break;
}
}
if (peak.size >= min_savings + best_peak_mem) {
break;
}
}
return std::make_tuple(best_peak_mem, best_remat);
}
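// FindBestRemat scans candidate blocks of each length in [begin_len, end_len)
// that lie before the current peak and contain no stateful op, pre-screens
// them by MaxSavings, then evaluates the most promising ones with a full
// simulated profile, keeping the remat that yields the lowest new peak (with
// early exits once no remaining candidate can possibly do better).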
std::vector<Rematerializer::MemSpec> Rematerializer::GetDeltas(
const RematSpec& remat) const {
std::vector<MemSpec> deltas;
if (remat.begin == remat.end) {
return deltas;
}
const auto source_to_target = [&](int i) {
return i + (remat.insert - remat.begin);
};
struct TensorUse {
int first_use;
int last_use;
};
std::map<int, TensorUse> source_uses;
for (int ioperation = remat.begin; ioperation < remat.end; ++ioperation) {
const auto& operation = operations_[ioperation];
for (const int itensor : operation.tensors) {
const auto [iter, inserted] = source_uses.emplace(
itensor,
TensorUse{ioperation, ioperation});
if (!inserted) {
iter->second.last_use = ioperation;
}
}
}
deltas.reserve(2 * source_uses.size());
for (const auto& [itensor, source] : source_uses) {
auto& tensor = tensors_[itensor];
const TensorUse global = {tensor.first_use(), tensor.last_use()};
auto add_alloc = [&](int pos) { deltas.emplace_back(pos, tensor.size); };
auto add_dealloc = [&](int pos) {
deltas.emplace_back(pos + 1, -tensor.size);
};
auto del_dealloc = [&](int pos) {
deltas.emplace_back(pos + 1, tensor.size);
};
if (global.first_use < remat.begin) {
if (global.last_use < remat.insert) {
del_dealloc(global.last_use);
add_dealloc(source_to_target(source.last_use));
}
} else {
add_alloc(source_to_target(source.first_use));
if (global.last_use < remat.insert) {
add_dealloc(source_to_target(source.last_use));
} else {
add_dealloc(*std::partition_point(
tensor.operations.rbegin(), tensor.operations.rend(),
[&](int i) { return i >= remat.insert; }));
}
}
}
std::sort(deltas.begin(), deltas.end(), ByOpIndex);
return deltas;
}
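// GetDeltas returns sorted (op_index, +/-size) corrections that, layered on
// top of the unmodified per-op alloc/dealloc profile, describe the schedule
// with ops [begin, end) cloned at `insert`: extra allocations for tensors
// redefined by the clones, and deallocations moved for tensors whose lifetime
// either extends into the cloned block or ends earlier because later uses are
// redirected to the clones.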
Rematerializer::MemProfile Rematerializer::GetMemProfile(
const RematSpec& remat) const {
const auto num_inserted = remat.end - remat.begin;
std::vector<SizeT> profile(operations_.size() + num_inserted);
MapMem([&](const MemSpec& m) { profile[m.op_index] = m.size; }, remat);
return profile;
}
Rematerializer::MemSpec Rematerializer::GetPeakMemory(
const RematSpec& remat) const {
MemSpec peak;
MapMem([&](const MemSpec& m) { peak = std::max(m, peak, BySize); }, remat);
return peak;
}
int Rematerializer::FindBestRematPoint(const int begin, const int end,
const int peak_loc) const {
int best = operations_.size();
for (int ioperation = begin; ioperation < end; ++ioperation) {
for (const int itensor : operations_[ioperation].tensors) {
if (const auto& tensor = tensors_[itensor];
tensor.first_use() >= begin && tensor.first_use() < end &&
tensor.last_use() > peak_loc) {
for (const int ioperation : tensor.operations) {
if (ioperation > peak_loc && ioperation < best) {
best = ioperation;
break;
}
}
}
}
}
return best;
}
void Rematerializer::Remat(const RematSpec& remat) {
const int num_inserted = remat.end - remat.begin;
for (auto& tensor : tensors_) {
std::for_each(std::lower_bound(tensor.operations.begin(),
tensor.operations.end(), remat.insert),
tensor.operations.end(),
[&](int& iop) { iop += num_inserted; });
}
operations_.insert(operations_.begin() + remat.insert, num_inserted, {});
std::vector<std::pair<int, int>> new_tensors;
for (int iop_old = remat.begin, iop_new = remat.insert; iop_old < remat.end;
++iop_old, ++iop_new) {
for (const auto itensor : operations_[iop_old].tensors) {
if (tensors_[itensor].first_use() == iop_old) {
new_tensors.emplace_back(itensor, AddTensor(tensors_[itensor].size));
}
AddUse(iop_new, itensor);
}
}
std::sort(new_tensors.begin(), new_tensors.end());
for (int iop = remat.insert; iop < operations_.size(); ++iop) {
for (const int old_tensor : std::vector<int>(operations_[iop].tensors)) {
const auto new_tensor =
std::lower_bound(new_tensors.begin(), new_tensors.end(),
std::make_pair(old_tensor, 0));
if (new_tensor != new_tensors.end() && new_tensor->first == old_tensor) {
DelUse(iop, old_tensor);
AddUse(iop, new_tensor->second);
}
}
}
}
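// Remat applies the cloning for real: it shifts every use at or after `insert`
// by the block length, inserts the cloned operations into the opened gap,
// creates a fresh tensor for each value defined inside the copied block, and
// finally redirects all uses at or after the insertion point (including the
// clones' own) from the original tensors to their fresh copies.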
void Rematerializer::RunGreedyAlgorithm(const int max_cost,
const int max_block_length,
const SizeT min_savings) {
const bool unlimited_cost = (max_cost < 0);
for (int min_block_length = 1, cost = 0;
min_block_length <= max_block_length &&
(unlimited_cost || cost <= max_cost);
min_block_length *= 2) {
while (unlimited_cost || cost <= max_cost) {
const auto [peak, remat] = FindBestRemat(
min_savings,
min_block_length,
std::min(1 + (unlimited_cost
? max_block_length
: std::min(max_block_length, max_cost - cost)),
2 * min_block_length));
if (remat.begin == remat.end) break;
Remat(remat);
ApplyRemat(remat);
cost += (remat.end - remat.begin);
}
}
}
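// RunGreedyAlgorithm repeatedly applies the best rematerialization whose block
// length falls in [min_block_length, 2 * min_block_length), doubling
// min_block_length whenever no candidate saves at least min_savings, and stops
// once max_block_length is exceeded or the cost budget (total number of cloned
// operations; unlimited when max_cost < 0) is spent.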
}
} | #include "tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <initializer_list>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace mlir {
namespace TFL {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FieldsAre;
using ::testing::StrictMock;
class RematTest : public ::testing::Test {
protected:
class TestableRematerializer : public Rematerializer {
public:
using Rematerializer::AddOperation;
using Rematerializer::AddTensor;
using Rematerializer::AddUse;
using Rematerializer::DelUse;
using Rematerializer::Remat;
};
TestableRematerializer r_;
};
TEST_F(RematTest, TensorUseSimple) {
for (int i = 0; i < 6; ++i) {
r_.AddOperation(false);
r_.AddTensor(1 << i);
}
r_.AddUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(4)));
r_.AddUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(4)));
r_.AddUse(4, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 4, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(4, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(5), Eq(0)));
}
TEST_F(RematTest, TensorUseMany) {
constexpr int n = 6;
for (int i = 0; i < n; ++i) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1 << (n - i - 1)));
}
for (int i = 0; i < n; ++i) {
r_.AddUse(r_.AddOperation(false),
n - 1 - i);
}
EXPECT_THAT(r_.GetMemProfile(), ElementsAreArray({32, 48, 56, 60, 62, 63, 63,
62, 60, 56, 48, 32}));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(6), Eq(63)));
}
TEST_F(RematTest, PeakTiesAreBrokenInFavorOfLaterOperations) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
ASSERT_THAT(r_.GetMemProfile(), ElementsAreArray({100, 1, 100}));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(100)));
}
TEST_F(RematTest, RematRecreatesOutput) {
r_.AddUse(r_.AddOperation(false), r_.AddTensor(100));
r_.AddOperation(false);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(100, 0));
EXPECT_THAT(r_.GetMemProfile({0, 1, 2}),
ElementsAre(100, 0, 100));
r_.Remat({0, 1, 2});
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(100, 0, 100));
EXPECT_THAT(r_.AddTensor(0), 2);
}
TEST_F(RematTest, RematExtendsInputAndRecreatesOutput) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(1, 0);
r_.AddOperation(false);
r_.AddOperation(false);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(1, 101, 0, 0));
EXPECT_THAT(r_.GetMemProfile({1, 2, 3}),
ElementsAre(1, 101, 1, 101, 0));
r_.Remat({1, 2, 3});
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(1, 101, 1, 101, 0));
EXPECT_THAT(r_.AddTensor(0), 3);
}
TEST_F(RematTest, BlockRematDuplicatesIntraBlockValues) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(10));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1000));
r_.AddOperation(false);
r_.AddUse(1, 0);
r_.AddUse(2, 0);
r_.AddUse(2, 1);
r_.AddUse(3, 0);
r_.AddUse(3, 1);
r_.AddUse(3, 2);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(1, 11, 111, 1111, 0));
EXPECT_THAT(r_.GetMemProfile({1, 4, 5}),
ElementsAre(1, 11, 111, 1111, 1, 11, 111, 1111));
r_.Remat({1, 4, 5});
EXPECT_THAT(r_.GetMemProfile(),
ElementsAre(1, 11, 111, 1111, 1, 11, 111, 1111));
EXPECT_THAT(r_.AddTensor(0), 7);
}
class RematSimulationTest : public testing::Test {
protected:
class RandomRemat : public Rematerializer {
public:
using Rematerializer::Remat;
RandomRemat(const int num_operations, const int num_tensors,
const int num_uses, std::mt19937& rng) {
std::uniform_int_distribution<int> some_size_log(0, 16);
std::uniform_int_distribution<int> some_tensor(0, num_tensors - 1);
std::uniform_int_distribution<int> some_operation(0, num_operations - 1);
for (int i = 0; i < num_tensors; ++i) {
AddTensor(SizeT{1} << some_size_log(rng));
}
for (int i = 0; i < num_operations; ++i) {
AddOperation(false);
}
for (int i = 0; i < num_uses; ++i) {
AddUse(some_operation(rng), some_tensor(rng));
}
}
};
};
TEST_F(RematSimulationTest, SimulationAgreesWithReality) {
constexpr int kNumOperations = 128;
constexpr int kNumTensors = 32;
constexpr int kNumUses = kNumOperations * kNumTensors / 4;
std::mt19937 rng;
for (int i = 0; i < 1024; ++i) {
RandomRemat remat(kNumOperations, kNumTensors, kNumUses, rng);
std::array<int, 3> randos;
const auto& [begin, end, insert] = randos;
for (int i = 0, num_operations = kNumOperations; i < 4;
++i, num_operations += end - begin) {
std::uniform_int_distribution<int> some_op(0, num_operations - 1);
for (auto& rando : randos) {
rando = some_op(rng);
}
std::sort(randos.begin(), randos.end());
const Rematerializer::RematSpec spec{begin, end, insert};
const auto simulated_profile = remat.GetMemProfile(spec);
remat.Remat(spec);
const auto actual_profile = remat.GetMemProfile();
EXPECT_THAT(simulated_profile, ElementsAreArray(actual_profile));
}
}
}
class GreedyRematTest : public testing::Test {
protected:
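// Descriptive note (added): each size list builds a "rainbow" -- a chain of
// operations whose outputs are consumed again in reverse order, giving nested
// live ranges whose peak equals the sum of the sizes. A negative size passes
// `true` to AddOperation, which (judging by the SimpleForbiddenOps test
// below) excludes that operation from rematerialization; extra_ops/extra_size
// optionally interleave short-lived helper tensors between steps.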
class RainbowRemat : public Rematerializer {
public:
explicit RainbowRemat(const std::vector<std::vector<int>>& sizes,
int extra_ops = 0, SizeT extra_size = 0) {
for (const auto& rainbow : sizes) {
int tensor = 0;
int op = 0;
for (const auto& size : rainbow) {
for (int i = 0; i < extra_ops; ++i) {
op = AddOperation(false);
if (i != 0) {
AddUse(op, tensor);
}
tensor = AddTensor(extra_size);
AddUse(op, tensor);
}
op = AddOperation(size < 0);
if (extra_ops > 0) {
AddUse(op, tensor);
}
tensor = AddTensor(std::abs(size));
AddUse(op, tensor);
}
for (int i = 0; i < rainbow.size(); ++i) {
op = AddOperation(false);
AddUse(op, tensor - i);
}
}
}
};
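// Descriptive note (added): MlpRemat models a forward/backward schedule: the
// forward loop chains one tensor per layer size, and the backward loop walks
// those tensors in reverse while threading an additional backward tensor
// through each step. ApplyRemat is mocked so the tests can assert exactly
// which block the greedy algorithm chose to duplicate and where it inserted
// the copy.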
class MlpRemat : public Rematerializer {
public:
explicit MlpRemat(const std::vector<int>& sizes) {
int forward_tensor = -1;
int backward_tensor = -1;
int op = -1;
for (const int size : sizes) {
op = AddOperation(false);
if (forward_tensor >= 0) AddUse(op, forward_tensor);
forward_tensor = AddTensor(size);
AddUse(op, forward_tensor);
}
for (; forward_tensor >= 0; --forward_tensor) {
op = AddOperation(false);
AddUse(op, forward_tensor);
if (backward_tensor >= 0) AddUse(op, backward_tensor);
backward_tensor = AddTensor(sizes[forward_tensor]);
AddUse(op, backward_tensor);
}
}
MOCK_METHOD(void, ApplyRemat, (const RematSpec&));
};
};
TEST_F(GreedyRematTest, MlpBasic) {
StrictMock<MlpRemat> remat(std::vector<int>({1, 1, 1}));
ASSERT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 2, 3, 4, 4, 3}));
EXPECT_CALL(remat, ApplyRemat(FieldsAre(0,
1,
5)));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 2, 2, 3, 3, 2, 3}));
}
TEST_F(GreedyRematTest, MlpBinary) {
StrictMock<MlpRemat> remat(std::vector<int>({1, 2, 4, 8}));
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 19, 9, 4}));
EXPECT_CALL(remat, ApplyRemat(FieldsAre(2,
3,
5)));
EXPECT_CALL(remat, ApplyRemat(FieldsAre(0,
1,
8)));
remat.RunGreedyAlgorithm(-1, 4,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 6, 14, 18, 14, 18, 8, 3, 4}));
}
TEST_F(GreedyRematTest, SimpleMax) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, SimpleMaxLongWindow) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 4,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, SimpleSizeThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 1,
4);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 11, 19, 19, 11, 11, 7, 7, 3, 1}));
}
TEST_F(GreedyRematTest, SimpleCostThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1}));
}
TEST_F(GreedyRematTest, SimpleForbiddenOps) {
RainbowRemat remat({{1, 2, -4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 12, 20, 20, 12, 12, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, DoubleMax) {
RainbowRemat remat({{1, 2, 4, 8, 16}, {4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray(
{1, 3, 7, 15, 31, 31, 15, 7, 3, 1, 4, 12, 28, 28, 12, 4}));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2,
2, 1, 1, 4, 8, 16, 16, 8, 8, 4, 4}));
}
TEST_F(GreedyRematTest, DoubleCostThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}, {4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray(
{1, 3, 7, 15, 31, 31, 15, 7, 3, 1, 4, 12, 28, 28, 12, 4}));
remat.RunGreedyAlgorithm(2, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1, 4, 12, 20,
20, 12, 12, 4}));
}
TEST_F(GreedyRematTest, SingleLongerBlocksByWindowSize) {
std::vector<Rematerializer::SizeT> best_for_window_size;
for (int window_size : {0, 1, 2, 3, 4, 5}) {
RainbowRemat remat({{1, 2, 4, 8}}, 2, 16);
remat.RunGreedyAlgorithm(-1, window_size,
1);
best_for_window_size.push_back(remat.GetPeakMemory().size);
}
EXPECT_THAT(best_for_window_size, ElementsAreArray({44, 36, 36, 32, 32, 32}));
}
}
}
} |
1,167 | cpp | tensorflow/tensorflow | metadata_util | tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.cc | tensorflow/compiler/mlir/lite/experimental/remat/metadata_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_REMAT_METADATA_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_REMAT_METADATA_UTIL_H_
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/compiler/mlir/lite/utils/control_edges.h"
namespace tflite {
using ModelControlDependencies = std::vector<ControlEdges>;
std::string SerializeModelControlDependencies(
const ModelControlDependencies& in);
bool ParseModelControlDependencies(const char* data, size_t size,
ModelControlDependencies* out);
constexpr char kModelControlDependenciesMetadataKey[] =
"model_control_dependencies";
constexpr uint32_t kModelControlDependenciesMetadataVersion = 1;
inline constexpr char kModelUseStablehloTensorKey[] = "keep_stablehlo_constant";
}
#endif
#include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include <string>
#include <utility>
#include <vector>
namespace {
constexpr int kMod = (1 << 7);
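// Descriptive note (added): unsigned values are stored as a little-endian
// base-128 varint -- each byte carries 7 payload bits, and the high bit is
// set on every byte except the last.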
void Serialize(std::string* out, uint32_t value) {
for (; value >= kMod; value /= kMod) {
out->push_back(value % kMod + kMod);
}
out->push_back(value);
}
bool Parse(const char** data, size_t* size, uint32_t* out) {
*out = 0;
uint32_t mul = 1;
for (bool done = false; !done;
mul *= kMod, done = !(**data & kMod), ++*data, --*size) {
if (*size == 0) {
return false;
}
*out += static_cast<unsigned char>(**data) % kMod * mul;
}
return true;
}
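// Descriptive note (added): signed values use a ZigZag-style mapping before
// the varint encoding so that small negative numbers stay small:
// n >= 0 maps to 2n, and n < 0 maps to -2n - 1.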
void Serialize(std::string* out, int32_t value) {
Serialize(out, static_cast<uint32_t>(
value < 0 ? static_cast<uint32_t>(-(value + 1)) * 2 + 1
: static_cast<uint32_t>(value) * 2));
}
bool Parse(const char** data, size_t* size, int32_t* out) {
uint32_t value = 0;
if (!Parse(data, size, &value)) {
return false;
}
const int32_t magnitude = value / 2;
*out = (value % 2) ? (-magnitude - 1) : magnitude;
return true;
}
template <class First, class Second>
void Serialize(std::string* out, const std::pair<First, Second>& in) {
Serialize(out, in.first);
Serialize(out, in.second);
}
template <class First, class Second>
bool Parse(const char** data, size_t* size, std::pair<First, Second>* out) {
return Parse(data, size, &(out->first)) && Parse(data, size, &(out->second));
}
template <class Value>
void Serialize(std::string* out, const std::vector<Value>& in) {
Serialize(out, static_cast<uint32_t>(in.size()));
for (const auto& val : in) {
Serialize(out, val);
}
}
template <class T>
bool Parse(const char** data, size_t* size, std::vector<T>* out) {
uint32_t num_elems = 0;
if (!Parse(data, size, &num_elems)) {
return false;
}
out->assign(num_elems, T{});
for (auto& elem : *out) {
if (!Parse(data, size, &elem)) {
return false;
}
}
return true;
}
}
namespace tflite {
std::string SerializeModelControlDependencies(
const ModelControlDependencies& in) {
std::string out;
Serialize(&out, kModelControlDependenciesMetadataVersion);
Serialize(&out, in);
return out;
}
bool ParseModelControlDependencies(const char* data, size_t size,
ModelControlDependencies* out) {
out->clear();
uint32_t version = 0;
return Parse(&data, &size, &version) &&
(version == kModelControlDependenciesMetadataVersion) &&
Parse(&data, &size, out) && (size == 0);
}
} | #include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include <cstdint>
#include <limits>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
class MetadataSerializerTest : public ::testing::Test {
protected:
static constexpr auto kHuge = std::numeric_limits<int32_t>::max();
static constexpr auto kTiny = std::numeric_limits<int32_t>::min();
std::string RoundTrip(const ModelControlDependencies &in) const {
ModelControlDependencies out = {{{-1, -1}}};
const std::string serialized =
tflite::SerializeModelControlDependencies(in);
return tflite::ParseModelControlDependencies(serialized.data(),
serialized.size(), &out)
? (out == in) ? "ok" : "mismatch"
: "malformed";
}
};
TEST_F(MetadataSerializerTest, nothing) { EXPECT_THAT(RoundTrip({}), "ok"); }
TEST_F(MetadataSerializerTest, something) {
EXPECT_THAT(
RoundTrip({{{1, 2}, {2, 3}, {4, 5}},
{},
{{kHuge, kTiny}, {kTiny, kHuge}, {kHuge - 1, kTiny + 1}},
{{1, 0}}}),
"ok");
}
}
} |
1,168 | cpp | tensorflow/tensorflow | numerical_utils | tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc | tensorflow/compiler/mlir/lite/quantization/numerical_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_NUMERICAL_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_NUMERICAL_UTILS_H_
#include <cstdint>
#include <optional>
#include <utility>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
using QuantizedMultiplier = std::pair<int32_t, int32_t>;
using QuantizedRange = std::pair<int32_t, int32_t>;
QuantizedMultiplier QuantizeMultiplier(double double_multiplier);
QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
std::optional<double> rmin,
std::optional<double> rmax, int32_t qmin,
int32_t qmax);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <optional>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
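// Descriptive note (added): QuantizeMultiplier decomposes a positive real
// multiplier into a Q31 fixed-point value and a power-of-two exponent, i.e.
// double_multiplier ~= quantized_multiplier * 2^(shift - 31) with
// quantized_multiplier in [2^30, 2^31). Multipliers below 1e-6, or whose
// exponent falls outside [-31, 31], collapse to {0, 0}.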
QuantizedMultiplier QuantizeMultiplier(double double_multiplier) {
if (double_multiplier < 1e-6) {
return {0, 0};
}
int32_t shift;
const double q = frexp(double_multiplier, &shift);
int64_t quantized_multiplier = round(q * (1LL << 31));
assert(quantized_multiplier <= (1LL << 31));
if (quantized_multiplier == (1LL << 31)) {
quantized_multiplier /= 2;
++shift;
}
assert(quantized_multiplier <= std::numeric_limits<int32_t>::max());
if (shift > 31 || shift < -31) {
return {0, 0};
}
return {static_cast<int32_t>(quantized_multiplier), shift};
}
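// Descriptive note (added): maps an optional real-valued range [rmin, rmax]
// into the quantized domain via q(f) = zero_point + round(f / scale),
// clamping to [qmin, qmax]; a missing bound falls back to qmin or qmax
// respectively.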
QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
std::optional<double> rmin,
std::optional<double> rmax, int32_t qmin,
int32_t qmax) {
auto quantize = [scale, zero_point](float f) {
return zero_point + static_cast<int32_t>(std::round(f / scale));
};
if (rmin.has_value() && rmax.has_value()) {
return {std::max(qmin, quantize(rmin.value())),
std::min(qmax, quantize(rmax.value()))};
} else if (rmin.has_value()) {
return {std::max(qmin, quantize(rmin.value())), qmax};
} else if (rmax.has_value()) {
return {qmin, std::min(qmax, quantize(rmax.value()))};
} else {
return {qmin, qmax};
}
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <cmath>
#include <optional>
#include <gtest/gtest.h>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
namespace {
double ComposeScale(const QuantizedMultiplier& input) {
return input.first * exp2(-31 + input.second);
}
TEST(NumericalUtils, QuantizeMultiplier) {
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e6)), 1.0e6);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e3)), 1.0e3);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(10.)), 10.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(5.)), 5.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(2.)), 2.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(0.0)), 0.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0)), 1.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-1)), 1.0e-1);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-2)), 1.0e-2);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-3)), 1.0e-3);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-4)), 1.0e-4);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-5)), 1.0e-5);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-6)), 1.0e-6);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-7)), 0.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-8)), 0.0);
}
TEST(NumericalUtils, ActivationRange) {
auto a =
CalculateQuantizedRange(1e-6, 0, std::nullopt, std::nullopt, -128, 127);
ASSERT_EQ(a.first, -128);
ASSERT_EQ(a.second, 127);
auto b = CalculateQuantizedRange(1e-6, 0, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(b.first, 0);
ASSERT_EQ(b.second, 127);
auto c = CalculateQuantizedRange(1e-6, 0, -1.0, 1.0, -128, 127);
ASSERT_EQ(c.first, -128);
ASSERT_EQ(c.second, 127);
auto d = CalculateQuantizedRange(1e-6, 0, 0.0, 6.0, -128, 127);
ASSERT_EQ(d.first, 0);
ASSERT_EQ(d.second, 127);
auto e =
CalculateQuantizedRange(1e-6, 100, std::nullopt, std::nullopt, -128, 127);
ASSERT_EQ(e.first, -128);
ASSERT_EQ(e.second, 127);
auto f = CalculateQuantizedRange(1e-6, 100, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(f.first, 100);
ASSERT_EQ(f.second, 127);
auto g = CalculateQuantizedRange(1e-6, 100, -1.0, 1.0, -128, 127);
ASSERT_EQ(g.first, -128);
ASSERT_EQ(g.second, 127);
auto h = CalculateQuantizedRange(1e-6, 100, 0.0, 6.0, -128, 127);
ASSERT_EQ(h.first, 100);
ASSERT_EQ(h.second, 127);
auto i = CalculateQuantizedRange(1e-6, -100, std::nullopt, std::nullopt, -128,
127);
ASSERT_EQ(i.first, -128);
ASSERT_EQ(i.second, 127);
auto j = CalculateQuantizedRange(1e-6, -100, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(j.first, -100);
ASSERT_EQ(j.second, 127);
auto k = CalculateQuantizedRange(1e-6, -100, -1.0, 1.0, -128, 127);
ASSERT_EQ(k.first, -128);
ASSERT_EQ(k.second, 127);
auto l = CalculateQuantizedRange(1e-6, -100, 0.0, 6.0, -128, 127);
ASSERT_EQ(l.first, -100);
ASSERT_EQ(l.second, 127);
}
}
}
} |
1,169 | cpp | tensorflow/tensorflow | quantization | tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc | tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_STABLEHLO_QUANTIZATION_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_STABLEHLO_QUANTIZATION_H_
#include <string>
#include <unordered_set>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
namespace tensorflow {
absl::StatusOr<mlir::ModuleOp> RunQuantization(
const SavedModelBundle* saved_model_bundle,
absl::string_view saved_model_dir,
const std::unordered_set<std::string>& saved_model_tags,
const stablehlo::quantization::QuantizationConfig& quantization_config,
const tensorflow::quantization::PyFunctionLibrary*
quantization_py_function_lib,
mlir::ModuleOp module_op);
}
#endif
#include "tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace {
using ::mlir::quant::stablehlo::StaticRangePtqComponent;
using ::mlir::quant::stablehlo::WeightOnlyPtqComponent;
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::SignatureDef;
using ::tensorflow::quantization::PyFunctionLibrary;
absl::flat_hash_map<std::string, SignatureDef> GetSignatureDefMapFromBundle(
const SavedModelBundle& saved_model_bundle) {
const protobuf::Map<std::string, SignatureDef>& signatures =
saved_model_bundle.GetSignatures();
absl::flat_hash_map<std::string, SignatureDef> signature_def_map(
signatures.begin(), signatures.end());
signature_def_map.erase(kSavedModelInitOpSignatureKey);
return signature_def_map;
}
absl::flat_hash_map<std::string, std::string> GetFunctionAliases(
const SavedModelBundle& saved_model_bundle) {
const protobuf::Map<std::string, std::string>& function_aliases =
saved_model_bundle.meta_graph_def.meta_info_def().function_aliases();
return absl::flat_hash_map<std::string, std::string>(function_aliases.begin(),
function_aliases.end());
}
}
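// Descriptive note (added): end-to-end quantization driver. It validates the
// inputs, expands the user-provided config into a fully populated one,
// freezes variables in the module, legalizes TF ops to StableHLO, and then
// dispatches to either the static-range PTQ or the weight-only PTQ component
// depending on which quantization method the config specifies.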
absl::StatusOr<mlir::ModuleOp> RunQuantization(
const SavedModelBundle* saved_model_bundle,
const absl::string_view saved_model_dir,
const std::unordered_set<std::string>& saved_model_tags,
const QuantizationConfig& quantization_config,
const PyFunctionLibrary* quantization_py_function_lib,
mlir::ModuleOp module_op) {
if (saved_model_bundle == nullptr) {
return absl::InvalidArgumentError(
"Failed to run quantization. `saved_model_bundle` should not be "
"nullptr.");
}
if (quantization_py_function_lib == nullptr) {
return absl::InvalidArgumentError(
"Failed to run quantization. `quantization_py_function_lib` should not "
"be nullptr.");
}
LOG(INFO) << "User-provided quantization config: "
<< quantization_config.DebugString();
const QuantizationConfig updated_config =
ExpandPresets(PopulateDefaults(quantization_config));
LOG(INFO) << "Updated quantization config: " << updated_config.DebugString();
const absl::flat_hash_map<std::string, SignatureDef> signature_def_map =
GetSignatureDefMapFromBundle(*saved_model_bundle);
std::vector<std::string> exported_names;
for (const auto& [key, value_unused] : signature_def_map) {
exported_names.push_back(key);
}
if (failed(mlir::tf_saved_model::FreezeVariables(
module_op, saved_model_bundle->GetSession()))) {
return absl::InternalError("Failed to freeze variables.");
}
mlir::PassManager pm(module_op.getContext());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
mlir::odml::AddLegalizeTFToStablehloPasses(pm, true,
false,
false);
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::createRemoveShardingCustomCallPass());
if (failed(pm.run(module_op))) {
return absl::InternalError("Failed to run legalize TF to StableHLO.");
}
absl::StatusOr<mlir::ModuleOp> quantized_module_op;
if (HasQuantizationMethod(updated_config.specs(),
Method::MethodCase::kStaticRangePtq)) {
StaticRangePtqComponent static_range_ptq_component(
module_op.getContext(), quantization_py_function_lib, saved_model_dir,
exported_names, saved_model_tags, signature_def_map,
GetFunctionAliases(*saved_model_bundle));
quantized_module_op =
static_range_ptq_component.Run(module_op, updated_config);
} else if (HasQuantizationMethod(updated_config.specs(),
Method::MethodCase::kWeightOnlyPtq)) {
WeightOnlyPtqComponent weight_only_ptq_component(module_op.getContext());
quantized_module_op =
weight_only_ptq_component.Run(module_op, updated_config);
} else {
return absl::InvalidArgumentError(
"Quantization config must have either static_range_ptq_preset or "
"weight_only_ptq_preset.");
}
if (!quantized_module_op.ok()) {
return absl::InternalError("Failed to run quantization. Status msg: " +
quantized_module_op.status().ToString());
}
return quantized_module_op;
}
} | #include "tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::CreateTmpDir;
using ::testing::HasSubstr;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(RunQuantizationTest,
WhenSavedModelBundleIsNullptrReturnsInvalidArgumentError) {
const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
ASSERT_THAT(tmp_saved_model_dir, IsOk());
QuantizationConfig config;
const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
nullptr, *tmp_saved_model_dir,
{}, config,
nullptr, {});
EXPECT_THAT(
quantized_module_op,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("`saved_model_bundle` should not be nullptr")));
}
TEST(RunQuantizationTest,
WhenPyFunctionLibIsNullptrReturnsInvalidArgumentError) {
const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
ASSERT_THAT(tmp_saved_model_dir, IsOk());
SavedModelBundle bundle{};
QuantizationConfig config;
const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
&bundle, *tmp_saved_model_dir,
{}, config,
nullptr, {});
EXPECT_THAT(
quantized_module_op,
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("`quantization_py_function_lib` should not be nullptr")));
}
}
} |
1,170 | cpp | tensorflow/tensorflow | sparsify_model | tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc | tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_SPARSITY_SPARSIFY_MODEL_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_SPARSITY_SPARSIFY_MODEL_H_
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
namespace mlir {
namespace lite {
absl::Status SparsifyModel(const tflite::ModelT& input_model,
flatbuffers::FlatBufferBuilder* builder);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/lite/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/tools/optimize/reduced_precision_support.h"
namespace mlir {
namespace lite {
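// Descriptive note (added): SparsifyModel round-trips the model through MLIR:
// it packs the input ModelT back into a flatbuffer, imports it with
// FlatBufferToMlir, runs the TFL DenseToSparse pass, and re-exports the
// result to a flatbuffer, preserving the reduced-precision-support metadata
// entry if the input model carried one.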
absl::Status SparsifyModel(const tflite::ModelT& input_model,
flatbuffers::FlatBufferBuilder* builder) {
MLIRContext context;
StatusScopedDiagnosticHandler statusHandler(&context,
true);
flatbuffers::FlatBufferBuilder input_builder;
flatbuffers::Offset<tflite::Model> input_model_location =
tflite::Model::Pack(input_builder, &input_model);
tflite::FinishModelBuffer(input_builder, input_model_location);
std::string serialized_model(
reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
input_builder.GetSize());
OwningOpRef<mlir::ModuleOp> module = tflite::FlatBufferToMlir(
serialized_model, &context, UnknownLoc::get(&context));
if (!module) {
LOG(ERROR) << "Couldn't import flatbuffer to MLIR.";
return absl::InternalError("Couldn't import flatbuffer to MLIR.");
}
PassManager pm((*module)->getName(), OpPassManager::Nesting::Implicit);
pm.addPass(TFL::CreateDenseToSparsePass());
if (failed(pm.run(module.get()))) {
LOG(ERROR) << "Failed to sparsify: "
<< statusHandler.ConsumeStatus().message();
return absl::InternalError(absl::StrCat(
"Failed to sparsify: ", statusHandler.ConsumeStatus().message()));
}
std::string result;
tflite::FlatbufferExportOptions options;
options.toco_flags.set_force_select_tf_ops(false);
options.toco_flags.set_enable_select_tf_ops(true);
options.toco_flags.set_allow_custom_ops(true);
for (const auto& metadata : input_model.metadata) {
if (metadata->name != tflite::optimize::kTfLiteReducedPrecisionKey) {
continue;
}
const auto& data = input_model.buffers[metadata->buffer]->data;
options.metadata[metadata->name] = std::string(data.begin(), data.end());
break;
}
if (!tflite::MlirToFlatBufferTranslateFunction(module.get(), options,
&result)) {
LOG(ERROR) << "Failed to export MLIR to flatbuffer.";
return absl::InternalError("Failed to export MLIR to flatbuffer.");
}
builder->PushFlatBuffer(reinterpret_cast<const uint8_t*>(result.data()),
result.size());
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include <stdint.h>
#include <cstdarg>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/tools/optimize/reduced_precision_support.h"
namespace mlir {
namespace lite {
namespace {
TEST(SparsifyModelTest, MetadataIsAddedToOutputModel) {
std::string expected_key = tflite::optimize::kTfLiteReducedPrecisionKey;
std::string expected_value = "test_data";
auto input_fbm = tflite::FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/sparse_tensor.bin");
tflite::ModelT input_model;
input_fbm->GetModel()->UnPackTo(&input_model);
auto model_metadata_buffer = std::make_unique<tflite::BufferT>();
model_metadata_buffer->data =
std::vector<uint8_t>(expected_value.begin(), expected_value.end());
input_model.buffers.push_back(std::move(model_metadata_buffer));
auto metadata_t = std::make_unique<tflite::MetadataT>();
metadata_t->name = tflite::optimize::kTfLiteReducedPrecisionKey;
metadata_t->buffer = input_model.buffers.size() - 1;
input_model.metadata.push_back(std::move(metadata_t));
flatbuffers::FlatBufferBuilder output_builder;
ASSERT_TRUE(SparsifyModel(input_model, &output_builder).ok());
auto output_fbm = tflite::FlatBufferModel::BuildFromBuffer(
reinterpret_cast<const char*>(output_builder.GetCurrentBufferPointer()),
output_builder.GetSize());
tflite::ModelT output_model;
output_fbm->GetModel()->UnPackTo(&output_model);
std::map<std::string, std::string> output_metadata;
for (const auto& metadata : output_model.metadata) {
const auto& data = output_model.buffers[metadata->buffer]->data;
output_metadata[metadata->name] = std::string(data.begin(), data.end());
}
EXPECT_THAT(output_metadata,
testing::Contains(testing::Pair(expected_key, expected_value)));
}
}
}
} |
1,171 | cpp | tensorflow/tensorflow | hlo_matchers | third_party/xla/xla/hlo/utils/hlo_matchers.cc | third_party/xla/xla/hlo/utils/hlo_matchers_test.cc | #ifndef XLA_HLO_UTILS_HLO_MATCHERS_H_
#define XLA_HLO_UTILS_HLO_MATCHERS_H_
#include <optional>
#include <string>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace testing {
class HloMatcher : public ::testing::MatcherInterface<const HloInstruction*> {
public:
HloMatcher(HloOpcode opcode,
std::vector<::testing::Matcher<const HloInstruction*>> operands)
: opcode_(opcode), operands_(operands) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(::std::ostream* os) const override;
private:
HloOpcode opcode_;
std::vector<::testing::Matcher<const HloInstruction*>> operands_;
};
class HloParameterMatcher : public HloMatcher {
public:
explicit HloParameterMatcher(int64_t parameter_number)
: HloMatcher(HloOpcode::kParameter, {}),
parameter_number_(parameter_number) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
private:
int64_t parameter_number_;
};
class HloComparisonMatcher : public HloMatcher {
public:
explicit HloComparisonMatcher(
ComparisonDirection direction,
std::vector<::testing::Matcher<const HloInstruction*>> operands)
: HloMatcher(HloOpcode::kCompare, operands), direction_(direction) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
private:
ComparisonDirection direction_;
};
class HloGetTupleElementMatcher : public HloMatcher {
public:
HloGetTupleElementMatcher(::testing::Matcher<const HloInstruction*> operand,
int64_t tuple_index)
: HloMatcher(HloOpcode::kGetTupleElement, {operand}),
tuple_index_(tuple_index) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
private:
int64_t tuple_index_;
};
class HloCustomCallMatcher : public HloMatcher {
public:
HloCustomCallMatcher(
::testing::Matcher<std::string> call_target_matcher,
std::vector<::testing::Matcher<const HloInstruction*>> operands)
: HloMatcher(HloOpcode::kCustomCall, operands),
call_target_matcher_(call_target_matcher) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
::testing::Matcher<std::string> call_target_matcher_;
};
class HloShapeMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloShapeMatcher(const Shape& shape) : shape_(shape) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
Shape shape_;
};
class HloShapeAndLayoutMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloShapeAndLayoutMatcher(const Shape& shape,
bool minor_to_major_only = false)
: shape_(shape), minor_to_major_only_(minor_to_major_only) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
Shape shape_;
bool minor_to_major_only_;
};
class HloShardingMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloShardingMatcher(const std::optional<HloSharding>& sharding)
: sharding_(sharding) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
std::optional<HloSharding> sharding_;
};
class HloDotWithContractingDimsMatcher : public HloMatcher {
public:
explicit HloDotWithContractingDimsMatcher(
::testing::Matcher<const HloInstruction*> lhs,
::testing::Matcher<const HloInstruction*> rhs,
int64_t lhs_contracting_dim, int64_t rhs_contracting_dim)
: HloMatcher(HloOpcode::kDot, {lhs, rhs}),
lhs_contracting_dim_(lhs_contracting_dim),
rhs_contracting_dim_(rhs_contracting_dim) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
int64_t lhs_contracting_dim_;
int64_t rhs_contracting_dim_;
};
class HloAsyncCopyMatcher : public HloMatcher {
public:
HloAsyncCopyMatcher(int64_t to_space, int64_t from_space,
::testing::Matcher<const HloInstruction*> operand)
: HloMatcher(HloOpcode::kCopyDone,
{::testing::MakeMatcher(
new HloMatcher(HloOpcode::kCopyStart, {operand}))}),
to_space_(to_space),
from_space_(from_space) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
int64_t to_space_;
int64_t from_space_;
};
class HloConstantMatcher : public HloMatcher {
public:
explicit HloConstantMatcher(Literal literal)
: HloMatcher(HloOpcode::kConstant, {}),
literal_(std::move(literal)) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
Literal literal_;
};
class HloReplicaGroupsMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloReplicaGroupsMatcher(
std::vector<std::vector<int64_t>> replica_groups)
: replica_groups_(std::move(replica_groups)) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
std::vector<std::vector<int64_t>> replica_groups_;
};
class HloSourceTargetPairsMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloSourceTargetPairsMatcher(
std::vector<std::pair<int64_t, int64_t>> source_target_pairs)
: source_target_pairs_(std::move(source_target_pairs)) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
std::vector<std::pair<int64_t, int64_t>> source_target_pairs_;
};
class HloMetadataMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloMetadataMatcher(OpMetadata metadata)
: metadata_(std::move(metadata)) {}
bool MatchAndExplain(const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override;
void DescribeTo(std::ostream* os) const override;
private:
OpMetadata metadata_;
};
namespace opcode_matchers {
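// Descriptive note (added): HLO_MATCHER(Foo) stamps out a factory
// op::Foo(operand_matchers...) that matches an HloInstruction with opcode
// kFoo whose operands match the given matchers; when called with no
// arguments, the operands are not checked at all.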
#define HLO_MATCHER(opcode) \
template <typename... M> \
::testing::Matcher<const ::xla::HloInstruction*> opcode(M... operands) { \
return ::testing::MakeMatcher(new ::xla::testing::HloMatcher( \
::xla::HloOpcode::k##opcode, {operands...})); \
}
HLO_MATCHER(Abs);
HLO_MATCHER(Add);
HLO_MATCHER(AddDependency);
HLO_MATCHER(AfterAll);
HLO_MATCHER(AsyncStart);
HLO_MATCHER(AsyncUpdate);
HLO_MATCHER(AsyncDone);
HLO_MATCHER(AllGather);
HLO_MATCHER(AllGatherStart);
HLO_MATCHER(AllGatherDone);
HLO_MATCHER(AllReduce);
HLO_MATCHER(AllReduceStart);
HLO_MATCHER(AllReduceDone);
HLO_MATCHER(AllToAll);
HLO_MATCHER(And);
HLO_MATCHER(BatchNormGrad);
HLO_MATCHER(Bitcast);
HLO_MATCHER(BitcastConvert);
HLO_MATCHER(Broadcast);
HLO_MATCHER(Call);
HLO_MATCHER(Ceil);
HLO_MATCHER(Clamp);
HLO_MATCHER(CollectiveBroadcast);
HLO_MATCHER(CollectivePermute);
HLO_MATCHER(CollectivePermuteStart);
HLO_MATCHER(CollectivePermuteDone);
HLO_MATCHER(Compare);
HLO_MATCHER(Concatenate);
HLO_MATCHER(Conditional);
HLO_MATCHER(Convert);
HLO_MATCHER(Convolution);
HLO_MATCHER(Copy);
HLO_MATCHER(CopyDone);
HLO_MATCHER(CopyStart);
HLO_MATCHER(Divide);
HLO_MATCHER(Domain);
HLO_MATCHER(DynamicSlice);
HLO_MATCHER(DynamicUpdateSlice);
HLO_MATCHER(Erf);
HLO_MATCHER(Exp);
HLO_MATCHER(Fft);
HLO_MATCHER(Floor);
HLO_MATCHER(Fusion);
HLO_MATCHER(Gather);
HLO_MATCHER(GetDimensionSize);
HLO_MATCHER(Infeed);
HLO_MATCHER(Iota);
HLO_MATCHER(IsFinite);
HLO_MATCHER(Log);
HLO_MATCHER(Map);
HLO_MATCHER(Maximum);
HLO_MATCHER(Minimum);
HLO_MATCHER(Multiply);
HLO_MATCHER(Negate);
HLO_MATCHER(Not);
HLO_MATCHER(Or);
HLO_MATCHER(Outfeed);
HLO_MATCHER(Pad);
HLO_MATCHER(PartitionId);
HLO_MATCHER(Power);
HLO_MATCHER(Recv);
HLO_MATCHER(RecvDone);
HLO_MATCHER(Reduce);
HLO_MATCHER(ReducePrecision);
HLO_MATCHER(ReduceScatter);
HLO_MATCHER(ReduceWindow);
HLO_MATCHER(Remainder);
HLO_MATCHER(ReplicaId);
HLO_MATCHER(Reshape);
HLO_MATCHER(Reverse);
HLO_MATCHER(Rng);
HLO_MATCHER(RngBitGenerator);
HLO_MATCHER(RngGetAndUpdateState);
HLO_MATCHER(Scatter);
HLO_MATCHER(Select);
HLO_MATCHER(SelectAndScatter);
HLO_MATCHER(Send);
HLO_MATCHER(SendDone);
HLO_MATCHER(SetDimensionSize);
HLO_MATCHER(ShiftLeft);
HLO_MATCHER(ShiftRightArithmetic);
HLO_MATCHER(ShiftRightLogical);
HLO_MATCHER(Sign);
HLO_MATCHER(Slice);
HLO_MATCHER(Sort);
HLO_MATCHER(Subtract);
HLO_MATCHER(Tan);
HLO_MATCHER(Tanh);
HLO_MATCHER(Transpose);
HLO_MATCHER(Tuple);
HLO_MATCHER(While);
HLO_MATCHER(Xor);
HLO_MATCHER(OptimizationBarrier);
#define HLO_MATCHER_VECTOR_OPERANDS(opcode) \
template <> \
inline ::testing::Matcher<const ::xla::HloInstruction*> opcode( \
std::vector<::testing::Matcher<const HloInstruction*>> operands) { \
return ::testing::MakeMatcher(new ::xla::testing::HloMatcher( \
::xla::HloOpcode::k##opcode, operands)); \
}
HLO_MATCHER_VECTOR_OPERANDS(DynamicSlice);
inline ::testing::Matcher<const ::xla::HloInstruction*> Parameter(
int64_t parameter_number) {
return ::testing::MakeMatcher(
new ::xla::testing::HloParameterMatcher(parameter_number));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Parameter() {
return ::testing::MakeMatcher(
new ::xla::testing::HloMatcher(HloOpcode::kParameter, {}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> Eq(M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloComparisonMatcher(
ComparisonDirection::kEq, {operands...}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> Ne(M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloComparisonMatcher(
ComparisonDirection::kNe, {operands...}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> Ge(M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloComparisonMatcher(
ComparisonDirection::kGe, {operands...}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> Gt(M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloComparisonMatcher(
ComparisonDirection::kGt, {operands...}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> Le(M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloComparisonMatcher(
ComparisonDirection::kLe, {operands...}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> Lt(M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloComparisonMatcher(
ComparisonDirection::kLt, {operands...}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> GetTupleElement(
::testing::Matcher<const HloInstruction*> operand, int64_t tuple_index) {
return ::testing::MakeMatcher(
new ::xla::testing::HloGetTupleElementMatcher(operand, tuple_index));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> GetTupleElement(
::testing::Matcher<const HloInstruction*> operand) {
return ::testing::MakeMatcher(
new ::xla::testing::HloMatcher(HloOpcode::kGetTupleElement, {operand}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> GetTupleElement() {
return ::testing::MakeMatcher(
new ::xla::testing::HloMatcher(HloOpcode::kGetTupleElement, {}));
}
template <typename... M>
inline ::testing::Matcher<const ::xla::HloInstruction*> CustomCall(
::testing::Matcher<std::string> call_target_matcher, M... operands) {
return ::testing::MakeMatcher(new ::xla::testing::HloCustomCallMatcher(
call_target_matcher, {operands...}));
}
template <
typename FirstM, typename... M,
typename Dummy = typename std::enable_if<
!std::is_convertible<FirstM, ::testing::Matcher<std::string>>::value,
void>::type*>
inline ::testing::Matcher<const ::xla::HloInstruction*> CustomCall(
FirstM operands_first, M... operands_rest) {
return ::testing::MakeMatcher(new ::xla::testing::HloMatcher(
HloOpcode::kCustomCall, {operands_first, operands_rest...}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> CustomCall() {
return ::testing::MakeMatcher(
new ::xla::testing::HloMatcher(HloOpcode::kCustomCall, {}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Shape(
const class Shape& shape) {
return ::testing::MakeMatcher(new ::xla::testing::HloShapeMatcher(shape));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Shape(
absl::string_view shape) {
return ::testing::MakeMatcher(
new ::xla::testing::HloShapeMatcher(ParseShape(shape).value()));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> ShapeWithLayout(
const class Shape& shape) {
return ::testing::MakeMatcher(
new ::xla::testing::HloShapeAndLayoutMatcher(shape));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> ShapeWithLayout(
absl::string_view shape, bool minor_to_major_only = false) {
return ::testing::MakeMatcher(new ::xla::testing::HloShapeAndLayoutMatcher(
ParseShape(shape).value(), minor_to_major_only));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Sharding(
const HloSharding& sharding) {
return ::testing::MakeMatcher(
new ::xla::testing::HloShardingMatcher(sharding));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Sharding(
absl::string_view sharding) {
return ::testing::MakeMatcher(
new ::xla::testing::HloShardingMatcher(ParseSharding(sharding).value()));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> NoSharding() {
return ::testing::MakeMatcher(
new ::xla::testing::HloShardingMatcher(std::nullopt));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Dot() {
return ::testing::MakeMatcher(
new ::xla::testing::HloMatcher(::xla::HloOpcode::kDot, {}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Dot(
::testing::Matcher<const HloInstruction*> lhs_matcher,
::testing::Matcher<const HloInstruction*> rhs_matcher) {
return ::testing::MakeMatcher(new ::xla::testing::HloMatcher(
::xla::HloOpcode::kDot, {lhs_matcher, rhs_matcher}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Dot(
::testing::Matcher<const HloInstruction*> lhs_matcher,
::testing::Matcher<const HloInstruction*> rhs_matcher,
int64_t lhs_contracting_dim, int64_t rhs_contracting_dim) {
return ::testing::MakeMatcher(
new ::xla::testing::HloDotWithContractingDimsMatcher(
lhs_matcher, rhs_matcher, lhs_contracting_dim, rhs_contracting_dim));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> AsyncCopy(
int64_t to_space, int64_t from_space,
::testing::Matcher<const HloInstruction*> operand_matcher) {
return ::testing::MakeMatcher(new ::xla::testing::HloAsyncCopyMatcher(
to_space, from_space, operand_matcher));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Constant() {
return ::testing::MakeMatcher(
new ::xla::testing::HloMatcher(HloOpcode::kConstant, {}));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Constant(
Literal value) {
return ::testing::MakeMatcher(
new ::xla::testing::HloConstantMatcher(std::move(value)));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> ReplicaGroups(
std::vector<std::vector<int64_t>> replica_groups) {
return ::testing::MakeMatcher(
new ::xla::testing::HloReplicaGroupsMatcher(std::move(replica_groups)));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> SourceTargetPairs(
std::vector<std::pair<int64_t, int64_t>> source_target_pairs) {
return ::testing::MakeMatcher(new ::xla::testing::HloSourceTargetPairsMatcher(
std::move(source_target_pairs)));
}
inline ::testing::Matcher<const ::xla::HloInstruction*> Metadata(
OpMetadata metadata) {
return ::testing::MakeMatcher(
new ::xla::testing::HloMetadataMatcher(std::move(metadata)));
}
#undef HLO_MATCHER
}
template <typename Container>
std::vector<const HloInstruction*> Pointers(const Container& container) {
std::vector<const HloInstruction*> result;
result.reserve(container.size());
for (const auto& entry : container) result.push_back(entry.get());
return result;
}
}
void PrintTo(const HloInstruction* inst, ::std::ostream* os);
}
#endif
#include "xla/hlo/utils/hlo_matchers.h"
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
namespace xla {
namespace testing {
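// Descriptive note (added): the base matcher checks the opcode first, then
// (only if operand matchers were supplied) requires an exact operand count
// and recursively matches each operand, reporting the first operand that
// fails to match.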
bool HloMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!instruction) {
return false;
}
*listener << "(" << instruction->ToString() << ")";
if (instruction->opcode() != opcode_) {
return false;
}
if (operands_.empty()) {
return true;
}
const auto& operands = instruction->operands();
if (operands.size() != operands_.size()) {
*listener << " has too "
<< (operands.size() > operands_.size() ? "many" : "few")
<< " operands (got " << operands.size() << ", want "
<< operands_.size() << ")";
return false;
}
for (int index = 0; index < operands.size(); index++) {
::testing::StringMatchResultListener inner_listener;
if (!operands_[index].MatchAndExplain(operands[index], &inner_listener)) {
if (listener->IsInterested()) {
*listener << "\noperand " << index << ":\n\t"
<< operands[index]->ToString()
<< "\ndoesn't match expected:\n\t";
operands_[index].DescribeTo(listener->stream());
std::string explanation = inner_listener.str();
if (!explanation.empty()) {
*listener << ", " << explanation;
}
}
return false;
}
}
return true;
}
void HloMatcher::DescribeTo(::std::ostream* os) const {
*os << opcode_;
if (!operands_.empty()) {
*os << "(";
for (int i = 0; i < operands_.size(); i++) {
if (i > 0) {
*os << ", ";
}
operands_[i].DescribeTo(os);
}
*os << ")";
}
}
bool HloParameterMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
if (instruction->parameter_number() != parameter_number_) {
*listener << " has wrong parameter number (got "
<< instruction->parameter_number() << ", want "
<< parameter_number_ << ")";
return false;
}
return true;
}
bool HloComparisonMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
if (instruction->comparison_direction() != direction_) {
*listener << " has wrong comparison direction (got "
<< ComparisonDirectionToString(
instruction->comparison_direction())
<< ", want " << ComparisonDirectionToString(direction_) << ")";
return false;
}
return true;
}
bool HloGetTupleElementMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
if (instruction->tuple_index() != tuple_index_) {
*listener << " has wrong tuple index (got " << instruction->tuple_index()
<< ", want " << tuple_index_ << ")";
return false;
}
return true;
}
void HloCustomCallMatcher::DescribeTo(std::ostream* os) const {
HloMatcher::DescribeTo(os);
*os << " with call target that ";
call_target_matcher_.DescribeTo(os);
}
bool HloCustomCallMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
::testing::StringMatchResultListener sub_listener;
bool result = ExplainMatchResult(
call_target_matcher_, instruction->custom_call_target(), &sub_listener);
if (sub_listener.str().empty()) {
sub_listener << " that ";
std::stringstream desc_stream;
if (result) {
call_target_matcher_.DescribeTo(&desc_stream);
} else {
call_target_matcher_.DescribeNegationTo(&desc_stream);
}
sub_listener << desc_stream.str();
}
*listener << " custom-call with call target" << sub_listener.str();
return result;
}
bool HloShapeMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (ShapeUtil::Compatible(instruction->shape(), shape_)) {
return true;
}
*listener << instruction->ToString() << " has incorrect shape (expected: "
<< ShapeUtil::HumanString(shape_) << ")";
return false;
}
void HloShapeMatcher::DescribeTo(std::ostream* os) const {
*os << ShapeUtil::HumanString(shape_);
}
bool HloShapeAndLayoutMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
auto compare = Shape::Equal();
if (minor_to_major_only_) {
compare.MinorToMajorOnlyInLayout();
}
if (compare(instruction->shape(), shape_)) {
return true;
}
*listener << instruction->ToString() << " has incorrect shape (expected: "
<< ShapeUtil::HumanStringWithLayout(shape_) << ")";
return false;
}
void HloShapeAndLayoutMatcher::DescribeTo(std::ostream* os) const {
*os << ShapeUtil::HumanStringWithLayout(shape_);
}
bool HloShardingMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!sharding_.has_value()) {
if (!instruction->has_sharding()) {
return true;
}
*listener << instruction->ToString() << " expected to have no sharding.";
return false;
}
if (instruction->has_sharding()) {
if (instruction->sharding() == sharding_.value()) {
return true;
}
*listener << instruction->ToString()
<< " has incorrect sharding (expected: " << sharding_->ToString()
<< ")";
return false;
} else {
*listener << instruction->ToString()
<< " has no sharding (expected: " << sharding_->ToString() << ")";
return false;
}
}
void HloShardingMatcher::DescribeTo(std::ostream* os) const {
if (sharding_.has_value()) {
*os << sharding_->ToString();
} else {
*os << "<no-sharding>";
}
}
bool HloDotWithContractingDimsMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
const DotDimensionNumbers& dim_nums = instruction->dot_dimension_numbers();
if (dim_nums.lhs_contracting_dimensions_size() != 1 ||
dim_nums.lhs_contracting_dimensions(0) != lhs_contracting_dim_) {
*listener << " has wrong lhs_contracting_dimensions (got {"
<< absl::StrJoin(dim_nums.lhs_contracting_dimensions(), ",")
<< "} want {" << lhs_contracting_dim_ << "})";
return false;
}
if (dim_nums.rhs_contracting_dimensions_size() != 1 ||
dim_nums.rhs_contracting_dimensions(0) != rhs_contracting_dim_) {
*listener << " has wrong rhs_contracting_dimensions (got {"
<< absl::StrJoin(dim_nums.rhs_contracting_dimensions(), ",")
<< "} want {" << rhs_contracting_dim_ << "})";
return false;
}
return true;
}
void HloDotWithContractingDimsMatcher::DescribeTo(std::ostream* os) const {
HloMatcher::DescribeTo(os);
*os << " with lhs_contracting_dims={" << lhs_contracting_dim_
<< "} and rhs_contracting_dims={" << rhs_contracting_dim_ << "}";
}
bool HloAsyncCopyMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
const HloInstruction* copy_done = instruction;
if (!copy_done->shape().has_layout()) {
*listener << " does not have layout, expected a layout with memory space "
<< to_space_;
return false;
}
if (copy_done->shape().layout().memory_space() != to_space_) {
*listener << " copies to memory space "
<< copy_done->shape().layout().memory_space() << ", expected "
<< to_space_;
return false;
}
const HloInstruction* copy_start_operand =
copy_done->operands()[0]->operands()[0];
if (!copy_start_operand->shape().has_layout()) {
*listener << copy_start_operand->ToString()
<< " does not have layout, expected a layout with memory space "
<< from_space_;
return false;
}
if (copy_start_operand->shape().layout().memory_space() != from_space_) {
*listener << " is in the memory space "
<< copy_start_operand->shape().layout().memory_space()
<< ", expected " << from_space_;
return false;
}
return true;
}
void HloAsyncCopyMatcher::DescribeTo(std::ostream* os) const {
HloMatcher::DescribeTo(os);
*os << " (copy from memory space " << from_space_ << " to " << to_space_
<< ")";
}
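// Matches a constant instruction whose literal equals the expected literal.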
bool HloConstantMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
if (!HloMatcher::MatchAndExplain(instruction, listener)) {
return false;
}
if (instruction->literal() != literal_) {
*listener << " has wrong value (got " << instruction->literal().ToString()
<< ", want " << literal_.ToString() << ")";
return false;
}
return true;
}
void HloConstantMatcher::DescribeTo(std::ostream* os) const {
HloMatcher::DescribeTo(os);
*os << " (has value " << literal_.ToString() << ")";
}
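// Replica groups are only defined for collective instructions; anything else
// fails the match with "not a collective op".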
bool HloReplicaGroupsMatcher::MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const {
const HloCollectiveInstruction* collective =
DynCast<HloCollectiveInstruction>(instruction);
if (!collective) {
    *listener << instruction->ToString() << " not a collective op"; | #include "xla/hlo/utils/hlo_matchers.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using ::testing::Eq;
using ::testing::HasSubstr;
namespace xla {
namespace {
using HloMatchersTest = HloTestBase;
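// Test helpers: DescribeHloMatcher renders a matcher's DescribeTo output as a
// string; Explain runs a matcher that is expected to fail and returns the
// listener's explanation message.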
std::string DescribeHloMatcher(
const ::testing::Matcher<const HloInstruction*>& m) {
std::stringstream ss;
m.DescribeTo(&ss);
return ss.str();
}
template <typename M, typename T>
std::string Explain(const T& t, const M& m) {
::testing::StringMatchResultListener listener;
EXPECT_THAT(t, ::testing::Not(m));
EXPECT_FALSE(m.MatchAndExplain(t, &listener));
return listener.str();
}
TEST_F(HloMatchersTest, Test) {
auto shape = ShapeUtil::MakeShape(F32, {1});
auto param = HloInstruction::CreateParameter(0, shape, "param");
auto mul = HloInstruction::CreateBinary(shape, HloOpcode::kMultiply,
param.get(), param.get());
auto add = HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param.get(),
mul.get());
EXPECT_THAT(add.get(), op::Add());
EXPECT_THAT(add.get(), op::Add(op::Parameter(), op::Multiply()));
EXPECT_THAT(add.get(),
op::Add(op::Parameter(), op::Multiply(_, op::Parameter())));
EXPECT_THAT(
Explain(add.get(), op::Parameter()),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply))"));
EXPECT_THAT(
Explain(add.get(), op::Add(op::Parameter())),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply)) "
"has too many operands (got 2, want 1)"));
EXPECT_THAT(
Explain(add.get(), op::Add(op::Parameter(), op::Parameter())),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply))"
"\noperand 1:\n\t"
"%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} %param)\n"
"doesn't match expected:\n\t"
"parameter"
", (%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} "
"%param))"));
EXPECT_THAT(
Explain(add.get(),
op::Add(op::Parameter(), op::Multiply(op::Add(), op::Add()))),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply))"
"\noperand 1:\n\t"
"%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} %param)\n"
"doesn't match expected:\n\t"
"multiply(add, add)"
", (%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} "
"%param))\n"
"operand 0:\n\t"
"%param = f32[1]{0} parameter(0)\n"
"doesn't match expected:\n\t"
"add, (%param = f32[1]{0} parameter(0))"));
}
TEST_F(HloMatchersTest, CustomCallMatcher) {
auto c1 =
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3}));
auto c2 =
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({1, 2, 3}));
auto call = HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1}), {c1.get(), c2.get()}, "foo_target");
EXPECT_THAT(call.get(), op::CustomCall());
EXPECT_THAT(call.get(), op::CustomCall(c1.get(), c2.get()));
EXPECT_THAT(call.get(), op::CustomCall("foo_target"));
EXPECT_THAT(call.get(), op::CustomCall("foo_target", c1.get(), c2.get()));
EXPECT_THAT(call.get(), op::CustomCall(::testing::StartsWith("foo")));
EXPECT_THAT(call.get(),
op::CustomCall(::testing::Not(::testing::StartsWith("bar"))));
EXPECT_THAT(call.get(), ::testing::Not(op::CustomCall(c1.get())));
EXPECT_THAT(call.get(),
::testing::Not(op::CustomCall(::testing::StartsWith("bar"))));
EXPECT_THAT(Explain(call.get(), op::CustomCall("bar")),
"(%custom-call = f32[1]{0} custom-call(f32[3]{0} %constant, "
"s32[3]{0} %constant), custom_call_target=\"foo_target\") "
"custom-call with call target that isn't equal to \"bar\"");
EXPECT_THAT(DescribeHloMatcher(op::CustomCall("foo_target")),
R"(custom-call with call target that is equal to "foo_target")");
}
TEST_F(HloMatchersTest, ShapeMatcher) {
auto p0 = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 7}, {0, 1}), "param");
EXPECT_THAT(p0.get(), op::Shape(ShapeUtil::MakeShape(F32, {5, 7})));
EXPECT_THAT(p0.get(), op::Shape("f32[5,7]"));
EXPECT_THAT(
p0.get(),
::testing::Not(op::ShapeWithLayout(ShapeUtil::MakeShape(F32, {5, 7}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::ShapeWithLayout("f32[5,7]")));
EXPECT_THAT(p0.get(),
::testing::Not(op::Shape(ShapeUtil::MakeShape(F32, {7, 5}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::Shape("f32[7,5]")));
EXPECT_THAT(
p0.get(),
::testing::Not(op::ShapeWithLayout(ShapeUtil::MakeShape(F32, {7, 5}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::ShapeWithLayout("f32[7,5]")));
EXPECT_THAT(p0.get(), op::Shape(ShapeUtil::MakeShapeWithDenseLayout(
F32, {5, 7}, {0, 1})));
EXPECT_THAT(p0.get(), op::Shape("f32[5,7]{0,1}"));
EXPECT_THAT(p0.get(), op::ShapeWithLayout(ShapeUtil::MakeShapeWithDenseLayout(
F32, {5, 7}, {0, 1})));
EXPECT_THAT(p0.get(), op::ShapeWithLayout("f32[5,7]{0,1}"));
EXPECT_THAT(p0.get(),
::testing::Not(op::ShapeWithLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 7}, {1, 0}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::ShapeWithLayout("f32[5,7]{1,0}")));
EXPECT_THAT(Explain(p0.get(), op::Shape(ShapeUtil::MakeShape(F32, {7, 5}))),
"%param = f32[5,7]{0,1} parameter(0) has incorrect shape "
"(expected: f32[7,5])");
EXPECT_THAT(
Explain(p0.get(), op::ShapeWithLayout(ShapeUtil::MakeShapeWithDenseLayout(
F32, {7, 5}, {1, 0}))),
"%param = f32[5,7]{0,1} parameter(0) has incorrect shape "
"(expected: f32[7,5]{1,0})");
}
TEST_F(HloMatchersTest, ShardingMatcher) {
auto p0 = HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {5}),
"param.0");
p0->clear_sharding();
auto p1 = HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {7}),
"param.1");
p1->set_sharding(HloSharding::AssignDevice(1));
auto tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {7}), ShapeUtil::MakeShape(S32, {9}),
ShapeUtil::MakeShape(F32, {11})});
auto p2 = HloInstruction::CreateParameter(1, tuple_shape, "param.2");
Array<int64_t> assignment({2});
assignment.SetValues({0, 1});
auto sharding = HloSharding::Tuple(
tuple_shape, {HloSharding::Tile(assignment), HloSharding::AssignDevice(1),
HloSharding::Replicate()});
p2->set_sharding(sharding);
EXPECT_THAT(p0.get(), op::NoSharding());
EXPECT_THAT(p0.get(),
::testing::Not(op::Sharding(HloSharding::AssignDevice(1))));
EXPECT_THAT(p1.get(), ::testing::Not(op::NoSharding()));
EXPECT_THAT(p1.get(),
::testing::Not(op::Sharding(HloSharding::AssignDevice(0))));
EXPECT_THAT(p1.get(), op::Sharding(HloSharding::AssignDevice(1)));
EXPECT_THAT(
p2.get(),
op::Sharding("{{devices=[2]0,1}, {maximal device=1}, {replicated}}"));
EXPECT_THAT(Explain(p0.get(), op::Sharding(HloSharding::AssignDevice(1))),
"%param.0 = f32[5]{0} parameter(0) has no sharding (expected: "
"{maximal device=1})");
EXPECT_THAT(Explain(p1.get(), op::NoSharding()),
"%param.1 = f32[7]{0} parameter(1), sharding={maximal device=1} "
"expected to have no sharding.");
EXPECT_THAT(Explain(p1.get(), op::Sharding(HloSharding::AssignDevice(0))),
"%param.1 = f32[7]{0} parameter(1), sharding={maximal device=1} "
"has incorrect sharding (expected: {maximal device=0})");
}
TEST_F(HloMatchersTest, DotMatcher) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[256,1024] parameter(1)
ROOT dot = f32[1,1024] dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Dot(op::Parameter(0), op::Parameter(1),
1,
0));
EXPECT_THAT(
Explain(root, op::Dot(op::Parameter(0), op::Parameter(1),
0,
0)),
"(%dot = f32[1,1024]{1,0} dot(f32[1,256]{1,0} %arg0, f32[256,1024]{1,0} "
"%arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}) has wrong "
"lhs_contracting_dimensions (got {1} want {0})");
EXPECT_THAT(
Explain(root, op::Dot(op::Parameter(0), op::Parameter(1),
1,
1)),
"(%dot = f32[1,1024]{1,0} dot(f32[1,256]{1,0} %arg0, f32[256,1024]{1,0} "
"%arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}) has wrong "
"rhs_contracting_dimensions (got {0} want {1})");
}
TEST_F(HloMatchersTest, ComparisonMatcher) {
auto shape = ShapeUtil::MakeShape(F32, {1});
auto p0 = HloInstruction::CreateParameter(0, shape, "param.0");
auto p1 = HloInstruction::CreateParameter(1, shape, "param.1");
auto eq = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kEq);
auto ne = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kNe);
auto add =
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0.get(), p1.get());
auto le = HloInstruction::CreateCompare(shape, p0.get(), add.get(),
ComparisonDirection::kLe);
EXPECT_THAT(eq.get(), op::Compare());
EXPECT_THAT(eq.get(), op::Eq());
EXPECT_THAT(ne.get(), op::Compare());
EXPECT_THAT(ne.get(), op::Ne());
EXPECT_THAT(le.get(),
op::Compare(op::Parameter(0),
op::Add(op::Parameter(0), op::Parameter(1))));
EXPECT_THAT(le.get(), op::Le(op::Parameter(0),
op::Add(op::Parameter(0), op::Parameter(1))));
EXPECT_THAT(Explain(eq.get(), op::Add()),
Eq("(%compare = f32[1]{0} compare(f32[1]{0} %param.0, "
"f32[1]{0} %param.1), direction=EQ)"));
EXPECT_THAT(Explain(eq.get(), op::Ne()),
Eq("(%compare = f32[1]{0} compare(f32[1]{0} %param.0, "
"f32[1]{0} %param.1), direction=EQ) "
"has wrong comparison direction (got EQ, want NE)"));
}
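// The memory space encoded in the layouts below (the last argument to
// MakeShapeWithDenseLayout, printed as S(1)/S(2)) is what
// op::AsyncCopy(to_space, from_space, ...) verifies.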
TEST_F(HloMatchersTest, AsyncCopyMatcher) {
Shape shape_memspace1 = ShapeUtil::MakeShapeWithDenseLayout(
F32, {16}, {0}, {},
1,
0,
1);
Shape shape_memspace2 = ShapeUtil::MakeShapeWithDenseLayout(
F32, {16}, {0}, {},
1,
0,
2);
auto p0 = HloInstruction::CreateParameter(0, shape_memspace1, "p0");
auto copy_start = HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape(
{shape_memspace2, shape_memspace1, ShapeUtil::MakeShape(U32, {})}),
p0.get());
auto copy_done = HloInstruction::CreateUnary(
shape_memspace2, HloOpcode::kCopyDone, copy_start.get());
EXPECT_THAT(copy_done.get(), op::AsyncCopy(2, 1, op::Parameter(0)));
EXPECT_THAT(Explain(copy_start.get(), op::AsyncCopy(2, 1, op::Parameter(0))),
Eq("(%copy-start = (f32[16]{0:S(2)}, f32[16]{0:S(1)}, u32[]) "
"copy-start(f32[16]{0:S(1)} %p0))"));
EXPECT_THAT(Explain(copy_done.get(), op::AsyncCopy(3, 1, op::Parameter(0))),
"(%copy-done = f32[16]{0:S(2)} copy-done((f32[16]{0:S(2)}, "
"f32[16]{0:S(1)}, u32[]) "
"%copy-start)) "
"copies to memory space 2, expected 3");
EXPECT_THAT(Explain(copy_done.get(), op::AsyncCopy(2, 3, op::Parameter(0))),
"(%copy-done = f32[16]{0:S(2)} copy-done((f32[16]{0:S(2)}, "
"f32[16]{0:S(1)}, u32[]) "
"%copy-start)) "
"is in the memory space 1, expected 3");
}
TEST_F(HloMatchersTest, ConstantMatcher) {
std::string hlo_string = R"(
HloModule Constant
ENTRY main {
ROOT x = u32[2] constant({1, 2})
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Constant());
EXPECT_THAT(root, op::Constant(LiteralUtil::CreateR1<uint32_t>({1, 2})));
EXPECT_THAT(root, ::testing::Not(
op::Constant(LiteralUtil::CreateR1<uint32_t>({1, 1}))));
EXPECT_THAT(Explain(root, op::Constant(LiteralUtil::CreateR0<uint32_t>(1))),
"(%x = u32[2]{0} constant({1, 2})) has wrong value (got u32[2] "
"{1, 2}, want u32[] 1)");
}
TEST_F(HloMatchersTest, ReplicaGroupsMatcher) {
Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
std::unique_ptr<HloInstruction> p0 =
HloInstruction::CreateParameter(0, shape, "param");
std::vector<ReplicaGroup> replica_groups(2);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(2);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(3);
std::unique_ptr<HloInstruction> all_to_all = HloInstruction::CreateAllToAll(
shape, {p0.get()}, CollectiveDeviceList(replica_groups),
false,
std::nullopt);
EXPECT_THAT(Explain(p0.get(), op::ReplicaGroups({})),
"%param = f32[5,7]{1,0} parameter(0) not a collective op");
EXPECT_THAT(Explain(all_to_all.get(), op::ReplicaGroups({{0, 1}, {2, 3}})),
"%all-to-all = f32[5,7]{1,0} all-to-all(f32[5,7]{1,0} %param), "
"replica_groups={{0,2},{1,3}} has incorrect replica_groups "
"(expected: {{0,1},{2,3}})");
EXPECT_THAT(all_to_all.get(), op::ReplicaGroups({{0, 2}, {1, 3}}));
}
TEST_F(HloMatchersTest, SourceTargetPairsMatcher) {
Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
std::unique_ptr<HloInstruction> p0 =
HloInstruction::CreateParameter(0, shape, "param");
std::vector<std::pair<int64_t, int64_t>> source_target_pairs = {
{0, 1}, {2, 3}, {1, 2}};
std::unique_ptr<HloInstruction> cp = HloInstruction::CreateCollectivePermute(
shape, p0.get(), source_target_pairs, std::nullopt);
EXPECT_THAT(Explain(p0.get(), op::SourceTargetPairs({{0, 1}})),
HasSubstr("not a collective permute"));
EXPECT_THAT(Explain(cp.get(), op::SourceTargetPairs({{0, 1}, {2, 3}})),
HasSubstr("source_target_pairs (expected: {{0,1},{2,3}}"));
EXPECT_THAT(cp.get(), op::SourceTargetPairs({{0, 1}, {2, 3}, {1, 2}}));
}
TEST_F(HloMatchersTest, MetadataMatcher) {
Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
std::unique_ptr<HloInstruction> p0 =
HloInstruction::CreateParameter(0, shape, "param");
OpMetadata metadata;
metadata.set_op_type("op_type1");
metadata.set_op_name("op_name1");
p0->set_metadata(metadata);
OpMetadata actual_opname;
actual_opname.set_op_type("op_type1");
actual_opname.set_op_name("op_name2");
OpMetadata actual_source_file;
actual_source_file.set_op_type("op_type1");
actual_source_file.set_op_name("op_name1");
actual_source_file.set_source_file("source_file");
OpMetadata actual_optype;
actual_optype.set_op_type("op_type2");
actual_optype.set_op_name("op_name1");
OpMetadata actual_source_line;
actual_source_line.set_op_type("op_type1");
actual_source_line.set_op_name("op_name1");
actual_source_line.set_source_line(1);
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_opname)),
HasSubstr("has wrong metadata (got op_name1, want op_name2)"));
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_source_file)),
HasSubstr("has wrong metadata (got "
", want source_file)"));
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_optype)),
HasSubstr("has wrong metadata (got"
" op_type1, want op_type2)"));
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_source_line)),
HasSubstr("has wrong metadata (got 0"
", want 1)"));
EXPECT_THAT(DescribeHloMatcher(op::Metadata(p0->metadata())),
R"( (metadata: op_type1 op_name1 0))");
}
}
} |
1,172 | cpp | tensorflow/tensorflow | custom_call | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.cc | third_party/xla/xla/service/gpu/custom_call_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_LEGALIZE_HLO_CONVERSIONS_CUSTOM_CALL_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_LEGALIZE_HLO_CONVERSIONS_CUSTOM_CALL_H_
#include <optional>
#include "mlir/Transforms/DialectConversion.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
class ConvertCustomCallOp : public OpConversionPattern<mhlo::CustomCallOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::CustomCallOp mhlo_custom_call, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
std::optional<bool> IsCustomCallLegal(mhlo::CustomCallOp op);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.h"
#include <optional>
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
LogicalResult ConvertCustomCallOp::matchAndRewrite(
mhlo::CustomCallOp mhlo_custom_call, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto tfl_custom = rewriter.create<TFL::CustomOp>(
mhlo_custom_call.getLoc(), mhlo_custom_call.getResultTypes(),
mhlo_custom_call.getInputs());
tfl_custom.setCustomCodeAttr(
rewriter.getStringAttr(mhlo_custom_call.getCallTargetName()));
if (auto bc = mhlo_custom_call.getBackendConfig()) {
if (auto stringattr = mlir::dyn_cast_or_null<mlir::StringAttr>(*bc)) {
tfl_custom.setCustomOptionAttr(
TFL::ConstBytesAttr::get(rewriter.getContext(), stringattr));
}
} else {
tfl_custom.setCustomOptionAttr(
TFL::ConstBytesAttr::get(rewriter.getContext(), ""));
}
rewriter.replaceOp(mhlo_custom_call, tfl_custom);
return success();
}
std::optional<bool> IsCustomCallLegal(mhlo::CustomCallOp op) {
if (op.getCallTargetName().starts_with("custom_call.")) {
auto bc = op.getBackendConfig();
if (!bc || mlir::isa<mlir::StringAttr>(*bc)) {
return false;
}
}
return true;
}
}
} | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <ostream>
#include <tuple>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace {
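// Legacy (untyped) custom-call targets for the CPU backend: each receives a
// raw output pointer and an array of input buffer pointers.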
void R0F32Add2(float* out, float** in) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in, sizeof(float*));
*out = **in + 2.0f;
}
void R0F32Add2InPlace(float* out, float** in) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in, sizeof(float*));
**in = **in + 2.0f;
}
void R2F32ReduceSum(float* out, float** in) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in, sizeof(float) * 4);
float* array = in[0];
*out = array[0] + array[1] + array[2] + array[3];
}
void Add1ToValues(float* out, float** in) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in, sizeof(float) * 4);
float* array = in[0];
out[0] = array[0] + 1;
out[1] = array[1] + 1;
out[2] = array[2] + 1;
out[3] = array[3] + 1;
}
void F32TupleSwap(float** out, float** in) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in[0], sizeof(float));
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in[1], sizeof(float));
*out[0] = *in[1];
*out[1] = *in[0];
}
void R0F32Add2Succeed(float* out, float** in, XlaCustomCallStatus*) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(in, sizeof(float*));
*out = **in + 2.0f;
}
void CustomCallFail(float*, float** in, XlaCustomCallStatus* status) {
auto msg = absl::StrFormat("Failed: %.1f", in[0][0]);
XlaCustomCallStatusSetFailure(status, msg.data(), msg.length());
}
void CustomCallFailWithBackendConfigStr(float*, float**, const char* opaque,
size_t opaque_len,
XlaCustomCallStatus* status) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(opaque, opaque_len);
auto msg = absl::StrFormat("Fail with raw backend config str: %s.",
absl::string_view(opaque, opaque_len));
XlaCustomCallStatusSetFailure(status, msg.data(), msg.length());
}
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(R0F32Add2);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(R0F32Add2InPlace);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(R2F32ReduceSum);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(Add1ToValues);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(F32TupleSwap);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(R0F32Add2Succeed);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(CustomCallFail);
XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(CustomCallFailWithBackendConfigStr);
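// Enum attributes consumed by the typed-FFI handlers below; the
// XLA_FFI_REGISTER_ENUM_ATTR_DECODING calls after this namespace make them
// decodable as FFI attributes.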
enum class BinaryOp : int8_t { kAdd, kMul };
enum class InitMethod : int { kZero, kOne };
std::ostream& operator<<(std::ostream& os, BinaryOp op) {
switch (op) {
case BinaryOp::kAdd:
return os << "add";
case BinaryOp::kMul:
return os << "mul";
}
}
std::ostream& operator<<(std::ostream& os, InitMethod op) {
switch (op) {
case InitMethod::kZero:
return os << "zero";
case InitMethod::kOne:
return os << "one";
}
}
}
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(BinaryOp);
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(InitMethod);
namespace xla {
namespace {
using ::testing::HasSubstr;
class CustomCallTest : public HloTestBase {
protected:
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
Shape r2f32_ = ShapeUtil::MakeShape(F32, {2, 2});
};
XLA_TEST_F(CustomCallTest, CustomCallR0F32Add2) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(
HloInstruction::CreateCustomCall(r0f32_, {constant}, "R0F32Add2"));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {}));
LiteralTestUtil::ExpectR0Near<float>(44.0f, result, error_spec_);
}
XLA_TEST_F(CustomCallTest, CustomCallR0F32Add2Aliased) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder
.AddInstruction(HloInstruction::CreateCustomCall(r0f32_, {constant},
"R0F32Add2InPlace"))
->set_output_to_operand_aliasing({{{}, {0, {}}}});
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {}));
LiteralTestUtil::ExpectR0Near<float>(44.0f, result, error_spec_);
}
XLA_TEST_F(CustomCallTest, CustomCallR2F32Reduce) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Array2D<float> array(2, 2);
array(0, 0) = 1.0f;
array(0, 1) = 2.0f;
array(1, 0) = 3.0f;
array(1, 1) = 4.0f;
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2FromArray2D(array)));
builder.AddInstruction(
HloInstruction::CreateCustomCall(r0f32_, {constant}, "R2F32ReduceSum"));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {}));
LiteralTestUtil::ExpectR0Near<float>(10.0f, result, error_spec_);
}
XLA_TEST_F(CustomCallTest, UsedInOtherComputations) {
auto module = CreateNewVerifiedModule();
auto b = HloComputation::Builder(TestName());
auto input = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2FromArray2D(
Array2D<float>{{1.0f, 2.0f}, {3.0f, 4.0f}})));
auto incremented = b.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1, 2, 2}), {input}, "Add1ToValues"));
auto incremented_again = b.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1, 2, 2}), {incremented}, "Add1ToValues"));
b.AddInstruction(
HloInstruction::CreateConcatenate(ShapeUtil::MakeShape(F32, {2, 2, 2}),
{incremented, incremented_again}, 0));
module->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {}));
LiteralTestUtil::ExpectR3EqualArray3D<float>(
Array3D<float>{{{2, 3}, {4, 5}}, {{3, 4}, {5, 6}}}, result);
}
XLA_TEST_F(CustomCallTest, InputAndOutputLayoutDiffer) {
if (IsMlirLoweringEnabled()) {
GTEST_SKIP() << "Appears to test an XLA current implementation detail";
}
auto module = CreateNewVerifiedModule();
auto b = HloComputation::Builder(TestName());
auto input =
b.AddInstruction(HloInstruction::CreateParameter(0, r2f32_, "p"));
b.AddInstruction(
HloInstruction::CreateCustomCall(r2f32_, {input}, "Add1ToValues"));
module->AddEntryComputation(b.Build());
ForceParameterLayout(module.get(), 0, LayoutUtil::MakeLayout({1, 0}));
ForceResultLayout(module.get(), LayoutUtil::MakeLayout({0, 1}));
Literal argument = LiteralUtil::CreateR2<float>({{1.f, 2.f}, {3.f, 4.f}});
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&argument}));
LiteralTestUtil::ExpectR2Equal<float>({{2.f, 4.f}, {3.f, 5.f}}, result);
}
XLA_TEST_F(CustomCallTest, LayoutConstrained) {
auto module = CreateNewVerifiedModule();
auto b = HloComputation::Builder(TestName());
auto input =
b.AddInstruction(HloInstruction::CreateParameter(0, r2f32_, "p"));
const Shape& r2f32_dim0_major =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0});
auto custom_call = b.AddInstruction(HloInstruction::CreateCustomCall(
r2f32_dim0_major, {input}, "Add1ToValues", {r2f32_dim0_major}));
b.AddInstruction(
custom_call->CloneWithNewOperands(r2f32_dim0_major, {custom_call}));
module->AddEntryComputation(b.Build());
ForceParameterLayout(module.get(), 0, LayoutUtil::MakeLayout({1, 0}));
ForceResultLayout(module.get(), LayoutUtil::MakeLayout({0, 1}));
Literal argument = LiteralUtil::CreateR2<float>({{1.f, 2.f}, {3.f, 4.f}});
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&argument}));
LiteralTestUtil::ExpectR2Equal<float>({{3.f, 4.f}, {5.f, 6.f}}, result);
}
XLA_TEST_F(CustomCallTest, R2Dimensions_3x4) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto input_3x4 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {3, 4}), "arg3x4"));
builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTupleShape({}), {input_3x4},
"__xla_test$$VerifyR2Dimensions",
"{rows = 3 : i32, cols = 4 : i32}",
CustomCallApiVersion::API_VERSION_TYPED_FFI));
module->AddEntryComputation(builder.Build());
Literal arg3x4 = LiteralUtil::CreateR2<int>({
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
});
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&arg3x4}));
}
XLA_TEST_F(CustomCallTest, R2Dimensions_5x2) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto input_5x2 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {5, 2}), "arg5x2"));
builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTupleShape({}), {input_5x2},
"__xla_test$$VerifyR2Dimensions",
"{rows = 5 : i32, cols = 2 : i32}",
CustomCallApiVersion::API_VERSION_TYPED_FFI));
module->AddEntryComputation(builder.Build());
Literal arg5x2 = LiteralUtil::CreateR2<int>({
{0, 0},
{0, 0},
{0, 0},
{0, 0},
{0, 0},
});
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&arg5x2}));
}
XLA_TEST_F(CustomCallTest, TupleOutput) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT %custom-call = (f32[], f32[]) custom-call(f32[] %p0, f32[] %p1), custom_call_target="F32TupleSwap", operand_layout_constraints={f32[], f32[]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Literal arg0 = LiteralUtil::CreateR0<float>(7.f);
Literal arg1 = LiteralUtil::CreateR0<float>(42.f);
Literal expected = LiteralUtil::MakeTuple({&arg1, &arg0});
TF_ASSERT_OK_AND_ASSIGN(auto result,
Execute(std::move(module), {&arg0, &arg1}));
EXPECT_EQ(result, expected);
}
XLA_TEST_F(CustomCallTest, ReportsSuccess) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateCustomCall(
r0f32_, {constant}, "R0F32Add2Succeed",
"", CustomCallApiVersion::API_VERSION_STATUS_RETURNING));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {}));
LiteralTestUtil::ExpectR0Near<float>(44.0f, result, error_spec_);
}
XLA_TEST_F(CustomCallTest, ReportsFailure) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {}), {constant}, "CustomCallFail",
"", CustomCallApiVersion::API_VERSION_STATUS_RETURNING));
module->AddEntryComputation(builder.Build());
auto status = Execute(std::move(module), {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("Failed: 42.0"));
}
XLA_TEST_F(CustomCallTest, ReportsFirstFailure) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto constant_1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant_2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto res_1 = builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {}), {constant_1}, "CustomCallFail",
"", CustomCallApiVersion::API_VERSION_STATUS_RETURNING));
auto res_2 = builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {}), {constant_2}, "CustomCallFail",
"", CustomCallApiVersion::API_VERSION_STATUS_RETURNING));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, res_1, res_2));
module->AddEntryComputation(builder.Build());
auto status = Execute(std::move(module), {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("Failed: 1.0"));
}
XLA_TEST_F(CustomCallTest, TransitiveCustomCallReportsFirstFailure) {
const char* const kModuleStr = R"(
HloModule m
sub {
p0 = f32[] parameter(0)
ROOT custom-call = f32[] custom-call(f32[] %p0), custom_call_target="CustomCallFail", api_version=API_VERSION_STATUS_RETURNING
}
ENTRY test {
c0 = f32[] constant(1.0)
c1 = f32[] constant(2.0)
call0 = f32[] call(f32[] %c0), to_apply=sub
call1 = f32[] call(f32[] %c1), to_apply=sub
ROOT sum = f32[] add(%call0, %call1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
auto status = Execute(std::move(module), {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), HasSubstr("Failed: 1.0"));
}
XLA_TEST_F(CustomCallTest, FillStatusMsgWithBackendConfigStr) {
if (IsMlirLoweringEnabled()) {
GTEST_SKIP() << "Invalid values unsupported by MLIR";
}
const char* const kModuleStr = R"(
HloModule m
ENTRY test {
c0 = f32[] constant(1.0)
ROOT dummy-result = f32[] custom-call(f32[] %c0),
custom_call_target="CustomCallFailWithBackendConfigStr",
backend_config="foo",
api_version=API_VERSION_STATUS_RETURNING_UNIFIED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
auto status = Execute(std::move(module), {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(),
HasSubstr("Fail with raw backend config str: foo"));
}
class CustomCallClientAPITest : public ClientLibraryTestBase {};
XLA_TEST_F(CustomCallClientAPITest, IllegalCustomCallTarget) {
XlaBuilder builder(TestName());
CustomCall(&builder, "$illegal", {},
ShapeUtil::MakeShape(F32, {1}));
absl::StatusOr<std::unique_ptr<GlobalData>> result =
Execute(&builder, {});
EXPECT_FALSE(result.ok());
}
namespace {
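// Typed-FFI handlers used by the tests: buffer/result aliases first, then each
// handler is defined with ffi::Ffi::Bind() and registered for the "Host"
// platform.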
using ResultBufferBase = ffi::Result<ffi::AnyBuffer>;
template <PrimitiveType dtype, size_t rank = xla::ffi::internal::kDynamicRank>
using ResultBuffer = ffi::Result<ffi::Buffer<dtype, rank>>;
template <PrimitiveType dtype>
using ResultBufferR0 = ResultBuffer<dtype, 0>;
using R0F32Buffer = typename ffi::BufferR0<PrimitiveType::F32>;
using F32Buffer = typename ffi::Buffer<PrimitiveType::F32>;
using R0F32ResultBuffer = ResultBufferR0<PrimitiveType::F32>;
using F32ResultBuffer = ResultBuffer<PrimitiveType::F32>;
using AnyBuffer = ffi::AnyBuffer;
static absl::Status AlwaysSucceed(ResultBufferBase) { return absl::OkStatus(); }
XLA_FFI_DEFINE_HANDLER(kAlwaysSucceed, AlwaysSucceed,
ffi::Ffi::Bind().Ret<AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$always_succeed",
"Host", kAlwaysSucceed);
static absl::Status AlwaysFail(ResultBufferBase, int32_t value) {
return absl::InternalError(absl::StrCat("Failed: ", value));
}
XLA_FFI_DEFINE_HANDLER(kAlwaysFail, AlwaysFail,
ffi::Ffi::Bind()
.Ret<AnyBuffer>()
.Attr<int32_t>("value")
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$always_fail", "Host",
kAlwaysFail);
static absl::Status FfiR0F32Add2(R0F32Buffer in, R0F32ResultBuffer out) {
auto in_data = in.data.base();
auto out_data = out->data.base();
*out_data = *in_data + 2.0f;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiR0F32Add2, FfiR0F32Add2,
ffi::Ffi::Bind()
.Arg<R0F32Buffer>()
.Ret<R0F32Buffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiR0F32Add2",
"Host", kFfiR0F32Add2);
template <PrimitiveType dtype>
static absl::Status R0FAdd2(AnyBuffer in, ResultBufferBase out) {
using NativeType =
typename ::xla::primitive_util::PrimitiveTypeToNative<dtype>::type;
auto in_data = reinterpret_cast<const NativeType*>(in.data.opaque());
auto out_data = reinterpret_cast<NativeType*>(out->data.opaque());
*out_data = *in_data + 2.0f;
return absl::OkStatus();
}
static absl::Status FfiR0FAdd2BufferBase(AnyBuffer in, ResultBufferBase out) {
if (in.dtype != out->dtype) {
return absl::InternalError("Input and output dtypes mismatch");
}
switch (in.dtype) {
case PrimitiveType::F32:
return R0FAdd2<PrimitiveType::F32>(in, out);
case PrimitiveType::F64:
return R0FAdd2<PrimitiveType::F64>(in, out);
default:
return absl::InternalError("Incorrect type");
}
}
XLA_FFI_DEFINE_HANDLER(kFfiR0FAdd2BufferBase, FfiR0FAdd2BufferBase,
ffi::Ffi::Bind()
.Arg<AnyBuffer>()
.Ret<AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"__xla_test$$FfiR0FAdd2BufferBase", "Host",
kFfiR0FAdd2BufferBase);
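// The next two handlers add a constant supplied as an FFI attribute: once by
// value ("n") and once through an ffi::Pointer<float> attribute.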
static absl::Status FfiR0F32AddN(R0F32Buffer in, R0F32ResultBuffer out,
float n) {
auto in_data = in.data.base();
auto out_data = out->data.base();
*out_data = *in_data + n;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiR0F32AddN, FfiR0F32AddN,
ffi::Ffi::Bind()
.Arg<R0F32Buffer>()
.Ret<R0F32Buffer>()
.Attr<float>("n"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiR0F32AddN",
"Host", kFfiR0F32AddN);
static absl::Status FfiR0F32AddNPointer(R0F32Buffer in, R0F32ResultBuffer out,
float* n) {
auto in_data = in.data.base();
auto out_data = out->data.base();
*out_data = *in_data + *n;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiR0F32AddNPointer, FfiR0F32AddNPointer,
ffi::Ffi::Bind()
.Arg<R0F32Buffer>()
.Ret<R0F32Buffer>()
.Attr<ffi::Pointer<float>>("n"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiR0F32AddNPointer",
"Host", kFfiR0F32AddNPointer);
static absl::Status FfiF32ReduceSum(F32Buffer in, R0F32ResultBuffer out) {
auto in_data = in.data.base();
auto out_data = out->data.base();
auto size = 1;
for (auto dim : in.dimensions) {
size *= dim;
}
*out_data = absl::c_accumulate(absl::MakeSpan(in_data, size), 0.0f);
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiF32ReduceSum, FfiF32ReduceSum,
ffi::Ffi::Bind()
.Arg<F32Buffer>()
.Ret<R0F32Buffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiF32ReduceSum",
"Host", kFfiF32ReduceSum);
static absl::Status FfiF32Accumulate(F32Buffer in, InitMethod init,
R0F32ResultBuffer out,
BinaryOp binary_op) {
auto in_data = in.data.base();
auto out_data = out->data.base();
float init_value = (init == InitMethod::kZero) ? 0.0f : 1.0f;
auto size = absl::c_accumulate(in.dimensions, 1, std::multiplies<int>());
switch (binary_op) {
case BinaryOp::kAdd:
*out_data = absl::c_accumulate(absl::MakeSpan(in_data, size), init_value);
break;
case BinaryOp::kMul:
*out_data = absl::c_accumulate(absl::MakeSpan(in_data, size), init_value,
std::multiplies<float>());
break;
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiF32Accumulate, FfiF32Accumulate,
ffi::Ffi::Bind()
.Arg<F32Buffer>()
.Attr<InitMethod>("init")
.Ret<R0F32Buffer>()
.Attr<BinaryOp>("binary_op"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiF32Accumulate",
"Host", kFfiF32Accumulate);
static absl::Status FfiF32Add1ToValues(F32Buffer in, F32ResultBuffer out) {
auto in_data = in.data.base();
auto out_data = out->data.base();
const auto in_size =
absl::c_accumulate(in.dimensions, 1, std::multiplies<int>());
const auto out_size =
absl::c_accumulate(out->dimensions, 1, std::multiplies<int>());
if (in_size != out_size) {
return absl::InternalError("Input and output sizes mismatch");
}
std::transform(in_data, in_data + in_size, out_data,
[](float x) { return x + 1; });
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiF32Add1ToValues, FfiF32Add1ToValues,
ffi::Ffi::Bind()
.Arg<F32Buffer>()
.Ret<F32Buffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiF32Add1ToValues",
"Host", kFfiF32Add1ToValues);
static absl::Status FfiF32TupleSwap(R0F32Buffer in0, R0F32Buffer in1,
R0F32ResultBuffer out0,
R0F32ResultBuffer out1) {
auto in_data0 = in0.data.base();
auto in_data1 = in1.data.base();
auto out_data0 = out0->data.base();
auto out_data1 = out1->data.base();
*out_data0 = *in_data1;
*out_data1 = *in_data0;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiF32TupleSwap, FfiF32TupleSwap,
ffi::Ffi::Bind()
.Arg<R0F32Buffer>()
.Arg<R0F32Buffer>()
.Ret<R0F32Buffer>()
.Ret<R0F32Buffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiF32TupleSwap",
"Host", kFfiF32TupleSwap);
static absl::Status FfiTupleRotate(R0F32Buffer in0, R0F32Buffer in1,
R0F32Buffer in2, R0F32Buffer in3,
R0F32ResultBuffer out0,
R0F32ResultBuffer out1,
R0F32ResultBuffer out2,
R0F32ResultBuffer out3) {
auto in_data0 = in0.data.base();
auto in_data1 = in1.data.base();
auto in_data2 = in2.data.base();
auto in_data3 = in3.data.base();
auto out_data0 = out0->data.base();
auto out_data1 = out1->data.base();
auto out_data2 = out2->data.base();
auto out_data3 = out3->data.base();
*out_data0 = *in_data1;
*out_data1 = *in_data2;
*out_data2 = *in_data3;
*out_data3 = *in_data0;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiTupleRotate, FfiTupleRotate,
ffi::Ffi::Bind()
.Arg<R0F32Buffer>()
.Arg<R0F32Buffer>()
.Arg<R0F32Buffer>()
.Arg<R0F32Buffer>()
.Ret<R0F32Buffer>()
.Ret<R0F32Buffer>()
.Ret<R0F32Buffer>()
.Ret<R0F32Buffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$FfiTupleRotate",
"Host", kFfiTupleRotate);
static absl::Status VerifyR2Dimensions(ffi::AnyBuffer in, int32_t rows,
int32_t cols) {
std::string message;
if (in.dimensions.size() != 2) {
message += absl::StrFormat("dimensions.size() != 2 because %d != 2\n",
in.dimensions.size());
}
if (in.dimensions.front() != rows) {
message += absl::StrFormat("dimensions.front() != rows because %d != %d\n",
in.dimensions.front(), rows);
}
if (in.dimensions.back() != cols) {
message += absl::StrFormat("dimensions.back() != cols because %d != %d\n",
in.dimensions.back(), cols);
}
if (!message.empty()) {
return absl::Status(absl::StatusCode::kFailedPrecondition,
std::move(message));
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kVerifyR2Dimensions, VerifyR2Dimensions,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
.Attr<int32_t>("rows")
.Attr<int32_t>("cols"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$VerifyR2Dimensions",
"Host", kVerifyR2Dimensions);
static absl::Status SwapTupleAnyBuffersToS16U32(ffi::AnyBuffer in_1,
ffi::AnyBuffer in_2,
ResultBufferR0<S16> out_1,
ResultBufferR0<U32> out_2) {
auto tuple_elem_1 = reinterpret_cast<uint32_t*>(in_1.data.opaque());
auto tuple_elem_2 = reinterpret_cast<int16_t*>(in_2.data.opaque());
out_1->data.base()[0] = tuple_elem_2[0];
out_2->data.base()[0] = tuple_elem_1[0];
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kSwapTupleAnyBuffersToS16U32,
SwapTupleAnyBuffersToS16U32,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::BufferR0<S16>>()
.Ret<ffi::BufferR0<U32>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"__xla_test$$SwapTupleAnyBuffersToS16U32", "Host",
kSwapTupleAnyBuffersToS16U32);
static absl::Status SwapTupleU32S16ToS16U32(ffi::BufferR0<U32> in_1,
ffi::BufferR0<S16> in_2,
ResultBufferR0<S16> out_1,
ResultBufferR0<U32> out_2) {
auto tuple_elem_1 = in_1.data.base();
auto tuple_elem_2 = in_2.data.base();
out_1->data.base()[0] = tuple_elem_2[0];
out_2->data.base()[0] = tuple_elem_1[0];
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kSwapTupleU32S16ToS16U32, SwapTupleU32S16ToS16U32,
(ffi::Ffi::Bind()
.Arg<ffi::BufferR0<U32>>()
.Arg<ffi::BufferR0<S16>>()
.Ret<ffi::BufferR0<S16>>()
.Ret<ffi::BufferR0<U32>>()));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"__xla_test$$SwapTupleU32S16ToS16U32", "Host",
kSwapTupleU32S16ToS16U32);
static absl::Status HandleTupleDifferentRanks(ffi::BufferR0<U32> x_1,
ffi::BufferR1<S16> x_2,
ffi::BufferR2<F32> y_1,
ffi::BufferR3<F32> y_2,
ResultBuffer<S32, 1> x_out,
ResultBuffer<F32, 3> y_out) {
if (x_2.data.ElementCount() != x_out->data.ElementCount()) {
return absl::FailedPreconditionError(
"`x_2` parameter should have the same number of elements as `x_out`");
}
if (y_1.dimensions != y_out->dimensions.subspan(1) ||
y_2.dimensions.front() + 1 != y_out->dimensions.front()) {
return absl::FailedPreconditionError(
"Cannot concatenate `y_1` and `y_2` due to dimensions mismatch. "
"`y_2` dimensions should represent a batched `y_1`");
}
const auto factor = x_1.data.base()[0];
for (int i = 0; i < x_2.data.ElementCount(); ++i) {
x_out->data.base()[i] = factor * x_2.data.base()[i];
}
auto last_pos =
std::copy_n(y_2.data.base(), y_2.data.ElementCount(), y_out->data.base());
std::copy_n(y_1.data.base(), y_1.data.ElementCount(), last_pos);
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kHandleTupleDifferentRanks, HandleTupleDifferentRanks,
ffi::Ffi::Bind()
.Arg<ffi::BufferR0<U32>>() |
1,173 | cpp | tensorflow/tensorflow | legalize_tf | tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc | tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V2_LEGALIZE_TF_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V2_LEGALIZE_TF_H_
#include <memory>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/variant.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/device_type.pb.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/pjrt/compile_options.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/tpu/kernels/tpu_compile.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
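// Legalizes a TF computation (serialized MLIR or a function) into an XLA HLO
// compilation result for the given device type.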
absl::StatusOr<tensorflow::XlaCompilationResult> LegalizeMlirToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client);
};
};
};
#endif
#include <climits>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <utility>
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tosa/transforms/legalize_common.h"
#include "tensorflow/compiler/mlir/tosa/transforms/legalize_utils.h"
#include "tensorflow/compiler/mlir/tosa/transforms/passes.h"
#define PASS_NAME "tosa-legalize-tf"
#define DEBUG_TYPE PASS_NAME
namespace mlir {
namespace tosa {
namespace {
#define GEN_PASS_DEF_TOSALEGALIZETFPASS
#include "tensorflow/compiler/mlir/tosa/transforms/passes.h.inc"
class LegalizeTF : public impl::TosaLegalizeTFPassBase<LegalizeTF> {
public:
explicit LegalizeTF() = default;
void runOnOperation() override;
};
#include "tensorflow/compiler/mlir/tosa/transforms/tf_legalize_patterns.inc"
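// DECL_CONVERT_OP declares a RewritePattern, ConvertTF<op>Op, for each TF op
// that needs a hand-written lowering to TOSA; the definitions follow the
// declarations below.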
#define DECL_CONVERT_OP(tf_op) \
struct ConvertTF##tf_op##Op : public RewritePattern { \
explicit ConvertTF##tf_op##Op(MLIRContext* context) \
: RewritePattern(TF::tf_op##Op::getOperationName(), 1, context) {} \
LogicalResult matchAndRewrite(Operation* op, \
PatternRewriter& rewriter) const override; \
}
DECL_CONVERT_OP(MatMul);
DECL_CONVERT_OP(Relu);
DECL_CONVERT_OP(Relu6);
DECL_CONVERT_OP(Equal);
DECL_CONVERT_OP(NotEqual);
DECL_CONVERT_OP(Greater);
DECL_CONVERT_OP(GreaterEqual);
DECL_CONVERT_OP(Add);
DECL_CONVERT_OP(AddV2);
DECL_CONVERT_OP(AddN);
DECL_CONVERT_OP(Sub);
DECL_CONVERT_OP(Mul);
DECL_CONVERT_OP(Square);
DECL_CONVERT_OP(SquaredDifference);
DECL_CONVERT_OP(Sign);
DECL_CONVERT_OP(Round);
DECL_CONVERT_OP(FloorDiv);
DECL_CONVERT_OP(FloorMod);
DECL_CONVERT_OP(Assert);
DECL_CONVERT_OP(Maximum);
DECL_CONVERT_OP(Minimum);
DECL_CONVERT_OP(RealDiv);
DECL_CONVERT_OP(ArgMax);
DECL_CONVERT_OP(AvgPool);
DECL_CONVERT_OP(MaxPool);
DECL_CONVERT_OP(ConcatV2);
DECL_CONVERT_OP(Reshape);
DECL_CONVERT_OP(Rank);
DECL_CONVERT_OP(Shape);
DECL_CONVERT_OP(ExpandDims);
DECL_CONVERT_OP(Squeeze);
DECL_CONVERT_OP(Fill);
DECL_CONVERT_OP(Conv2D);
DECL_CONVERT_OP(Conv3D);
DECL_CONVERT_OP(DepthwiseConv2dNative);
DECL_CONVERT_OP(Conv2DBackpropInput);
DECL_CONVERT_OP(Elu);
DECL_CONVERT_OP(Softmax);
DECL_CONVERT_OP(LogSoftmax);
DECL_CONVERT_OP(All);
DECL_CONVERT_OP(Any);
DECL_CONVERT_OP(Max);
DECL_CONVERT_OP(Min);
DECL_CONVERT_OP(Mean);
DECL_CONVERT_OP(Prod);
DECL_CONVERT_OP(Sum);
DECL_CONVERT_OP(FusedBatchNorm);
DECL_CONVERT_OP(FusedBatchNormV3);
DECL_CONVERT_OP(BiasAdd);
DECL_CONVERT_OP(Split);
DECL_CONVERT_OP(SplitV);
DECL_CONVERT_OP(Pack);
DECL_CONVERT_OP(Unpack);
DECL_CONVERT_OP(Transpose);
DECL_CONVERT_OP(Tile);
DECL_CONVERT_OP(Slice);
DECL_CONVERT_OP(StridedSlice);
DECL_CONVERT_OP(Less);
DECL_CONVERT_OP(LessEqual);
DECL_CONVERT_OP(Pad);
DECL_CONVERT_OP(MirrorPad);
DECL_CONVERT_OP(ResizeBilinear);
DECL_CONVERT_OP(ResizeNearestNeighbor);
DECL_CONVERT_OP(Gather);
DECL_CONVERT_OP(GatherV2);
DECL_CONVERT_OP(GatherNd);
DECL_CONVERT_OP(SelectV2);
DECL_CONVERT_OP(SpaceToDepth);
DECL_CONVERT_OP(DepthToSpace);
DECL_CONVERT_OP(Sin);
DECL_CONVERT_OP(Cos);
DECL_CONVERT_OP(SpaceToBatchND);
DECL_CONVERT_OP(BatchToSpaceND);
DECL_CONVERT_OP(ZerosLike);
DECL_CONVERT_OP(Sigmoid);
DECL_CONVERT_OP(Tanh);
DECL_CONVERT_OP(LeakyRelu);
DECL_CONVERT_OP(Neg);
DECL_CONVERT_OP(StopGradient);
DECL_CONVERT_OP(ReverseV2);
DECL_CONVERT_OP(FakeQuantWithMinMaxArgs);
DECL_CONVERT_OP(FakeQuantWithMinMaxVars);
DECL_CONVERT_OP(LeftShift);
DECL_CONVERT_OP(RightShift);
DECL_CONVERT_OP(OneHot);
DECL_CONVERT_OP(BatchMatMulV2);
DECL_CONVERT_OP(BroadcastTo);
#undef DECL_CONVERT_OP
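// Hand-written lowerings. Relu and Relu6 become tosa::ClampOp with the
// appropriate integer and floating-point bounds.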
LogicalResult ConvertTFReluOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_relu_op = cast<TF::ReluOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_relu_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::ClampOp>(
rewriter, op, output_type, tf_relu_op.getFeatures(),
rewriter.getI64IntegerAttr(0),
rewriter.getI64IntegerAttr(std::numeric_limits<int32_t>::max()),
rewriter.getF32FloatAttr(0.0f),
rewriter.getF32FloatAttr(std::numeric_limits<float>::max()));
return success();
}
LogicalResult ConvertTFRelu6Op::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_relu6_op = cast<TF::Relu6Op>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_relu6_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::ClampOp>(
rewriter, op, output_type, tf_relu6_op.getFeatures(),
rewriter.getI64IntegerAttr(0), rewriter.getI64IntegerAttr(6),
rewriter.getF32FloatAttr(0.0f), rewriter.getF32FloatAttr(6.0f));
return success();
}
LogicalResult ConvertTFEqualOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_equal_op = cast<TF::EqualOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_equal_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::EqualOp>(
rewriter, op, output_type, tf_equal_op.getX(), tf_equal_op.getY());
return success();
}
LogicalResult ConvertTFNotEqualOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_not_equal_op = cast<TF::NotEqualOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_not_equal_op.getResult().getType());
if (!output_type) return failure();
auto op1_equal_in = CreateOpAndInfer<tosa::EqualOp>(
rewriter, op->getLoc(), output_type, tf_not_equal_op.getX(),
tf_not_equal_op.getY());
auto op2_not_op1 = CreateOpAndInfer<tosa::LogicalNotOp>(
rewriter, op->getLoc(), output_type, op1_equal_in.getResult());
rewriter.replaceOp(op, {op2_not_op1.getResult()});
return success();
}
LogicalResult ConvertTFGreaterOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_greater_op = cast<TF::GreaterOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_greater_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::GreaterOp>(
rewriter, op, output_type, tf_greater_op.getX(), tf_greater_op.getY());
return success();
}
LogicalResult ConvertTFGreaterEqualOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_greater_equal_op = cast<TF::GreaterEqualOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_greater_equal_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::GreaterEqualOp>(rewriter, op, output_type,
tf_greater_equal_op.getX(),
tf_greater_equal_op.getY());
return success();
}
LogicalResult ConvertTFSignOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_sign_op = cast<TF::SignOp>(op);
RankedTensorType output_type =
mlir::cast<RankedTensorType>(tf_sign_op.getResult().getType());
std::optional<Value> result =
convertSignOp(rewriter, op, tf_sign_op.getX(), output_type);
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFSinOp::matchAndRewrite(Operation* op,
PatternRewriter& rewriter) const {
auto tf_sin_op = cast<TF::SinOp>(op);
ShapedType output_type =
mlir::cast<ShapedType>(tf_sin_op.getResult().getType());
std::optional<Value> result =
convertSinOp(rewriter, op, tf_sin_op.getX(), output_type);
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
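// There is no direct cosine lowering here: for floating-point inputs cos(x) is
// rewritten as sin(x + pi/2) and legalized through the Sin path.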
LogicalResult ConvertTFCosOp::matchAndRewrite(Operation* op,
PatternRewriter& rewriter) const {
auto tf_cos_op = cast<TF::CosOp>(op);
Value input = tf_cos_op.getX();
RankedTensorType input_ty = dyn_cast<RankedTensorType>(input.getType());
ShapedType output_ty = dyn_cast<ShapedType>(tf_cos_op.getResult().getType());
if (!input_ty || !output_ty) return failure();
bool input_is_fp = mlir::isa<mlir::FloatType>(input_ty.getElementType());
bool output_is_fp = mlir::isa<mlir::FloatType>(output_ty.getElementType());
if (!input_is_fp || !output_is_fp) {
return rewriter.notifyMatchFailure(
op, "ConvertTFCosOp: input/result must be fp.");
}
auto fp_scalar_ty = RankedTensorType::get({}, rewriter.getF32Type());
auto pi_2 = rewriter.create<ConstOp>(
op->getLoc(), fp_scalar_ty,
DenseElementsAttr::get(fp_scalar_ty, {static_cast<float>(M_PI_2)}));
auto offset = rewriter.create<AddOp>(op->getLoc(), input_ty, input, pi_2);
CreateReplaceOpAndInfer<TF::SinOp>(rewriter, op, output_ty, offset);
return success();
}
LogicalResult ConvertTFAddOp::matchAndRewrite(Operation* op,
PatternRewriter& rewriter) const {
auto tf_add_op = cast<TF::AddOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_add_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::AddOp>(rewriter, op, output_type,
tf_add_op.getX(), tf_add_op.getY());
return success();
}
LogicalResult ConvertTFAddV2Op::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_addv2_op = cast<TF::AddV2Op>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_addv2_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::AddOp>(rewriter, op, output_type,
tf_addv2_op.getX(), tf_addv2_op.getY());
return success();
}
LogicalResult ConvertTFAddNOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_addn_op = cast<TF::AddNOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_addn_op.getResult().getType());
if (!output_type) return failure();
SmallVector<Value> inputs(tf_addn_op.getInputs());
assert(inputs.size() >= 2);
auto newOp = CreateOpAndInfer<tosa::AddOp>(rewriter, op->getLoc(),
output_type, inputs[0], inputs[1]);
for (int i = 2; i < inputs.size(); i++) {
newOp = CreateOpAndInfer<tosa::AddOp>(rewriter, op->getLoc(), output_type,
inputs[i], newOp.getResult());
}
rewriter.replaceOp(op, {newOp.getResult()});
return success();
}
LogicalResult ConvertTFSubOp::matchAndRewrite(Operation* op,
PatternRewriter& rewriter) const {
auto tf_sub_op = cast<TF::SubOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_sub_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::SubOp>(rewriter, op, output_type,
tf_sub_op.getX(), tf_sub_op.getY());
return success();
}
LogicalResult ConvertTFMulOp::matchAndRewrite(Operation* op,
PatternRewriter& rewriter) const {
auto tf_mul_op = cast<TF::MulOp>(op);
std::optional<Value> result = convertMultiplyOp(
rewriter, op, tf_mul_op.getResult(), tf_mul_op.getX(), tf_mul_op.getY());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFSquareOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_square_op = cast<TF::SquareOp>(op);
std::optional<Value> result =
convertMultiplyOp(rewriter, op, tf_square_op.getResult(),
tf_square_op.getX(), tf_square_op.getX());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFSquaredDifferenceOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_squared_op = cast<TF::SquaredDifferenceOp>(op);
std::optional<Value> result =
convertSquaredDifferenceOp(rewriter, op, tf_squared_op.getResult(),
tf_squared_op.getX(), tf_squared_op.getY());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFRoundOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_round_op = cast<TF::RoundOp>(op);
TensorType input_type = dyn_cast<TensorType>(tf_round_op.getX().getType());
if (!input_type) {
return rewriter.notifyMatchFailure(op, "input not tensor type");
}
if (mlir::isa<FloatType>(input_type.getElementType())) {
std::optional<Value> result = convertRoundOp(
rewriter, op, tf_round_op.getResult(), tf_round_op.getX());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
} else {
tf_round_op.replaceAllUsesWith(tf_round_op.getX());
return success();
}
}
LogicalResult ConvertTFFloorDivOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_floordiv_op = cast<TF::FloorDivOp>(op);
std::optional<Value> result =
convertFloorDivOp(rewriter, op, tf_floordiv_op.getResult(),
tf_floordiv_op.getX(), tf_floordiv_op.getY());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFFloorModOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_floormod_op = cast<TF::FloorModOp>(op);
std::optional<Value> result =
convertFloorModOp(rewriter, op, tf_floormod_op.getResult(),
tf_floormod_op.getX(), tf_floormod_op.getY());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFAssertOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
  // tf.Assert has no TOSA counterpart; the op is simply removed.
  op->dropAllReferences();
op->erase();
return success();
}
LogicalResult ConvertTFMaximumOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_maximum_op = cast<TF::MaximumOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_maximum_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::MaximumOp>(
rewriter, op, output_type, tf_maximum_op.getX(), tf_maximum_op.getY());
return success();
}
LogicalResult ConvertTFMinimumOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_minimum_op = cast<TF::MinimumOp>(op);
TensorType output_type =
dyn_cast<TensorType>(tf_minimum_op.getResult().getType());
if (!output_type) return failure();
CreateReplaceOpAndInfer<tosa::MinimumOp>(
rewriter, op, output_type, tf_minimum_op.getX(), tf_minimum_op.getY());
return success();
}
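// tf.RealDiv lowering: integer element types map directly to tosa.int_div,
// while floating-point division is expressed as x * reciprocal(y) using
// tosa.reciprocal followed by tosa.mul (with a zero shift).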
LogicalResult ConvertTFRealDivOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_div_op = cast<TF::RealDivOp>(op);
TensorType y_type = dyn_cast<TensorType>(tf_div_op.getY().getType());
TensorType output_type =
dyn_cast<TensorType>(tf_div_op.getResult().getType());
if (!output_type || !y_type) return failure();
Type element_type = output_type.getElementType();
if (mlir::isa<IntegerType>(element_type)) {
CreateReplaceOpAndInfer<tosa::IntDivOp>(rewriter, op, output_type,
tf_div_op.getX(), tf_div_op.getY());
return success();
}
auto reciprocal_op = CreateOpAndInfer<tosa::ReciprocalOp>(
rewriter, op->getLoc(), tf_div_op.getY().getType(), tf_div_op.getY());
auto mul_op = CreateOpAndInfer<tosa::MulOp>(
rewriter, op->getLoc(), output_type, tf_div_op.getX(),
reciprocal_op.getResult(), rewriter.getI8IntegerAttr(0));
rewriter.replaceOp(op, {mul_op.getResult()});
return success();
}
LogicalResult ConvertTFArgMaxOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_argmax_op = cast<TF::ArgMaxOp>(op);
TensorType input_type =
dyn_cast<TensorType>(tf_argmax_op.getInput().getType());
TensorType output_type =
dyn_cast<TensorType>(tf_argmax_op.getResult().getType());
if (!output_type || !input_type) return failure();
ElementsAttr axis_elems;
if (!matchPattern(tf_argmax_op.getDimension(), m_Constant(&axis_elems)))
return failure();
int32_t axis = axis_elems.getValues<IntegerAttr>()[0].getInt();
if (axis < 0) {
axis += input_type.getRank();
}
if (axis < 0 || axis >= input_type.getRank()) {
return rewriter.notifyMatchFailure(op, "invalid axis value");
}
IntegerAttr axis_attr = rewriter.getI32IntegerAttr(axis);
CreateReplaceOpAndInfer<tosa::ArgMaxOp>(rewriter, op, output_type,
tf_argmax_op.getInput(), axis_attr);
return success();
}
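// For the NHWC pooling conversions below, ksize/strides indices 1 and 2 hold
// the height and width values, and the SAME/VALID padding string is turned
// into explicit TOSA pad values via getPaddingValuesFromPadType.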
LogicalResult ConvertTFAvgPoolOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_avgpool_op = cast<TF::AvgPoolOp>(op);
RankedTensorType input_type =
dyn_cast<RankedTensorType>(tf_avgpool_op.getValue().getType());
RankedTensorType output_type =
dyn_cast<RankedTensorType>(tf_avgpool_op.getResult().getType());
if (!input_type || !output_type) return failure();
auto tmpAttr = tf_avgpool_op.getDataFormatAttr();
if (tmpAttr && tmpAttr.getValue().str() != "NHWC") return failure();
DenseI64ArrayAttr pad;
DenseI64ArrayAttr stride;
DenseI64ArrayAttr kernel;
{
auto tmpAttr = tf_avgpool_op.getStrides();
if (!tmpAttr) {
stride = rewriter.getDenseI64ArrayAttr({1, 1});
} else {
int64_t stride_h = dyn_cast<IntegerAttr>(tmpAttr[1]).getInt();
int64_t stride_w = dyn_cast<IntegerAttr>(tmpAttr[2]).getInt();
stride = rewriter.getDenseI64ArrayAttr({stride_h, stride_w});
}
}
{
auto tmpAttr = tf_avgpool_op.getKsize();
if (!tmpAttr) {
kernel = rewriter.getDenseI64ArrayAttr({1, 1});
} else {
int64_t kernel_h = dyn_cast<IntegerAttr>(tmpAttr[1]).getInt();
int64_t kernel_w = dyn_cast<IntegerAttr>(tmpAttr[2]).getInt();
kernel = rewriter.getDenseI64ArrayAttr({kernel_h, kernel_w});
}
}
{
tensorflow::Padding tf_pad;
if (!GetPaddingFromString(tf_avgpool_op.getPadding().str(), &tf_pad).ok())
return failure();
    // Pooling has no dilation attribute; {1, 1} is used only for the padding
    // computation below.
    DenseI64ArrayAttr dilation = rewriter.getDenseI64ArrayAttr({1, 1});
SmallVector<int64_t, 4> i64array;
for (auto& elem : tf_avgpool_op.getKsize()) {
int64_t value = dyn_cast<IntegerAttr>(elem).getInt();
i64array.emplace_back(value);
}
RankedTensorType filter_type = tensorflow::GetTypeFromTFTensorShape(
llvm::ArrayRef(i64array), rewriter.getIntegerType(64));
    if (!getPaddingValuesFromPadType(tf_pad, tensorflow::FORMAT_NHWC, 1,
                                     input_type, filter_type, stride, dilation,
                                     rewriter, pad))
      return failure();
}
auto acc_attr = mlir::TypeAttr::get(rewriter.getF32Type());
CreateReplaceOpAndInfer<tosa::AvgPool2dOp>(rewriter, op, output_type,
tf_avgpool_op.getValue(), kernel,
stride, pad, acc_attr);
return success();
}
LogicalResult ConvertTFMaxPoolOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_maxpool_op = cast<TF::MaxPoolOp>(op);
RankedTensorType input_type =
dyn_cast<RankedTensorType>(tf_maxpool_op.getInput().getType());
RankedTensorType output_type =
dyn_cast<RankedTensorType>(tf_maxpool_op.getResult().getType());
if (!input_type || !output_type) return failure();
auto tmpAttr = tf_maxpool_op.getDataFormatAttr();
if (tmpAttr && tmpAttr.getValue().str() != "NHWC") return failure();
DenseI64ArrayAttr pad;
DenseI64ArrayAttr stride;
DenseI64ArrayAttr kernel;
{
auto tmpAttr = tf_maxpool_op.getStrides();
if (!tmpAttr) {
stride = rewriter.getDenseI64ArrayAttr({1, 1});
} else {
int64_t stride_h = dyn_cast<IntegerAttr>(tmpAttr[1]).getInt();
int64_t stride_w = dyn_cast<IntegerAttr>(tmpAttr[2]).getInt();
stride = rewriter.getDenseI64ArrayAttr({stride_h, stride_w});
}
}
{
auto tmpAttr = tf_maxpool_op.getKsize();
if (!tmpAttr) {
kernel = rewriter.getDenseI64ArrayAttr({1, 1});
} else {
int64_t kernel_h = dyn_cast<IntegerAttr>(tmpAttr[1]).getInt();
int64_t kernel_w = dyn_cast<IntegerAttr>(tmpAttr[2]).getInt();
kernel = rewriter.getDenseI64ArrayAttr({kernel_h, kernel_w});
}
}
{
tensorflow::Padding tf_pad;
if (!GetPaddingFromString(tf_maxpool_op.getPadding().str(), &tf_pad).ok())
return failure();
DenseI64ArrayAttr dilation = rewriter.getDenseI64ArrayAttr({1, 1});
SmallVector<int64_t, 4> i64array;
for (auto& elem : tf_maxpool_op.getKsize()) {
int64_t value = dyn_cast<IntegerAttr>(elem).getInt();
i64array.emplace_back(value);
}
RankedTensorType filter_type = tensorflow::GetTypeFromTFTensorShape(
llvm::ArrayRef(i64array), rewriter.getIntegerType(64));
    if (!getPaddingValuesFromPadType(tf_pad, tensorflow::FORMAT_NHWC, 1,
                                     input_type, filter_type, stride, dilation,
                                     rewriter, pad))
      return failure();
}
CreateReplaceOpAndInfer<tosa::MaxPool2dOp>(
rewriter, op, output_type, tf_maxpool_op.getInput(), kernel, stride, pad);
return success();
}
LogicalResult ConvertTFConcatV2Op::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_concatv2_op = cast<TF::ConcatV2Op>(op);
auto result_type =
mlir::cast<ShapedType>(tf_concatv2_op.getResult().getType());
SmallVector<Value> values(tf_concatv2_op.getValues());
ElementsAttr axis_elems;
if (!matchPattern(tf_concatv2_op.getAxis(), m_Constant(&axis_elems)))
return failure();
int32_t axis = axis_elems.getValues<IntegerAttr>()[0].getInt();
std::optional<Value> result =
convertConcatV2Op(rewriter, op, result_type, values, axis);
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFReshapeOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_reshape_op = cast<TF::ReshapeOp>(op);
RankedTensorType output_type =
dyn_cast<RankedTensorType>(tf_reshape_op.getResult().getType());
if (!output_type) return failure();
SmallVector<int64_t> shape_vals;
for (int i = 0; i < output_type.getShape().size(); i++) {
shape_vals.push_back(output_type.getShape()[i]);
}
DenseI64ArrayAttr shape_attr = rewriter.getDenseI64ArrayAttr(shape_vals);
CreateReplaceOpAndInfer<tosa::ReshapeOp>(
rewriter, op, output_type, tf_reshape_op.getTensor(), shape_attr);
return success();
}
LogicalResult ConvertTFRankOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_rank_op = cast<TF::RankOp>(op);
RankedTensorType input_type =
dyn_cast<RankedTensorType>(tf_rank_op.getInput().getType());
if (!input_type) return failure();
int32_t rank = input_type.getRank();
RankedTensorType rank_type =
tensorflow::GetTypeFromTFTensorShape({1}, rewriter.getIntegerType(32));
auto rank_attr = DenseI32ArrayAttr::get(rewriter.getContext(), {rank});
auto rank_const = CreateOpAndInfer<tosa::ConstOp>(
rewriter, op->getLoc(), rank_type, cast<mlir::ElementsAttr>(rank_attr));
rewriter.replaceOp(op, {rank_const.getResult()});
return success();
}
LogicalResult ConvertTFShapeOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_shape_op = cast<TF::ShapeOp>(op);
RankedTensorType output_type =
dyn_cast<RankedTensorType>(tf_shape_op.getResult().getType());
if (!output_type) return failure();
RankedTensorType input_type =
dyn_cast<RankedTensorType>(tf_shape_op.getInput().getType());
if (!input_type) return failure();
auto input_shape = input_type.getShape();
SmallVector<int32_t> shape_arr;
for (int i = 0; i < input_shape.size(); i++) {
shape_arr.emplace_back(input_shape[i]);
}
RankedTensorType shape_type = tensorflow::GetTypeFromTFTensorShape(
{static_cast<int32_t>(shape_arr.size())}, rewriter.getIntegerType(32));
auto shape_attr =
DenseI32ArrayAttr::get(rewriter.getContext(), llvm::ArrayRef(shape_arr));
auto shape_const = CreateOpAndInfer<tosa::ConstOp>(
rewriter, op->getLoc(), shape_type, cast<mlir::ElementsAttr>(shape_attr));
rewriter.replaceOp(op, {shape_const.getResult()});
return success();
}
LogicalResult ConvertTFExpandDimsOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_expanddims_op = cast<TF::ExpandDimsOp>(op);
std::optional<Value> result = convertExpandDimsOp(
rewriter, op, tf_expanddims_op.getResult(), tf_expanddims_op.getInput(),
tf_expanddims_op.getDim());
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFSqueezeOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_squeeze_op = cast<TF::SqueezeOp>(op);
auto squeeze_dims_attr = tf_squeeze_op.getSqueezeDimsAttr();
SmallVector<int32_t> squeeze_dims;
for (auto& squeeze_dim : squeeze_dims_attr) {
squeeze_dims.emplace_back(dyn_cast<IntegerAttr>(squeeze_dim).getInt());
}
std::optional<Value> result =
convertSqueezeOp(rewriter, op, tf_squeeze_op.getResult(),
tf_squeeze_op.getInput(), squeeze_dims);
if (!result) return failure();
rewriter.replaceOp(op, {result.value()});
return success();
}
LogicalResult ConvertTFFillOp::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
auto tf_fill_op = cast<TF::FillOp>(op);
RankedTensorType output_type =
dyn_cast<RankedTensorType>(tf_fill_op.getResult().getType());
if (!output_type) return failure();
ElementsAttr dims_elems;
if (!matchPattern(tf_fill_op.getDims(), m_Constant(&dims_elems)))
return failure();
SmallVector<int64_t> dims_vals;
uint32_t total_size = 1;
for (int i = 0; i < dims_elems.getNumElements(); i++) {
dims_vals.push_back(dims_elems.getValues<IntegerAttr>()[i].getInt());
total_size *= dims_vals[i];
}
ElementsAttr value_elem;
if (!matchPattern(tf_fill_op.getValue(), m_Constant(&value_elem)))
return failure();
RankedTensorType fill_type = tensorflow::GetTypeFromTFTensorShape(
ArrayRef<int64_t>(dims_vals),
value_elem.getShapedType().getElementType());
DenseArrayAttr fill_attr;
if (mlir::isa<FloatType>(value_elem.getShapedType().getElementType())) {
SmallVector<float> fill_arr(
total_size,
value_elem.getValues<FloatAttr>()[0].getValue().convertToFloat());
fill_attr =
DenseF32ArrayAttr::get(rewriter.getContext(), llvm::ArrayRef(fill_arr));
} el | #include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/monitoring/test_utils.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Not;
using ::testing::TestWithParam;
using tpu::FunctionToHloArgs;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kCompilationTimeStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_time";
static constexpr char kFullBridge[] = "full_bridge";
static constexpr char kCompilationStatusStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_status";
static const char kMlirWithFallbackModeSuccess[] =
"kMlirWithFallbackModeSuccess";
static const char kMlirWithFallbackModeFailure[] =
"kMlirWithFallbackModeFailure";
static const char kOldBridgeMlirFilteredFailure[] =
"kOldBridgeMlirFilteredFailure";
static const char kOldBridgeWithFallbackModeFailure[] =
"kOldBridgeWithFallbackModeFailure";
static const char kOldBridgeMlirFilteredSuccess[] =
"kOldBridgeMlirFilteredSuccess";
static const char kOldBridgeWithFallbackModeSuccess[] =
"kOldBridgeWithFallbackModeSuccess";
static const char kMlirCombinedMlirSuccess[] = "kMlirCombinedMlirSuccess";
static const char kMlirCombinedMlirFailure[] = "kMlirCombinedMlirFailure";
static const char kMlirCombinedOldSuccess[] = "kMlirCombinedOldSuccess";
static const char kMlirCombinedOldFailure[] = "kMlirCombinedOldFailure";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
func.return
}
})";
static constexpr char kBadMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = tf.Unknown() -> ()
func.return %0
}
})";
static constexpr char kUnsupportedMlirBridgeModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%cst0 = "tf.Const"(){ value = dense<0> : tensor<3x5xi1>} : () -> tensor<3x5xi1>
%0 = "tf.Where"(%cst0) : (tensor<3x5xi1>) -> tensor<?x2xi64>
func.return
}
})";
absl::StatusOr<XlaCompiler::CompilationResult> CompileMlirModule(
const char* mlir_module_str,
ConfigProto::Experimental::MlirBridgeRollout rollout_state) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state = rollout_state;
mlir_to_hlo_args.mlir_module = mlir_module_str;
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
tensorflow::tf2xla::internal::ConfigureMetadata(mlir_module_str, arg_shapes,
metadata_proto)
.IgnoreError();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
return LegalizeMlirToHlo(mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
custom_legalization_passes,
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes, client);
}
TEST(LegalizeTFTest, RecordsStreamzForSuccessfulLegalizeWithMlirBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
EXPECT_EQ(compilation_status.Delta(kMlirWithFallbackModeFailure), 0);
}
TEST(LegalizeTFTest, MatMul) {
static constexpr char kMatMulModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<5x11xf32>) {
%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<5x7xf32>} : () -> tensor<5x7xf32>
%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<11x7xf32>} : () -> tensor<11x7xf32>
%1 = "tf.MatMul"(%arg0, %arg1) {transpose_a = false, transpose_b = true} : (tensor<5x7xf32>, tensor<11x7xf32>) -> tensor<5x11xf32>
func.return %1 : tensor<5x11xf32>
}
})";
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMatMulModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
}
struct MatMulTestCase {
std::string mat_mul_method;
};
using BatchMatMulTest = TestWithParam<MatMulTestCase>;
TEST_P(BatchMatMulTest, BatchMatMul) {
const MatMulTestCase& test_case = GetParam();
static constexpr char kMatMulModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<1x4x4xf32>) {
%%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<1x4x2xf32>} : () -> tensor<1x4x2xf32>
%%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<1x2x4xf32>} : () -> tensor<1x2x4xf32>
%%1 = "tf.%s"(%%arg0, %%arg1) {T = f32, adj_x = false, adj_y = false, grad_x = false, grad_y = false, device = ""} : (tensor<1x4x2xf32>, tensor<1x2x4xf32>) -> tensor<1x4x4xf32>
func.return %%1 : tensor<1x4x4xf32>
}
})";
std::string mat_mul_method =
absl::StrFormat(kMatMulModuleStr, test_case.mat_mul_method);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
mat_mul_method.c_str(),
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
}
INSTANTIATE_TEST_SUITE_P(
BatchMatMulTest, BatchMatMulTest,
::testing::ValuesIn<MatMulTestCase>({
{"BatchMatMul"},
{"BatchMatMulV2"},
{"BatchMatMulV3"},
}),
[](const ::testing::TestParamInfo<BatchMatMulTest::ParamType>& info) {
return info.param.mat_mul_method;
});
TEST(LegalizeTFTest, DumpsProducedHLO) {
Env* env = Env::Default();
std::string test_dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::vector<std::string> files;
TF_ASSERT_OK(env->GetChildren(test_dir, &files));
int original_files_size = files.size();
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
TF_ASSERT_OK(env->GetChildren(test_dir, &files));
EXPECT_THAT(files.size(), ::testing::Gt(original_files_size));
setenv("TF_DUMP_GRAPH_PREFIX", test_dir.c_str(), 0);
}
TEST(LegalizeTFTest, RecordsStreamzForFailedLegalizeWithMlirBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
auto result = CompileMlirModule(
kBadMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_FALSE(result.ok());
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirFailure), 1);
}
TEST(LegalizeTFTest, RecordsStreamzForSuccessWithCombinedBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
auto result = CompileMlirModule(
kUnsupportedMlirBridgeModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_TRUE(result.ok());
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirFailure), 0);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedOldSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedOldFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeMlirFilteredFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeWithFallbackModeFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeMlirFilteredSuccess), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeWithFallbackModeSuccess), 0);
}
TEST(LegalizeTFTest, RecordsStreamzForNoMlirFallback) {
FunctionDef my_func =
tensorflow::FunctionDefHelper::Create("empty", {}, {}, {}, {}, {});
tensorflow::FunctionDefLibrary fdef;
*(fdef.add_function()) = my_func;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), fdef);
OpInputList guaranteed_constants;
NameAttrList function;
FunctionToHloArgs function_to_hlo_args{&function,
&flib_def,
0,
{&guaranteed_constants}};
se::Platform* cpu_platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(cpu_platform).value();
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
absl::StatusOr<XlaCompiler::CompilationResult> compile_result =
LegalizeMlirToHlo(function_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_CPU_JIT",
custom_legalization_passes,
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes, client);
EXPECT_FALSE(compile_result.ok());
}
TEST(LegalizeTFTest, RecordsCompilationTimeForSuccessfulCompilation) {
CellReader<monitoring::testing::Histogram> compilation_time(
kCompilationTimeStreamzName);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED));
EXPECT_GT(compilation_time.Delta(kFullBridge).num(), 0);
}
TEST(LegalizeTFTest, SuccessfullyCompilesModulesWithReturnValues) {
static constexpr char kHasReturnValuesAndNoMetadataRetvals[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<2xi32>) {
%cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
return %cst : tensor<2xi32>
}
})";
auto compilation_result = CompileMlirModule(
kHasReturnValuesAndNoMetadataRetvals,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("opcode:.*constant"));
}
TEST(LegalizeTFTest, SkipsTensorListSetItemIfDimensionsTooLarge) {
static constexpr char kTensorListSetItemDimensionTooLarge[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<!tf_type.variant<tensor<64x1xbf16>>> {
%elem_shape = "tf.Const"() <{value = dense<-1> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%num_elements = "tf.Const"() <{value = dense<0> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%list = "tf.TensorListReserve"(%elem_shape, %num_elements) : (tensor<i32>, tensor<i32>) -> tensor<!tf_type.variant<tensor<64x1xbf16>>>
%index = "tf.Const"() <{value = dense<0> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%element = "tf.Const"() <{value = dense<0.0> : tensor<64x1xbf16>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<64x1xbf16>
%updated_list = "tf.TensorListSetItem"(%list, %index, %element) : (tensor<!tf_type.variant<tensor<64x1xbf16>>>, tensor<i32>, tensor<64x1xbf16>) -> tensor<!tf_type.variant<tensor<64x1xbf16>>>
return %updated_list : tensor<!tf_type.variant<tensor<64x1xbf16>>>
}
})";
auto compilation_result = CompileMlirModule(
kTensorListSetItemDimensionTooLarge,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
ASSERT_TRUE(compilation_result.ok());
ASSERT_THAT(compilation_result,
Not(ComputationProtoContains("%.*= \"tf.TensorListSetItem")));
ASSERT_THAT(compilation_result,
Not(ComputationProtoContains("%.*=.*DynamicUpdateSlice")));
}
TEST(LegalizeTFTest, LegalizesFunctionWithBoundedDynamicArg) {
static constexpr char kMlirModuleWithBoundedDynamicArgStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [3]>> ) -> (tensor<?xi32, #mhlo.type_extensions<bounds = [3]>>) {
func.return %arg0 : tensor<?xi32, #mhlo.type_extensions<bounds = [3]>>
}
})";
auto compilation_result = CompileMlirModule(
kMlirModuleWithBoundedDynamicArgStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
ASSERT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("element_type:.S32\n.*dimensions: 3"));
}
}
}
} |
1,174 | cpp | tensorflow/tensorflow | name_utils | tensorflow/core/data/name_utils.cc | tensorflow/core/data/name_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_NAME_UTILS_H_
#define TENSORFLOW_CORE_DATA_NAME_UTILS_H_
#include <vector>
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace name_utils {
extern const char kDelimiter[];
extern const char kDefaultDatasetDebugStringPrefix[];
struct OpNameParams {
int op_version = 1;
};
struct DatasetDebugStringParams {
template <typename... T>
void set_args(T... input_args) {
args = {static_cast<const strings::AlphaNum&>(input_args).data()...};
}
int op_version = 1;
string dataset_prefix = "";
std::vector<string> args;
};
struct IteratorPrefixParams {
int op_version = 1;
string dataset_prefix = "";
};
string ArgsToString(const std::vector<string>& args);
string OpName(const string& dataset_type);
string OpName(const string& dataset_type, const OpNameParams& params);
string DatasetDebugString(const string& dataset_type);
string DatasetDebugString(const string& dataset_type,
const DatasetDebugStringParams& params);
string IteratorPrefix(const string& dataset_type, const string& prefix);
string IteratorPrefix(const string& dataset_type, const string& prefix,
const IteratorPrefixParams& params);
}
}
}
#endif
#include "tensorflow/core/data/name_utils.h"
#include <vector>
#include "absl/strings/str_join.h"
namespace tensorflow {
namespace data {
namespace name_utils {
ABSL_CONST_INIT const char kDelimiter[] = "::";
ABSL_CONST_INIT const char kDefaultDatasetDebugStringPrefix[] = "";
constexpr char kDataset[] = "Dataset";
constexpr char kOp[] = "Op";
constexpr char kVersion[] = "V";
string OpName(const string& dataset_type) {
return OpName(dataset_type, OpNameParams());
}
string OpName(const string& dataset_type, const OpNameParams& params) {
if (params.op_version == 1) {
return strings::StrCat(dataset_type, kDataset);
}
return strings::StrCat(dataset_type, kDataset, kVersion, params.op_version);
}
string ArgsToString(const std::vector<string>& args) {
if (args.empty()) {
return "";
}
return strings::StrCat("(", absl::StrJoin(args, ", "), ")");
}
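// Example (mirrors the unit tests below): DatasetDebugString("Range", params)
// with params.set_args(0, 10, 3) yields "RangeDatasetOp(0, 10, 3)::Dataset";
// setting params.op_version = 2 adds a "V2" suffix to the op name.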
string DatasetDebugString(const string& dataset_type) {
return DatasetDebugString(dataset_type, DatasetDebugStringParams());
}
string DatasetDebugString(const string& dataset_type,
const DatasetDebugStringParams& params) {
OpNameParams op_name_params;
op_name_params.op_version = params.op_version;
string op_name = OpName(dataset_type, op_name_params);
return strings::StrCat(op_name, kOp, ArgsToString(params.args), kDelimiter,
params.dataset_prefix, kDataset);
}
string IteratorPrefix(const string& dataset_type, const string& prefix) {
return IteratorPrefix(dataset_type, prefix, IteratorPrefixParams());
}
string IteratorPrefix(const string& dataset_type, const string& prefix,
const IteratorPrefixParams& params) {
if (params.op_version == 1) {
return strings::StrCat(prefix, kDelimiter, params.dataset_prefix,
dataset_type);
}
return strings::StrCat(prefix, kDelimiter, params.dataset_prefix,
dataset_type, kVersion, params.op_version);
}
}
}
} | #include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(DeviceNameUtils, ArgsToString) {
EXPECT_EQ(name_utils::ArgsToString({}), "");
EXPECT_EQ(name_utils::ArgsToString({"a"}), "(a)");
EXPECT_EQ(name_utils::ArgsToString({"1", "2", "3"}), "(1, 2, 3)");
}
TEST(NameUtilsTest, DatasetDebugString) {
EXPECT_EQ(name_utils::DatasetDebugString("Concatenate"),
"ConcatenateDatasetOp::Dataset");
name_utils::DatasetDebugStringParams range_params;
range_params.set_args(0, 10, 3);
EXPECT_EQ(name_utils::DatasetDebugString("Range", range_params),
"RangeDatasetOp(0, 10, 3)::Dataset");
name_utils::DatasetDebugStringParams shuffle_params;
shuffle_params.dataset_prefix = "FixedSeed";
shuffle_params.set_args(10, 1, 2);
EXPECT_EQ(name_utils::DatasetDebugString("Shuffle", shuffle_params),
"ShuffleDatasetOp(10, 1, 2)::FixedSeedDataset");
name_utils::DatasetDebugStringParams parallel_interleave_params;
parallel_interleave_params.op_version = 2;
EXPECT_EQ(name_utils::DatasetDebugString("ParallelInterleave",
parallel_interleave_params),
"ParallelInterleaveDatasetV2Op::Dataset");
}
TEST(NameUtilsTest, OpName) {
EXPECT_EQ(name_utils::OpName("Range"), "RangeDataset");
EXPECT_EQ(name_utils::OpName("Concatenate", name_utils::OpNameParams()),
"ConcatenateDataset");
name_utils::OpNameParams params;
params.op_version = 2;
EXPECT_EQ(name_utils::OpName("ParallelInterleave", params),
"ParallelInterleaveDatasetV2");
}
}
}
} |
1,175 | cpp | tensorflow/tensorflow | function | tensorflow/cc/experimental/libtf/function.cc | tensorflow/cc/experimental/libtf/tests/function_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_FUNCTION_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_FUNCTION_H_
#include <vector>
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/core/platform/statusor.h"
namespace tf {
namespace libtf {
class Function {
public:
tensorflow::Status RegisterTrace(tensorflow::AbstractFunctionPtr,
TaggedValue input_signature,
TaggedValue output_signature);
tensorflow::StatusOr<TaggedValue> Execute(tensorflow::AbstractContext*,
TaggedValue) const;
private:
struct ConcreteFunction {
tensorflow::AbstractFunctionPtr trace;
TaggedValue input_signature;
TaggedValue output_signature;
};
tensorflow::StatusOr<ConcreteFunction> GetConcreteFunction(TaggedValue) const;
std::vector<ConcreteFunction> concrete_fns_;
};
}
}
#endif
#include "tensorflow/cc/experimental/libtf/function.h"
#include <string>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/cc/experimental/libtf/value_iostream.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
namespace tf {
namespace libtf {
using tensorflow::AbstractContext;
using tensorflow::AbstractFunctionPtr;
using tensorflow::AbstractOperationPtr;
using tensorflow::AbstractTensorHandle;
using tensorflow::Status;
using tensorflow::StatusOr;
tensorflow::Status ExecuteFunction(
AbstractFunctionPtr trace, AbstractContext* ctx,
absl::Span<tensorflow::AbstractTensorHandle* const> inputs,
absl::Span<tensorflow::AbstractTensorHandle*> outputs) {
std::string fname;
{
const tensorflow::FunctionDef* fdef = nullptr;
TF_RETURN_IF_ERROR(trace->GetFunctionDef(&fdef));
fname = fdef->signature().name();
}
TF_RETURN_IF_ERROR(ctx->RegisterFunction(trace.get()));
auto cleanup = absl::MakeCleanup(
[fname, ctx]() { ctx->RemoveFunction(fname).IgnoreError(); });
auto call_op = AbstractOperationPtr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(
call_op->Reset(fname.c_str(), nullptr));
for (auto t : inputs) {
TF_RETURN_IF_ERROR(call_op->AddInput(t));
}
int num_outputs = outputs.size();
return call_op->Execute(outputs, &num_outputs);
}
Status VerifySupportedSignature(TaggedValue signature) {
if (signature.type() == TaggedValue::Type::TENSOR_SPEC) {
return ::tensorflow::OkStatus();
}
if (signature.type() == TaggedValue::Type::TUPLE) {
for (const auto& t : signature.tuple()) {
if (t.type() != TaggedValue::Type::TENSOR_SPEC) {
break;
}
}
return ::tensorflow::OkStatus();
}
return tensorflow::errors::Unimplemented(
"Only functions with inputs/outputs containing a single tensor or a tuple"
" of tensors are supported right now.");
}
Status VerifySupportedArgs(TaggedValue args) {
if (args.type() == TaggedValue::Type::TENSOR) {
return ::tensorflow::OkStatus();
}
if (args.type() == TaggedValue::Type::TUPLE) {
for (const auto& t : args.tuple()) {
if (t.type() != TaggedValue::Type::TENSOR) {
break;
}
}
return ::tensorflow::OkStatus();
}
return tensorflow::errors::Unimplemented(
"Only functions with inputs/outputs containing a single tensor or a tuple"
" of tensors are supported right now.");
}
Status Function::RegisterTrace(AbstractFunctionPtr fn,
TaggedValue input_signature,
TaggedValue output_signature) {
TF_RETURN_IF_ERROR(VerifySupportedSignature(input_signature));
TF_RETURN_IF_ERROR(VerifySupportedSignature(output_signature));
concrete_fns_.push_back({fn, input_signature, output_signature});
return ::tensorflow::OkStatus();
}
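// Match checks a runtime value against a registered signature: a TENSOR_SPEC
// matches a tensor whose dtype equals the spec's and whose shape is compatible
// with it, and a TUPLE matches element-wise with equal arity.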
bool Match(TaggedValue signature, TaggedValue value) {
switch (signature.type()) {
case TaggedValue::Type::TENSOR_SPEC: {
if (value.type() != TaggedValue::Type::TENSOR) {
return false;
}
auto spec = signature.tensor_spec();
const auto& tensor = value.tensor();
if (tensor->DataType() != spec.dtype) {
return false;
}
tensorflow::PartialTensorShape tensor_shape;
DCHECK(tensor->Shape(&tensor_shape).ok());
if (!tensor_shape.IsCompatibleWith(spec.shape)) {
return false;
}
} break;
case TaggedValue::Type::TUPLE: {
if (value.type() != TaggedValue::Type::TUPLE) {
return false;
}
if (value.tuple().size() != signature.tuple().size()) {
return false;
}
for (auto i = 0; i < value.tuple().size(); i++) {
if (!Match(signature.tuple()[i], value.tuple()[i])) {
return false;
}
}
} break;
default:
return false;
}
return true;
}
void Flatten(const TaggedValue& value,
std::vector<AbstractTensorHandle*>* flat_args) {
if (value.type() == TaggedValue::Type::TENSOR) {
flat_args->emplace_back(value.tensor().get());
} else if (value.type() == TaggedValue::Type::TUPLE) {
for (const auto& t : value.tuple()) {
Flatten(t, flat_args);
}
} else {
LOG(ERROR) << "Unimplemented";
}
}
StatusOr<TaggedValue> Unflatten(
absl::Span<AbstractTensorHandle* const> flat_args, TaggedValue structure) {
if (structure.type() == TaggedValue::Type::TENSOR_SPEC) {
if (flat_args.size() != 1) {
return tensorflow::errors::Internal("Expected single tensor but found ",
flat_args.size());
}
TaggedValue wrapped_t =
TaggedValue(impl::TaggedValueTensor(flat_args[0], true));
if (!Match(structure, wrapped_t)) {
std::stringstream stream;
stream << "Shape and dtype of tensor " << wrapped_t
<< " does not match that in signature " << structure;
return tensorflow::errors::Internal(stream.str());
}
return wrapped_t;
} else if (structure.type() == TaggedValue::Type::TUPLE) {
if (flat_args.size() != structure.tuple().size()) {
return tensorflow::errors::InvalidArgument(
"Tuple length ", structure.tuple().size(),
" does not match length of flat args ", flat_args.size());
}
auto result = impl::TaggedValue::Tuple();
for (auto i = 0; i < structure.tuple().size(); i++) {
TF_ASSIGN_OR_RETURN(TaggedValue ele,
Unflatten({flat_args[i]}, structure.tuple()[i]));
result.tuple().emplace_back(std::move(ele));
}
return result;
} else {
return tensorflow::errors::Unimplemented(
"Only tensors and tuples of tensors are supported right now.");
}
}
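// GetFlatSize returns the number of leaf values in a (possibly nested)
// tuple/list/dict, e.g. a tuple of (tensor, (tensor, tensor)) flattens to 3
// output handles.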
size_t GetFlatSize(const TaggedValue& value) {
if (value.type() == TaggedValue::Type::TUPLE) {
size_t result = 0;
for (const auto& t : value.tuple()) {
result += GetFlatSize(t);
}
return result;
} else if (value.type() == TaggedValue::Type::LIST) {
size_t result = 0;
for (const auto& t : value.list()) {
result += GetFlatSize(t);
}
return result;
} else if (value.type() == TaggedValue::Type::DICT) {
size_t result = 0;
for (const auto& t : value.dict()) {
result += GetFlatSize(t.second);
}
return result;
}
return 1;
}
StatusOr<TaggedValue> Function::Execute(AbstractContext* ctx,
TaggedValue value) const {
TF_RETURN_IF_ERROR(VerifySupportedArgs(value));
TF_ASSIGN_OR_RETURN(auto concrete_fn, GetConcreteFunction(value));
std::vector<AbstractTensorHandle*> args;
Flatten(value, &args);
std::vector<AbstractTensorHandle*> outs(
GetFlatSize(concrete_fn.output_signature));
TF_RETURN_IF_ERROR(
ExecuteFunction(concrete_fn.trace, ctx, args, absl::MakeSpan(outs)));
auto cleanup_tensors = absl::MakeCleanup([outs]() {
for (auto t : outs) {
t->Unref();
}
});
return Unflatten(outs, concrete_fn.output_signature);
}
StatusOr<Function::ConcreteFunction> Function::GetConcreteFunction(
TaggedValue value) const {
if (concrete_fns_.empty()) {
return tensorflow::errors::FailedPrecondition(
"No registered ConcreteFunctions.");
}
for (auto& spec : concrete_fns_) {
if (Match(spec.input_signature, value)) {
return spec;
}
}
return tensorflow::errors::InvalidArgument("No match found.");
}
}
} | #include "tensorflow/cc/experimental/libtf/function.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/graph_function.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
using tensorflow::AbstractContext;
using tensorflow::AbstractContextPtr;
using tensorflow::AbstractFunctionPtr;
using tensorflow::AbstractTensorHandle;
using tensorflow::DT_FLOAT;
using tensorflow::FunctionDef;
using tensorflow::FunctionDefHelper;
using tensorflow::PartialTensorShape;
using tensorflow::Status;
using tensorflow::StatusOr;
using tensorflow::TF_StatusPtr;
using tensorflow::tracing::graph::GraphFunction;
class FunctionTest
: public ::testing::TestWithParam<std::tuple<const char*, bool>> {
public:
template <class T, TF_DataType datatype>
impl::TaggedValueTensor CreateScalarTensor(T val) {
AbstractTensorHandle* raw = nullptr;
Status s = TestScalarTensorHandle<T, datatype>(ctx_.get(), val, &raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
return impl::TaggedValueTensor(raw, false);
}
bool UseTfrt() { return std::get<1>(GetParam()); }
AbstractContextPtr ctx_;
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = tensorflow::StatusFromTF_Status(status.get());
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
AbstractContext* ctx_raw = nullptr;
s = BuildImmediateExecutionContext(UseTfrt(), &ctx_raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
ctx_.reset(ctx_raw);
}
};
FunctionDef SquareFunc() {
return FunctionDefHelper::Define(
"SquareFunc",
{"x: float"},
{"y: float"},
{},
{{{"y"},
"Square",
{"x"},
{{"T", DT_FLOAT}},
{},
"",
"square"}});
}
FunctionDef AddFunc() {
return FunctionDefHelper::Define(
"AddFunc",
{"x: float", "y: float"},
{"z: float"},
{},
{{{"z"},
"Add",
{"x", "y"},
{{"T", DT_FLOAT}},
{},
"",
"add"}});
}
FunctionDef IdentityNFunc() {
return FunctionDefHelper::Define(
"IdentityNFunc",
{"x: float", "y: float"},
{"u: float", "v: float"},
{},
{{{"u", "v"},
"IdentityN",
{"x", "y"},
{{"T", tensorflow::DataTypeSlice({DT_FLOAT, DT_FLOAT})}},
{},
""}});
}
template <typename T>
void ExpectEquals(AbstractTensorHandle* t, T expected) {
TF_Tensor* result_t;
Status s = tensorflow::GetValue(t, &result_t);
ASSERT_TRUE(s.ok()) << s.message();
auto value = static_cast<T*>(TF_TensorData(result_t));
EXPECT_EQ(*value, expected);
TF_DeleteTensor(result_t);
}
TEST_P(FunctionTest, Square) {
impl::TaggedValueTensor x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
FunctionDef fdef = SquareFunc();
AbstractFunctionPtr trace(new GraphFunction(fdef), false);
Function tf_function;
PartialTensorShape unknown_shape;
TaggedValue signature(unknown_shape, DT_FLOAT);
Status s = tf_function.RegisterTrace(std::move(trace), signature, signature);
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args(std::move(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(v.ok()) << v.status().message();
const TaggedValue& result = v.value();
AbstractTensorHandle* t = result.tensor().get();
ExpectEquals(t, 4.0f);
}
TEST_P(FunctionTest, Add) {
impl::TaggedValueTensor x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
FunctionDef fdef = AddFunc();
AbstractFunctionPtr trace(new GraphFunction(fdef), false);
Function tf_function;
PartialTensorShape unknown_shape;
TaggedValue tensor_spec(unknown_shape, DT_FLOAT);
TaggedValue input_signature = TaggedValue::Tuple();
input_signature.tuple().emplace_back(tensor_spec);
input_signature.tuple().emplace_back(tensor_spec);
Status s =
tf_function.RegisterTrace(std::move(trace), input_signature, tensor_spec);
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(v.ok()) << v.status().message();
const TaggedValue& result = v.value();
ExpectEquals(result.tensor().get(), 4.0f);
}
TEST_P(FunctionTest, IdentityN) {
impl::TaggedValueTensor x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
impl::TaggedValueTensor y = CreateScalarTensor<float, TF_FLOAT>(4.0f);
FunctionDef fdef = IdentityNFunc();
AbstractFunctionPtr trace(new GraphFunction(fdef), false);
Function tf_function;
PartialTensorShape unknown_shape;
TaggedValue tensor_spec(unknown_shape, DT_FLOAT);
TaggedValue signature = TaggedValue::Tuple();
signature.tuple().emplace_back(tensor_spec);
signature.tuple().emplace_back(tensor_spec);
Status s = tf_function.RegisterTrace(std::move(trace), signature, signature);
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(y));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(v.ok()) << v.status().message();
const TaggedValue& result = v.value();
ExpectEquals(result.tuple()[0].tensor().get(), 2.0f);
ExpectEquals(result.tuple()[1].tensor().get(), 4.0f);
}
TEST_P(FunctionTest, UnaryFuncCalledWithMultipleArgsFails) {
impl::TaggedValueTensor x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
FunctionDef fdef = SquareFunc();
AbstractFunctionPtr trace(new GraphFunction(fdef), false);
Function tf_function;
PartialTensorShape unknown_shape;
TaggedValue signature(unknown_shape, DT_FLOAT);
Status s = tf_function.RegisterTrace(std::move(trace), signature, signature);
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(tensorflow::errors::IsInvalidArgument(v.status()));
ASSERT_TRUE(absl::StrContains(v.status().message(), "No match"));
}
TEST_P(FunctionTest, IncorrectArityOfOutputSignatureFails) {
if (UseTfrt()) {
GTEST_SKIP() << "TFRT crashes if expected number of output tensors does not"
" match actual.";
}
impl::TaggedValueTensor x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
impl::TaggedValueTensor y = CreateScalarTensor<float, TF_FLOAT>(4.0f);
FunctionDef fdef = IdentityNFunc();
AbstractFunctionPtr trace(new GraphFunction(fdef), false);
Function tf_function;
PartialTensorShape unknown_shape;
TaggedValue tensor_spec(unknown_shape, DT_FLOAT);
TaggedValue input_signature = TaggedValue::Tuple();
input_signature.tuple().emplace_back(tensor_spec);
input_signature.tuple().emplace_back(tensor_spec);
TaggedValue output_signature(unknown_shape, DT_FLOAT);
Status s = tf_function.RegisterTrace(std::move(trace), input_signature,
output_signature);
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(y));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(tensorflow::errors::IsInvalidArgument(v.status())) << v.status();
ASSERT_TRUE(absl::StrContains(v.status().message(),
"Expecting 2 outputs, but *num_retvals is 1"));
}
TEST_P(FunctionTest, IncorrectDtypeInOutputSignatureFails) {
impl::TaggedValueTensor x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
FunctionDef fdef = AddFunc();
AbstractFunctionPtr trace(new GraphFunction(fdef), false);
Function tf_function;
PartialTensorShape unknown_shape;
TaggedValue input_tensor_spec(unknown_shape, tensorflow::DT_FLOAT);
TaggedValue input_signature = TaggedValue::Tuple();
input_signature.tuple().emplace_back(input_tensor_spec);
input_signature.tuple().emplace_back(input_tensor_spec);
TaggedValue output_tensor_spec(unknown_shape, tensorflow::DT_INT64);
Status s = tf_function.RegisterTrace(std::move(trace), input_signature,
output_tensor_spec);
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(tensorflow::errors::IsInternal(v.status())) << v.status();
ASSERT_TRUE(
absl::StrContains(v.status().message(), "Shape and dtype of tensor"));
ASSERT_TRUE(absl::StrContains(v.status().message(),
"does not match that in signature"));
}
INSTANTIATE_TEST_SUITE_P(TF2CAPI, FunctionTest,
::testing::Combine(::testing::Values("graphdef",
"mlir"),
::testing::Values(false)));
}
} |
1,176 | cpp | tensorflow/tensorflow | cost_analysis | third_party/xla/xla/service/memory_space_assignment/cost_analysis.cc | third_party/xla/xla/service/memory_space_assignment/cost_analysis_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_COST_ANALYSIS_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_COST_ANALYSIS_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/call_graph.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
struct CostAnalysisOptions {
uint64_t xla_tpu_memory_space_assignment_while_execution_count = 5ULL;
std::string
xla_tpu_alternate_memory_benefit_scaling_factor_for_large_buffers =
"SQRT";
float pipeline_overhead_window_size_mib = 0;
float alternate_mem_bandwidth_bytes_per_second = 0.0f;
float async_copy_bandwidth_bytes_per_second = 0.0f;
float async_copy_bandwidth_scaling_factor = 1.0;
};
class BaseCosts {
public:
virtual ~BaseCosts() = default;
virtual int64_t GetShapeSize(const Shape& shape) = 0;
virtual float BytesAccessed(const HloInstruction& instruction) = 0;
virtual float OperandBytesAccessed(const HloInstruction& instruction,
int64_t operand_num,
const ShapeIndex& shape_index) = 0;
virtual float OutputBytesAccessed(const HloInstruction& instruction,
const ShapeIndex& shape_index) = 0;
virtual float BytesPerSecond() = 0;
virtual float ComputeSeconds(const HloInstruction& instruction) = 0;
protected:
BaseCosts() = default;
};
class HloCostAnalysisCosts : public BaseCosts {
public:
explicit HloCostAnalysisCosts(const HloCostAnalysis& hlo_cost_analysis);
~HloCostAnalysisCosts() override = default;
int64_t GetShapeSize(const Shape& shape) override;
float BytesAccessed(const HloInstruction& instruction) override;
float OperandBytesAccessed(const HloInstruction& instruction,
int64_t operand_num,
const ShapeIndex& shape_index) override;
float OutputBytesAccessed(const HloInstruction& instruction,
const ShapeIndex& shape_index) override;
float BytesPerSecond() override;
float ComputeSeconds(const HloInstruction& instruction) override;
private:
HloCostAnalysisCosts() = default;
const HloCostAnalysis& hlo_cost_analysis_;
};
class CostAnalysis {
public:
struct Cache {
absl::flat_hash_map<const HloInstruction*, float> while_nest_multiplier;
absl::flat_hash_map<const HloComputation*, float> computation_trip_count;
absl::flat_hash_map<HloPosition, float> memory_boundedness;
};
using IsInAlternateMemoryFun = absl::FunctionRef<bool(
std::optional<int> , const ShapeIndex& ,
const Shape& )>;
virtual ~CostAnalysis() = default;
static absl::StatusOr<std::unique_ptr<CostAnalysis>> Create(
BaseCosts& base_costs, const CostAnalysisOptions& options,
const HloModule& module);
BaseCosts& base_costs() const { return base_costs_; }
float GetAlternateMemoryBenefit(const HloInstruction& instruction,
float elapsed_time_due_to_alternate_mem,
Cache* cache = nullptr) const;
float GetAlternateMemoryBenefit(const HloPosition& position,
Cache* cache = nullptr) const;
float GetAlternateMemoryBenefit(const HloUse& use,
Cache* cache = nullptr) const;
float GetMemoryBoundedness(
const GlobalDecreasingSizeBestFitHeap<HloValue>::BufferInterval& interval,
Cache* cache = nullptr) const;
float GetDefaultMemoryAccessOverhead(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_mem = {},
absl::Span<const ShapeIndex> outputs_in_alternate_mem = {}) const;
float GetDefaultMemoryBandwidthIdleTime(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_mem = {},
absl::Span<const ShapeIndex> outputs_in_alternate_mem = {}) const;
float GetBytesAccessedFromAlternateMemory(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_mem = {},
absl::Span<const ShapeIndex> outputs_in_alternate_mem = {}) const;
float GetInstructionElapsedDueToCompute(
const HloInstruction& instruction) const;
float GetInstructionElapsedDueToMemory(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_mem = {},
absl::Span<const ShapeIndex> outputs_in_alternate_mem = {}) const;
float GetInstructionElapsedDueToMemory(
const HloInstruction& instruction,
IsInAlternateMemoryFun is_in_alternate_mem) const;
virtual float GetInstructionElapsed(const HloInstruction& instruction) const;
virtual float GetInstructionElapsedInAlternateMemory(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_mem,
absl::Span<const ShapeIndex> outputs_in_alternate_mem) const;
float GetInstructionElapsedInAlternateMemory(
const HloInstruction& instruction,
IsInAlternateMemoryFun is_in_alternate_mem) const;
virtual float GetAsyncCopyElapsed(const Shape& shape) const;
int64_t GetScheduleEndTime() const;
int CalculateComputationNestLevel(const HloInstruction* instruction,
bool while_only) const;
float CalculateNestTripCount(const HloInstruction* instruction,
Cache* cache = nullptr) const;
float GetWhileNestMultiplier(int while_nest_level) const;
const HloLiveRange& hlo_live_range() const { return *hlo_live_range_; }
protected:
CostAnalysis(BaseCosts& base_costs, const CostAnalysisOptions& options,
std::unique_ptr<HloAliasAnalysis> alias_analysis,
std::unique_ptr<HloLiveRange> hlo_live_range,
std::unique_ptr<CallGraph> call_graph)
: base_costs_(base_costs),
options_(options),
alias_analysis_(std::move(alias_analysis)),
hlo_live_range_(std::move(hlo_live_range)),
call_graph_(std::move(call_graph)) {}
private:
BaseCosts& base_costs_;
const CostAnalysisOptions options_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
std::unique_ptr<HloLiveRange> hlo_live_range_;
std::unique_ptr<CallGraph> call_graph_;
};
}
}
#endif
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/call_graph.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
HloCostAnalysisCosts::HloCostAnalysisCosts(
const HloCostAnalysis& hlo_cost_analysis)
: hlo_cost_analysis_(hlo_cost_analysis) {}
int64_t HloCostAnalysisCosts::GetShapeSize(const Shape& shape) {
return hlo_cost_analysis_.GetShapeSize(shape);
}
float HloCostAnalysisCosts::BytesAccessed(const HloInstruction& instruction) {
return static_cast<float>(hlo_cost_analysis_.bytes_accessed(instruction));
}
float HloCostAnalysisCosts::OperandBytesAccessed(
const HloInstruction& instruction, int64_t operand_num,
const ShapeIndex& shape_index) {
return static_cast<float>(hlo_cost_analysis_.operand_bytes_accessed(
instruction, operand_num, shape_index));
}
float HloCostAnalysisCosts::OutputBytesAccessed(
const HloInstruction& instruction, const ShapeIndex& shape_index) {
return static_cast<float>(
hlo_cost_analysis_.output_bytes_accessed(instruction, shape_index));
}
float HloCostAnalysisCosts::BytesPerSecond() {
return hlo_cost_analysis_.per_second_rate(HloCostAnalysis::kBytesAccessedKey);
}
float HloCostAnalysisCosts::ComputeSeconds(const HloInstruction& instruction) {
return std::max(
static_cast<float>(hlo_cost_analysis_.flop_count(instruction)) /
hlo_cost_analysis_.per_second_rate(HloCostAnalysis::kFlopsKey),
static_cast<float>(hlo_cost_analysis_.transcendental_count(instruction)) /
hlo_cost_analysis_.per_second_rate(
HloCostAnalysis::kTranscendentalsKey));
}
absl::StatusOr<std::unique_ptr<CostAnalysis>> CostAnalysis::Create(
BaseCosts& base_costs, const CostAnalysisOptions& options,
const HloModule& module) {
TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(&module));
TF_ASSIGN_OR_RETURN(auto hlo_live_range,
HloLiveRange::Run(module.schedule(), *alias_analysis,
module.entry_computation()));
auto call_graph = CallGraph::Build(&module);
return absl::WrapUnique(
new CostAnalysis(base_costs, options, std::move(alias_analysis),
std::move(hlo_live_range), std::move(call_graph)));
}
float CostAnalysis::GetAlternateMemoryBenefit(
const HloInstruction& instruction, float elapsed_time_due_to_alternate_mem,
CostAnalysis::Cache* cache) const {
float elapsed_time_due_to_compute =
GetInstructionElapsedDueToCompute(instruction);
float elapsed_time_due_to_memory =
GetInstructionElapsedDueToMemory(instruction);
if (elapsed_time_due_to_memory > elapsed_time_due_to_compute) {
float while_nest_multiplier;
if (cache) {
auto it = cache->while_nest_multiplier.find(&instruction);
if (it != cache->while_nest_multiplier.end()) {
while_nest_multiplier = it->second;
} else {
while_nest_multiplier = GetWhileNestMultiplier(
CalculateComputationNestLevel(&instruction,
true));
cache->while_nest_multiplier[&instruction] = while_nest_multiplier;
}
} else {
while_nest_multiplier = GetWhileNestMultiplier(
CalculateComputationNestLevel(&instruction,
true));
}
return (elapsed_time_due_to_memory - elapsed_time_due_to_alternate_mem) *
while_nest_multiplier;
} else {
return elapsed_time_due_to_memory - elapsed_time_due_to_compute;
}
}
float CostAnalysis::GetMemoryBoundedness(
const GlobalDecreasingSizeBestFitHeap<HloValue>::BufferInterval& interval,
CostAnalysis::Cache* cache) const {
if (cache) {
auto it =
cache->memory_boundedness.find(interval.buffer->defining_position());
if (it != cache->memory_boundedness.end()) {
return it->second;
}
}
float alternate_mem_benefit =
GetAlternateMemoryBenefit(interval.buffer->defining_position(), cache);
for (const HloBuffer* buffer : alias_analysis_->ComputeBuffersAt(
interval.buffer->defining_position().instruction,
interval.buffer->defining_position().index)) {
for (const HloValue* value : buffer->values()) {
for (const HloUse& use : value->GetUses()) {
if (use.instruction->opcode() == HloOpcode::kWhile ||
use.instruction->opcode() == HloOpcode::kConditional) {
continue;
}
float use_alternate_mem_benefit = GetAlternateMemoryBenefit(use, cache);
if (alternate_mem_benefit > 0 && use_alternate_mem_benefit > 0) {
alternate_mem_benefit += use_alternate_mem_benefit;
} else {
alternate_mem_benefit =
std::max(alternate_mem_benefit, use_alternate_mem_benefit);
}
}
}
}
float memory_boundedness = 1;
if (options_
.xla_tpu_alternate_memory_benefit_scaling_factor_for_large_buffers ==
"NO_SCALE") {
memory_boundedness = alternate_mem_benefit;
} else {
memory_boundedness = alternate_mem_benefit / std::sqrt(interval.size);
}
if (cache) {
cache->memory_boundedness[interval.buffer->defining_position()] =
memory_boundedness;
}
return memory_boundedness;
}
float CostAnalysis::GetAlternateMemoryBenefit(
const HloPosition& position, CostAnalysis::Cache* cache) const {
return GetAlternateMemoryBenefit(
*position.instruction,
GetInstructionElapsedDueToMemory(
*position.instruction,
{},
{position.index}),
cache);
}
float CostAnalysis::GetAlternateMemoryBenefit(
const HloUse& use, CostAnalysis::Cache* cache) const {
return GetAlternateMemoryBenefit(
*use.instruction,
GetInstructionElapsedDueToMemory(
*use.instruction,
{std::make_pair(use.operand_number,
use.operand_index)}),
cache);
}
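// Walks the call graph from the instruction's computation up to the entry
// computation and counts the caller levels; with `while_only` set, only
// levels called from a while op are counted.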
int CostAnalysis::CalculateComputationNestLevel(
const HloInstruction* instruction, bool while_only) const {
int nest_level = 0;
const HloComputation* computation = instruction->parent();
while (!computation->IsEntryComputation()) {
auto& node = call_graph_->GetNode(computation);
auto callsites = node.caller_callsites();
CHECK(node.computation()->IsAsyncComputation() || callsites.size() == 1)
<< "The module is not flattened!";
auto& callsite = callsites[0];
if (!while_only || callsite.instruction()->opcode() == HloOpcode::kWhile) {
++nest_level;
}
computation = callsite.instruction()->parent();
}
return nest_level;
}
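// The configured while-loop execution count raised to the given nest level.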
float CostAnalysis::GetWhileNestMultiplier(int while_nest_level) const {
return IPow<float>(
options_.xla_tpu_memory_space_assignment_while_execution_count,
while_nest_level);
}
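// Product of the trip counts of all while loops enclosing `instruction`,
// using the analyzed trip count when available and the configured execution
// count otherwise. Memoized per computation when a cache is provided.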
float CostAnalysis::CalculateNestTripCount(const HloInstruction* instruction,
CostAnalysis::Cache* cache) const {
float total_trip_count = 1.0;
const HloComputation* computation = instruction->parent();
while (!computation->IsEntryComputation()) {
if (cache) {
auto it = cache->computation_trip_count.find(computation);
if (it != cache->computation_trip_count.end()) {
if (computation == instruction->parent()) {
return it->second;
} else {
total_trip_count *= it->second;
break;
}
}
}
CallGraphNode& node = call_graph_->GetNode(computation);
absl::Span<const CallSite> callsites = node.caller_callsites();
const xla::CallSite& callsite = callsites[0];
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
HloInstruction* while_op = callsite.instruction();
      std::optional<float> trip_count = ComputeWhileLoopTripCount(while_op);
total_trip_count *= trip_count.value_or(
options_.xla_tpu_memory_space_assignment_while_execution_count);
}
computation = callsite.instruction()->parent();
}
if (cache) {
cache->computation_trip_count[instruction->parent()] = total_trip_count;
}
return total_trip_count;
}
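// Models pipelining overhead for default-memory traffic: the fraction of the
// compute time corresponding to the first `pipeline_overhead_window_size_mib`
// of default-memory bytes (relative to total bytes accessed) is treated as
// non-overlappable overhead.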
float CostAnalysis::GetDefaultMemoryAccessOverhead(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem,
absl::Span<const ShapeIndex> outputs_in_alternate_mem) const {
const float window_size_bytes =
options_.pipeline_overhead_window_size_mib * 1024 * 1024;
const float bytes_accessed = base_costs_.BytesAccessed(instruction);
const float default_memory_bytes_accessed =
bytes_accessed -
GetBytesAccessedFromAlternateMemory(
instruction, operands_in_alternate_mem, outputs_in_alternate_mem);
const float compute_elapsed = GetInstructionElapsedDueToCompute(instruction);
const float effective_window_size_bytes =
std::min(window_size_bytes, default_memory_bytes_accessed);
float overhead = 0;
if (bytes_accessed > 0) {
overhead = (effective_window_size_bytes / bytes_accessed) * compute_elapsed;
}
return overhead;
}
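// Time during the instruction's elapsed (with the given alternate-memory
// placements) in which the default-memory bandwidth is idle, i.e. the total
// elapsed minus the time needed to move the default-memory bytes.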
float CostAnalysis::GetDefaultMemoryBandwidthIdleTime(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem,
absl::Span<const ShapeIndex> outputs_in_alternate_mem) const {
const float default_memory_bytes_accessed =
base_costs_.BytesAccessed(instruction) -
GetBytesAccessedFromAlternateMemory(
instruction, operands_in_alternate_mem, outputs_in_alternate_mem);
const float elapsed_due_to_default_mem =
default_memory_bytes_accessed / base_costs_.BytesPerSecond();
const float elapsed = GetInstructionElapsedInAlternateMemory(
instruction, operands_in_alternate_mem, outputs_in_alternate_mem);
return elapsed - elapsed_due_to_default_mem;
}
float CostAnalysis::GetBytesAccessedFromAlternateMemory(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem,
absl::Span<const ShapeIndex> outputs_in_alternate_mem) const {
float bytes_accessed_from_alternate_mem = 0.0;
for (auto& operand : operands_in_alternate_mem) {
const float operand_bytes_accessed = base_costs_.OperandBytesAccessed(
instruction, operand.first, operand.second);
bytes_accessed_from_alternate_mem += operand_bytes_accessed;
}
for (auto& shape_idx : outputs_in_alternate_mem) {
const float output_bytes_accessed =
base_costs_.OutputBytesAccessed(instruction, shape_idx);
bytes_accessed_from_alternate_mem += output_bytes_accessed;
}
return bytes_accessed_from_alternate_mem;
}
namespace {
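// Async collective start/done, async start/done, collective-permute
// start/done, and copy start/done ops are excluded from elapsed-time
// modeling; the functions below return zero for them.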
bool ExcludeInstructionFromElapsed(const HloInstruction& instruction) {
return instruction.opcode() == HloOpcode::kAllGatherStart ||
instruction.opcode() == HloOpcode::kAllGatherDone ||
instruction.opcode() == HloOpcode::kAllReduceStart ||
instruction.opcode() == HloOpcode::kAllReduceDone ||
instruction.opcode() == HloOpcode::kAsyncStart ||
instruction.opcode() == HloOpcode::kAsyncDone ||
instruction.opcode() == HloOpcode::kCollectivePermuteStart ||
instruction.opcode() == HloOpcode::kCollectivePermuteDone ||
instruction.opcode() == HloOpcode::kCopyStart ||
instruction.opcode() == HloOpcode::kCopyDone;
}
}
float CostAnalysis::GetInstructionElapsedDueToCompute(
const HloInstruction& instruction) const {
if (ExcludeInstructionFromElapsed(instruction)) {
return 0.0f;
}
return base_costs_.ComputeSeconds(instruction);
}
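// Splits the instruction's bytes accessed between alternate and default
// memory (based on the given operand/output placements) and sums the
// transfer times at the respective bandwidths.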
float CostAnalysis::GetInstructionElapsedDueToMemory(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem,
absl::Span<const ShapeIndex> outputs_in_alternate_mem) const {
if (ExcludeInstructionFromElapsed(instruction)) {
return 0.0f;
}
float total_bytes_accessed = base_costs_.BytesAccessed(instruction);
float bytes_accessed_from_alternate_mem = GetBytesAccessedFromAlternateMemory(
instruction, operands_in_alternate_mem, outputs_in_alternate_mem);
float elapsed_due_to_alternate_mem =
bytes_accessed_from_alternate_mem /
options_.alternate_mem_bandwidth_bytes_per_second;
float elapsed_due_to_default_mem =
(total_bytes_accessed - bytes_accessed_from_alternate_mem) /
base_costs_.BytesPerSecond();
return elapsed_due_to_alternate_mem + elapsed_due_to_default_mem;
}
float CostAnalysis::GetInstructionElapsedDueToMemory(
const HloInstruction& instruction,
IsInAlternateMemoryFun is_in_alternate_mem) const {
if (ExcludeInstructionFromElapsed(instruction)) {
return 0.0f;
}
float total_bytes_accessed = base_costs_.BytesAccessed(instruction);
float bytes_accessed_from_alternate_mem = 0.0;
for (int operand_num = 0; operand_num < instruction.operand_count();
++operand_num) {
ShapeUtil::ForEachSubshape(
instruction.operand(operand_num)->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
if (is_in_alternate_mem(operand_num, index, subshape)) {
bytes_accessed_from_alternate_mem +=
base_costs_.OperandBytesAccessed(instruction, operand_num,
index);
}
});
}
ShapeUtil::ForEachSubshape(instruction.shape(), [&](const Shape& subshape,
const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
if (is_in_alternate_mem(std::nullopt, index, subshape)) {
bytes_accessed_from_alternate_mem +=
base_costs_.OutputBytesAccessed(instruction, index);
}
});
float elapsed_due_to_alternate_mem =
bytes_accessed_from_alternate_mem /
options_.alternate_mem_bandwidth_bytes_per_second;
float elapsed_due_to_default_mem =
(total_bytes_accessed - bytes_accessed_from_alternate_mem) /
base_costs_.BytesPerSecond();
return elapsed_due_to_alternate_mem + elapsed_due_to_default_mem;
}
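// Total elapsed time with everything in default memory: the maximum of the
// compute time and the memory time plus the default-memory access overhead.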
float CostAnalysis::GetInstructionElapsed(
const HloInstruction& instruction) const {
if (ExcludeInstructionFromElapsed(instruction)) {
return 0.0f;
}
float overhead = GetDefaultMemoryAccessOverhead(instruction);
return std::max(GetInstructionElapsedDueToCompute(instruction),
GetInstructionElapsedDueToMemory(instruction) + overhead);
}
float CostAnalysis::GetInstructionElapsedInAlternateMemory(
const HloInstruction& instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem,
absl::Span<const ShapeIndex> outputs_in_alternate_mem) const {
if (ExcludeInstructionFromElapsed(instruction)) {
return 0.0f;
}
float overhead = GetDefaultMemoryAccessOverhead(
instruction, operands_in_alternate_mem, outputs_in_alternate_mem);
return std::max(
GetInstructionElapsedDueToCompute(instruction),
GetInstructionElapsedDueToMemory(instruction, operands_in_alternate_mem,
outputs_in_alternate_mem) +
overhead);
}
float CostAnalysis::GetInstructionElapsedInAlternateMemory(
const HloInstruction& instruction,
IsInAlternateMemoryFun is_in_alternate_mem) const {
if (ExcludeInstructionFromElapsed(instruction)) {
return 0.0f;
}
return std::max(
GetInstructionElapsedDueToCompute(instruction),
GetInstructionElapsedDueToMemory(instruction, is_in_alternate_mem));
}
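// Time to asynchronously copy `shape` given the configured async-copy
// bandwidth and its scaling factor.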
float CostAnalysis::GetAsyncCopyElapsed(const Shape& shape) const {
int64_t size_in_bytes = base_costs_.GetShapeSize(shape);
return static_cast<float>(size_in_bytes) /
(options_.async_copy_bandwidth_bytes_per_second *
options_.async_copy_bandwidth_scaling_factor);
}
int64_t CostAnalysis::GetScheduleEndTime() const {
return hlo_live_range_->schedule_end_time();
}
}
} | #include "xla/service/memory_space_assignment/cost_analysis.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::CostAnalysis;
using memory_space_assignment::CostAnalysisOptions;
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
class MemorySpaceAssignmentCostAnalysisTest : public HloTestBase {
protected:
absl::Status Initialize(const HloModule* module,
float pipeline_overhead_window_size_mib = 0.0) {
HloCostAnalysis::Options options;
options_.alternate_mem_bandwidth_bytes_per_second = 128;
options_.async_copy_bandwidth_bytes_per_second = 32;
options_.pipeline_overhead_window_size_mib =
pipeline_overhead_window_size_mib;
options.shape_size = ShapeSize;
options.set_flops_per_second(8);
options.set_bytes_per_second(32);
options.set_transcendentals_per_second(16);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(options);
TF_RETURN_IF_ERROR(
module->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<memory_space_assignment::HloCostAnalysisCosts>(
*hlo_cost_analysis_);
TF_ASSIGN_OR_RETURN(
cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_, options_, *module));
return absl::OkStatus();
}
CostAnalysisOptions options_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<memory_space_assignment::HloCostAnalysisCosts>
hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
};
TEST_F(MemorySpaceAssignmentCostAnalysisTest, NoPipelineOverhead) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
param1 = f32[2,4] parameter(1)
ROOT add = f32[2,4] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(Initialize(module.get()));
const HloInstruction* add = module->entry_computation()->root_instruction();
const float expected_compute_elapsed =
8 / 8.0;
LOG(INFO) << "Expected compute elapsed = " << expected_compute_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add),
expected_compute_elapsed);
float expected_memory_elapsed =
(3 * 4 * 8) / 32.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsed(*add),
expected_memory_elapsed);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedInAlternateMemory(*add, {}, {}),
expected_memory_elapsed);
expected_memory_elapsed =
((2 * 4 * 8) / 32.0) +
((4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {}),
expected_memory_elapsed);
expected_memory_elapsed =
((4 * 8) / 32.0) +
((2 * 4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
expected_memory_elapsed =
(3 * 4 * 8) / 128.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_compute_elapsed);
}
TEST_F(MemorySpaceAssignmentCostAnalysisTest, PipelineOverhead) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
param1 = f32[2,4] parameter(1)
ROOT add = f32[2,4] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(
Initialize(module.get(),
(64.0 / 1024 / 1024)));
const HloInstruction* add = module->entry_computation()->root_instruction();
const float expected_compute_elapsed =
8 / 8.0;
LOG(INFO) << "Expected compute elapsed = " << expected_compute_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add),
expected_compute_elapsed);
float expected_memory_elapsed =
(3 * 4 * 8) / 32.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add),
expected_memory_elapsed);
float expected_overhead = expected_compute_elapsed * 2 / 3;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(*add),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsed(*add),
expected_memory_elapsed + expected_overhead);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedInAlternateMemory(*add, {}, {}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
((2 * 4 * 8) / 32.0) +
((4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(*add, {{0, {}}}),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
((4 * 8) / 32.0) +
((2 * 4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
expected_overhead = expected_compute_elapsed / 3;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(
cost_analysis_->GetDefaultMemoryAccessOverhead(*add, {{0, {}}}, {{}}),
expected_overhead);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {{}}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
(3 * 4 * 8) / 128.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
expected_overhead = 0;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_compute_elapsed);
}
}
} |
1,177 | cpp | tensorflow/tensorflow | mlir_to_bytecode | tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.cc | tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_MLRT_MLIR_TO_BYTECODE_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_MLRT_MLIR_TO_BYTECODE_H_
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
class ModuleEmitterContext;
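// Registry of per-dialect attribute encoders. Dialects without a registered
// encoder fall back to the default attribute encoding.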
class AttributeEncoderRegistry {
public:
using EncoderFn = std::function<absl::StatusOr<std::string>(
const ModuleEmitterContext&, mlir::Attribute)>;
void Register(absl::string_view dialect, EncoderFn encoder) {
encoders_[dialect] = std::move(encoder);
}
const EncoderFn* Get(absl::string_view dialect) const {
auto iter = encoders_.find(dialect);
if (iter != encoders_.end()) return &iter->second;
return nullptr;
}
private:
absl::flat_hash_map<std::string, EncoderFn> encoders_;
};
class ModuleEmitterContext {
public:
explicit ModuleEmitterContext(
const AttributeEncoderRegistry* attribute_encoder_registry)
: attribute_encoder_registry_(*attribute_encoder_registry) {}
void AddKernelName(std::string name) {
AddData(std::move(name), kernels_, kernel_id_map_);
}
int GetKernelId(llvm::StringRef name) const {
return kernel_id_map_.at(name);
}
absl::Status AddAttribute(mlir::Operation* op, mlir::Attribute attr);
int GetAttributeId(mlir::Attribute attr) const {
return attribute_id_map_.lookup(attr);
}
int AddFunction(mlir::func::FuncOp func);
int GetFunctionId(absl::string_view name) const {
return function_name_id_map_.at(name);
}
absl::Span<const std::string> kernels() const { return kernels_; }
absl::Span<const std::string> attributes() const { return attributes_; }
absl::Span<const mlir::func::FuncOp> functions() const { return functions_; }
private:
int AddData(std::string data, std::vector<std::string>& data_vector,
absl::flat_hash_map<std::string, int>& data_map) {
auto iter = data_map.find(data);
if (iter != data_map.end()) return iter->second;
int id = data_vector.size();
data_map[data] = id;
data_vector.push_back(std::move(data));
return id;
}
absl::StatusOr<std::string> DefaultEncodeAttribute(mlir::Attribute attr);
const AttributeEncoderRegistry& attribute_encoder_registry_;
std::vector<std::string> kernels_;
absl::flat_hash_map<std::string, int> kernel_id_map_;
std::vector<std::string> attributes_;
llvm::DenseMap<mlir::Attribute, int> attribute_id_map_;
absl::flat_hash_map<std::string, int> attribute_data_id_map_;
std::vector<mlir::func::FuncOp> functions_;
absl::flat_hash_map<std::string, int> function_name_id_map_;
};
std::optional<std::string> EncodeSimpleAttribute(
const ModuleEmitterContext& module_context, mlir::Attribute attr);
absl::StatusOr<bc::Buffer> EmitExecutable(
const AttributeEncoderRegistry& attribute_encoder_registry,
mlir::ModuleOp module);
}
#endif
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstdint>
#include <cstring>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TypeSwitch.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
namespace mlrt {
namespace {
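// Integer, float, and flat symbol-ref attributes whose encoded data fits in
// 32 bits are inlined directly into the kernel's attribute entry instead of
// being referenced by attribute id.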
bool CanBeInlined(mlir::Attribute attr, absl::string_view data) {
return mlir::isa<mlir::IntegerAttr, mlir::FloatAttr, mlir::FlatSymbolRefAttr>(
attr) &&
data.size() <= sizeof(uint32_t);
}
template <typename T>
std::string EncodeIntegerOrFloat(T attr) {
std::string data(sizeof(attr), '\0');
std::memcpy(data.data(), &attr, sizeof(attr));
return data;
}
template <typename T>
std::optional<std::string> EncodeListOfInteger(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
mlir::Type type;
for (int i = 0; i < array.size(); ++i) {
if (auto integer_attr = mlir::dyn_cast<mlir::IntegerAttr>(array[i])) {
if (type && integer_attr.getType() != type) return std::nullopt;
type = integer_attr.getType();
llvm::APInt value = integer_attr.getValue();
if (value.getBitWidth() != sizeof(T) * 8) return std::nullopt;
ctor.ConstructAt(i, value.getZExtValue());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeListOfSymbolRef(
const ModuleEmitterContext& module_context, mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<uint32_t>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto symbol_ref = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(array[i])) {
ctor.ConstructAt(i, module_context.GetFunctionId(symbol_ref.getValue()));
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
template <typename T>
std::optional<std::string> EncodeDenseArray(llvm::ArrayRef<T> array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
if (!array.empty()) {
ctor.Place(reinterpret_cast<const char*>(array.data()),
array.size() * sizeof(T));
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeDenseBoolArray(llvm::ArrayRef<bool> array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<uint8_t>>(&allocator, array.size());
if (!array.empty()) {
std::vector<uint8_t> data(array.size());
int i = 0;
for (auto v : array) {
data[i++] = static_cast<uint8_t>(v);
}
ctor.Place(reinterpret_cast<const char*>(data.data()), data.size());
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeListOfString(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<bc::String>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto string_attr = mlir::dyn_cast<mlir::StringAttr>(array[i])) {
ctor.ConstructAt(i, string_attr.getValue().str());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
struct FunctionEmitterContext {
explicit FunctionEmitterContext(const ModuleEmitterContext* module_context)
: module_context(*module_context) {}
const ModuleEmitterContext& module_context;
struct RegInfo {
int num_uses = 0;
int id = -1;
};
int next_reg_id = 0;
llvm::DenseMap<mlir::Value, RegInfo> register_table;
std::vector<int> free_regs;
int AssignRegId() {
if (free_regs.empty()) {
return next_reg_id++;
}
int id = free_regs.back();
free_regs.pop_back();
return id;
}
void FreeRegId(int id) { free_regs.push_back(id); }
};
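// Emits one bytecode kernel for `op`: assigns fresh registers to its results,
// records operand registers and last-use flags (freeing registers on last
// use), encodes attribute ids (inlining small attributes), and maps
// func.return/func.call/other op names to kernel codes. For func.return, the
// operands become the function's output registers.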
void EmitKernel(FunctionEmitterContext& function_context,
bc::Kernel::Constructor& constructor, mlir::Operation& op,
std::vector<uint32_t>& function_output_regs,
std::vector<uint8_t>& function_output_last_uses) {
std::vector<uint32_t> results;
results.reserve(op.getNumResults());
for (auto result : op.getResults()) {
auto iter = function_context.register_table.find(result);
CHECK(iter != function_context.register_table.end());
CHECK_EQ(iter->second.id, -1);
iter->second.id = function_context.AssignRegId();
results.push_back(iter->second.id);
}
constructor.construct_results(results.size())
.Assign(results.begin(), results.end());
std::vector<uint32_t> arguments;
std::vector<uint8_t> last_uses;
arguments.reserve(op.getNumOperands());
last_uses.reserve(op.getNumOperands());
for (auto operand : op.getOperands()) {
auto iter = function_context.register_table.find(operand);
CHECK(iter != function_context.register_table.end());
int id = iter->second.id;
CHECK_NE(id, -1);
last_uses.push_back(0);
if (--iter->second.num_uses == 0) {
function_context.FreeRegId(id);
last_uses.back() = 1;
}
arguments.push_back(id);
}
constructor.construct_arguments(arguments.size())
.Assign(arguments.begin(), arguments.end());
constructor.construct_last_uses(last_uses.size())
.Assign(last_uses.begin(), last_uses.end());
std::vector<uint32_t> attributes;
attributes.reserve(op.getAttrs().size());
for (auto attr : op.getAttrs()) {
int attr_id =
function_context.module_context.GetAttributeId(attr.getValue());
absl::string_view attr_data =
function_context.module_context.attributes().at(attr_id);
if (CanBeInlined(attr.getValue(), attr_data)) {
uint32_t data = 0;
std::memcpy(&data, attr_data.data(), attr_data.size());
attributes.push_back(data);
} else {
attributes.push_back(attr_id);
}
}
constructor.construct_attributes(attributes.size())
.Assign(attributes.begin(), attributes.end());
if (llvm::isa<mlir::func::ReturnOp>(&op)) {
constructor.set_code(function_context.module_context.GetKernelId("return"));
function_output_regs = std::move(arguments);
function_output_last_uses = std::move(last_uses);
} else if (llvm::isa<mlir::func::CallOp>(&op)) {
constructor.set_code(function_context.module_context.GetKernelId("call"));
} else {
llvm::StringRef op_name = op.getName().getStringRef();
constructor.set_code(function_context.module_context.GetKernelId(op_name));
}
}
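// Emits a bytecode function from a single-block region: assigns registers to
// the block arguments, precomputes use counts for all results, emits every
// operation as a kernel in order, and records the register count and the
// function's output registers and last uses.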
void EmitFunction(const ModuleEmitterContext& module_context,
bc::Function::Constructor& constructor, llvm::StringRef name,
mlir::Region& region) {
FunctionEmitterContext function_context(&module_context);
constructor.construct_name(name.str());
DCHECK(llvm::hasSingleElement(region)) << "should have a single block";
auto& block = region.front();
auto& register_table = function_context.register_table;
std::vector<uint32_t> input_regs;
input_regs.reserve(block.getNumArguments());
for (auto arg : block.getArguments()) {
int id = function_context.AssignRegId();
input_regs.push_back(id);
register_table[arg] = {static_cast<int>(std::distance(arg.getUses().begin(),
arg.getUses().end())),
id};
}
constructor.construct_input_regs(input_regs);
for (auto& op : block) {
for (auto result : op.getResults()) {
register_table[result] = {static_cast<int>(
std::distance(result.getUses().begin(), result.getUses().end()))};
}
}
auto kernels_constructor =
constructor.construct_kernels(block.getOperations().size());
std::vector<uint32_t> output_regs;
std::vector<uint8_t> output_last_uses;
for (const auto& iter : llvm::enumerate(block.getOperations())) {
int i = iter.index();
mlir::Operation& op = iter.value();
auto kernel_ctor = kernels_constructor.ConstructAt(i);
EmitKernel(function_context, kernel_ctor, op, output_regs,
output_last_uses);
}
constructor.set_num_regs(function_context.next_reg_id);
constructor.construct_output_regs(output_regs);
constructor.construct_output_last_uses(output_last_uses);
}
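// Registers kernel names and attributes for every op in every function of the
// module (each function must consist of a single block), then emits the
// functions and the attribute table into the executable.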
absl::Status EmitExecutable(ModuleEmitterContext& module_context,
bc::Executable::Constructor& constructor,
mlir::ModuleOp module) {
module.walk(
[&](mlir::func::FuncOp func) { module_context.AddFunction(func); });
auto functions = module_context.functions();
for (auto func : functions) {
if (!llvm::hasSingleElement(func.getRegion())) {
return absl::InvalidArgumentError("function should have a single block.");
}
auto& block = func.getRegion().front();
for (auto& op : block) {
if (llvm::isa<mlir::func::CallOp>(&op)) {
module_context.AddKernelName("call");
} else if (llvm::isa<mlir::func::ReturnOp>(&op)) {
if (op.getNumResults() != 0) {
return absl::InvalidArgumentError(
"Block terminator must be a return op.");
}
module_context.AddKernelName("return");
} else {
module_context.AddKernelName(op.getName().getStringRef().str());
}
for (auto attr : op.getAttrs()) {
if (auto status = module_context.AddAttribute(&op, attr.getValue());
!status.ok()) {
return status;
}
}
}
}
constructor.construct_kernel_names(module_context.kernels().size())
.Assign(module_context.kernels().begin(), module_context.kernels().end());
auto functions_constructor =
constructor.construct_functions(functions.size());
for (int i = 0; i < functions.size(); ++i) {
auto func = functions[i];
auto function_ctor = functions_constructor.ConstructAt(i);
EmitFunction(module_context, function_ctor, func.getSymName(),
func.getRegion());
}
constructor.construct_attributes(module_context.attributes().size())
.Assign(module_context.attributes().begin(),
module_context.attributes().end());
return absl::OkStatus();
}
}
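// Encodes `attr` with the encoder registered for the op's dialect if any,
// otherwise with the default encoder, and deduplicates the encoded bytes in
// the attribute table.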
absl::Status ModuleEmitterContext::AddAttribute(mlir::Operation* op,
mlir::Attribute attr) {
absl::StatusOr<std::string> attr_data;
if (auto* encoder = attribute_encoder_registry_.Get(
op->getName().getDialectNamespace())) {
attr_data = (*encoder)(*this, attr);
} else {
attr_data = DefaultEncodeAttribute(attr);
}
if (!attr_data.ok()) return std::move(attr_data).status();
int id = AddData(std::move(*attr_data), attributes_, attribute_data_id_map_);
attribute_id_map_[attr] = id;
return absl::OkStatus();
}
int ModuleEmitterContext::AddFunction(mlir::func::FuncOp func) {
int id = functions_.size();
functions_.push_back(func);
DCHECK(!function_name_id_map_.contains(func.getSymName()));
function_name_id_map_[func.getSymName()] = id;
return id;
}
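// Default encoding for common MLIR attributes: strings, 1/32/64-bit integers,
// f32 floats, arrays of integers/strings/symbol refs, dense i32/i64/bool
// arrays, and flat symbol refs (encoded as function ids). Returns
// std::nullopt for unsupported attributes.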
std::optional<std::string> EncodeSimpleAttribute(
const ModuleEmitterContext& module_context, mlir::Attribute attr) {
return llvm::TypeSwitch<mlir::Attribute, std::optional<std::string>>(attr)
.Case<mlir::StringAttr>(
[](const auto& str_attr) { return str_attr.str(); })
.Case<mlir::IntegerAttr>(
[](const auto& integer_attr) -> std::optional<std::string> {
switch (llvm::APInt value = integer_attr.getValue();
value.getBitWidth()) {
case 1:
return EncodeIntegerOrFloat<uint8_t>(value.getZExtValue());
case 32:
return EncodeIntegerOrFloat<uint32_t>(value.getZExtValue());
case 64:
return EncodeIntegerOrFloat<uint64_t>(value.getZExtValue());
default:
return std::nullopt;
}
})
.Case<mlir::FloatAttr>(
[](const auto& float_attr) -> std::optional<std::string> {
llvm::APFloat value = float_attr.getValue();
if (float_attr.getType().isF32()) {
return EncodeIntegerOrFloat<float>(value.convertToFloat());
}
return std::nullopt;
})
.Case<mlir::ArrayAttr>([&](const auto& array_attr)
-> std::optional<std::string> {
if (auto encoded_list_i32 = EncodeListOfInteger<uint32_t>(array_attr)) {
return std::move(*encoded_list_i32);
} else if (auto encoded_list_i64 =
EncodeListOfInteger<uint64_t>(array_attr)) {
return std::move(*encoded_list_i64);
} else if (auto encoded_list_string = EncodeListOfString(array_attr)) {
return std::move(*encoded_list_string);
} else if (auto encoded_list_symbol_ref =
EncodeListOfSymbolRef(module_context, array_attr)) {
return std::move(*encoded_list_symbol_ref);
} else {
return std::nullopt;
}
})
.Case<mlir::DenseI32ArrayAttr>(
[](const auto& dense_array_i32) -> std::optional<std::string> {
return EncodeDenseArray<int32_t>(dense_array_i32);
})
.Case<mlir::DenseI64ArrayAttr>(
[](const auto& dense_array_i64) -> std::optional<std::string> {
return EncodeDenseArray<int64_t>(dense_array_i64);
})
.Case<mlir::DenseBoolArrayAttr>(
[](const auto& dense_array_bool) -> std::optional<std::string> {
return EncodeDenseBoolArray(dense_array_bool.asArrayRef());
})
.Case<mlir::FlatSymbolRefAttr>([&](const auto& symbol_ref) {
return EncodeIntegerOrFloat<uint32_t>(
module_context.GetFunctionId(symbol_ref.getValue()));
})
.Default([](const auto& attr) { return std::nullopt; });
}
absl::StatusOr<std::string> ModuleEmitterContext::DefaultEncodeAttribute(
mlir::Attribute attr) {
if (auto result = EncodeSimpleAttribute(*this, attr)) {
return std::move(*result);
}
  std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Try to encode unsupported attribute: ", attr_str));
}
absl::StatusOr<bc::Buffer> EmitExecutable(
const AttributeEncoderRegistry& attribute_encoder_registry,
mlir::ModuleOp module) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
ModuleEmitterContext module_context(&attribute_encoder_registry);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
if (auto status = EmitExecutable(module_context, executable_ctor, module);
!status.ok()) {
return status;
}
buffer.shrink_to_fit();
return buffer;
}
} | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/attribute_span.h"
#include "tsl/platform/resource_loader.h"
#include "tsl/platform/status_matchers.h"
namespace mlrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::FloatEq;
using ::testing::IsEmpty;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
TEST(MlirToByteCodeTest, Basic) {
constexpr char kBasicMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/basic.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto kernel_names = executable.kernel_names();
EXPECT_THAT(kernel_names,
ElementsAreArray({"test_mlbc.add.i32", "test_mlbc.sub.i32",
"call", "return"}));
auto functions = executable.functions();
ASSERT_GE(functions.size(), 1);
auto function = functions[0];
EXPECT_EQ(function.name().str(), "add_i32_10");
EXPECT_EQ(function.num_regs(), 5);
EXPECT_THAT(function.input_regs(), ElementsAreArray({0}));
EXPECT_THAT(function.output_regs(), ElementsAreArray({0, 2, 2}));
EXPECT_THAT(function.output_last_uses(),
ElementsAreArray({true, false, true}));
auto kernels = function.kernels();
ASSERT_EQ(kernels.size(), 11);
EXPECT_EQ(kernels[0].code(), 0);
EXPECT_THAT(kernels[0].arguments(), ElementsAreArray({0, 0}));
EXPECT_THAT(kernels[0].results(), ElementsAreArray({1}));
EXPECT_THAT(kernels[0].last_uses(), ElementsAreArray({0, 0}));
for (int i = 1; i < 9; i++) {
EXPECT_EQ(kernels[i].code(), i % 2);
EXPECT_THAT(kernels[i].arguments(), ElementsAreArray({(i - 1) % 2 + 1, 0}));
EXPECT_THAT(kernels[i].results(), ElementsAreArray({i % 2 + 1}));
EXPECT_THAT(kernels[i].last_uses(), ElementsAreArray({1, 0}));
}
EXPECT_EQ(kernels[9].code(), 2);
EXPECT_THAT(kernels[9].arguments(), ElementsAreArray({1}));
EXPECT_THAT(kernels[9].last_uses(), ElementsAreArray({true}));
EXPECT_THAT(kernels[9].results(), ElementsAreArray({2, 3, 4}));
EXPECT_EQ(kernels[10].code(), 3);
EXPECT_THAT(kernels[10].arguments(), ElementsAreArray({0, 2, 2}));
EXPECT_THAT(kernels[10].last_uses(), ElementsAreArray({true, false, true}));
EXPECT_TRUE(kernels[10].results().empty());
}
template <typename T>
absl::StatusOr<T> DecodeAttribute(absl::string_view data) {
if (data.size() < sizeof(T))
return absl::InvalidArgumentError("Invalid data size for attribute.");
T value;
std::memcpy(&value, data.data(), sizeof(T));
return value;
}
TEST(MlirToByteCodeTest, BasicAttributes) {
constexpr char kBasicAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"basic_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 15);
auto attr_iter = attributes.begin();
EXPECT_EQ(*attr_iter, "test string");
++attr_iter;
EXPECT_EQ(*attr_iter, "ts");
++attr_iter;
EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100));
++attr_iter;
EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200));
++attr_iter;
EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0)));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0));
++attr_iter;
bc::Vector<int64_t> list_of_i64((*attr_iter).data());
EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4}));
++attr_iter;
bc::Vector<int32_t> list_of_i32((*attr_iter).data());
EXPECT_THAT(list_of_i32, ElementsAreArray({0, 1, 2, 3}));
++attr_iter;
bc::Vector<bc::String> list_of_str((*attr_iter).data());
EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"}));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1));
EXPECT_EQ(executable.functions()[1].name().Get(), "callee");
++attr_iter;
bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data());
EXPECT_EQ(executable.functions()[2].name().Get(), "callee0");
EXPECT_EQ(executable.functions()[3].name().Get(), "callee1");
EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3}));
++attr_iter;
bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data());
EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data());
EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int32_t> empty_dense_array((*attr_iter).data());
EXPECT_TRUE(empty_dense_array.empty());
++attr_iter;
bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data());
EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false}));
auto kernels = executable.functions()[0].kernels();
ASSERT_EQ(kernels.size(), 16);
auto kernel_iter = kernels.begin();
auto attribute_span = [&](auto kernel_iter) {
return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes);
};
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(),
"test string");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100);
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2, 3, 4}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0),
ElementsAreArray({"string 0", "string 1"}));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
IsEmpty());
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0),
ElementsAreArray({true, false}));
}
TEST(MlirToByteCodeTest, UnsupportedAttributes) {
constexpr char kUnsupportedAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"unsupported_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kUnsupportedAttributesMlir),
&mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
EXPECT_THAT(EmitExecutable(attribute_encoder_registry, mlir_module.get()),
StatusIs(absl::StatusCode::kInvalidArgument,
"Try to encode unsupported attribute: unit"));
}
class CustomDense {
public:
struct StorageType {
using Self = StorageType;
DEFINE_BYTECODE_FIELD(bc::Vector<int64_t>, shape);
DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, data);
};
class Constructor {
public:
Constructor(bc::Allocator* allocator, bc::BcAddr_t address)
: allocator_(allocator), address_(address) {}
template <typename... Args>
auto construct_shape(Args&&... args) {
return StorageType::construct_shape(allocator_, address_,
std::forward<Args>(args)...);
}
template <typename... Args>
auto construct_data(Args&&... args) {
return StorageType::construct_data(allocator_, address_,
std::forward<Args>(args)...);
}
bc::BcAddr_t address() const { return address_; }
private:
bc::Allocator* allocator_;
bc::BcAddr_t address_;
};
using NonTrivialConstructorType = Constructor;
explicit CustomDense(const char* p) : p_(p) {}
bc::Vector<int64_t> shape() const { return StorageType::read_shape(p_); }
bc::Vector<uint32_t> data() const { return StorageType::read_data(p_); }
private:
const char* p_ = nullptr;
};
absl::StatusOr<std::string> EncodeCustomDense(const ModuleEmitterContext&,
mlir::Attribute attr) {
auto dense_int_attr = mlir::dyn_cast<mlir::DenseIntElementsAttr>(attr);
if (!dense_int_attr)
return absl::InvalidArgumentError(
"The element of the custom dense attribute must be an integer.");
if (mlir::cast<mlir::IntegerType>(dense_int_attr.getElementType())
.getWidth() != 32) {
return absl::InvalidArgumentError(
"The element of the custom dense attribute must be an i32 integer.");
}
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto custom_dense_ctor = bc::New<CustomDense>(&allocator);
auto shaped_type = dense_int_attr.getType();
std::vector<int64_t> shape(shaped_type.getShape().begin(),
shaped_type.getShape().end());
custom_dense_ctor.construct_shape(shape);
custom_dense_ctor.construct_data(shaped_type.getNumElements())
.Place(dense_int_attr.getRawData().data(),
dense_int_attr.getRawData().size());
return std::string(buffer.data(), buffer.size());
}
TEST(MlirToByteCodeTest, CustomDense) {
constexpr char kCustomAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"custom_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kCustomAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
attribute_encoder_registry.Register("test_custom", &EncodeCustomDense);
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 10);
for (int i = 0; i < 10; ++i) {
bc::String attr_data = attributes[i];
CustomDense custom_dense(attr_data.data());
EXPECT_THAT(custom_dense.shape(), ElementsAreArray({1}));
EXPECT_THAT(custom_dense.data(), ElementsAreArray({i}));
}
}
}
} |
1,178 | cpp | tensorflow/tensorflow | test_utils | third_party/xla/third_party/tsl/tsl/lib/monitoring/test_utils.cc | third_party/xla/xla/tests/test_utils_test.cc | #ifndef TENSORFLOW_TSL_LIB_MONITORING_TEST_UTILS_H_
#define TENSORFLOW_TSL_LIB_MONITORING_TEST_UTILS_H_
#include <cstdint>
#include "tsl/lib/monitoring/types.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/histogram.pb.h"
namespace tsl {
namespace monitoring {
namespace testing {
using tensorflow::HistogramProto;
class Histogram final {
public:
Histogram() = default;
explicit Histogram(const HistogramProto& histogram_proto)
: histogram_proto_(histogram_proto) {}
double num() const;
double num(size_t bucket) const;
double sum() const;
double sum_squares() const;
absl::StatusOr<Histogram> Subtract(const Histogram& other) const;
private:
HistogramProto histogram_proto_;
};
class Percentiles final {
public:
Percentiles() = default;
explicit Percentiles(const tsl::monitoring::Percentiles& percentiles)
: percentiles_(percentiles) {}
size_t num() const;
double sum() const;
Percentiles Subtract(const Percentiles& other) const;
private:
tsl::monitoring::Percentiles percentiles_;
};
}
}
}
#endif
#include "tsl/lib/monitoring/test_utils.h"
#include <cmath>
#include <cstdint>
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "tsl/lib/monitoring/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/histogram.pb.h"
namespace tsl {
namespace monitoring {
namespace testing {
double Histogram::num() const { return histogram_proto_.num(); }
double Histogram::num(size_t bucket) const {
if (bucket >= histogram_proto_.bucket().size()) {
return 0;
}
return histogram_proto_.bucket(bucket);
}
double Histogram::sum() const { return histogram_proto_.sum(); }
double Histogram::sum_squares() const { return histogram_proto_.sum_squares(); }
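// Subtracts `other` bucket-by-bucket. The bucket boundaries must match and
// the result must remain non-negative; otherwise an InvalidArgument error is
// returned. An empty right-hand side is treated as zero.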
absl::StatusOr<Histogram> Histogram::Subtract(const Histogram& other) const {
HistogramProto histogram_proto = histogram_proto_;
if (other.histogram_proto_.bucket_limit().empty() &&
other.histogram_proto_.bucket().empty()) {
return Histogram(histogram_proto);
}
if (!absl::c_equal(histogram_proto.bucket_limit(),
other.histogram_proto_.bucket_limit())) {
return errors::InvalidArgument(
"Subtracting a histogram with different buckets. Left: [",
absl::StrJoin(histogram_proto.bucket_limit(), ", "), "], right: [",
absl::StrJoin(other.histogram_proto_.bucket_limit(), ", "), "].");
}
histogram_proto.set_num(histogram_proto.num() - other.histogram_proto_.num());
histogram_proto.set_sum(histogram_proto.sum() - other.histogram_proto_.sum());
histogram_proto.set_sum_squares(histogram_proto.sum_squares() -
other.histogram_proto_.sum_squares());
for (size_t i = 0; i < histogram_proto.bucket().size(); ++i) {
histogram_proto.set_bucket(
i, histogram_proto.bucket(i) - other.histogram_proto_.bucket(i));
}
const bool histogram_is_valid =
histogram_proto.num() >= 0 &&
absl::c_all_of(histogram_proto.bucket(),
[](const double num) { return num >= 0; });
if (!histogram_is_valid) {
return errors::InvalidArgument(
"Failed to subtract a histogram by a larger histogram. Left operand: ",
histogram_proto.ShortDebugString(),
", right operand: ", other.histogram_proto_.ShortDebugString());
}
return Histogram(histogram_proto);
}
size_t Percentiles::num() const { return percentiles_.total_samples; }
double Percentiles::sum() const {
return std::isnan(percentiles_.accumulator) ? 0 : percentiles_.accumulator;
}
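// Returns the delta in total sample count and accumulated sum; the individual
// percentile points are not differenced.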
Percentiles Percentiles::Subtract(const Percentiles& other) const {
tsl::monitoring::Percentiles delta;
delta.unit_of_measure = percentiles_.unit_of_measure;
delta.total_samples = num() - other.num();
delta.accumulator = sum() - other.sum();
return Percentiles(delta);
}
}
}
} | #include "xla/tests/test_utils.h"
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/flat_hash_set.h"
#include "xla/client/xla_builder.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/tests/local_client_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class TestUtilsTest : public LocalClientTestBase {};
XLA_TEST_F(TestUtilsTest, UnusedParam) {
XlaBuilder builder(TestName());
Shape single_float = ShapeUtil::MakeShape(F32, {});
Parameter(&builder, 0, single_float, "unused");
Parameter(&builder, 1, single_float, "used");
auto computation_status = builder.Build();
TF_ASSERT_OK(computation_status.status());
Shape pair_float = ShapeUtil::MakeShape(F32, {2});
Reduce(Parameter(&builder, 0, pair_float, "operand"),
Parameter(&builder, 1, single_float, "init"),
computation_status.value(), {0});
computation_status = builder.Build();
TF_ASSERT_OK(computation_status.status());
TF_ASSERT_OK_AND_ASSIGN(auto executables,
local_client_->Compile(computation_status.value(),
{&pair_float, &single_float},
ExecutableBuildOptions()));
HloModule& module =
const_cast<HloModule&>(executables[0]->executable()->module());
TF_ASSERT_OK(MakeFakeArguments(&module).status());
}
XLA_TEST_F(TestUtilsTest, MultipleIndexSpacesForDynamicSlices) {
auto module = ParseAndReturnVerifiedModule(
R"(HloModule index_space_module
ENTRY IndexSpace {
index_param.0 = s32[] parameter(0)
index_param.1 = s32[] parameter(1)
index_param.2 = s32[] parameter(2)
array_param.1 = f32[123,4,789]{0,1,2} parameter(3)
array_param.2 = f32[3,3000,5]{0,1,2} parameter(4)
dynamic-slice.1 = f32[1,2,3] dynamic-slice(array_param.1, index_param.0, index_param.1, index_param.2), dynamic_slice_sizes={1,2,3}
ROOT dynamic-slice.2 = f32[3,2,2] dynamic-slice(array_param.2, index_param.0, index_param.1, index_param.2), dynamic_slice_sizes={3,2,2}
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 5);
EXPECT_GE(args[0].Get<int32_t>({}), -1);
EXPECT_LE(args[0].Get<int32_t>({}), 1);
EXPECT_GE(args[1].Get<int32_t>({}), -1);
EXPECT_LE(args[1].Get<int32_t>({}), 2);
EXPECT_GE(args[2].Get<int32_t>({}), -1);
EXPECT_LE(args[2].Get<int32_t>({}), 3);
}
XLA_TEST_F(TestUtilsTest, MultipleIndexSpacesForDynamicUpdateSlices) {
auto module = ParseAndReturnVerifiedModule(
R"(HloModule index_space_module
ENTRY IndexSpace {
index_param.0 = s32[] parameter(0)
index_param.1 = s32[] parameter(1)
index_param.2 = s32[] parameter(2)
array_param.1 = f32[123,4,789]{0,1,2} parameter(3)
array_param.2 = f32[3,3000,5]{0,1,2} parameter(4)
update_param.1 = f32[1,2,3]{0,1,2} parameter(5)
update_param.2 = f32[3,2,2]{0,1,2} parameter(6)
dynamic-update-slice.1 = f32[123,4,789] dynamic-update-slice(array_param.1, update_param.1, index_param.0, index_param.1, index_param.2)
ROOT dynamic-update-slice.2 = f32[3,3000,5] dynamic-update-slice(array_param.2, update_param.2, index_param.0, index_param.1, index_param.2)
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 7);
EXPECT_GE(args[0].Get<int32_t>({}), -1);
EXPECT_LE(args[0].Get<int32_t>({}), 1);
EXPECT_GE(args[1].Get<int32_t>({}), -1);
EXPECT_LE(args[1].Get<int32_t>({}), 2);
EXPECT_GE(args[2].Get<int32_t>({}), -1);
EXPECT_LE(args[2].Get<int32_t>({}), 3);
}
XLA_TEST_F(TestUtilsTest, NoDuplicatesFloats) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule sort.148.1589
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY %sort.148.1589 (parameter.0: f32[1048576], parameter.1: s32[1048576]) -> (f32[1048576], s32[1048576]) {
%parameter.0 = f32[1048576]{0} parameter(0)
%parameter.1 = s32[1048576]{0} parameter(1)
ROOT %sort.148.1589 = (f32[1048576]{0}, s32[1048576]{0}) sort(f32[1048576]{0} %parameter.0, s32[1048576]{0} %parameter.1), dimensions={0}, to_apply=compare
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 2);
const Literal& key_arg = args[0];
absl::flat_hash_set<uint32_t> key_set;
for (const float& value : key_arg.data<float>()) {
EXPECT_TRUE(key_set.insert(absl::bit_cast<uint32_t>(value)).second);
}
}
XLA_TEST_F(TestUtilsTest, NoDuplicatesInt32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule sort.148.1589
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY %sort.148.1589 (parameter.0: s32[1048576], parameter.1: s32[1048576]) -> (s32[1048576], s32[1048576]) {
%parameter.0 = s32[1048576]{0} parameter(0)
%parameter.1 = s32[1048576]{0} parameter(1)
ROOT %sort.148.1589 = (s32[1048576]{0}, s32[1048576]{0}) sort(s32[1048576]{0} %parameter.0, s32[1048576]{0} %parameter.1), dimensions={0}, to_apply=compare
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 2);
const Literal& key_arg = args[0];
absl::flat_hash_set<int32_t> key_set;
for (const int32_t& value : key_arg.data<int32_t>()) {
EXPECT_TRUE(key_set.insert(absl::bit_cast<uint32_t>(value)).second);
}
}
XLA_TEST_F(TestUtilsTest, NoDuplicatesBfloat16) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule sort, is_scheduled=true
compare {
p.0.lhs = bf16[] parameter(0)
p.0.rhs = bf16[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY %sort. (parameter.0: bf16[2,1452], parameter.1: s32[2,1452]) -> (bf16[2,1452], s32[2,1452]) {
%parameter.0 = bf16[2,1452]{1,0} parameter(0)
%parameter.1 = s32[2,1452]{1,0} parameter(1)
ROOT %sort = (bf16[2,1452]{1,0}, s32[2,1452]{1,0}) sort(bf16[2,1452]{1,0} %parameter.0, s32[2,1452]{1,0} %parameter.1), dimensions={1}, to_apply=compare
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 2);
const Literal& key_arg = args[0];
absl::flat_hash_set<uint16_t> key_set;
for (const bfloat16& value : key_arg.data<bfloat16>()) {
EXPECT_TRUE(key_set.insert(absl::bit_cast<uint16_t>(value)).second);
}
}
XLA_TEST_F(TestUtilsTest, MakeFakeArgumentsR0InputToDynamicSlice) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule Test
ENTRY %module (parameter.0: s32[], parameter.1: f32[20,20]) -> f32[] {
%parameter.1 = f32[20,20]{1,0} parameter(1)
%constant.1 = s32[1]{0} constant({0})
%parameter.0 = s32[] parameter(0)
%bitcast.3 = s32[1]{0} bitcast(s32[] %parameter.0)
%concatenate.1 = s32[2]{0} concatenate(s32[1]{0} %constant.1, s32[1]{0} %bitcast.3), dimensions={0}
%dynamic-slice.2 = f32[20,1]{1,0} dynamic-slice(f32[20,20]{1,0} %parameter.1, s32[2]{0} %concatenate.1), dynamic_slice_sizes={20,1}
%bitcast.4 = f32[20]{0} bitcast(f32[20,1]{1,0} %dynamic-slice.2)
%dynamic-slice.3 = f32[1]{0} dynamic-slice(f32[20]{0} %bitcast.4, s32[1]{0} %bitcast.3), dynamic_slice_sizes={1}
ROOT %bitcast.5 = f32[] bitcast(f32[1]{0} %dynamic-slice.3)
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 2);
EXPECT_TRUE(ShapeUtil::Equal(args[0].shape(), ShapeUtil::MakeShape(S32, {})))
<< ShapeUtil::HumanString(args[0].shape());
EXPECT_TRUE(
ShapeUtil::Equal(args[1].shape(), ShapeUtil::MakeShape(F32, {20, 20})))
<< ShapeUtil::HumanString(args[1].shape());
}
XLA_TEST_F(TestUtilsTest, MakeFakeArgumentsForGather) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule Test
ENTRY %module(parameter.0: f32[200,100,300], parameter.1: s32[10,2]) ->
f32[10,300] {
%parameter.0 = f32[200,100,300] parameter(0)
%parameter.1 = s32[10,2] parameter(1)
ROOT gather = f32[10,300] gather(f32[200,100,300] %parameter.0,
s32[10,2] %parameter.1),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1,300}
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 2);
const Shape& indices_shape = args[1].shape();
EXPECT_TRUE(
ShapeUtil::Equal(indices_shape, ShapeUtil::MakeShape(S32, {10, 2})))
<< ShapeUtil::HumanString(indices_shape);
auto indices = args[1].data<int32_t>();
for (const auto index : indices) {
EXPECT_GE(index, -1);
EXPECT_LE(index, 100);
}
}
XLA_TEST_F(TestUtilsTest, MakeFakeArgumentsForGatherTupleParam) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule cluster_13361217111314620287__.11, entry_computation_layout={((s32[10]{0:T(1024)}, bf16[100,256]{1,0:T(8,128)(2,1)}))->(bf16[10,256]{1,0:T(8,128)(2,1)})}
ENTRY cluster_13361217111314620287__.11 {
constant.6 = s32[] constant(0), metadata={op_type="GatherV2" op_name="GatherV2"}
arg_tuple.1 = (s32[10]{0:T(1024)}, bf16[100,256]{1,0:T(8,128)(2,1)}) parameter(0), parameter_replication={false,true}, sharding={{maximal device=0 metadata={op_type="_TPUReplicate" op_name="cluster"}}, {maximal device=0 metadata={op_type="_TPUReplicate" op_name="cluster"}}}, metadata={op_name="XLA_Args"}
get-tuple-element.3 = bf16[100,256]{1,0:T(8,128)(2,1)} get-tuple-element(arg_tuple.1), index=1, sharding={maximal device=0 metadata={op_type="_TPUReplicate" op_name="cluster"}}, metadata={op_name="const_0_arg"}
reshape.5 = bf16[100,256]{1,0} reshape(get-tuple-element.3)
get-tuple-element.2 = s32[10]{0:T(1024)} get-tuple-element(arg_tuple.1), index=0, sharding={maximal device=0 metadata={op_type="_TPUReplicate" op_name="cluster"}}, metadata={op_name="input0_0_arg"}
reshape.4 = s32[10]{0} reshape(get-tuple-element.2)
gather.7 = bf16[10,256]{1,0} gather(reshape.5, reshape.4), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,256}, metadata={op_type="GatherV2" op_name="GatherV2"}
reshape.8 = bf16[10,256]{1,0:T(8,128)(2,1)} reshape(gather.7), metadata={op_name="XLA_Retvals"}
copy.9 = bf16[10,256]{1,0:T(8,128)(2,1)} copy(reshape.8), sharding={maximal device=0 metadata={op_type="_TPUReplicate" op_name="cluster"}}, metadata={op_name="XLA_Retvals"}
ROOT tuple.10 = (bf16[10,256]{1,0:T(8,128)(2,1)}) tuple(copy.9), sharding={{maximal device=0 metadata={op_type="_TPUReplicate" op_name="cluster"}}}, metadata={op_name="XLA_Retvals"}
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> args,
MakeFakeArguments(module.get(), true,
true,
true));
ASSERT_EQ(args.size(), 1);
const Shape& indices_shape = args[0].shape().tuple_shapes()[0];
EXPECT_TRUE(ShapeUtil::Equal(indices_shape, ShapeUtil::MakeShape(S32, {10})))
<< ShapeUtil::HumanString(indices_shape);
const std::vector<Literal> results = args[0].DecomposeTuple();
auto indices = results[0].data<int32_t>();
for (const auto index : indices) {
EXPECT_GE(index, -1);
EXPECT_LE(index, 100);
}
}
XLA_TEST_F(TestUtilsTest, MakeFakeArgumentsForScatter) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule Test
scatter_update (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
ROOT rhs = f32[] parameter(1)
}
ENTRY main {
operand = f32[200,100,300] parameter(0)
indices = s32[10,2] parameter(1)
updates = f32[10,300] parameter(2)
ROOT scatter = f32[200,100,300] scatter(operand, indices, updates),
to_apply=scatter_update,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> args,
MakeFakeArguments(module.get()));
ASSERT_EQ(args.size(), 3);
const Shape& indices_shape = args[1].shape();
EXPECT_TRUE(
ShapeUtil::Equal(indices_shape, ShapeUtil::MakeShape(S32, {10, 2})))
<< ShapeUtil::HumanString(indices_shape);
auto indices = args[1].data<int32_t>();
for (const auto index : indices) {
EXPECT_GE(index, -1);
EXPECT_LE(index, 100);
}
}
}
} |
1,179 | cpp | tensorflow/tensorflow | tfrt_fallback_util | tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.cc | tensorflow/compiler/mlir/tfrt/tests/ir/tfrt_fallback_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_IR_TFRT_FALLBACK_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_IR_TFRT_FALLBACK_UTIL_H_
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
namespace tfrt {
namespace fallback_async {
bool IsArgConsumedByFallback(mlir::func::FuncOp func, int arg_index);
void ForEachArgConsumedByFallback(
mlir::func::FuncOp func, llvm::function_ref<void(int arg_index)> action);
void ForEachArgConsumedByFallback(
mlir::ModuleOp module,
llvm::function_ref<void(llvm::StringRef func_name, int arg_index)> action);
}
}
#endif
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
namespace tfrt {
namespace fallback_async {
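// Returns true if the argument at `arg_index` of `func` has at least one user
// from the fallback_async dialect.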
bool IsArgConsumedByFallback(mlir::func::FuncOp func, int arg_index) {
auto arg = func.getArgument(arg_index);
for (mlir::Operation *user : arg.getUsers()) {
if (llvm::isa<FallbackAsyncDialect>(user->getDialect())) return true;
}
return false;
}
void ForEachArgConsumedByFallback(
mlir::func::FuncOp func, llvm::function_ref<void(int arg_index)> action) {
for (int arg_index = 0; arg_index < func.getNumArguments(); ++arg_index) {
if (IsArgConsumedByFallback(func, arg_index)) action(arg_index);
}
}
void ForEachArgConsumedByFallback(
mlir::ModuleOp module,
llvm::function_ref<void(llvm::StringRef func_name, int arg_index)> action) {
for (auto func : module.getOps<mlir::func::FuncOp>()) {
ForEachArgConsumedByFallback(
func, [func_name = func.getName(), action](int arg_index) {
action(func_name, arg_index);
});
}
}
}
} | #include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
#include <string>
#include <utility>
#include <vector>
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tfrt/init_tfrt_dialects.h"
namespace tfrt {
namespace fallback_async {
namespace {
TEST(SavedModelTest, MapFallbackArgs) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/ir/testdata/test.mlir");
mlir::DialectRegistry registry;
RegisterTFRTDialects(registry);
registry.insert<tfrt::fallback_async::FallbackAsyncDialect>();
registry.insert<tfrt::fallback_sync::FallbackSyncDialect>();
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::vector<std::pair<std::string, int>> func_and_index;
ForEachArgConsumedByFallback(
module.get(),
[&func_and_index](llvm::StringRef func_name, int arg_index) {
func_and_index.push_back({func_name.str(), arg_index});
});
ASSERT_EQ(func_and_index.size(), 1);
EXPECT_EQ(func_and_index[0].first, "test");
EXPECT_EQ(func_and_index[0].second, 2);
}
}
}
} |
1,180 | cpp | tensorflow/tensorflow | saved_model | tensorflow/core/tfrt/saved_model/saved_model.cc | tensorflow/core/tfrt/saved_model/tests/saved_model_test.cc | #ifndef TENSORFLOW_CORE_TFRT_SAVED_MODEL_SAVED_MODEL_H_
#define TENSORFLOW_CORE_TFRT_SAVED_MODEL_SAVED_MODEL_H_
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_util.h"
#include "tsl/platform/protobuf.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/request_deadline_tracker.h"
#include "tfrt/host_context/resource_context.h"
namespace tfrt {
class BEFFile;
class HostContext;
}
namespace tensorflow {
namespace tfrt_stub {
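// Read-only view over a loaded signature: exposes its input/output names,
// tensor specs and default inputs.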
class FunctionMetadata {
public:
explicit FunctionMetadata(const internal::Signature* signature)
: signature_(signature) {
assert(signature);
}
const std::vector<std::string>& GetInputNames() const {
return signature_->input_names;
}
const std::vector<TensorSpec>& GetInputSpecs() const {
return signature_->input_specs;
}
const std::vector<std::string>& GetOutputNames() const {
return signature_->output_names;
}
const std::vector<TensorSpec>& GetOutputSpecs() const {
return signature_->output_specs;
}
const protobuf::Map<std::string, TensorProto>& GetDefaultInputs() const {
return signature_->default_inputs;
}
private:
friend class SavedModelImpl;
const internal::Signature* signature_ = nullptr;
};
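// Base class for TFRT saved models. Owns the graph executor and exposes
// signature-based execution via Run(), RunMultipleSignatures() and
// RunByTensorNames().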
class SavedModel {
public:
struct Options {
explicit Options(const Runtime* rt) : graph_execution_options(rt) {}
bool enable_lazy_loading = false;
bool maybe_load_from_mla = false;
bool lazy_loading_use_graph_executor = false;
bool aot_generation = false;
GraphExecutionOptions graph_execution_options;
};
using RunOptions = GraphExecutionRunOptions;
explicit SavedModel(const Runtime* runtime) : options_(runtime) {
DCHECK(runtime);
}
explicit SavedModel(Options options,
std::unique_ptr<GraphExecutor> graph_executor)
: options_(std::move(options)),
graph_executor_(std::move(graph_executor)) {}
virtual ~SavedModel();
const SessionMetadata& model_metadata() const {
return options_.graph_execution_options.model_metadata;
}
const Runtime& runtime() const {
DCHECK(options_.graph_execution_options.runtime);
return *options_.graph_execution_options.runtime;
}
tfrt::HostContext* GetHostContext() const;
GraphExecutor& graph_executor() const { return *graph_executor_; }
virtual const tensorflow::MetaGraphDef& GetMetaGraphDef() const = 0;
virtual std::vector<std::string> GetFunctionNames() const = 0;
virtual std::optional<FunctionMetadata> GetFunctionMetadata(
absl::string_view func_name) const = 0;
virtual tensorflow::Status Run(const RunOptions& run_options,
absl::string_view name,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs) = 0;
virtual tensorflow::Status RunMultipleSignatures(
const RunOptions& run_options, absl::Span<const std::string> names,
absl::Span<const std::vector<tensorflow::Tensor>> multi_inputs,
std::vector<std::vector<tensorflow::Tensor>>* multi_outputs) = 0;
virtual tensorflow::Status RunByTensorNames(
const RunOptions& run_options,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_node_names,
std::vector<tensorflow::Tensor>* outputs) = 0;
protected:
const FallbackState& fallback_state() const {
return graph_executor_->fallback_state();
}
FallbackState& fallback_state() { return graph_executor_->fallback_state(); }
const Options options_;
std::unique_ptr<GraphExecutor> graph_executor_;
};
using SignatureMap = absl::flat_hash_map<std::string, internal::Signature>;
using ::tensorflow::StatusOr;
class SavedModelImpl final : public SavedModel {
public:
struct JoinedSignature;
static absl::StatusOr<std::unique_ptr<SavedModel>> LoadSavedModel(
Options options, absl::string_view saved_model_dir,
const std::unordered_set<std::string>& tags);
static absl::StatusOr<std::unique_ptr<SavedModel>> LoadSavedModel(
Options options, tensorflow::MetaGraphDef meta_graph_def,
absl::string_view saved_model_dir);
SavedModelImpl(
Options options, SymbolUids symbol_uids,
tensorflow::MetaGraphDef meta_graph_def, tfrt::BefBuffer bef,
tfrt::RCReference<tfrt::BEFFile> bef_file, mlrt::bc::Buffer bytecode,
std::optional<mlrt::LoadedExecutable> loaded_executable,
absl::flat_hash_map<std::string, internal::Signature> signatures,
std::unique_ptr<OpKernelRunnerTable> runner_table,
std::unique_ptr<tfd::FallbackResourceArray> resource_array,
std::unique_ptr<GraphExecutor> graph_executor);
~SavedModelImpl() override = default;
SavedModelImpl(const SavedModelImpl&) = delete;
SavedModelImpl& operator=(const SavedModelImpl&) = delete;
const tensorflow::MetaGraphDef& GetMetaGraphDef() const override;
std::vector<std::string> GetFunctionNames() const override;
std::optional<FunctionMetadata> GetFunctionMetadata(
absl::string_view func_name) const override;
tensorflow::Status Run(const RunOptions& run_options, absl::string_view name,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs) override;
tensorflow::Status RunMultipleSignatures(
const RunOptions& run_options, absl::Span<const std::string> names,
absl::Span<const std::vector<tensorflow::Tensor>> multi_inputs,
std::vector<std::vector<tensorflow::Tensor>>* multi_outputs) override;
tensorflow::Status RunByTensorNames(
const RunOptions& run_options,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_node_names,
std::vector<tensorflow::Tensor>* outputs) override;
private:
struct LoadingResult {
std::string name;
SymbolUids symbol_uids;
mlrt::bc::Buffer bytecode_buffer;
std::unique_ptr<mlrt::LoadedExecutable> bytecode_executable;
tfrt::BefBuffer bef;
tfrt::RCReference<tfrt::BEFFile> bef_file;
std::unique_ptr<OpKernelRunnerTable> runner_table;
std::unique_ptr<tfd::FallbackResourceArray> resource_array;
std::unique_ptr<tfrt::ResourceContext> resource_context;
};
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ImportSubgraph(
mlir::MLIRContext* context, absl::string_view name,
const tensorflow::GraphImportConfig::InputArrays& input_nodes,
const std::vector<std::string>& output_nodes,
const std::vector<std::string>& target_nodes);
absl::StatusOr<std::reference_wrapper<const SavedModelImpl::LoadingResult>>
LoadJoinedSignature(const JoinedSignature& joined_signature)
TF_EXCLUSIVE_LOCKS_REQUIRED(loading_result_cache_mu_);
absl::StatusOr<std::reference_wrapper<const SavedModelImpl::LoadingResult>>
GetOrCreateLoadingResult(const RunOptions& run_options,
absl::Span<const std::string> names)
TF_LOCKS_EXCLUDED(loading_result_cache_mu_);
SymbolUids symbol_uids_;
tensorflow::MetaGraphDef meta_graph_def_;
tfrt::BefBuffer bef_;
tfrt::RCReference<tfrt::BEFFile> bef_file_;
mlrt::bc::Buffer bytecode_;
std::optional<mlrt::LoadedExecutable> loaded_executable_;
tfrt::RequestDeadlineTracker req_deadline_tracker_;
absl::flat_hash_map<std::string, internal::Signature> signatures_;
std::unique_ptr<OpKernelRunnerTable> runner_table_;
std::unique_ptr<tfd::FallbackResourceArray> resource_array_;
tensorflow::mutex loading_result_cache_mu_;
absl::flat_hash_map<std::string,
std::unique_ptr<LoadingResult>>
loading_result_cache_ TF_GUARDED_BY(loading_result_cache_mu_);
};
class SavedModelMiraImpl;
}
}
namespace tfrt {
using SavedModel = ::tensorflow::tfrt_stub::SavedModel;
using SavedModelImpl = ::tensorflow::tfrt_stub::SavedModelImpl;
using SavedModelMiraImpl = ::tensorflow::tfrt_stub::SavedModelMiraImpl;
using TensorSpec = ::tensorflow::tfrt_stub::TensorSpec;
using FunctionMetadata = ::tensorflow::tfrt_stub::FunctionMetadata;
namespace internal {
using Signature = ::tensorflow::tfrt_stub::internal::Signature;
}
}
#endif
#include "tensorflow/core/tfrt/saved_model/saved_model.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/compiler/mlir/tfrt/saved_model/saved_model.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/graph_executor/export_mlir.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/batch_kernel.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_util.h"
#include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include "tensorflow/core/tfrt/stubs/model_config_stub.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_executor/bef_file.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/kernel_registry.h"
#include "tfrt/host_context/request_deadline_tracker.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/metrics/common_metrics.h"
#include "tfrt/support/ref_count.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr absl::string_view kSignatureJoiningDelimiter = "+";
auto* lazy_loading_count = monitoring::Counter<3>::New(
"/tensorflow/tfrt/lazy_loading_count", "The total number of lazy loadings.",
"model_name", "model_version", "use_graph_executor");
auto* saved_model_import_time_seconds =
tensorflow::monitoring::Gauge<int64_t, 1>::New(
"/tensorflow/tfrt/saved_model/import_time",
"Record the MLIR import time for the savedmodel.", "model_name");
auto* saved_model_compile_time_seconds =
tensorflow::monitoring::Gauge<int64_t, 1>::New(
"/tensorflow/tfrt/saved_model/compile_time",
"Record the compilation time for the savedmodel.", "model_name");
auto* saved_model_init_time_seconds =
tensorflow::monitoring::Gauge<int64_t, 1>::New(
"/tensorflow/tfrt/saved_model/init_time",
"Record the initialization time for the savedmodel.", "model_name");
auto* saved_model_input_spec_validation_failure =
tensorflow::monitoring::Gauge<bool, 1>::New(
"/tensorflow/tfrt/saved_model/input_spec_validation_failure",
"Record the models that failed input spec validation.", "model_name");
tensorflow::Status RunBytecodeInitializers(
const GraphExecutionOptions& options,
const InitializersAndSignatures& initializers_and_signatures,
const mlrt::LoadedExecutable& loaded_executable,
tfrt::ResourceContext* resource_context, OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, FallbackState& fallback_state,
const bool provide_inputs) {
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options, {},
options.runtime->work_queue(), resource_context,
nullptr, runner_table,
resource_array, fallback_state,
fallback_state.process_function_library_runtime()));
std::vector<tensorflow::Tensor> outputs;
if (auto function = loaded_executable.GetFunction("_tfrt_fallback_init")) {
TF_RETURN_IF_ERROR(RunMlrtFunction(
function, loaded_executable, request_info->tfrt_request_context,
*request_info->request_queue, {}, &outputs,
nullptr));
}
for (const auto& p : initializers_and_signatures.initializers) {
const auto& initializer_name = p.name;
std::vector<tensorflow::Tensor> outputs;
const std::vector<tensorflow::Tensor> empty_inputs;
const std::vector<tensorflow::Tensor>& initializer_inputs =
provide_inputs ? p.inputs : empty_inputs;
TF_RETURN_IF_ERROR(GraphExecutionRunOnFunction(
options, {}, initializer_name, {},
nullptr, &loaded_executable, initializer_inputs, &outputs,
resource_context,
nullptr, runner_table, resource_array,
*options.runtime, fallback_state,
fallback_state.process_function_library_runtime(),
nullptr,
std::nullopt));
DCHECK(outputs.empty());
}
if (auto function = loaded_executable.GetFunction("_tfrt_resource_init")) {
TF_RETURN_IF_ERROR(RunMlrtFunction(
function, loaded_executable, request_info->tfrt_request_context,
*request_info->request_queue, {}, &outputs,
nullptr));
}
return absl::OkStatus();
}
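// BEF counterpart of RunBytecodeInitializers: runs the same initialization
// sequence through the BEF executor.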
tensorflow::Status RunBefInitializers(
const GraphExecutionOptions& options,
const InitializersAndSignatures& initializers_and_signatures,
tfrt::BEFFile* bef_file, tfrt::ResourceContext* resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, FallbackState& fallback_state,
const bool provide_inputs) {
DCHECK(options.runtime);
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options, {},
options.runtime->work_queue(), resource_context,
nullptr, runner_table,
resource_array, fallback_state,
fallback_state.process_function_library_runtime()));
tfrt::ExecutionContext exec_ctx(request_info->tfrt_request_context);
TF_RETURN_IF_ERROR(
RunRuntimeInitializer(exec_ctx, bef_file, "_tfrt_fallback_init"));
for (const auto& p : initializers_and_signatures.initializers) {
const auto& initializer_name = p.name;
auto* func = bef_file->GetFunction(initializer_name);
DCHECK(func);
std::vector<tensorflow::Tensor> outputs;
const std::vector<tensorflow::Tensor> empty_inputs;
const std::vector<tensorflow::Tensor>& initializer_inputs =
provide_inputs ? p.inputs : empty_inputs;
TF_RETURN_IF_ERROR(GraphExecutionRunOnFunction(
options, {}, initializer_name, {}, func,
nullptr, initializer_inputs, &outputs,
resource_context,
nullptr, runner_table, resource_array,
*options.runtime, fallback_state,
fallback_state.process_function_library_runtime(),
nullptr,
std::nullopt));
DCHECK(outputs.empty());
}
TF_RETURN_IF_ERROR(
RunRuntimeInitializer(exec_ctx, bef_file, "_tfrt_resource_init"));
return absl::OkStatus();
}
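// Verifies that the number, dtypes and shapes of `inputs` are compatible with
// the signature's input specs.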
tensorflow::Status IsInputSpecsCorrect(
absl::string_view name, const internal::Signature& signature,
absl::Span<const tensorflow::Tensor> inputs) {
TF_RET_CHECK(signature.input_specs.size() == inputs.size())
<< "signature " << name
<< " input size is wrong, expected: " << signature.input_specs.size()
<< ", actual: " << inputs.size();
for (size_t i = 0; i < inputs.size(); ++i) {
const auto& expected_input_spec = signature.input_specs[i];
TF_RET_CHECK(expected_input_spec.dtype == inputs[i].dtype())
<< "signature " << name
<< " input dtype is wrong, expected: " << expected_input_spec.dtype
<< ", actual: " << inputs[i].dtype();
TF_RET_CHECK(expected_input_spec.shape.IsCompatibleWith(inputs[i].shape()))
<< "signature " << name
<< " input shape is wrong, expected : " << expected_input_spec.shape
<< ", actual: " << inputs[i].shape();
}
return absl::OkStatus();
}
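// Validates inputs against the signature specs. In dry-run mode a mismatch
// only sets a metric and logs a warning; otherwise it is returned as an
// InvalidArgument error.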
tensorflow::Status CheckInputSpecs(
const tensorflow::SessionMetadata& model_metadata,
const SavedModel::RunOptions& run_options, absl::string_view signature_name,
const internal::Signature& signature,
absl::Span<const tensorflow::Tensor> input_tensors) {
if (!run_options.validate_input_specs &&
!run_options.validate_input_specs_dry_run) {
return absl::OkStatus();
}
auto status = IsInputSpecsCorrect(signature_name, signature, input_tensors);
if (!status.ok()) {
saved_model_input_spec_validation_failure
->GetCell(
absl::StrCat(model_metadata.name(), ":", model_metadata.version()))
->Set(true);
const auto error_string = absl::StrCat(
"model: ", model_metadata.name(),
", version: ", model_metadata.version(), ", error: ", status.message());
if (!run_options.validate_input_specs_dry_run) {
return tensorflow::errors::InvalidArgument(error_string);
}
LOG_EVERY_N_SEC(WARNING, 5)
<< "TFRT input specs validation failed, " << error_string;
}
return absl::OkStatus();
}
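// Resolves feed and fetch tensor names for one signature, skipping feed
// tensors already recorded in `visited_feed_tensor_names`.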
tensorflow::Status PreprocessSignature(
const tensorflow::SessionMetadata& model_metadata,
const SavedModel::RunOptions& run_options, absl::string_view signature_name,
const tensorflow::SignatureDef& signature_def,
const internal::Signature& signature,
absl::Span<const tensorflow::Tensor> input_tensors,
absl::flat_hash_set<std::string>* visited_feed_tensor_names,
std::vector<std::pair<std::string, tensorflow::Tensor>>& inputs,
std::vector<std::string>& output_tensor_names) {
const auto& input_names = signature.input_names;
TF_RETURN_IF_ERROR(CheckInputSpecs(model_metadata, run_options,
signature_name, signature, input_tensors));
TF_RET_CHECK(input_tensors.size() == signature_def.inputs().size())
<< "Incorrect input size for signature: " << signature_name
<< ": expected " << signature_def.inputs().size() << ", but got "
<< input_tensors.size();
DCHECK_EQ(input_names.size(), signature_def.inputs().size());
for (int i = 0; i < input_tensors.size(); ++i) {
const auto& tensor_info = signature_def.inputs().at(input_names[i]);
TF_RET_CHECK(tensor_info.encoding_case() == tensorflow::TensorInfo::kName)
<< "Only dense tensor is supported, but got encoding case "
<< tensor_info.encoding_case();
const auto& tensor_name = tensor_info.name();
if (visited_feed_tensor_names &&
!visited_feed_tensor_names->insert(tensor_name).second)
continue;
inputs.push_back(std::make_pair(tensor_name, input_tensors[i]));
}
for (const auto& output_key : signature.output_names) {
const auto& tensor_info = signature_def.outputs().at(output_key);
VLOG(1) << "Importing Signature Output: output_key = " << output_key
<< ", tensor_info = " << tensor_info.DebugString();
TF_RET_CHECK(tensor_info.encoding_case() == tensorflow::TensorInfo::kName)
<< "Only dense tensor is supported, but got encoding case "
<< tensor_info.encoding_case();
output_tensor_names.push_back(tensor_info.name());
}
return absl::OkStatus();
}
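// Returns true if the AOT package directory and its serialized MLIR and BEF
// files all exist for `saved_model_dir`.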
bool AotPackageExists(absl::string_view saved_model_dir) {
Env* env = Env::Default();
const std::string aot_package_path = GetAotPackagePath(saved_model_dir);
const std::string aot_mlir_path = GetMlirFilePath(aot_package_path);
const std::string aot_bef_path = GetBefFilePath(aot_package_path);
return env->FileExists(aot_package_path).ok() &&
env->FileExists(aot_mlir_path).ok() &&
env->FileExists(aot_bef_path).ok();
}
}
SavedModel::~SavedModel() = default;
tfrt::HostContext* SavedModel::GetHostContext() const {
return runtime().core_runtime()->GetHostContext();
}
namespace {
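// Builds the in-memory signature map from the SignatureDefs, recording
// input/output names, tensor specs and default input devices.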
void GetSignaturesFromSignatureDef(
SignatureMap& signatures,
const google::protobuf::Map<std::string, tensorflow::SignatureDef>& signature_defs,
const SavedModel::Options& options) {
for (const auto& p : signature_defs) {
const std::string& signature_name = p.first;
const tensorflow::SignatureDef& signature_def = p.second;
DCHECK(signatures.find(signature_name) == signatures.end());
auto& signature = signatures[signature_name];
signature.input_names.reserve(signature_def.inputs().size());
signature.input_specs.reserve(signature_def.inputs().size());
for (const auto& p : signature_def.inputs()) {
const std::string& input_tensor_name = p.first;
const tensorflow::TensorInfo& tensor_info = p.second;
signature.input_names.push_back(input_tensor_name);
signature.input_specs.push_back(
TensorSpec(tensor_info.dtype(), tensor_info.tensor_shape()));
}
signature.input_devices = std::vector<std::string>(
signature_def.inputs().size(),
options.graph_execution_options.compile_options.default_device);
signature.output_names.reserve(signature_def.outputs().size());
signature.output_specs.reserve(signature_def.outputs().size());
for (const auto& p : signature_def.outputs()) {
const std::string& output_tensor_name = p.first;
const tensorflow::TensorInfo& tensor_info = p.second;
signature.output_names.push_back(output_tensor_name);
signature.output_specs.push_back(
TensorSpec(tensor_info.dtype(), tensor_info.tensor_shape()));
}
}
}
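// Prefers default inputs declared in the SignatureDefs; falls back to the
// model config only when no signature provides defaults.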
void GetDefaultInputValue(
const google::protobuf::Map<std::string, tensorflow::SignatureDef>& signature_defs,
ModelRuntimeContext& context, SignatureMap& signatures) {
bool load_from_signature_def = false;
for (const auto& [name, signature_def] : signature_defs) {
auto itr = signatures.find(name);
if (itr == signatures.end()) {
continue;
}
LOG(INFO) << "Model signature identified for default inputs";
if (signature_def.defaults().empty()) continue;
LOG(INFO) << "Loading default inputs for signature: " << name
<< " from Signature def";
load_from_signature_def = true;
signatures[name].default_inputs = signature_def.defaults();
}
if (load_from_signature_def) return;
GetDefaultInputsFromModelConfig(context, signatures);
}
void UpdateCompileOptions(SavedModel::Options& options) {
if (options.graph_execution_options.enable_tfrt_gpu) {
options.graph_execution_options.compile_options.decompose_resource_ops =
false;
}
options.graph_execution_options.compile_options
.fuse_get_resource_ops_in_hoist | #include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(SavedModelTest, BasicError) {
std::string saved_model_dir = tensorflow::GetDataDependencyFilepath(
"tensorflow/core/runtime_fallback/test/saved_model/basic_v1");
TFRTSavedModelTest test(saved_model_dir);
std::vector<tensorflow::Tensor> inputs;
inputs.push_back(
CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
EXPECT_FALSE(
test.GetSavedModel()->Run({}, "serving_default", inputs, &outputs).ok());
}
}
}
} |
1,181 | cpp | tensorflow/tensorflow | update_op_cost_in_tfrt_mlir | tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.cc | tensorflow/compiler/mlir/tfrt/tests/analysis/update_op_cost_in_tfrt_mlir_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_UPDATE_OP_COST_IN_TFRT_MLIR_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_UPDATE_OP_COST_IN_TFRT_MLIR_H_
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
namespace tensorflow {
namespace tfrt_compiler {
void UpdateOpCostInTfrtMlir(mlir::ModuleOp op,
const tfrt_stub::CostRecorder& cost_recorder);
}
}
#endif
#include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include "mlir/IR/Builders.h"
#include "tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.h"
namespace tensorflow {
namespace tfrt_compiler {
constexpr char kCostAttrName[] = "_tfrt_cost";
constexpr char kOpKeyAttrName[] = "op_key";
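// Walks the module and overwrites each op's `_tfrt_cost` attribute (keyed by
// its `op_key`) with the measured cost from `cost_recorder`. Ops with a
// registered analytical cost function are skipped.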
void UpdateOpCostInTfrtMlir(mlir::ModuleOp op,
const tfrt_stub::CostRecorder& cost_recorder) {
mlir::Builder builder(op);
op.walk([&](mlir::Operation* op) {
if (HasCostFunctionRegistered(op->getName().getStringRef())) return;
const auto cost_attr = op->getAttrOfType<mlir::IntegerAttr>(kCostAttrName);
if (!cost_attr) return;
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (!op_key_attr) return;
const int64_t op_key = op_key_attr.getInt();
op->setAttr(kCostAttrName, builder.getI64IntegerAttr(
cost_recorder.GetCost(op_key)));
});
}
}
} | #include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include <cstdint>
#include <cstdlib>
#include <string>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tfrt/init_tfrt_dialects.h"
namespace tensorflow {
namespace {
constexpr char kCostAttrName[] = "_tfrt_cost";
constexpr char kOpKeyAttrName[] = "op_key";
absl::flat_hash_map<int64_t, uint64_t> GetOpCostMap(mlir::ModuleOp op) {
absl::flat_hash_map<int64_t, uint64_t> op_cost_map;
op.walk([&](mlir::Operation* op) {
const auto cost_attr = op->getAttrOfType<mlir::IntegerAttr>(kCostAttrName);
if (!cost_attr) return;
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (!op_key_attr) return;
op_cost_map[op_key_attr.getInt()] = cost_attr.getInt();
});
return op_cost_map;
}
TEST(CostUpdateTest, Basic) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/analysis/testdata/test.mlir");
mlir::DialectRegistry registry;
tfrt::RegisterTFRTDialects(registry);
registry.insert<tfrt::fallback_async::FallbackAsyncDialect>();
registry.insert<tfrt::fallback_sync::FallbackSyncDialect>();
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
auto expected_op_cost_map = GetOpCostMap(module.get());
EXPECT_EQ(expected_op_cost_map.size(), 1);
unsigned int seed = 23579;
for (auto& [op_key, cost] : expected_op_cost_map) {
cost = rand_r(&seed) % 1000;
}
tensorflow::tfrt_stub::CostRecorder cost_recorder;
for (const auto& [op_key, cost] : expected_op_cost_map) {
cost_recorder.RecordCost(op_key, cost);
}
tfrt_compiler::UpdateOpCostInTfrtMlir(module.get(), cost_recorder);
const auto got_op_cost_map = GetOpCostMap(module.get());
EXPECT_THAT(got_op_cost_map, ::testing::ContainerEq(expected_op_cost_map));
}
}
} |
1,182 | cpp | tensorflow/tensorflow | ifrt_backend_compiler | tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.cc | tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_BACKEND_COMPILER_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_BACKEND_COMPILER_H_
#include "absl/status/status.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tfrt/backend_compiler.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tpu_passes.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
namespace tensorflow {
namespace ifrt_serving {
class IfrtBackendCompiler : public tensorflow::BackendCompiler {
public:
explicit IfrtBackendCompiler(TpuCompiler* tpu_compiler = nullptr)
: tpu_compiler_(tpu_compiler) {}
void GetDependentDialects(mlir::DialectRegistry& registry) const override {
if (tpu_compiler_) {
tpu_compiler_->RegisterTPUDialects(®istry);
}
}
absl::Status CompileTensorflow(
tensorflow::tfrt_stub::ModelRuntimeContext& model_context,
mlir::ModuleOp module) const override;
private:
TpuCompiler* tpu_compiler_;
};
}
}
#endif
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/visitor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf_ifrt_passes.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tpu_passes.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
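// For each function tagged with `tfrt_ifrt_serving.program_id`, prunes the
// module to that entry point, renames it to `main`, builds an
// IfrtServingExecutable and registers it under the program id.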
absl::StatusOr<std::vector<ServingExecutableRegistry::Handle>>
CompileAndRegisterIfrtPrograms(absl::string_view model_name,
mlir::ModuleOp module,
IfrtModelContext& ifrt_model_context) {
std::vector<ServingExecutableRegistry::Handle> handles;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
int64_t program_id;
if (auto attr = func->getAttrOfType<mlir::IntegerAttr>(
"tfrt_ifrt_serving.program_id")) {
program_id = attr.getInt();
} else {
continue;
}
mlir::StatusScopedDiagnosticHandler diag_handler(module->getContext());
auto entry_function_name = func.getSymName();
auto submodule = mlir::TF::CreatePrunedModule(module, entry_function_name);
if (mlir::failed(submodule)) {
return diag_handler.ConsumeStatus();
}
submodule->get()->removeAttr("tf_saved_model.semantics");
submodule->get().walk([&](mlir::func::FuncOp func) {
if (func.getSymName() == entry_function_name) {
func.setName("main");
func.setSymName("main");
func.setPublic();
}
});
TF_ASSIGN_OR_RETURN(
auto executable,
IfrtServingExecutable::Create(
program_id, model_name, entry_function_name.str(),
*std::move(submodule), ifrt_model_context.GetClient(),
&ifrt_model_context.GetThreadPool(),
&ifrt_model_context.GetLoadedVariableRegistry(),
&ifrt_model_context.GetRestoreTensorRegistry(),
ifrt_model_context.checkpoint_loader_queue(),
ifrt_model_context.GetDeviceMgr(),
ifrt_model_context.GetShapeRepresentationFn(),
ifrt_model_context.GetIfrtServingCoreSelector(),
ifrt_model_context.GetCompilationEnvironmentProto()));
TF_ASSIGN_OR_RETURN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handles.push_back(std::move(handle));
}
return handles;
}
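// Lowers the clustered module to IFRT runtime ops, then compiles and
// registers the resulting IFRT programs with the model context.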
absl::Status CompileTensorflowForIfrtServing(
absl::string_view model_name, IfrtModelContext& ifrt_model_context,
mlir::ModuleOp module) {
tsl::profiler::TraceMe trace_me("CompileTensorflowForIfrtServing");
mlir::Builder builder(module.getContext());
TF_RETURN_IF_ERROR(
RunClusterToIfrtRuntimeOpsPassPipeline(module, model_name));
TF_ASSIGN_OR_RETURN(
auto handles,
CompileAndRegisterIfrtPrograms(model_name, module, ifrt_model_context));
for (auto& handle : handles) {
ifrt_model_context.RegisterHandle(std::move(handle));
}
return absl::OkStatus();
}
}
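// Entry point of the backend compiler: optionally converts legacy TPU ops,
// runs the TF2XLA clustering bridge, outlines and registers IFRT programs,
// erases the outlined functions, and verifies the final module.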
absl::Status IfrtBackendCompiler::CompileTensorflow(
tensorflow::tfrt_stub::ModelRuntimeContext& model_context,
mlir::ModuleOp module) const {
auto ifrt_model_context =
model_context.resource_context().GetResource<IfrtModelContext>(
kIfrtModelContextName);
if (!ifrt_model_context.has_value()) {
return absl::InternalError(
"Failed to find model context for ifrt serving.");
}
mlir::StatusScopedDiagnosticHandler diag_handler(module->getContext());
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_tpu_bct_conversion_before", module);
}
if (tpu_compiler_ != nullptr) {
if (mlir::failed(
tpu_compiler_->RunTPUBackwardCompatConversion(module, {}))) {
return diag_handler.Combine(
absl::InternalError("Failed to handle legacy TPU Ops"));
}
}
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_tpu_bct_conversion_after", module);
}
TF_RETURN_IF_ERROR(tensorflow::tf2xla::v2::RunFunctionTf2xlaClusteringBridge(
module, true,
false));
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("before_ifrt_outlining", module);
}
TF_RETURN_IF_ERROR(CompileTensorflowForIfrtServing(
model_context.name(), **ifrt_model_context, module));
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("after_ifrt_outlining", module);
}
llvm::SmallVector<mlir::func::FuncOp> to_erase;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
if (func->getAttr("tfrt_ifrt_serving.program_id")) {
to_erase.push_back(func);
}
}
for (auto func : to_erase) {
func->erase();
}
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("after_ifrt_program_removal", module);
}
if (mlir::failed(mlir::verify(module))) {
return diag_handler.ConsumeStatus();
}
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static tsl::thread::ThreadPool* thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
TEST(IfrtBackendCompilerTest, Basic) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/ifrt_cluster.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::unique_ptr<tensorflow::tfrt_stub::Runtime> runtime =
tensorflow::tfrt_stub::DefaultTfrtRuntime(1);
tensorflow::tfrt_stub::GraphExecutionOptions graph_execution_options(
runtime.get());
tfrt::ResourceContext resource_context;
tensorflow::tfrt_stub::ModelRuntimeContext runtime_context(
&graph_execution_options, "", &resource_context);
tsl::test_util::MockServingDeviceSelector mock_serving_device_selector;
IfrtServingCoreSelector core_selector(&mock_serving_device_selector,
client->addressable_device_count());
runtime_context.resource_context().CreateResource<IfrtModelContext>(
"IfrtModelContext", client, &core_selector, &GetThreadPool(),
nullptr);
IfrtBackendCompiler compiler;
TF_ASSERT_OK(compiler.CompileTensorflow(runtime_context, mlir_module.get()));
}
}
}
} |
1,183 | cpp | tensorflow/tensorflow | tf2hlo | tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.cc | tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_TF2HLO_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_TF2HLO_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/client.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace tensorflow {
namespace ifrt_serving {
struct Tf2HloResult {
mlir::OwningOpRef<mlir::ModuleOp> mlir_hlo_module;
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
tf2xla::HostComputeMetadata host_compute_metadata;
};
absl::Status UpdateCompileMetadata(
tensorflow::tpu::TPUCompileMetadataProto& metadata,
absl::Span<const DtypeAndShape> inputs);
absl::StatusOr<tensorflow::tpu::TPUCompileMetadataProto> GetCompileMetadata(
mlir::ModuleOp module, const xla::ifrt::Client& ifrt_client);
absl::StatusOr<Tf2HloResult> CompileTfToHlo(
mlir::ModuleOp module, absl::Span<const DtypeAndShape> inputs,
absl::string_view entry_function_name, const xla::ifrt::Client& ifrt_client,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn);
}
}
#endif
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_constants.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/python/ifrt/client.h"
#include "xla/service/computation_placer.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
static constexpr absl::string_view kEntryFuncName = "main";
}
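// Validates replica counts, argument kinds and dtypes, then copies the
// runtime input shapes into the compile metadata's args.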
absl::Status UpdateCompileMetadata(
tensorflow::tpu::TPUCompileMetadataProto& metadata,
absl::Span<const DtypeAndShape> inputs) {
VLOG(3) << "TpuCompileMetadata before shape is populated " << metadata;
if (metadata.num_replicas() < 1 || metadata.num_cores_per_replica() < 1) {
return absl::InternalError(
absl::StrCat("Number of replicas ", metadata.num_replicas(),
" and number of cores per replica ",
metadata.num_cores_per_replica(), " must be >= 1"));
}
if (metadata.args_size() != inputs.size()) {
return absl::InternalError(
absl::StrCat("Number of inputs mismatched! Expected ",
metadata.args_size(), " got ", inputs.size()));
}
for (int i = 0; i < metadata.args_size(); ++i) {
if (metadata.args(i).kind() !=
tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER) {
return absl::InternalError(absl::StrCat(
"Only support PARAMETER, but got ", metadata.args(i).kind()));
}
if (metadata.args(i).dtype() != inputs[i].dtype) {
return absl::InternalError(absl::StrCat("Dtype mismatched! Expected ",
metadata.args(i).dtype(), " got ",
inputs[i].dtype));
}
*metadata.mutable_args(i)->mutable_shape() = inputs[i].shape.AsProto();
}
return absl::OkStatus();
}
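// Parses the TPUCompileMetadataProto from the entry function's metadata text
// attribute; fills in a default device assignment from the IFRT client when
// none is present.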
absl::StatusOr<tensorflow::tpu::TPUCompileMetadataProto> GetCompileMetadata(
mlir::ModuleOp module, const xla::ifrt::Client& ifrt_client) {
tensorflow::tpu::TPUCompileMetadataProto metadata;
auto op = module.lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!op) {
return absl::InternalError("Could not find entry function in MLIR Module.");
}
auto metadata_text_attr =
op->getAttrOfType<mlir::StringAttr>(kMetadataTextAttrName);
if (metadata_text_attr && !metadata_text_attr.getValue().empty()) {
VLOG(1) << "Parsing from attribute " << kMetadataTextAttrName
<< metadata_text_attr.getValue().str();
if (!tsl::protobuf::TextFormat::ParseFromString(
metadata_text_attr.getValue().str(), &metadata)) {
return absl::InvalidArgumentError(absl::StrCat(
"Attribute ", kMetadataTextAttrName, ":",
metadata_text_attr.getValue().str(), " cannot be parsed"));
}
} else {
return absl::InvalidArgumentError(
absl::StrCat("Missing ", kMetadataTextAttrName));
}
if (!metadata.has_device_assignment()) {
TF_ASSIGN_OR_RETURN(
auto device_assignment,
ifrt_client.GetDefaultDeviceAssignment(
metadata.num_replicas(), metadata.num_cores_per_replica()));
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
*metadata.mutable_device_assignment() = device_assignment_proto;
}
return metadata;
}
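// Runs the TF2XLA bridge on the serialized module to produce an MHLO module
// and host-compute metadata; only uniform SPMD sharding (identical per-core
// argument shapes) is supported.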
absl::StatusOr<Tf2HloResult> CompileTfToHlo(
mlir::ModuleOp module, absl::Span<const DtypeAndShape> inputs,
absl::string_view entry_function_name, const xla::ifrt::Client& ifrt_client,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn) {
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_before_bridge_phase2", module);
}
tpu::MlirToHloArgs mlir_to_hlo_args;
std::string module_str = tensorflow::SerializeMlirModule(module);
mlir_to_hlo_args.mlir_module = module_str;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
TF_ASSIGN_OR_RETURN(
auto* platform,
stream_executor::PlatformManager::PlatformWithName("Host"));
TF_ASSIGN_OR_RETURN(
auto* client, xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform));
std::vector<TensorShape> arg_shapes;
for (const auto& input : inputs) {
arg_shapes.push_back(input.shape);
}
bool use_tuple_args = false;
std::vector<tpu::ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
TF_ASSIGN_OR_RETURN(
tensorflow::XlaCompiler::CompilationResult compilation_result,
tensorflow::tf2xla::v2::LegalizeMlirToHlo(
mlir_to_hlo_args, compile_metadata, use_tuple_args,
"XLA_TPU_JIT", custom_legalization_passes,
tensorflow::XlaShapeLayoutHelpers::ShapeDeterminationFns(
tensorflow::UseNoPreferenceLayoutFn(), shape_representation_fn),
arg_shapes, &arg_core_mapping, &per_core_arg_shapes, client));
for (auto arg_shapes_iter = per_core_arg_shapes.begin() + 1;
arg_shapes_iter != per_core_arg_shapes.end(); ++arg_shapes_iter) {
if (per_core_arg_shapes.front() != *arg_shapes_iter) {
return absl::UnimplementedError(
"Only support even sharding SPMD, but get "
"different shapes across cores");
}
}
Tf2HloResult result;
result.mlir_hlo_module = xla::llvm_ir::CreateMlirModuleOp(module->getLoc());
result.compile_metadata = std::move(compile_metadata);
result.host_compute_metadata = compilation_result.host_compute_metadata;
TF_RETURN_IF_ERROR(xla::ConvertHloToMlirHlo(
*result.mlir_hlo_module, &compilation_result.computation->proto()));
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_after_bridge_phase2",
result.mlir_hlo_module.get());
}
return result;
}
}
} | #include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p,
::testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
TEST(Tf2HloTest, Empty) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_empty.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, {}));
auto result =
CompileTfToHlo(mlir_module.get(), {}, "main", *client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
TF_ASSERT_OK(result.status());
}
TEST(Tf2HloTest, Tuple) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_tuple.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {1, 3}});
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {3, 1}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
TF_ASSERT_OK(result.status());
}
TEST(Tf2HloTest, Spmd) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_spmd_with_device_assignment.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {4, 64}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
  TF_ASSERT_OK(result.status());
  LOG(INFO) << result->compile_metadata;
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape {
dim { size: 4 }
dim { size: 64 }
}
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
device_assignment {
replica_count: 1
computation_count: 2
computation_devices { replica_device_ids: 0 }
computation_devices { replica_device_ids: 1 }
}
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(result->compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST(Tf2HloTest, UsingDefaultDeviceAssignment) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_spmd_no_device_assignment.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {4, 64}});
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {64, 10}});
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {1, 4}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
  TF_ASSERT_OK(result.status());
  LOG(INFO) << result->compile_metadata;
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape {
dim { size: 4 }
dim { size: 64 }
}
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
args {
dtype: DT_FLOAT
shape {
dim { size: 64 }
dim { size: 10 }
}
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
args {
dtype: DT_FLOAT
shape {
dim { size: 1 }
dim { size: 4 }
}
kind: PARAMETER
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
device_assignment {
replica_count: 1
computation_count: 2
computation_devices { replica_device_ids: 0 }
computation_devices { replica_device_ids: 1 }
}
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(result->compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST(Tf2HloTest, XlaCallHostCallback) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/xla_call_host_callback.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path,
mlir::ParserConfig(&context));
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_INT32, {1}});
dtype_and_shapes.push_back(DtypeAndShape{DT_INT32, {1}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
TF_ASSERT_OK(result.status());
ASSERT_EQ((*result).host_compute_metadata.device_to_host().size(), 1);
ASSERT_EQ(
(*result).host_compute_metadata.device_to_host().begin()->metadata_size(),
2);
ASSERT_EQ((*result).host_compute_metadata.host_to_device().size(), 0);
}
}
}
} |
1,184 | cpp | tensorflow/tensorflow | clustering_bridge_passes | tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc | tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_CLUSTERING_BRIDGE_PASSES_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_CLUSTERING_BRIDGE_PASSES_H_
#include "absl/base/attributes.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/PassManager.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
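// Populates `pm` with the clustering portion of the TF2XLA bridge pipeline.
// The replicated variant targets graphs with TPU replication markers
// (e.g. tf.TPUReplicateMetadata); the non-replicated variant targets
// single-core graphs compiled through XLA clusters.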
void AddReplicatedBridgeClusteringPipelinePasses(
mlir::OpPassManager& pm, llvm::StringRef module_name = llvm::StringRef());
void AddNonReplicatedBridgeClusteringPipelinePasses(mlir::OpPassManager& pm);
};
};
};
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include <string>
#include "absl/log/log.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::OpPassManager;
using mlir::func::FuncOp;
void AddReplicatedBridgeClusteringPipelinePasses(OpPassManager& pm,
llvm::StringRef module_name) {
const llvm::SmallVector<std::string, 4> ops_to_preserve = {
"tf.TPUReplicateMetadata", "tf.TPUCompilationResult",
"tf.TPUReplicatedOutput"};
bool strict_clusters =
tensorflow::GetMlirCommonFlags()->tf_mlir_enable_strict_clusters;
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
pm.addNestedPass<FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUReorderReplicateAndPartitionedInputsPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateDecomposeReduceDatasetPass());
pm.addPass(mlir::TFDevice::CreateEmbeddingPipeliningPass());
pm.addPass(mlir::TFDevice::CreateEmbeddingSequencingPass());
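  // Form TPU clusters from replication-annotated ops. `strict_clusters`
  // (tf_mlir_enable_strict_clusters) turns clustering violations into errors.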
pm.addPass(tensorflow::tf2xla::internal::CreateTPUClusterFormationPass(
strict_clusters));
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateDeviceAttributeToLaunchPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsInClusterPass());
{
OpPassManager& func_pm = pm.nest<FuncOp>();
func_pm.addPass(mlir::TFTPU::CreateTPUHostComputationExpansionPass());
func_pm.addPass(mlir::TFTPU::CreateTPUUpdateEmbeddingEnqueueOpInputsPass());
}
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateLaunchToDeviceAttributePass());
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantInDeviceClusterPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
pm.addPass(mlir::TFDevice::CreateResourceOpLiftingPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<FuncOp>(mlir::createCSEPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_merge_control_flow_pass) {
pm.addPass(mlir::TFDevice::CreateMergeControlFlowPass());
}
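  // Outside compilation: mark host-only ops inside device clusters, then hoist
  // head/tail segments and extract the remaining marked ops to the host.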
pm.addPass(
tensorflow::tf2xla::internal::CreateMarkOpsForOutsideCompilationPass());
pm.addPass(tensorflow::tf2xla::internal::
CreateExtractHeadTailOutsideCompilationPass());
pm.addPass(
tensorflow::tf2xla::internal::CreateExtractOutsideCompilationPass());
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateVerifyNoOutsideCompilationMarkersPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateClusterConstantSinkingPass());
pm.addPass(mlir::TF::CreateResourceDeviceInferencePass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateHoistBroadcastReadPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateXlaBroadcastPass());
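  // Outline each device cluster into its own function and attach sharding and
  // input/output aliasing metadata used by the TPU compiler.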
pm.addPass(mlir::TFDevice::CreateClusterOutliningPass());
pm.addPass(mlir::TFTPU::CreateTPUResourceReadForWritePass());
pm.addPass(mlir::TFDevice::CreateMarkInputOutputAliasesPass());
pm.addPass(
tensorflow::tf2xla::internal::CreateTPUShardingIdentificationPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass());
pm.addPass(mlir::TFDevice::CreateAnnotateParameterReplicationPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateRewriteTPUEmbeddingOpsPass());
pm.addPass(mlir::TFTPU::CreateTPUAnnotateDynamicShapeInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateHoistReplicateInvariantResourceWritesPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateVerifyClusteringPass());
}
void NoCanonicalization(OpPassManager& pm) {}
void AddNonReplicatedBridgeClusteringPipelinePasses(OpPassManager& pm) {
VLOG(2) << "Create TF XLA Bridge pipeline";
pm.addPass(mlir::TFDevice::CreateXlaValidateInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateCanonicalizeCompileAndReplicateAttributesPass());
const llvm::SmallVector<std::string, 4> ops_to_preserve = {};
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
pm.addNestedPass<FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(tensorflow::tf2xla::internal::CreateXlaClusterFormationPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsInClusterPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::createInlinerPass({}, NoCanonicalization));
pm.addPass(mlir::TFDevice::CreateResourceOpLiftingPass());
pm.addPass(mlir::TFDevice::CreateClusterOutliningPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateVerifyClusteringPass());
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include <gtest/gtest.h>
#include "mlir/Pass/PassManager.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::OpPassManager;
TEST(ClusteringBridgePassesTest, AddsBridgePasses) {
OpPassManager pass_manager;
AddReplicatedBridgeClusteringPipelinePasses(pass_manager);
EXPECT_EQ(pass_manager.size(), 45);
}
TEST(ClusteringBridgePassesTest, AddsNonTPUBridgePasses) {
OpPassManager pass_manager;
AddNonReplicatedBridgeClusteringPipelinePasses(pass_manager);
EXPECT_EQ(pass_manager.size(), 15);
}
};
};
}; |
1,185 | cpp | tensorflow/tensorflow | logging_hooks | tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.cc | tensorflow/compiler/mlir/tf2xla/internal/logging_hooks_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_LOGGING_HOOKS_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_LOGGING_HOOKS_H_
#include <string>
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/PassManager.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
void EnablePassIRPrinting(mlir::PassManager& pm,
const std::string& dump_group_name,
llvm::StringRef module_name = llvm::StringRef());
};
};
};
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include <memory>
#include <string>
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/core/util/debug_data_dumper.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::PassManager;
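// Dumps the IR before and after every pass in `pm` through DEBUG_DATA_DUMPER,
// grouped under `dump_group_name`. Multithreading is disabled because MLIR's
// IR-printing instrumentation requires a single-threaded pass manager.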
void EnablePassIRPrinting(PassManager& pm, const std::string& dump_group_name,
llvm::StringRef module_name) {
pm.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name, dump_group_name](const std::string& pass_tag_name,
mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), dump_group_name, pass_tag_name);
},
"",
true));
pm.enableTiming();
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/file_statistics.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::DialectRegistry;
using mlir::LogicalResult;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using mlir::PassManager;
using mlir::func::FuncOp;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tf2xla/internal/testdata/");
}
class LoggingHooksTest : public ::testing::Test {
public:
LoggingHooksTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
env_ = Env::Default();
test_group_name_ = "TestGroup";
test_dir_ = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir_.c_str(), 1);
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
Env* env_;
std::string test_dir_;
std::string test_group_name_;
};
TEST_F(LoggingHooksTest, DumpsPassData) {
std::vector<std::string> files;
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::IsEmpty());
TF_ASSERT_OK(CreateMlirModule("dead_const.mlir"));
PassManager pass_manager(&context_);
pass_manager.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
EnablePassIRPrinting(pass_manager, test_group_name_);
LogicalResult pass_status = pass_manager.run(mlir_module_.get());
EXPECT_TRUE(pass_status.succeeded());
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::SizeIs(2));
}
};
};
};
}; |
1,186 | cpp | tensorflow/tensorflow | legalize_tf_mlir | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_LEGALIZE_TF_MLIR_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_LEGALIZE_TF_MLIR_H_
#include <string>
#include <vector>
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
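// CompileFromMlirToXlaHlo lowers a serialized TF MLIR module for the given
// device type and, when `lower_to_xla_hlo` is true, produces XLA HLO in
// `compilation_result`; it returns the (possibly partially lowered) module as
// a string. LegalizeWithMlirBridge wraps it for the MLIR bridge fallback path.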
absl::StatusOr<std::string> CompileFromMlirToXlaHlo(
bool lower_to_xla_hlo, const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, llvm::StringRef device_type,
const XlaShapeLayoutHelpers::ShapeDeterminationFns& shape_determination_fns,
bool use_tuple_args, XlaCompiler::CompilationResult* compilation_result,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
const std::vector<TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes);
absl::StatusOr<XlaCompilationResult> LegalizeWithMlirBridge(
const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
XlaCompilationResult* compilation_result);
};
};
};
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/log/log.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/Pass.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/tpu/tpu_compile.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
constexpr char kBridgeComponent[] = "TFXLABridge";
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
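// Flow: deserialize the module, attach TPU infeed layouts, compile the
// serialized MLIR toward XLA HLO, then derive per-core sharding information
// from the compile metadata.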
absl::StatusOr<std::string> CompileFromMlirToXlaHlo(
bool lower_to_xla_hlo, const MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, llvm::StringRef device_type,
const XlaShapeLayoutHelpers::ShapeDeterminationFns& shape_determination_fns,
bool use_tuple_args, XlaCompiler::CompilationResult* compilation_result,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
const std::vector<TensorShape>& arg_shapes,
std::vector<ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes) {
LOG_FIRST_N(INFO, 1)
<< "Compiling MLIR computation to XLA HLO using MLIR tf2xla bridge in "
"the op by op fallback mode. This is Phase 2 of the TF2XLA Bridge. "
"Old (non-MLIR) bridge may be used in case of unsupported feature "
"or compilation failure from the MLIR bridge (full fallback mode).";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
TF_RETURN_IF_ERROR(
DeserializeMlirModule(computation.mlir_module, &context, &mlir_module));
if (!mlir::SetTPUInfeedLayout(mlir_module))
return errors::Internal("Failed to set layouts attribute");
TF_ASSIGN_OR_RETURN(
auto compiled_mlir,
CompileSerializedMlirToXlaHlo(
SerializeMlirModule(mlir_module.get()), arg_shapes, device_type,
          use_tuple_args, /*enable_op_fallback=*/true,
          shape_determination_fns, compilation_result,
custom_legalization_passes, metadata.module_name(),
lower_to_xla_hlo));
auto sharding_result =
tpu::GetShardingInfo(metadata, arg_shapes, shape_determination_fns,
arg_core_mapping, per_core_arg_shapes);
if (!sharding_result.ok()) {
return sharding_result;
}
return compiled_mlir;
}
absl::StatusOr<XlaCompilationResult> LegalizeWithMlirBridge(
const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
XlaCompilationResult* compilation_result) {
absl::StatusOr<std::string> mlir_bridge_status = CompileFromMlirToXlaHlo(
      /*lower_to_xla_hlo=*/true, computation, metadata, device_type,
shape_determination_fns, use_tuple_args, compilation_result,
custom_legalization_passes, arg_shapes, arg_core_mapping,
per_core_arg_shapes);
if (mlir_bridge_status.ok()) {
VLOG(1) << "Successfully compiled MLIR computation to XLA HLO using MLIR "
"tf2xla bridge";
return *compilation_result;
}
tsl::error_logging::Log(kBridgeComponent,
"TFXLA_API_V2_BRIDGE_WITH_FALLBACK_FAIL",
mlir_bridge_status.status().ToString())
.IgnoreError();
return mlir_bridge_status.status();
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
absl::StatusOr<std::string> CompileMlirModule(bool compile_to_xla_hlo,
const char* module_str) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.mlir_module = module_str;
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
auto compilation_result = std::make_unique<XlaCompilationResult>();
return CompileFromMlirToXlaHlo(
compile_to_xla_hlo, mlir_to_hlo_args, metadata_proto,
"XLA_TPU_JIT",
      /*shape_determination_fns=*/{}, use_tuple_args, compilation_result.get(),
custom_legalization_passes, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes);
}
absl::StatusOr<XlaCompiler::CompilationResult> LegalizeMlirModule(
const char* module_str) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.mlir_module = module_str;
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
auto compilation_result = std::make_unique<XlaCompilationResult>();
return LegalizeWithMlirBridge(
mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
{}, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes, custom_legalization_passes,
compilation_result.get());
}
TEST(LegalizeWithMlirBridge, LegalizesToMhloProto) {
auto result = LegalizeMlirModule(kMlirModuleStr);
ASSERT_THAT(result, IsOkOrFiltered());
EXPECT_THAT(result, ComputationProtoContains("opcode.*constant"));
}
TEST(CompileFromMlir, ReturnsModuleAsString) {
auto result = CompileMlirModule(true, kMlirModuleStr);
ASSERT_THAT(result, IsOkOrFiltered());
EXPECT_THAT(result, HasMlirModuleWith("mhlo.constant"));
}
}
}
}
} |
1,187 | cpp | tensorflow/tensorflow | legalize_tf_to_hlo | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_LEGALIZE_TF_TO_HLO_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_LEGALIZE_TF_TO_HLO_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
absl::StatusOr<XlaCompilationResult> LegalizeTfToHlo(
const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result);
};
};
};
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/log/log.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using metrics::IncrementTfMlirBridgeSecondPhaseCounter;
using metrics::MlirBridgeSecondPhaseMetric;
using tpu::MlirToHloArgs;
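// Combined bridge: run the MLIR bridge without the final HLO lowering
// (lower_to_xla_hlo = false), then hand the resulting module to the
// graph-based compiler to produce HLO. Each half reports success/failure
// through the phase-2 bridge metrics.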
absl::StatusOr<XlaCompilationResult> LegalizeTfToHlo(
const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result) {
LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
"Combined MLIR Tf2Xla Bridge.";
absl::StatusOr<std::string> mlir_compilation =
internal::CompileFromMlirToXlaHlo(
          /*lower_to_xla_hlo=*/false, computation, metadata, device_type,
shape_determination_fns, use_tuple_args, compilation_result,
custom_legalization_passes, arg_shapes, arg_core_mapping,
per_core_arg_shapes);
if (!mlir_compilation.ok()) {
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedMlirFailure);
return mlir_compilation.status();
}
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedMlirSuccess);
Status old_bridge_status = v1::CompileTensorflowGraphToHlo(
MlirToHloArgs{mlir_compilation.value()}, metadata, use_tuple_args,
shape_determination_fns, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result);
if (!old_bridge_status.ok()) {
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedOldFailure);
return old_bridge_status;
}
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedOldSuccess);
return *compilation_result;
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using ::tensorflow::monitoring::testing::CellReader;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kMlirLegalizeCount[] =
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_count";
static constexpr char kMlirLegalizeErrors[] =
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count";
static constexpr char kBridgeStatusCounter[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_status";
constexpr char kMlirCombinedMlirSuccess[] = "kMlirCombinedMlirSuccess";
constexpr char kMlirCombinedOldSuccess[] = "kMlirCombinedOldSuccess";
constexpr char kMlirCombinedOldFailure[] = "kMlirCombinedOldFailure";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0 : tensor<1xf32>) -> tensor<1xf32> {
%0 = "tf.Acos"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
func.return %0 : tensor<1xf32>
}
})";
static constexpr char kBadMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
absl::StatusOr<XlaCompiler::CompilationResult> CompileMlirModule(
const char* module_str) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
mlir_to_hlo_args.mlir_module = module_str;
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
std::vector<TensorShape> arg_shapes = {{1}};
TPUCompileMetadataProto metadata_proto;
auto arg = metadata_proto.add_args();
arg->set_dtype(DataType::DT_FLOAT);
arg->set_kind(TPUCompileMetadataProto::Arg::PARAMETER);
metadata_proto.add_retvals();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
auto compilation_result = std::make_unique<XlaCompilationResult>();
return LegalizeTfToHlo(mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes,
custom_legalization_passes, client,
compilation_result.get());
}
TEST(LegalizeWithCombinedBridge, DoesNotUseMlirLowering) {
CellReader<int64_t> mlir_bridge_legalize_count(kMlirLegalizeCount);
CellReader<int64_t> counts(kBridgeStatusCounter);
auto result = CompileMlirModule(kMlirModuleStr);
ASSERT_THAT(result, IsOkOrFiltered());
EXPECT_EQ(mlir_bridge_legalize_count.Delta("tf.Acos"), 0);
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedMlirSuccess), 1));
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedOldSuccess), 1));
}
TEST(LegalizeWithCombinedBridge,
CorrectlyCountsMlirBridgePassingAndGraphBridgeFailing) {
CellReader<int64_t> legalize_failure_count(kMlirLegalizeErrors);
CellReader<int64_t> counts(kBridgeStatusCounter);
auto result = CompileMlirModule(kBadMlirModuleStr);
ASSERT_FALSE(result.ok());
EXPECT_EQ(legalize_failure_count.Read("tf.DoesntExist", "Unknown"), 0);
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedMlirSuccess), 1));
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedOldFailure), 1));
}
TEST(LegalizeWithCombinedBridge, RecordsDynamicOps) {
static constexpr char kDynamismFunctionCounterStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/dynamism_function_counter";
constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
CellReader<int64_t> dynamic_function_op_count(
kDynamismFunctionCounterStreamzName);
auto result = CompileMlirModule(kMlirModuleStr);
ASSERT_TRUE(result.ok());
EXPECT_EQ(dynamic_function_op_count.Delta(kNotDynamicFunctionName), 1);
}
};
};
}; |
1,188 | cpp | tensorflow/tensorflow | mlir_bridge_pass_util | tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc | tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_MLIR_BRIDGE_PASS_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_MLIR_BRIDGE_PASS_UTIL_H_
#include <optional>
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/framework/function.h"
namespace tensorflow {
bool IsSupportedByNonReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library);
bool IsSupportedByReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library);
bool IsSupportedByReplicatedBridge(mlir::ModuleOp module);
bool HasTPUPartitionedCallOpInModule(mlir::ModuleOp module);
bool IsInferenceGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library);
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/tf2xla/tf2xla_defs.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/status.h"
namespace tensorflow {
using ::mlir::failure;
using ::mlir::LogicalResult;
using ::mlir::success;
namespace {
constexpr absl::string_view kPartitionedCall = "TPUPartitionedCall";
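// Returns success if `predicate` holds for `graph` itself or for any function
// reachable from it in `function_library`.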
LogicalResult HasAttr(
const Graph& graph, const FunctionLibraryDefinition* function_library,
const std::function<bool(const Graph& graph)>& predicate) {
if (predicate(graph)) {
return success();
}
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (!function_library) return failure();
for (const std::string& func_name :
function_library->ReachableDefinitions(graph_def).ListFunctionNames()) {
const FunctionDef* func_def = function_library->Find(func_name);
std::unique_ptr<FunctionBody> func_body;
absl::Status status = FunctionDefToBodyHelper(
*func_def, AttrSlice(&func_def->attr()), function_library, &func_body);
if (!status.ok()) {
LOG(ERROR) << "Failed to parse " << func_name << ": "
<< absl::StatusMessageAsCStr(status);
return failure();
}
if (predicate(*func_body->graph)) {
return success();
}
}
return failure();
}
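// True if the graph has a DT_RESOURCE _Arg node assigned to a parameter-server
// ("ps") job, i.e. a resource variable hosted on a PS task.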
bool HasPsWithResourceVariable(const Graph& graph) {
const std::string jobType = "ps";
const std::string nodeType = "_Arg";
const std::string attrKey = "T";
for (const Node* node : graph.nodes()) {
if (node->type_string() == nodeType) {
auto device_name = node->assigned_device_name();
DeviceNameUtils::ParsedName device;
if (DeviceNameUtils::ParseFullName(device_name, &device) &&
device.has_job && device.job == jobType) {
for (const auto& attr : node->attrs()) {
auto attr_key = attr.first;
auto attr_value = attr.second;
if (attr_key == attrKey &&
attr_value.value_case() == AttrValue::kType &&
attr_value.type() == DT_RESOURCE) {
            return true;
}
}
}
}
}
return false;
}
bool IsNonReplicatedGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
auto predicate = [](const Graph& graph) {
const std::string kStatefulPartitionedCallOp = "StatefulPartitionedCall";
for (const Node* node : graph.nodes()) {
auto node_op = node->type_string();
if (node_op == kStatefulPartitionedCallOp) {
auto attr = node->attrs().FindByString(std::string(kMustCompileAttr));
if (attr != nullptr && attr->b() == true) {
return true;
}
}
}
return false;
};
return HasAttr(graph, function_library, predicate).succeeded();
}
bool IsReplicatedGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
auto predicate = [](const Graph& graph) {
for (const Node* node : graph.nodes()) {
if (node->attrs().FindByString(std::string(kTpuReplicateAttr))) {
return true;
}
}
return false;
};
return HasAttr(graph, function_library, predicate).succeeded();
}
bool IsReplicatedGraph(mlir::ModuleOp module) {
auto walk_result = module.walk([&](mlir::Operation* op) {
const llvm::StringRef tpu_replicate_attr_name(kTpuReplicateAttr.data(),
kTpuReplicateAttr.size());
auto replicate_attr =
op->getAttrOfType<mlir::StringAttr>(tpu_replicate_attr_name);
if (replicate_attr) return mlir::WalkResult::interrupt();
return mlir::WalkResult::advance();
});
return walk_result.wasInterrupted();
}
bool DoesGraphContainTPUPartitionedCall(const Graph& graph) {
for (const Node* node : graph.nodes()) {
if (node->type_string() == kPartitionedCall) return true;
}
return false;
}
bool DoReachableFuncsContainTPUPartitionedCall(
const GraphDef& graph_def, const FunctionLibraryDefinition& flib_def) {
for (const std::string& func_name :
flib_def.ReachableDefinitions(graph_def).ListFunctionNames()) {
const FunctionDef* func_def = flib_def.Find(func_name);
std::unique_ptr<FunctionBody> func_body;
if (!FunctionDefToBodyHelper(*func_def, AttrSlice(&func_def->attr()),
&flib_def, &func_body)
.ok())
return false;
if (DoesGraphContainTPUPartitionedCall(*func_body->graph)) return true;
}
return false;
}
bool AreFunctionsFromFlibDefInference(
const FunctionLibraryDefinition& flib_def) {
for (const std::string& func_name : flib_def.ListFunctionNames()) {
const FunctionDef* func_def = flib_def.Find(func_name);
for (const NodeDef& node_def : func_def->node_def()) {
if (node_def.op() == kPartitionedCall) return true;
}
}
return false;
}
}
bool IsSupportedByNonReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library) {
return IsNonReplicatedGraph(graph, function_library) &&
HasPsWithResourceVariable(graph);
}
bool IsSupportedByReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library) {
return IsReplicatedGraph(graph, function_library);
}
bool IsSupportedByReplicatedBridge(mlir::ModuleOp module) {
return IsReplicatedGraph(module);
}
bool HasTPUPartitionedCallOpInModule(mlir::ModuleOp module) {
bool has_tpu_partitioned_call = false;
for (auto func_op : module.getOps<mlir::func::FuncOp>()) {
func_op->walk([&](mlir::TF::TPUPartitionedCallOp op) {
has_tpu_partitioned_call = true;
});
if (has_tpu_partitioned_call) break;
}
return has_tpu_partitioned_call;
}
bool IsInferenceGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
if (DoesGraphContainTPUPartitionedCall(graph)) return true;
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (DoReachableFuncsContainTPUPartitionedCall(graph_def, graph.flib_def()))
return true;
if (AreFunctionsFromFlibDefInference(graph.flib_def())) return true;
if (function_library == nullptr) return false;
if (DoReachableFuncsContainTPUPartitionedCall(graph_def, *function_library))
return true;
if (AreFunctionsFromFlibDefInference(*function_library)) return true;
return false;
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"
#include <vector>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/tpu_functional_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/tf2xla/tf2xla_defs.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/enable_tf2_utils.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
FunctionDef PassThroughResource() {
return FunctionDefHelper::Define(
"PassThroughResource",
{"in: resource"},
{"out: resource"},
{},
{{{"out"}, "Identity", {"in"}, {{"T", DataType::DT_RESOURCE}}}});
}
TEST(IsSupportedByNonReplicatedBridge, NonReplicatedGraph) {
const FunctionDef& fd = PassThroughResource();
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kEagerRuntime);
tensorflow::set_tf2_execution(true);
ConfigProto config = ConfigProto();
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::_Arg(root.WithOpName("A"), DT_RESOURCE, 0);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* call;
NameAttrList f_name_attr;
f_name_attr.set_name(fd.signature().name());
TF_ASSERT_OK(
NodeBuilder("B", "StatefulPartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_RESOURCE})
.Attr("Tout", {DT_RESOURCE})
.Attr("f", f_name_attr)
.Finalize(root.graph(), &call));
call->AddAttr(std::string(kMustCompileAttr), true);
TF_ASSERT_OK(root.ToGraph(&graph));
for (Node* node : graph.nodes()) {
node->set_assigned_device_name("/job:ps/replica:0/task:0/device:GPU:0");
}
EXPECT_TRUE(
IsSupportedByNonReplicatedBridge(graph, nullptr));
}
TEST(IsSupportedByReplicatedBridge, ReplicatedGraph) {
const FunctionDef& fd = test::function::XTimesTwo();
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kEagerRuntime);
tensorflow::set_tf2_execution(true);
ConfigProto config = ConfigProto();
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* call;
NameAttrList f_name_attr;
f_name_attr.set_name(fd.signature().name());
TF_ASSERT_OK(
NodeBuilder("B", "StatefulPartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_FLOAT})
.Attr("Tout", {DT_FLOAT})
.Attr("f", f_name_attr)
.Finalize(root.graph(), &call));
call->AddAttr(std::string(kTpuReplicateAttr), "cluster");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(
IsSupportedByReplicatedBridge(graph, nullptr));
}
TEST(IsSupportedByReplicatedBridge, ReplicatedModule) {
const char* const code = R"mlir(
func.func @entry_func_1(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.Identity"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_TRUE(IsSupportedByReplicatedBridge(*module));
}
TEST(HasTPUPartitionedCallOpInModule, HasTPUPartitionedCallModule) {
const char* const code = R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() {
%outputs_0 = "tf.TPUOrdinalSelector"() {device = ""} : () -> tensor<?xi32>
"tf.TPUPartitionedCall"(%outputs_0) {f = @reachable_func} : (tensor<?xi32>) -> ()
func.return
}
func.func @reachable_func() {
func.return
}
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_TRUE(HasTPUPartitionedCallOpInModule(*module));
}
TEST(HasTPUPartitionedCallOpInModule, HasNotTPUPartitionedCallModule) {
const char* const code = R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() {
"tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @reachable_func} : () -> ()
func.return
}
func.func @reachable_func() {
func.return
}
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_FALSE(HasTPUPartitionedCallOpInModule(*module));
}
TEST(IsInferenceGraph, GraphContrainsTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
ops::TPUPartitionedCall f(root.WithOpName("f"), {x}, 0,
{DT_FLOAT}, f_name_attr);
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(IsInferenceGraph(graph, nullptr));
}
TEST(IsInferenceGraph, GraphDoesNotContrainTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_FALSE(IsInferenceGraph(graph, nullptr));
}
TEST(IsInferenceGraph, FlibDefIsNotNullptrAndContainsTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
{{"tpu_op"}, "TPUPartitionedCall", {}, {{"Tout", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(IsInferenceGraph(graph, &flib_def));
}
}
} |
1,189 | cpp | tensorflow/tensorflow | mlir_pass_instrumentation | tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.cc | tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_MLIR_PASS_INSTRUMENTATION_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_MLIR_PASS_INSTRUMENTATION_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "mlir/Pass/PassInstrumentation.h"
namespace mlir {
void RegisterPassInstrumentor(
const std::string& name,
std::function<std::unique_ptr<PassInstrumentation>()> creator);
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>>
GetPassInstrumentors();
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/platform/logging.h"
namespace mlir {
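// Process-wide registry of pass-instrumentation factories keyed by name.
// Intentionally leaked (allocated with `new`) so it outlives all users.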
class MlirPassInstrumentationRegistry {
public:
static MlirPassInstrumentationRegistry& Instance() {
static MlirPassInstrumentationRegistry* r =
new MlirPassInstrumentationRegistry;
return *r;
}
std::unordered_map<std::string,
std::function<std::unique_ptr<PassInstrumentation>()>>
instrumentors_;
};
void RegisterPassInstrumentor(
const std::string& name,
std::function<std::unique_ptr<PassInstrumentation>()> creator) {
MlirPassInstrumentationRegistry& r =
MlirPassInstrumentationRegistry::Instance();
auto result = r.instrumentors_.emplace(name, creator);
if (!result.second) {
VLOG(1) << "Duplicate MLIR pass instrumentor registration";
}
}
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>>
GetPassInstrumentors() {
MlirPassInstrumentationRegistry& r =
MlirPassInstrumentationRegistry::Instance();
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>> result;
result.reserve(r.instrumentors_.size());
std::transform(r.instrumentors_.begin(), r.instrumentors_.end(),
std::back_inserter(result), [](auto v) { return v.second; });
return result;
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include <cstddef>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace mlir {
namespace {
static const char* kTestInstrumentationName = "test-intrumentatron";
static const char* kTestInstrumentationSearch = "tf.Identity";
struct StringStream : public llvm::raw_ostream {
StringStream() { SetUnbuffered(); }
~StringStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
ss.write(ptr, size);
}
std::stringstream ss;
};
class TestPassInstrumentation : public ::testing::Test {
public:
void SetPassThatChangedIdentity(absl::string_view pass_name) {
pass_that_changed_identity_ = pass_name;
}
absl::string_view GetPassThatChangedIdentity() {
return pass_that_changed_identity_;
}
private:
std::string pass_that_changed_identity_;
friend class TestInstrumentor;
};
class TestInstrumentor : public PassInstrumentation {
public:
explicit TestInstrumentor(TestPassInstrumentation* test) : test_(test) {}
private:
void runBeforePass(Pass* pass, Operation* op) override {
StringStream stream;
op->print(stream, mlir::OpPrintingFlags().useLocalScope());
ops_seen_by_pass_[pass] = stream.ss.str();
}
void runAfterPass(Pass* pass, Operation* op) override {
StringStream stream;
op->print(stream, mlir::OpPrintingFlags().useLocalScope());
if (!absl::StrContains(stream.ss.str(), kTestInstrumentationSearch) &&
absl::StrContains(ops_seen_by_pass_[pass],
kTestInstrumentationSearch)) {
test_->SetPassThatChangedIdentity(pass->getName().str());
}
}
private:
TestPassInstrumentation* test_;
std::unordered_map<mlir::Pass*, std::string> ops_seen_by_pass_;
};
TEST_F(TestPassInstrumentation, CreatedCalledAndSetsPassName) {
RegisterPassInstrumentor(kTestInstrumentationName, [&]() {
return std::make_unique<TestInstrumentor>(this);
});
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
SetPassThatChangedIdentity("");
std::vector<::tensorflow::TensorShape> arg_shapes = {{1}};
auto compilation_result = tensorflow::XlaCompilationResult();
TF_EXPECT_OK(tensorflow::CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result)
.status());
EXPECT_FALSE(GetPassThatChangedIdentity().empty());
}
}
} |
1,190 | cpp | tensorflow/tensorflow | dialect_detection_utils | tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.cc | tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_UTILS_DIALECT_DETECTION_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_UTILS_DIALECT_DETECTION_UTILS_H_
#include "mlir/IR/Operation.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
bool IsInBridgeAcceptableDialects(mlir::Operation* op);
}
}
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.h"
#include <set>
#include <string>
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
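// An op is acceptable to the bridge if its dialect is one of the builtin
// namespaces (func, return, builtin) or one of the bridge dialects
// (tf, tf_device).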
bool IsInBridgeAcceptableDialects(mlir::Operation* op) {
const std::set<std::string> kBuiltinNamespaces = {"func", "return",
"builtin"};
const std::set<std::string> kBridgeAcceptableNamespaces = {"tf", "tf_device"};
  bool isInDefaultNamespaces =
kBuiltinNamespaces.find(op->getDialect()->getNamespace().str()) !=
kBuiltinNamespaces.end();
bool isInBridgeAcceptableNamespaces =
kBridgeAcceptableNamespaces.find(
op->getDialect()->getNamespace().str()) !=
kBridgeAcceptableNamespaces.end();
  return isInDefaultNamespaces || isInBridgeAcceptableNamespaces;
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.h"
#include <gtest/gtest.h>
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "stablehlo/dialect/ChloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::MLIRContext;
using mlir::OpBuilder;
using mlir::Operation;
using mlir::OperationState;
using mlir::UnknownLoc;
using mlir::chlo::ChloDialect;
using mlir::TF::TensorFlowDialect;
using tensorflow::tf2xla::internal::IsInBridgeAcceptableDialects;
class SharedUtilsTest : public ::testing::Test {};
TEST_F(SharedUtilsTest, IsInFunctionalDialectPasses) {
MLIRContext context;
context.loadDialect<TensorFlowDialect>();
OpBuilder opBuilder(&context);
OperationState state(UnknownLoc::get(opBuilder.getContext()),
"tf.Const");
mlir::Operation* op = Operation::create(state);
bool result = IsInBridgeAcceptableDialects(op);
EXPECT_TRUE(result);
op->destroy();
}
TEST_F(SharedUtilsTest, IsInFunctionalDialectFails) {
MLIRContext context;
context.loadDialect<ChloDialect>();
OpBuilder opBuilder(&context);
OperationState state(UnknownLoc::get(opBuilder.getContext()),
"chlo.broadcast_add");
Operation* op = Operation::create(state);
bool result = IsInBridgeAcceptableDialects(op);
EXPECT_FALSE(result);
op->destroy();
}
}
}
}
} |
1,191 | cpp | tensorflow/tensorflow | compile_mlir_util | tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc | tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_MLIR_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_MLIR_UTIL_H_
#include <memory>
#include "absl/base/attributes.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_argument.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status ConvertMLIRToXlaComputation(
mlir::ModuleOp module_op, llvm::StringRef device_type,
xla::XlaComputation* xla_computation, bool use_tuple_args,
bool enable_op_fallback, bool return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns =
{},
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {},
llvm::StringRef module_name = llvm::StringRef());
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo = true, bool allow_partial_conversion = false);
struct TensorOrResourceShape {
TensorShape shape;
bool is_resource = false;
};
ABSL_DEPRECATED("Not meant to be used directly and should be a util.")
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status BuildHloFromTf(mlir::ModuleOp module_op, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes);
ABSL_DEPRECATED("Not meant to be used directly and should be a util.")
Status PopulateResultIOInfo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args, bool use_resource_updates_for_aliases,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
absl::StatusOr<std::string> CompileMlirToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name = llvm::StringRef(),
bool lower_to_xla_hlo = true);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
absl::StatusOr<std::string> CompileSerializedMlirToXlaHlo(
llvm::StringRef mlir_module_string, llvm::ArrayRef<TensorShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {},
llvm::StringRef module_name = llvm::StringRef(),
bool lower_to_xla_hlo = true);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status CompileGraphToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes);
ABSL_DEPRECATED(
"Use v1/compile_tf_graph.h::CompileTensorflowGraphToHlo instead.")
Status BuildHloFromGraph(
const Graph& graph, xla::XlaBuilder& builder,
mlir::MLIRContext& mlir_context, llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns, bool unconditionally_use_output_shapes,
llvm::ArrayRef<XlaArgument> args, llvm::ArrayRef<std::string> control_rets,
llvm::StringRef device_type, const FunctionLibraryDefinition& flib_def,
const GraphDebugInfo& debug_info,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {});
static inline Status CompileToHloGraphAnalysisFailedError() {
return errors::Internal("disabled after graph analysis");
}
void RegisterConvertMlirToXlaHloPipelineWithDefaults();
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <memory>
#include <string>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/shape.h"
#include "xla/translate/mhlo_to_hlo/layout_util.h"
#include "xla/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kGroupSizeAttrName =
"tf2xla.collective_info.group_size";
constexpr absl::string_view kGroupKeyAttrName =
"tf2xla.collective_info.group_key";
absl::StatusOr<TensorShape> GetTensorShapeFromXlaArgument(
const XlaArgument& arg) {
if (absl::holds_alternative<xla::Shape>(arg.shape)) {
TensorShape arg_shape;
TF_RETURN_IF_ERROR(
XLAShapeToTensorShape(std::get<xla::Shape>(arg.shape), &arg_shape));
return arg_shape;
} else {
return std::get<TensorShape>(arg.shape);
}
}
Status MaybeRewriteLayoutWithShardedShape(
mlir::StringAttr sharding,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* shape) {
if (!sharding) return absl::OkStatus();
xla::OpSharding op_sharding;
if (tensorflow::DecodeShardingAttribute(sharding, op_sharding).failed()) {
return errors::InvalidArgument("failed to parse sharding '",
sharding.getValue().str(), "'");
}
std::optional<xla::HloSharding> hlo_sharding;
TF_ASSIGN_OR_RETURN(hlo_sharding, xla::HloSharding::FromProto(op_sharding));
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
hlo_sharding, false, shape_determination_fns, shape));
return absl::OkStatus();
}
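// Computes the XLA input shapes for the main function's arguments, applying
// the layout preference and shape representation functions and any
// "mhlo.sharding" argument attributes. When use_tuple_args is true, the
// individual argument shapes are wrapped in a single tuple shape.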
Status GetXlaInputShapes(
mlir::ModuleOp module, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
std::vector<xla::Shape>* xla_input_shapes) {
xla_input_shapes->clear();
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
TF_RET_CHECK(main_func != nullptr) << "No main function found";
mlir::FunctionType func_type = main_func.getFunctionType();
int num_args = func_type.getNumInputs();
xla_input_shapes->reserve(num_args);
std::vector<xla::Shape> individual_arg_shapes;
individual_arg_shapes.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
individual_arg_shapes.emplace_back();
xla::Shape& xla_shape = individual_arg_shapes.back();
DataType arg_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(func_type.getInput(i), &arg_dtype));
auto layout_preference = shape_determination_fns.layout_preference_fn(
arg_shapes[i].shape, arg_dtype, std::nullopt);
TF_ASSIGN_OR_RETURN(xla_shape,
shape_determination_fns.shape_representation_fn(
arg_shapes[i].shape, arg_dtype,
false, layout_preference));
auto sharding =
main_func.getArgAttrOfType<mlir::StringAttr>(i, "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &xla_shape));
}
if (use_tuple_args) {
xla_input_shapes->push_back(
xla::ShapeUtil::MakeTupleShape(individual_arg_shapes));
} else {
*xla_input_shapes = individual_arg_shapes;
}
return absl::OkStatus();
}
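// Returns a ranked tensor type in which dynamic dimensions are replaced by
// their bounds from an mhlo.type_extensions encoding, when present; returns a
// null type for unranked inputs.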
mlir::RankedTensorType GetBufferType(mlir::Type ty) {
auto ranked_ty = mlir::dyn_cast_or_null<mlir::RankedTensorType>(ty);
if (!ranked_ty) return {};
int64_t rank = ranked_ty.getRank();
llvm::SmallVector<int64_t, 4> dims = llvm::to_vector<4>(ranked_ty.getShape());
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
ranked_ty.getEncoding());
if (encoding && !encoding.getBounds().empty()) {
for (int64_t dim = 0; dim < rank; ++dim) {
if (dims[dim] == mlir::ShapedType::kDynamic) {
dims[dim] = encoding.getBounds()[dim];
}
}
}
return GetTypeFromTFTensorShape(dims, ranked_ty.getElementType());
}
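// Collects output descriptions and resource updates for the main function's
// results, resolving bounded dynamic shapes through tensor.cast producers and
// honoring "mhlo.sharding" result attributes and "tf.aliasing_output"
// argument attributes. The overall XLA output shape is reported as a tuple.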
Status GetOutputInfo(
mlir::ModuleOp module, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* xla_output_shape, std::vector<XlaOutputDescription>* outputs,
std::vector<XlaResourceUpdate>* resource_updates) {
auto shape_representation_fn_no_fast_memory =
[shape_determination_fns](
const xla::Shape& xla_shape) -> absl::StatusOr<xla::Shape> {
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
TF_ASSIGN_OR_RETURN(DataType dtype, EncodePrimitiveTypeAsDataType(
xla_shape.element_type()));
auto layout_preference = shape_determination_fns.layout_preference_fn(
shape, dtype, std::nullopt);
return shape_determination_fns.shape_representation_fn(
shape, dtype, false, layout_preference);
};
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
mlir::FunctionType func_type = main_func.getFunctionType();
outputs->clear();
outputs->reserve(func_type.getNumResults());
resource_updates->clear();
resource_updates->reserve(func_type.getNumResults());
std::vector<xla::Shape> shapes;
shapes.reserve(func_type.getNumResults());
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
for (unsigned i = 0; i < main_func.getNumArguments(); ++i)
if (auto aliasing_output = main_func.getArgAttrOfType<mlir::IntegerAttr>(
i, "tf.aliasing_output"))
output_to_input_alias[aliasing_output.getInt()] = i;
auto return_op = main_func.begin()->getTerminator();
for (const auto& type_and_idx : llvm::enumerate(func_type.getResults())) {
size_t idx = type_and_idx.index();
auto result_ty = mlir::cast<mlir::RankedTensorType>(type_and_idx.value());
mlir::RankedTensorType buffer_ty = result_ty;
if (!buffer_ty.hasStaticShape()) {
mlir::Value return_val = return_op->getOperand(idx);
if (auto owner = mlir::dyn_cast_or_null<mlir::tensor::CastOp>(
return_val.getDefiningOp())) {
buffer_ty = GetBufferType(owner.getOperand().getType());
if (!buffer_ty || !buffer_ty.hasStaticShape()) {
return errors::InvalidArgument(
"results needs to be static or bounded");
}
}
}
xla::Shape shape = xla::TypeToShape(buffer_ty);
if (shape.element_type() == xla::PRIMITIVE_TYPE_INVALID) {
return errors::InvalidArgument("XLA conversion failed for MLIR type.");
}
TF_ASSIGN_OR_RETURN(shape, shape_representation_fn_no_fast_memory(shape));
if (!result_ty.hasStaticShape()) {
int64_t rank = result_ty.getRank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (result_ty.isDynamicDim(dim)) {
shape.set_dynamic_dimension(dim, true);
}
}
}
auto sharding = main_func.getResultAttrOfType<mlir::StringAttr>(
type_and_idx.index(), "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &shape));
auto tensor_type =
mlir::dyn_cast<mlir::RankedTensorType>(type_and_idx.value());
shapes.push_back(shape);
auto it = output_to_input_alias.find(type_and_idx.index());
if (it != output_to_input_alias.end() && use_resource_updates_for_aliases) {
resource_updates->emplace_back();
XlaResourceUpdate& resource_update = resource_updates->back();
resource_update.input_index = it->getSecond();
resource_update.modified = true;
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &resource_update.type));
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &resource_update.shape));
continue;
}
outputs->emplace_back();
XlaOutputDescription& out_desc = outputs->back();
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &out_desc.type));
out_desc.is_constant = false;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &out_desc.shape));
out_desc.input_index =
it != output_to_input_alias.end() ? it->getSecond() : -1;
out_desc.is_tensor_list = false;
}
*xla_output_shape = xla::ShapeUtil::MakeTupleShape(shapes);
return absl::OkStatus();
}
void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) {
input_mapping->resize(num_inputs, 0);
std::iota(input_mapping->begin(), input_mapping->end(), 0);
}
static void RegisterDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
}
bool CanInlineFunctionsPostLegalization(llvm::StringRef device_type) {
return device_type == DEVICE_TPU_XLA_JIT;
}
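// Adds the passes that lower TF ops to MHLO. When lower_to_xla_hlo is true
// this includes the quantization lowering passes and the main legalize-TF
// pass, followed by canonicalization and shape inference; the infeed layout
// adjustment pass is always added.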
void AddLegalizationPasses(mlir::OpPassManager& pm, bool legalize_chlo,
llvm::StringRef device_type, bool enable_op_fallback,
bool lower_to_xla_hlo) {
if (lower_to_xla_hlo) {
mlir::quant::stablehlo::AddQuantizationLoweringPasses(pm);
pm.addPass(mlir::mhlo::createLegalizeTFPass(
legalize_chlo,
device_type, enable_op_fallback));
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateInfeedsOpsXlaAdjustLayoutPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
}
}
}
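// Builds the TF-to-XLA-HLO lowering pipeline: converts functional control
// flow to regions, inlines and canonicalizes, runs shape inference,
// decomposes tensor-list/stack/tensor-array and resource ops, promotes
// resources to arguments, and (unless lower_to_xla_hlo is false) legalizes
// the result to MHLO, including any custom legalization passes.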
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo, bool allow_partial_conversion) {
bool legalize_chlo = true;
pm.addNestedPass<mlir::func::FuncOp>(
tensorflow::tf2xla::internal::CreateInputLoweringMetricsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateTFXLADeviceSpecificTransformsPass(device_type));
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateReplicateTensorListInitOpsPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::createSCCPPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(mlir::createSCCPPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorListOpsDecompositionPass());
}
pm.addPass(mlir::TF::CreateStackOpsDecompositionPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorArrayOpsDecompositionPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TFDevice::CreateDecomposeResourceOpsPass());
pm.addPass(mlir::TF::CreatePromoteResourcesToArgsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::TF::CreateLowerQuantizedPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::CreateConvertTFQuantTypesPass());
if (lower_to_xla_hlo) {
for (auto& target_pass : custom_legalization_passes) {
pm.addNestedPass<mlir::func::FuncOp>(std::move(target_pass));
}
pm.addPass(mlir::mhlo::CreateLegalizeTFCollectivePass());
}
AddLegalizationPasses(pm, legalize_chlo, device_type, enable_op_fallback,
lower_to_xla_hlo);
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::CreateLegalizeTFCommunicationPass());
if (!allow_partial_conversion) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateVerifyTFXLALegalizationPass(legalize_chlo));
}
}
if (CanInlineFunctionsPostLegalization(device_type)) {
pm.addPass(mlir::createInlinerPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
}
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module) {
auto producer_or = GetTfGraphProducerVersion(module);
if (!producer_or.ok()) ret | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_builder.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::OpPassManager;
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::HasSubstr;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
TEST(LegalizeMlirTest, LegalizesModule) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("mhlo.const"));
}
TEST(LegalizeMlirTest, FailsLegalizesModule) {
constexpr char failed_legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> count(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
failed_legalization, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result);
EXPECT_FALSE(status.ok());
EXPECT_EQ(count.Delta("tf.DoesntExist", "Unknown"), 1);
}
TEST(CompileMlirUtil, CreatesPipeline) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
                                    /*enable_op_fallback=*/false,
                                    /*custom_legalization_passes=*/{});
EXPECT_FALSE(pass_manager.getPasses().empty());
}
TEST(CompileMlirUtil, HasLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
                                    /*enable_op_fallback=*/true,
                                    /*custom_legalization_passes=*/{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kLegalizeTfPass));
}
TEST(CompileMlirUtil, DoesNotHaveLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
                                    /*enable_op_fallback=*/false,
                                    /*custom_legalization_passes=*/{},
                                    /*lower_to_xla_hlo=*/false);
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, Not(HasSubstr(kLegalizeTfPass)));
}
TEST(CompileMlirUtil, DoesNotLowerWhenTold) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result,
      /*custom_legalization_passes=*/{},
      /*module_name=*/"",
      /*lower_to_xla_hlo=*/false);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("tf.Const"));
}
TEST(CompileMlirUtil, CanonicalizationIsExplicitDuringInlining) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kInlinePass =
"inline{default-pipeline=canonicalize "
"inlining-threshold=4294967295 max-iterations=4 }";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
                                    /*enable_op_fallback=*/true,
                                    /*custom_legalization_passes=*/{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kInlinePass));
}
TEST(LegalizeMlirTest, LegalizesModuleWithDynamicShape) {
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
std::vector<tensorflow::TensorShape> arg_shapes = {{1}};
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result);
EXPECT_TRUE(status.ok());
}
absl::StatusOr<std::unique_ptr<Graph>> BuildOpGraphWithOutputShapes() {
DataType data_type = DT_INT32;
std::initializer_list<int64_t> dims = {2, 3, 4, 5};
Tensor tensor(data_type, TensorShape(dims));
for (int i = 0; i < 2 * 3 * 4 * 5; ++i) {
tensor.flat<int32>()(i) = i;
}
NodeDef node;
auto builder = NodeDefBuilder("some_node", "Const")
.Attr("dtype", data_type)
.Attr("value", tensor);
AttrValue shape_attr;
TensorShapeProto* shape_proto = shape_attr.mutable_list()->add_shape();
shape_proto->add_dim()->set_size(1);
builder.Attr("_output_shapes", shape_attr);
TF_RETURN_IF_ERROR(builder.Finalize(&node));
return CreateSingleOpGraph(node, {}, {DataType::DT_INT32});
}
absl::Status BuildHloFromGraph(Graph& graph, bool use_output_shapes) {
xla::XlaBuilder builder(
::testing::UnitTest::GetInstance()->current_test_info()->name());
mlir::MLIRContext mlir_context;
llvm::SmallVector<xla::XlaOp, 4> xla_params;
std::vector<xla::XlaOp> returns(1);
return BuildHloFromGraph(graph, builder, mlir_context, xla_params, returns,
                           use_output_shapes, /*args=*/{},
                           /*control_rets=*/{}, DEVICE_TPU,
                           FunctionLibraryDefinition(OpRegistry::Global()),
                           /*debug_info=*/{},
                           /*custom_legalization_passes=*/{});
}
TEST(CompileMlirUtil, UsesCorrectOriginalShapeWithoutOutputShapes) {
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildOpGraphWithOutputShapes());
auto build_result = BuildHloFromGraph(*graph, false);
TF_ASSERT_OK(build_result);
}
TEST(CompileMlirUtil, UsesIncorrectOutputShapesWhenPresent) {
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildOpGraphWithOutputShapes());
auto build_result = BuildHloFromGraph(*graph, true);
ASSERT_FALSE(build_result.ok());
EXPECT_THAT(build_result.message(),
HasSubstr("op operand type 'tensor<2x3x4x5xi32>' and result type "
"'tensor<1xi32>' are cast incompatible"));
}
}
} |
1,192 | cpp | tensorflow/tensorflow | tf_dialect_to_executor | tensorflow/compiler/mlir/tf2xla/api/v1/tf_dialect_to_executor.cc | tensorflow/compiler/mlir/tf2xla/api/v1/tf_dialect_to_executor_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V2_TF_DIALECT_TO_EXECUTOR_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V2_TF_DIALECT_TO_EXECUTOR_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
tensorflow::Status ExportFromTensorflowDialectToExecutor(
mlir::ModuleOp module, llvm::StringRef module_name = llvm::StringRef());
}
}
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h"
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/lib/monitoring/counter.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::LogicalResult;
using mlir::ModuleOp;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
auto *tf_dialect_to_executor_dialect_status = tsl::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/tf_dialect_to_executor_dialect_status",
"Counts how often a successful export from TF Dialect to Executor Dialect "
"is",
"status");
constexpr char kExportSuccess[] = "success";
constexpr char kExportFailed[] = "failed";
namespace {
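// Adds the passes that convert a module from the TF dialect back to the
// tf_executor dialect: region control flow back to functional form,
// functional-to-executor conversion, per-op island splitting, replica and
// parallel-execute expansion, control-dependency updates, graph pruning, and
// (depending on flags) control-to-data-output conversion.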
void AddTfDialectToExecutorPasses(OpPassManager &pm) {
pm.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
pm.addNestedPass<FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateSplitIntoIslandPerOpPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateReplicateToIslandPass(
false));
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateReplicaIDToDeviceOrdinalPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateParallelExecuteToIslandsPass(
false));
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateLaunchToDeviceAttributePass(
false));
pm.addPass(
mlir::tf_executor::CreateTFExecutorUpdateControlDependenciesPass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUDevicePropagationPass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUColocateSplitsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_convert_control_to_data_outputs_pass) {
bool composite_tpuexecute_side_effects =
tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_composite_tpuexecute_side_effects;
pm.addPass(
mlir::tf_executor::CreateTFExecutorConvertControlToDataOutputsPass(
composite_tpuexecute_side_effects));
}
pm.addPass(mlir::TF::CreateVerifySuitableForExportPass());
}
tensorflow::Status RecordStatusIfError(absl::Status status) {
if (status.ok()) {
return absl::OkStatus();
}
tf_dialect_to_executor_dialect_status->GetCell(kExportFailed)->IncrementBy(1);
VLOG(1) << "Failed to export from TF Dialect to TF Executor Dialect. "
<< status;
constexpr char bridge_subcomponent[] =
"TFXLA_TF_FUNCTIONAL_TO_EXECUTOR_EXPORT_v2";
constexpr char kBridgeComponent[] = "TFXLABridge";
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
tsl::error_logging::Log(kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
}
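// Runs the TF-dialect-to-executor pipeline on the module, dumping the IR
// before and after when verbose logging or the debug data dumper is enabled,
// and records success or failure in the export status counter.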
tensorflow::Status ExportFromTensorflowDialectToExecutor(
ModuleOp module, llvm::StringRef module_name) {
PassManager tf_to_executor(module.getContext());
::tensorflow::applyTensorflowAndCLOptions(tf_to_executor);
tf_to_executor.enableVerifier();
AddTfDialectToExecutorPasses(tf_to_executor);
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), kDebugGroupMain,
"tfxla_bridge_v2_tfdialect_to_executor_before"),
module, llvm::StringRef(), &tf_to_executor);
if (VLOG_IS_ON(2) ||
DEBUG_DATA_DUMPER()->ShouldDump(
module_name.str(), kDebugGroupBridgePhase1ExecutorExport)) {
internal::EnablePassIRPrinting(
tf_to_executor, kDebugGroupBridgePhase1ExecutorExport, module_name);
}
}
LogicalResult result = tf_to_executor.run(module);
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), kDebugGroupMain,
"tfxla_bridge_v2_tfdialect_to_executor_after"),
module, llvm::StringRef(), &tf_to_executor);
}
if (result.failed()) {
return RecordStatusIfError(
absl::InternalError("Failed to export from TF Dialect to TF Executor "
"Dialect. Read LLVM Pipeline Error"));
}
tf_dialect_to_executor_dialect_status->GetCell(kExportSuccess)
->IncrementBy(1);
return absl::OkStatus();
}
mlir::PassPipelineRegistration<> tf_dialect_to_executor_pipeline(
"tf-dialect-to-executor-v2",
"Run passes to convert from TF Dialect to Executor in preparation for "
"exporting module back to TF Graph.",
AddTfDialectToExecutorPasses);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h"
#include <stdlib.h>
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
constexpr char kExportStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/tf_dialect_to_executor_dialect_status";
constexpr char kExportSuccess[] = "success";
constexpr char kExportFailed[] = "failed";
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using ::tensorflow::monitoring::testing::CellReader;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tf2xla/api/v2/testdata/");
}
size_t CountSubstring(absl::string_view str, absl::string_view substr) {
size_t count = 0;
size_t idx = str.find(substr);
while (idx != std::string::npos) {
count++;
idx = str.find(substr, idx + 1);
}
return count;
}
class TensorflowDialectToExecutorTest : public ::testing::Test {
public:
TensorflowDialectToExecutorTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TensorflowDialectToExecutorTest, ConvertsToExecutor) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_));
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 0);
}
TEST_F(TensorflowDialectToExecutorTest, ErrorsWhenCannotConvert) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("invalid_executor.mlir"));
EXPECT_FALSE(ExportFromTensorflowDialectToExecutor(*mlir_module_).ok());
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 0);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 1);
}
TEST_F(TensorflowDialectToExecutorTest, PrunesDeadOps) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("func_with_dead_ops.mlir"));
TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_));
std::string module_dump;
llvm::raw_string_ostream raw_stream(module_dump);
mlir_module_->print(raw_stream);
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 0);
EXPECT_EQ(
CountSubstring(module_dump, "tf_executor.island wraps \"tf.Concat\""), 2);
}
}
}
}
} |
1,193 | cpp | tensorflow/tensorflow | cluster_tf | tensorflow/compiler/mlir/tf2xla/api/v1/cluster_tf.cc | tensorflow/compiler/mlir/tf2xla/api/v1/cluster_tf_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V2_CLUSTER_TF_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V2_CLUSTER_TF_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/device_type.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
tensorflow::Status RunFunctionTf2xlaClusteringBridge(
    mlir::ModuleOp module, bool is_supported_by_replicated_bridge,
bool is_in_fallback_enabled_mode,
llvm::StringRef module_name = llvm::StringRef());
}
}
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/device_type.pb.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stacktrace.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::LogicalResult;
using mlir::ModuleOp;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
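// After checking that the TF dialect's constant fold hook is registered, runs
// the given clustering pipeline on the module with verification and scoped
// diagnostics, dumping the IR before and after when verbose logging or the
// debug data dumper requests it. Returns the status collected by the
// diagnostic handler.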
tensorflow::Status RunTFXLABridge(
ModuleOp module,
llvm::function_ref<void(OpPassManager &pm)> pipeline_builder,
llvm::StringRef module_name = llvm::StringRef(),
llvm::StringRef dump_prefix = "tf_xla_bridge_v2") {
if (!mlir::TF::TensorFlowDialect::HasConstantFoldHook()) {
return tensorflow::errors::Internal(
"TensorFlow dialect missing constant fold hook in TFXLA bridge phase "
"1; this could happen if the binary doesn't link the constant fold "
"hook registration library.");
}
PassManager bridge(module.getContext());
bridge.enableVerifier();
::tensorflow::applyTensorflowAndCLOptions(bridge);
pipeline_builder(bridge);
mlir::StatusScopedDiagnosticHandler diag_handler(
module.getContext(), false,
!VLOG_IS_ON(1));
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
dump_prefix.str() + "_before"),
module, llvm::StringRef(), &bridge);
}
if (VLOG_IS_ON(2) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(),
kDebugGroupBridgePhase1Clustering)) {
::tensorflow::tf2xla::internal::EnablePassIRPrinting(
bridge, kDebugGroupBridgePhase1Clustering, module_name);
}
LogicalResult result = bridge.run(module);
(void)result;
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
dump_prefix.str() + "_after"),
module, llvm::StringRef(), &bridge);
}
return diag_handler.ConsumeStatus();
}
tensorflow::Status RecordIfErrorStatus(const std::string error_prefix,
bool fallback_enabled,
std::string bridge_type,
std::string device_type,
absl::Status status) {
if (status.ok()) {
return status;
}
VLOG(2) << error_prefix << " " << status;
tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
bridge_type, "v2", device_type,
fallback_enabled,
"failure");
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
std::string bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_TPU_BRIDGE";
if (device_type != "tpu") {
bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_CPU/GPU_BRIDGE";
}
tsl::error_logging::Log(mlir::TF::kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
void CreateReplicatedClusteringPipeline(OpPassManager &pm,
llvm::StringRef module_name) {
pm.addPass(mlir::TFTPU::CreateTPUValidateInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateCanonicalizeCompileAndReplicateAttributesPass());
tensorflow::tf2xla::internal::AddReplicatedBridgeClusteringPipelinePasses(
pm, module_name);
}
void CreateReplicatedClusteringPipelineV2(OpPassManager &pm) {
CreateReplicatedClusteringPipeline(pm, "");
}
tensorflow::Status RunFunctionTf2xlaClusteringBridge(
    ModuleOp module, bool is_supported_by_replicated_bridge,
bool is_in_fallback_enabled_mode, llvm::StringRef module_name) {
  std::string device_type = is_supported_by_replicated_bridge
? mlir::TF::kMlirPh1BridgeCounterTpu
: mlir::TF::kMlirPh1BridgeCounterNonTpu;
VLOG(2)
      << (is_supported_by_replicated_bridge ? "Replicated" : "NonReplicated")
<< " Bridge called stack trace is "
<< "(NOTE: this is not an error; rather the stack trace for debugging) : "
<< tensorflow::CurrentStackTrace();
Status clustering_status =
          is_supported_by_replicated_bridge
? RunTFXLABridge(
module,
[module_name](OpPassManager &pm) {
CreateReplicatedClusteringPipeline(pm, module_name);
},
module_name, "tf_xla_bridge_v2_replicated")
: RunTFXLABridge(
module,
[](OpPassManager &pm) {
tensorflow::tf2xla::internal::
AddNonReplicatedBridgeClusteringPipelinePasses(pm);
},
module_name, "tf_xla_bridge_v2_nonreplicated");
  std::string bridge_type = is_supported_by_replicated_bridge
? mlir::TF::kMlirPh1BridgeCounterReplicated
: mlir::TF::kMlirPh1BridgeCounterNonReplicated;
TF_RETURN_IF_ERROR(RecordIfErrorStatus(
"clustering_v2", is_in_fallback_enabled_mode,
bridge_type, device_type, clustering_status));
tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
bridge_type, "v2", device_type,
is_in_fallback_enabled_mode,
"success");
return absl::OkStatus();
}
mlir::PassPipelineRegistration<> replicated_clustering_bridge_v2(
"tf-replicated-clustering-bridge-v2",
"Run all the passes involved in transforming a TensorFlow 2 graph before "
"execution so that it is suitable for targeting devices.",
CreateReplicatedClusteringPipelineV2);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
using ::mlir::DialectRegistry;
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::WalkResult;
using ::mlir::func::FuncOp;
using ::tensorflow::monitoring::testing::CellReader;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tf2xla/api/v2/testdata/");
}
static constexpr char kCompilationStreamz[] =
"/tensorflow/core/tf_mlir_bridge_first_phase_v2_count";
class FunctionClusterTensorflowDialectTest : public ::testing::Test {
public:
FunctionClusterTensorflowDialectTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(FunctionClusterTensorflowDialectTest, ClustersTfReplicatedBridge) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
      *mlir_module_, /*is_supported_by_replicated_bridge=*/true,
      /*is_in_fallback_enabled_mode=*/false));
FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
ASSERT_TRUE(main);
EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterTpu,
"fallback_disabled", "success"),
1);
}
TEST_F(FunctionClusterTensorflowDialectTest,
RunsOutsideCompilationReplicatedBridge) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("outside_compilation.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
      *mlir_module_, /*is_supported_by_replicated_bridge=*/true,
      /*is_in_fallback_enabled_mode=*/false));
FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterFuncOp cluster_op) {
has_cluster_op = true;
return WalkResult::advance();
});
EXPECT_TRUE(has_cluster_op);
EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterTpu,
"fallback_disabled", "success"),
1);
}
TEST_F(FunctionClusterTensorflowDialectTest, ClustersTFNonReplicatedBridge) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
      *mlir_module_, /*is_supported_by_replicated_bridge=*/false,
      /*is_in_fallback_enabled_mode=*/false));
FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
ASSERT_TRUE(main);
EXPECT_EQ(
compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterNonReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterNonTpu,
"fallback_disabled", "success"),
1);
}
TEST_F(FunctionClusterTensorflowDialectTest, LogsFallbackMode) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
      *mlir_module_, /*is_supported_by_replicated_bridge=*/true,
      /*is_in_fallback_enabled_mode=*/true));
EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterTpu,
"fallback_enabled", "success"),
1);
}
}
}
}
} |
1,194 | cpp | tensorflow/tensorflow | compile_tf_graph | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_TF_GRAPH_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_TF_GRAPH_H_
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/variant.h"
#include "xla/client/compile_only_client.h"
#include "xla/pjrt/compile_options.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/tpu/kernels/tpu_compile.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
absl::Status CompileTensorflowGraphToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result);
}
}
};
#endif
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/tpu/tpu_compile.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/lib/monitoring/sampler.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
using ::tensorflow::tpu::FunctionToHloArgs;
using ::tensorflow::tpu::GuaranteedConsts;
using ::tensorflow::tpu::MlirToHloArgs;
using ::tensorflow::tpu::ShardingAndIndex;
auto* phase2_bridge_compilation_status =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v1/"
"phase2_compilation_status",
"Tracks the compilation status of the non-mlir bridge",
"status" );
auto* phase2_bridge_compilation_time = tsl::monitoring::Sampler<1>::New(
{"/tensorflow/core/tf2xla/api/v1/phase2_compilation_time",
"The wall-clock time spent on executing graphs in milliseconds.",
"configuration"},
{tsl::monitoring::Buckets::Exponential(1, 1.5, 45)});
constexpr char kOldBridgeNoMlirSuccess[] = "kOldBridgeNoMlirSuccess";
constexpr char kOldBridgeNoMlirFailure[] = "kOldBridgeNoMlirFailure";
namespace {
struct CompilationTimer {
uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64 ElapsedCycles() {
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;
}
int64_t ElapsedCyclesInMilliseconds() {
std::chrono::duration<double> duration =
profile_utils::CpuUtils::ConvertClockCycleToTime(ElapsedCycles());
return std::chrono::duration_cast<std::chrono::milliseconds>(duration)
.count();
}
};
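// Copies "tf.aliasing_output" argument attributes from the entry function
// into the compiled HLO module's input/output alias config (as kMayAlias
// entries) so aliased buffers can be reused between inputs and outputs;
// requires the program result to be a tuple.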
Status PopulateInputOutputAliasing(
mlir::func::FuncOp main_fn,
XlaCompiler::CompilationResult* compilation_result, bool use_tuple_args) {
constexpr char kAliasingAttr[] = "tf.aliasing_output";
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
unsigned num_arguments = main_fn.getNumArguments();
for (unsigned arg_index = 0; arg_index < num_arguments; ++arg_index) {
if (auto aliasing_output = main_fn.getArgAttrOfType<mlir::IntegerAttr>(
arg_index, kAliasingAttr))
output_to_input_alias[aliasing_output.getInt()] = arg_index;
}
if (output_to_input_alias.empty()) return absl::OkStatus();
xla::HloModuleProto* module_proto =
compilation_result->computation->mutable_proto();
absl::StatusOr<xla::ProgramShape> program_shape_or_status =
compilation_result->computation->GetProgramShape();
TF_RET_CHECK(program_shape_or_status.ok());
xla::ProgramShape& program_shape = program_shape_or_status.value();
if (!program_shape.result().IsTuple())
return errors::Internal("Expect result to have tuple shape");
xla::HloInputOutputAliasConfig config(program_shape.result());
for (auto alias : output_to_input_alias) {
if (use_tuple_args) {
TF_RETURN_IF_ERROR(config.SetUpAlias(
xla::ShapeIndex({alias.first}), 0, xla::ShapeIndex({alias.second}),
xla::HloInputOutputAliasConfig::AliasKind::kMayAlias));
} else {
TF_RETURN_IF_ERROR(config.SetUpAlias(
xla::ShapeIndex({alias.first}), alias.second, xla::ShapeIndex({}),
xla::HloInputOutputAliasConfig::AliasKind::kMayAlias));
}
}
*module_proto->mutable_input_output_alias() = config.ToProto();
return absl::OkStatus();
}
bool failed(const absl::Status& status) { return !status.ok(); }
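// Lowers the module through region-to-functional control flow, shape
// inference, and functional-to-executor conversion, then exports its
// functions into the given FunctionLibraryDefinition, with the entry
// function exported to the library.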
Status PrepareAndExportToLibrary(mlir::ModuleOp module,
FunctionLibraryDefinition* flib_def) {
mlir::PassManager manager(module.getContext());
applyTensorflowAndCLOptions(manager);
manager.addPass(mlir::TF::CreatePrepareTpuComputationForTfExportPass());
manager.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
manager.addPass(mlir::TF::CreateTFShapeInferencePass());
manager.addNestedPass<mlir::func::FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
manager.addPass(mlir::CreateBreakUpIslandsPass());
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
if (VLOG_IS_ON(2)) {
llvm::StringRef module_name = llvm::StringRef();
constexpr const char* kDebugGroupBridgePhase2 =
"v1_prepare_and_export_to_library";
internal::EnablePassIRPrinting(manager, kDebugGroupBridgePhase2,
module_name);
}
auto prepare_status = manager.run(module);
auto diag_handler_status = diag_handler.ConsumeStatus();
if (failed(prepare_status) || failed(diag_handler_status)) {
return diag_handler_status;
}
GraphExportConfig config;
config.export_entry_func_to_flib = true;
absl::flat_hash_set<Node*> control_ret_nodes;
return tensorflow::tf2xla::v2::ConvertMlirToGraph(
module, config, nullptr, flib_def, &control_ret_nodes);
}
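// Non-MLIR path: compiles the TF function directly to HLO and records the
// result in the phase-2 compilation status metric.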
absl::Status CompileTFFunctionWithoutMlir(
FunctionToHloArgs function_computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
Status comp_status = CompileTFFunctionToHlo(
*function_computation.flib_def, function_computation.graph_def_version,
shape_determination_funcs, arg_shapes,
function_computation.guaranteed_constants, *function_computation.function,
metadata, client, arg_core_mapping, per_core_arg_shapes, use_tuple_args,
compilation_result);
if (comp_status.ok()) {
phase2_bridge_compilation_status->GetCell(kOldBridgeNoMlirSuccess)
->IncrementBy(1);
} else {
phase2_bridge_compilation_status->GetCell(kOldBridgeNoMlirFailure)
->IncrementBy(1);
}
return comp_status;
}
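// MLIR path: deserializes the serialized module, exports its "main" function
// into a function library, compiles that function to HLO, and then restores
// the input/output aliasing information.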
absl::Status CompileMLIRTFFunction(
tpu::MlirToHloArgs mlir_computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
TF_RETURN_IF_ERROR(DeserializeMlirModule(mlir_computation.mlir_module,
&context, &mlir_module));
if (!mlir::SetTPUInfeedLayout(mlir_module))
return errors::Internal("Failed to set layouts attribute");
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("legalize_with_old_bridge", mlir_module.get());
}
constexpr char kEntryFuncName[] = "main";
auto main_fn = mlir_module->lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!main_fn) {
return errors::Internal(
"TPU compile op requires module with a entry function main");
}
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
TF_RETURN_IF_ERROR(PrepareAndExportToLibrary(*mlir_module, flib_def.get()));
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("legalize_with_old_bridge_post_transform",
mlir_module.get());
}
VersionDef versions;
if (mlir::failed(ExtractTfVersions(*mlir_module, &versions))) {
return errors::Internal(
"module attribute in _TPUCompileMlir op is missing tf versions.");
}
NameAttrList func;
func.set_name(kEntryFuncName);
GuaranteedConsts consts;
*compilation_result = {};
TF_RETURN_IF_ERROR(CompileTFFunctionToHlo(
*flib_def, versions.producer(), shape_determination_funcs, arg_shapes,
consts, func, metadata, client, arg_core_mapping, per_core_arg_shapes,
use_tuple_args, compilation_result));
return PopulateInputOutputAliasing(main_fn, compilation_result,
use_tuple_args);
}
}
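// Entry point for the old (non-MLIR) phase-2 bridge: dispatches to the MLIR or
// function-based path depending on the computation variant and records the
// wall-clock compilation time.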
absl::Status CompileTensorflowGraphToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
"old (non-MLIR) tf2xla bridge";
CompilationTimer timer;
*compilation_result = {};
bool has_mlir = computation.index() == 0;
std::string mlir_string = has_mlir ? "has_mlir" : "has_function_to_hlo";
const std::string kBridgePhase2Config =
absl::StrCat("graph_old_bridge_", mlir_string);
if (has_mlir) {
TF_RETURN_IF_ERROR(CompileMLIRTFFunction(
std::get<0>(computation), metadata, use_tuple_args,
shape_determination_funcs, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result));
} else {
FunctionToHloArgs function_computation = std::get<1>(computation);
TF_RETURN_IF_ERROR(CompileTFFunctionWithoutMlir(
function_computation, metadata, use_tuple_args,
shape_determination_funcs, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result));
}
phase2_bridge_compilation_time->GetCell(kBridgePhase2Config)
->Add(timer.ElapsedCyclesInMilliseconds());
return absl::OkStatus();
}
}  // namespace v1
}  // namespace tf2xla
}; | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/monitoring/test_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::tpu::FunctionToHloArgs;
using ::tensorflow::tpu::MlirToHloArgs;
using ::tensorflow::tpu::ShardingAndIndex;
using ::tsl::monitoring::testing::Histogram;
static constexpr char kCompilationTimeStreamzName[] =
"/tensorflow/core/tf2xla/api/v1/phase2_compilation_time";
static constexpr char kCompilationStatusStreamzName[] =
"/tensorflow/core/tf2xla/api/v1/phase2_compilation_status";
static constexpr char kPlatformName[] = "Host";
constexpr char kEntryFuncName[] = "main";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
func.return
}
})";
MlirToHloArgs CreateTestMlirToHloArgs(const char* module_str = kMlirModuleStr) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
mlir_to_hlo_args.mlir_module = module_str;
return mlir_to_hlo_args;
}
class CompileTFGraphTest : public ::testing::Test {
public:
absl::StatusOr<XlaCompilationResult> CompileWithComputation(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>
computation) {
XlaCompilationResult compilation_result;
se::Platform* platform =
se::PlatformManager::PlatformWithName(kPlatformName).value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
tpu::TPUCompileMetadataProto metadata_proto;
std::vector<TensorShape> arg_shapes;
if (computation.index() == 0) {
TF_RETURN_IF_ERROR(tensorflow::tf2xla::internal::ConfigureMetadata(
std::get<0>(computation).mlir_module, arg_shapes, metadata_proto));
}
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
absl::Status compilation_status =
tensorflow::tf2xla::v1::CompileTensorflowGraphToHlo(
computation, metadata_proto, use_tuple_args,
shape_determination_fns, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes, client, &compilation_result);
if (!compilation_status.ok()) return compilation_status;
return compilation_result;
}
};
TEST_F(CompileTFGraphTest, RecordsStreamzForMlirFallback) {
CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
MlirToHloArgs mlir_to_hlo_args = CreateTestMlirToHloArgs();
TF_EXPECT_OK(CompileWithComputation(mlir_to_hlo_args).status());
Histogram histogram = compilation_time.Delta("graph_old_bridge_has_mlir");
EXPECT_EQ(histogram.num(), 1);
}
TEST_F(CompileTFGraphTest, RecordsStreamzForFunctionToHlo) {
CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
FunctionDef empty_function =
tensorflow::FunctionDefHelper::Create("empty", {}, {}, {}, {}, {});
tensorflow::FunctionDefLibrary fdef;
*(fdef.add_function()) = empty_function;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), fdef);
OpInputList guaranteed_constants;
NameAttrList function;
function.set_name("empty");
FunctionToHloArgs function_to_hlo_args = {&function,
&flib_def,
0,
{&guaranteed_constants}};
TF_EXPECT_OK(CompileWithComputation(function_to_hlo_args).status());
Histogram histogram =
compilation_time.Delta("graph_old_bridge_has_function_to_hlo");
EXPECT_EQ(histogram.num(), 1);
EXPECT_EQ(compilation_status.Delta("kOldBridgeNoMlirSuccess"), 1);
}
TEST_F(CompileTFGraphTest, SuccessfullyCompilesWithManualSharding) {
constexpr char kSupportedManualSharding[] = R"(
module @module___inference_tpu_function_41 attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1617 : i32}} {
func.func @main(%arg0: tensor<2x2xf32>) -> (tensor<2x2xf32> {mhlo.sharding = "\08\03\1A\02\02\01\22\02\00\01"}) {
%0 = tf_executor.graph {
%outputs, %control = tf_executor.island wraps "tf.XlaSharding"(%arg0) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x2xf32>) -> tensor<2x2xf32>
%outputs_0, %control_1 = tf_executor.island wraps "tf.XlaSharding"(%outputs) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
%outputs_2, %control_3 = tf_executor.island wraps "tf.XlaSpmdFullToShardShape"(%outputs_0) {dim = -1 : i64, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<1x2xf32>
%control_4 = tf_executor.island wraps "tf._XlaHostComputeMlir"(%outputs_2) {host_mlir_module = "", manual_sharding = true, recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"} : (tensor<1x2xf32>) -> ()
%outputs_5, %control_6 = tf_executor.island(%control_4) wraps "tf._XlaHostComputeMlir"() {host_mlir_module = "module {\0A func.func @host_func() -> tensor<1x2xf32> {\0A %0 = \22tf.Const\22() {value = dense<0.1> : tensor<1x2xf32>} : () -> tensor<1x2xf32> \0A return %0 : tensor<1x2xf32>}}", manual_sharding = true, recv_key = "host_compute_channel_1_retvals", send_key = "host_compute_channel_1_args"} : () -> tensor<1x2xf32>
%outputs_7, %control_8 = tf_executor.island wraps "tf.XlaSpmdShardToFullShape"(%outputs_5) {dim = -1 : i64, full_shape = #tf_type.shape<2x2>, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<1x2xf32>) -> tensor<2x2xf32>
%outputs_9, %control_10 = tf_executor.island wraps "tf.XlaSharding"(%outputs_7) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
tf_executor.fetch %outputs_9 : tensor<2x2xf32>
}
return %0 : tensor<2x2xf32>
}
}
)";
auto mlir_to_hlo_args = CreateTestMlirToHloArgs(kSupportedManualSharding);
auto result = CompileWithComputation(mlir_to_hlo_args);
EXPECT_TRUE(result.ok());
}
TEST_F(CompileTFGraphTest, DoesNotInlineStatelessRandomOps) {
static constexpr char kHasReturnValues[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<32x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
%cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
%cst_0 = "tf.Const"() {value = dense<[32, 64]> : tensor<2xi32>} : () -> tensor<2xi32>
%0 = "tf.StatelessRandomNormal"(%cst_0, %cst) : (tensor<2xi32>, tensor<2xi32>) -> tensor<32x64xf32>
return %0 : tensor<32x64xf32>
}
})";
auto compilation_result =
CompileWithComputation(CreateTestMlirToHloArgs(kHasReturnValues));
EXPECT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("tf.StatelessRandomNormal"));
}
TEST_F(CompileTFGraphTest, TestRunsShapeInference) {
static constexpr char kShapeInferenceModule[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = "tf.Const"() <{value = dense<-1> : tensor<3360x8xi32>}> : () -> tensor<3360x8xi32>
%cst_33 = "tf.Const"() <{value = dense<[1120, -1]> : tensor<2xi32>}> : () -> tensor<2xi32>
%cst_34 = "tf.Const"() <{value = dense<[3, 1120, -1]> : tensor<3xi32>}> : () -> tensor<3xi32>
%cst_63 = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
%1965:4 = "tf._XlaHostComputeMlir"(%0, %cst_34, %cst_63, %cst_33) <{host_mlir_module = "#loc1 = loc(\22Reshape:\22)\0A#loc2 = loc(\22Reshape_4\22)\0A#loc3 = loc(\22Reshape\22)\0A#loc9 = loc(fused[#loc1, #loc2, #loc3])\0Amodule {\0A func.func @host_func(%arg0: tensor<3360x?xi32> loc(fused[#loc1, #loc2, #loc3]), %arg1: tensor<3xi32> loc(fused[#loc1, #loc2, #loc3]), %arg2: tensor<i32> loc(fused[#loc1, #loc2, #loc3]), %arg3: tensor<2xi32> loc(fused[#loc1, #loc2, #loc3])) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>) {\0A %0 = \22tf.Reshape\22(%arg0, %arg1) {_xla_outside_compilation = \220\22} : (tensor<3360x?xi32>, tensor<3xi32>) -> tensor<3x1120x?xi32> loc(#loc9)\0A %1:3 = \22tf.Split\22(%arg2, %0) {_xla_outside_compilation = \220\22} : (tensor<i32>, tensor<3x1120x?xi32>) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1x1120x?xi32>) loc(#loc10)\0A %2 = \22tf.Reshape\22(%1#0, %arg3) {_xla_outside_compilation = \220\22} : (tensor<1x1120x?xi32>, tensor<2xi32>) -> tensor<1120x?xi32> loc(#loc11)\0A %3 = \22tf.Shape\22(%2) {_xla_outside_compilation = \220\22} : (tensor<1120x?xi32>) -> tensor<2xi32> loc(#loc12)\0A return %1#1, %1#2, %2, %3 : tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32> loc(#loc9)\0A } loc(#loc9)\0A} loc(#loc)\0A#loc = loc(unknown)\0A#loc4 = loc(\22Split:\22)\0A#loc5 = loc(\22split\22)\0A#loc6 = loc(\22Reshape_5\22)\0A#loc7 = loc(\22Shape:\22)\0A#loc8 = loc(\22Shape_4\22)\0A#loc10 = loc(fused[#loc4, #loc5])\0A#loc11 = loc(fused[#loc1, #loc6])\0A#loc12 = loc(fused[#loc7, #loc8])\0A", recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"}> : (tensor<3360x8xi32>, tensor<3xi32>, tensor<i32>, tensor<2xi32>) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>)
return
}
}
)";
auto compilation_result =
CompileWithComputation(CreateTestMlirToHloArgs(kShapeInferenceModule));
EXPECT_TRUE(compilation_result.ok());
}
}
}
}
} |
1,195 | cpp | tensorflow/tensorflow | xla_legalize_targets | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.cc | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_TRANSFORMS_XLA_LEGALIZE_TARGETS_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_TRANSFORMS_XLA_LEGALIZE_TARGETS_H_
#include "mlir/IR/MLIRContext.h"
#include "mlir/Transforms/DialectConversion.h"
namespace mlir {
namespace mhlo {
mlir::ConversionTarget GetDefaultLegalConversionTargets(
MLIRContext& mlir_context, bool legalize_chlo);
}
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace mhlo {
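// When legalize_chlo is set, CHLO and StableHLO are marked illegal so they must
// be lowered away; otherwise CHLO remains legal. MHLO and a handful of standard
// dialects stay legal either way.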
ConversionTarget GetDefaultLegalConversionTargets(MLIRContext& mlir_context,
bool legalize_chlo) {
ConversionTarget target(mlir_context);
if (legalize_chlo) {
target.addIllegalDialect<chlo::ChloDialect>();
target.addIllegalDialect<stablehlo::StablehloDialect>();
} else {
target.addLegalDialect<chlo::ChloDialect>();
}
target.addLegalDialect<MhloDialect>();
target.addLegalDialect<arith::ArithDialect>();
target.addLegalDialect<func::FuncDialect>();
target.addLegalDialect<tensor::TensorDialect>();
target.addLegalDialect<shape::ShapeDialect>();
target.addLegalOp<func::CallOp>();
target.addLegalOp<TF::_XlaHostComputeMlirOp, TF::XlaSendToHostOp,
TF::XlaRecvFromHostOp>();
return target;
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace mhlo {
namespace {
mlir::DialectRegistry GetDefaultDialectRegistry() {
mlir::DialectRegistry registry;
registry.insert<arith::ArithDialect>();
registry.insert<func::FuncDialect>();
registry.insert<tensor::TensorDialect>();
registry.insert<shape::ShapeDialect>();
registry.insert<TF::TensorFlowDialect>();
registry.insert<chlo::ChloDialect>();
return registry;
}
class XlaLegalizeTargetsTest : public testing::Test {
public:
XlaLegalizeTargetsTest()
: context_(GetDefaultDialectRegistry()),
module_(mlir::ModuleOp::create(mlir::UnknownLoc::get(&context_))),
builder_(&module_->getBodyRegion()) {
context_.loadAllAvailableDialects();
}
protected:
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
mlir::OpBuilder builder_;
};
TEST_F(XlaLegalizeTargetsTest, CreatesConversionTargets) {
auto const_int = builder_.create<mlir::arith::ConstantIntOp>(
builder_.getUnknownLoc(), 10, builder_.getI32Type());
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, false);
EXPECT_TRUE(target.isLegal(const_int));
}
TEST_F(XlaLegalizeTargetsTest, AllowsCHLODialect) {
auto const_int = builder_.create<chlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32TensorAttr({42}));
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, true);
EXPECT_TRUE(target.isIllegal(const_int));
}
TEST_F(XlaLegalizeTargetsTest, DontAllowCHLODialect) {
auto const_int = builder_.create<chlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32TensorAttr({42}));
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, false);
EXPECT_TRUE(target.isLegal(const_int));
}
}
}
} |
1,196 | cpp | tensorflow/tensorflow | tf2xla_rewriter | tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc | tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_TRANSFORMS_TF2XLA_REWRITER_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_TRANSFORMS_TF2XLA_REWRITER_H_
#include <memory>
#include <string>
#include <vector>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace mlir {
namespace mhlo {
class Tf2XlaRewriterTestPeer;
class Tf2XlaRewriter {
public:
static mlir::LogicalResult RewriteOp(mlir::Operation* op,
mlir::PatternRewriter& rewriter,
const std::string& device_type);
private:
friend class Tf2XlaRewriterTestPeer;
Tf2XlaRewriter(mlir::Operation* op, mlir::PatternRewriter& rewriter,
const std::string& device_type);
~Tf2XlaRewriter();
absl::StatusOr<mhlo::TupleOp> CompileWithHloImporter(
tensorflow::OpKernelContext& op_context);
absl::StatusOr<mhlo::TupleOp> ImportXlaComputation(
xla::XlaComputation& computation);
mlir::LogicalResult PrepareParams();
mlir::LogicalResult PrepareKernelInputs(
const llvm::SmallDenseSet<int>& required_consts,
std::vector<tensorflow::XlaExpression>& expressions,
std::vector<tensorflow::Tensor>& tensors,
std::vector<tensorflow::TensorValue>& inputs);
mlir::LogicalResult VerifyOpResults(tensorflow::OpKernelContext& op_context);
mlir::LogicalResult GetKernelOutputs(tensorflow::OpKernelContext& op_context,
mhlo::TupleOp tuple_results,
llvm::SmallVector<Value>& outputs);
mlir::LogicalResult UnpackTupleResults(mhlo::TupleOp tuple_result,
llvm::SmallVector<Value>& outputs);
mlir::LogicalResult LegalizeOp();
tensorflow::XlaExpression GetExprForOperand(mlir::Value operand,
mlir::Operation* op,
int64_t operand_index);
mlir::Operation* op_;
std::string device_type_;
mlir::PatternRewriter& rewriter_;
tensorflow::OpOrArgLocNameMapper name_mapper_;
tensorflow::XlaContext* context_;
std::unique_ptr<tensorflow::StaticDeviceMgr> device_mgr_;
tensorflow::Device* device_;
std::unique_ptr<tensorflow::ScopedStepContainer> step_container_;
std::unique_ptr<tensorflow::FunctionLibraryDefinition> flib_def_;
std::unique_ptr<tensorflow::ProcessFunctionLibraryRuntime> pflr_;
tensorflow::OpKernelContext::Params params_;
xla::XlaBuilder xla_builder_;
};
}
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tpu_embedding_ops_registry.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/service/hlo.pb.h"
#include "xla/translate/hlo_to_mhlo/hlo_function_importer.h"
#include "xla/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
namespace {
using ::mlir::ModuleOp;
using ::tensorflow::Tensor;
using ::tsl::StatusOr;
using ::xla::XlaComputation;
static std::unique_ptr<tensorflow::StaticDeviceMgr> CreateDeviceMgr(
const std::string& device_type) {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
auto device = std::make_unique<tensorflow::XlaCompilationDevice>(
tensorflow::SessionOptions(), tensorflow::DeviceType(device_type));
return std::make_unique<tensorflow::StaticDeviceMgr>(std::move(device));
}
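// True iff the entry computation's root instruction is a tuple.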
bool RootInstructionIsTuple(const xla::HloModule& hlo_module) {
xla::HloInstruction* root_instruction =
hlo_module.entry_computation()->root_instruction();
return root_instruction->opcode() == xla::HloOpcode::kTuple;
}
}  // namespace
LogicalResult Tf2XlaRewriter::RewriteOp(Operation* op,
PatternRewriter& rewriter,
const std::string& device_type) {
Tf2XlaRewriter tf2xla_rewriter(op, rewriter, device_type);
return tf2xla_rewriter.LegalizeOp();
}
Tf2XlaRewriter::Tf2XlaRewriter(Operation* op, PatternRewriter& rewriter,
const std::string& device_type)
: op_(op),
device_type_(device_type),
rewriter_(rewriter),
context_(nullptr),
xla_builder_(op_->getName().getStringRef().str()) {}
Tf2XlaRewriter::~Tf2XlaRewriter() {
if (context_) context_->Unref();
}
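// Imports the compiled XlaComputation back into the enclosing MLIR module,
// mapping the op's operands onto the entry computation's parameters, and
// returns the root tuple op.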
absl::StatusOr<mhlo::TupleOp> Tf2XlaRewriter::ImportXlaComputation(
XlaComputation& computation) {
xla::DebugOptions debug_options;
TF_ASSIGN_OR_RETURN(auto hlo_module_config,
xla::HloModule::CreateModuleConfigFromProto(
computation.proto(), debug_options));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::HloModule> hlo_module,
xla::HloModule::CreateFromProto(computation.proto(), hlo_module_config));
if (!RootInstructionIsTuple(*hlo_module)) {
return tsl::errors::InvalidArgument("Imported XLA Root is not a tuple op");
}
if (op_->getNumOperands() !=
hlo_module->entry_computation()->num_parameters()) {
return tsl::errors::InvalidArgument(
"Entry computation does not have equal number of parameters to op "
"operands");
}
ModuleOp mlir_module = op_->getParentOfType<ModuleOp>();
mlir::OpBuilder builder(op_);
mlir::SymbolTable symbol_table(mlir_module);
llvm::SmallVector<mlir::Value> arguments;
for (int i = 0; i < op_->getNumOperands(); i++) {
arguments.push_back(op_->getOperand(i));
}
TF_ASSIGN_OR_RETURN(
mlir::Value root_value,
xla::HloFunctionImporter::ImportInstructions(
*hlo_module->entry_computation(), arguments, symbol_table, &builder));
mhlo::TupleOp root_tuple =
mlir::dyn_cast_or_null<mhlo::TupleOp>(root_value.getDefiningOp());
if (!root_tuple) {
return tsl::errors::InvalidArgument(
"Imported XLA Root Value is not a tuple op");
}
return root_tuple;
}
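// Sets up the XlaContext, compilation device, step container, and function
// library runtime needed to run the tf2xla kernel for this op.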
LogicalResult Tf2XlaRewriter::PrepareParams() {
context_ = new tensorflow::XlaContext(nullptr, &xla_builder_,
nullptr);
context_->Ref();
device_mgr_ = CreateDeviceMgr(device_type_);
if (!device_mgr_) return failure();
device_ = device_mgr_->ListDevices().front();
params_.device = device_;
params_.resource_manager = device_->resource_manager();
auto cleanup = [](const std::string& name) {};
step_container_ = std::make_unique<tensorflow::ScopedStepContainer>(
0, cleanup);
absl::Status status = step_container_->Create(
device_->resource_manager(),
tensorflow::XlaContext::kXlaContextResourceName, context_);
if (!status.ok()) {
return emitRemark(op_->getLoc())
<< "failed to create XlaContext resource: " << status.ToString();
}
params_.step_container = step_container_.get();
absl::StatusOr<int64_t> version_or = tensorflow::GetTfGraphProducerVersion(
op_->getParentOfType<mlir::ModuleOp>());
if (!version_or.ok()) {
return emitError(op_->getLoc()) << version_or.status().ToString();
}
flib_def_ = std::make_unique<tensorflow::FunctionLibraryDefinition>(
tensorflow::OpRegistry::Global(), tensorflow::FunctionDefLibrary());
pflr_ = std::make_unique<tensorflow::ProcessFunctionLibraryRuntime>(
device_mgr_.get(), tensorflow::Env::Default(), nullptr,
version_or.value(), flib_def_.get(), tensorflow::OptimizerOptions());
params_.function_library = pflr_->GetFLR(device_->name());
return success();
}
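// A type is bounded if it is ranked and every dynamic dimension has an upper
// bound recorded in the type's encoding.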
bool IsBounded(Type ty) {
auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty);
if (!ranked_ty) return false;
if (ranked_ty.hasStaticShape()) return true;
auto encoding =
mlir::dyn_cast_or_null<TypeExtensionsAttr>(ranked_ty.getEncoding());
if (!encoding) return false;
for (int i = 0; i < ranked_ty.getRank(); ++i) {
if (ranked_ty.isDynamicDim(i) &&
encoding.getBounds()[i] == ShapedType::kDynamic) {
return false;
}
}
return true;
}
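// True if any attribute on the op is a symbol reference, either directly or as
// the first element of an array attribute.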
bool HasSymbolRefAttr(Operation* op) {
for (const auto& attr : op->getAttrs()) {
Attribute attr_value = attr.getValue();
if (mlir::isa<SymbolRefAttr>(attr_value)) {
return true;
} else if (auto array_attr = mlir::dyn_cast<ArrayAttr>(attr_value)) {
if (!array_attr.empty() &&
mlir::isa<SymbolRefAttr>(*array_attr.begin())) {
return true;
}
}
}
return false;
}
LogicalResult Tf2XlaRewriter::PrepareKernelInputs(
const llvm::SmallDenseSet<int>& required_consts,
std::vector<tensorflow::XlaExpression>& expressions,
std::vector<tensorflow::Tensor>& tensors,
std::vector<tensorflow::TensorValue>& inputs) {
for (auto it : llvm::enumerate(op_->getOperands())) {
Value operand = it.value();
size_t idx = it.index();
tensorflow::XlaExpression expr = GetExprForOperand(operand, op_, idx);
tensorflow::XlaExpression::Kind kind = expr.kind();
if (kind == tensorflow::XlaExpression::Kind::kInvalid) return failure();
expressions.push_back(expr);
if (!tensorflow::DataTypeCanUseMemcpy(expr.dtype())) {
return op_->emitRemark()
<< "skipping legalization due to unsupported type "
<< operand.getType();
}
auto shape_or = expr.GetShape();
if (!shape_or.ok()) {
return op_->emitRemark()
<< "failed to get shape for expression. " << expr.HumanString();
}
tensors.emplace_back(
device_->GetAllocator(tensorflow::AllocatorAttributes()), expr.dtype(),
shape_or.value());
tensorflow::Tensor& tensor = tensors.back();
tensorflow::XlaExpression::AssignExpressionToTensor(expr, &tensor);
inputs.emplace_back(&tensor);
}
return success();
}
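// Main driver: converts the op to a NodeDef, runs the corresponding tf2xla
// kernel on the compilation device, imports the resulting HLO, and replaces
// the op with the imported values.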
LogicalResult Tf2XlaRewriter::LegalizeOp() {
for (Type ty : op_->getOperandTypes()) {
auto ranked_ty = mlir::dyn_cast<ShapedType>(ty);
if (!IsBounded(ranked_ty)) {
return op_->emitRemark()
<< "lowering requires bounded tensor operands " << ranked_ty;
}
}
if (HasSymbolRefAttr(op_)) {
return op_->emitRemark() << "ops with symbol references are not supported";
}
auto nodedef_or = tensorflow::ConvertTFDialectOpToNodeDef(
op_, name_mapper_.GetUniqueName(op_),
true);
if (!nodedef_or.ok()) {
return op_->emitRemark() << "failed to convert op to NodeDef: "
<< nodedef_or.status().ToString();
}
if (failed(PrepareParams())) return failure();
std::shared_ptr<const tensorflow::NodeProperties> props;
absl::Status status = tensorflow::NodeProperties::CreateFromNodeDef(
*nodedef_or.value(),
params_.function_library->GetFunctionLibraryDefinition(), &props);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to create NodeProperties: " << status.ToString();
}
tensorflow::OpKernel* op_kernel_raw;
status = params_.function_library->CreateKernel(props, &op_kernel_raw);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to create tf2xla kernel: " << status.ToString();
}
auto op_kernel = absl::WrapUnique(op_kernel_raw);
std::vector<int> required_constants;
status = tensorflow::XlaOpRegistry::CompileTimeConstantInputs(
*op_kernel, &required_constants);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to compute required constants: " << status.ToString();
}
llvm::SmallDenseSet<int> required_consts;
required_consts.insert(required_constants.begin(), required_constants.end());
std::vector<tensorflow::XlaExpression> expressions;
std::vector<tensorflow::Tensor> tensors;
std::vector<tensorflow::TensorValue> inputs;
expressions.reserve(op_->getNumOperands());
tensors.reserve(op_->getNumOperands());
inputs.reserve(op_->getNumOperands());
if (failed(
PrepareKernelInputs(required_consts, expressions, tensors, inputs)))
return failure();
params_.inputs = inputs;
params_.op_kernel = op_kernel.get();
llvm::SmallVector<tensorflow::AllocatorAttributes, 4> output_attr(
op_->getNumResults());
params_.output_attr_array = output_attr.data();
tensorflow::OpKernelContext op_context(¶ms_, op_->getNumResults());
device_->Compute(params_.op_kernel, &op_context);
status = op_context.status();
if (!status.ok()) {
return op_->emitRemark()
<< "compilation to HLO failed: " << status.ToString();
}
if (failed(VerifyOpResults(op_context))) return failure();
absl::StatusOr<mhlo::TupleOp> tuple_result_or_status =
CompileWithHloImporter(op_context);
if (!tuple_result_or_status.ok()) {
return op_->emitRemark() << tuple_result_or_status.status().ToString();
}
mhlo::TupleOp tuple_result = tuple_result_or_status.value();
llvm::SmallVector<Value> output_values;
if (failed(GetKernelOutputs(op_context, tuple_result, output_values))) {
return failure();
}
rewriter_.replaceOp(op_, output_values);
return success();
}
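// Gathers the kernel's XlaOp outputs into a tuple, builds the XlaComputation,
// and imports it into the MLIR module.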
absl::StatusOr<mhlo::TupleOp> Tf2XlaRewriter::CompileWithHloImporter(
tensorflow::OpKernelContext& op_context) {
std::vector<xla::XlaOp> output_values;
for (int i = 0, e = op_->getNumResults(); i < e; i++) {
tensorflow::Tensor* output = op_context.mutable_output(i);
const tensorflow::XlaExpression* expr =
tensorflow::XlaExpression::CastExpressionFromTensor(*output);
output_values.push_back(expr->AsXlaOp(&xla_builder_));
}
absl::Span<const xla::XlaOp> return_values(output_values);
xla::XlaOp root_value = xla::Tuple(&xla_builder_, return_values);
TF_ASSIGN_OR_RETURN(XlaComputation computation,
xla_builder_.Build(root_value,
false));
return ImportXlaComputation(computation);
}
mlir::LogicalResult Tf2XlaRewriter::VerifyOpResults(
tensorflow::OpKernelContext& op_context) {
for (int i = 0, e = op_->getNumResults(); i < e; i++) {
tensorflow::Tensor* output = op_context.mutable_output(i);
const tensorflow::XlaExpression* expr =
tensorflow::XlaExpression::CastExpressionFromTensor(*output);
if (expr->kind() != tensorflow::XlaExpression::Kind::kXlaOp &&
expr->kind() != tensorflow::XlaExpression::Kind::kConstant) {
return op_->emitRemark(absl::StrCat(
"expects XlaExpression of kind kXlaOp or kConstant in compiled "
"output index ",
i));
}
}
return success();
}
mlir::LogicalResult Tf2XlaRewriter::UnpackTupleResults(
mhlo::TupleOp tuple_result, llvm::SmallVector<Value>& outputs) {
if (tuple_result->getNumOperands() != op_->getNumResults()) {
return op_->emitRemark() << "Translated TF2XLA tuple has different "
"number of results than original op";
}
for (int i = 0; i < tuple_result->getNumOperands(); i++) {
outputs.push_back(tuple_result->getOperand(i));
}
tuple_result.getOperation()->erase();
return success();
}
mlir::LogicalResult Tf2XlaRewriter::GetKernelOutputs(
tensorflow::OpKernelContext& op_context, mhlo::TupleOp tuple_results,
llvm::SmallVector<Value>& outputs) {
outputs.reserve(op_->getNumResults());
return UnpackTupleResults(tuple_results, outputs);
}
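// Maps an MLIR operand to an XlaExpression: constant-producing operands become
// XLA constants, everything else becomes a parameter of the builder.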
tensorflow::XlaExpression Tf2XlaRewriter::GetExprForOperand(
Value operand, Operation* op, int64_t operand_index) {
ElementsAttr const_attr;
auto defining_op = operand.getDefiningOp();
::xla::XlaOp xla_op = xla::Parameter(&xla_builder_, operand_index,
xla::TypeToShape(operand.getType()),
std::to_string(operand_index));
if (defining_op && matchPattern(defining_op, m_Constant(&const_attr))) {
tensorflow::Tensor tensor;
auto status = tensorflow::ConvertToTensor(const_attr, &tensor);
if (!status.ok()) {
op->emitRemark() << "skipping legalization due to failed const conversion"
<< status.ToString();
return tensorflow::XlaExpression::Invalid();
}
return tensorflow::XlaExpression::Constant(tensor);
}
tensorflow::DataType dtype;
auto status = tensorflow::ConvertToDataType(operand.getType(), &dtype);
if (!status.ok()) {
op->emitRemark() << "skipping legalization due to " << status.ToString();
return tensorflow::XlaExpression::Invalid();
}
return tensorflow::XlaExpression::XlaOp(xla_op, dtype);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::OpBuilder;
using ::mlir::Operation;
using ::mlir::func::FuncOp;
using ::tsl::Status;
using ::tsl::StatusOr;
using ::xla::ReplicaGroup;
using ::xla::ShapeUtil;
using ::xla::XlaBuilder;
using ::xla::XlaComputation;
using ::xla::XlaOp;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<3xi64> {tf._user_specified_name = "resource", tf.aliasing_output = 3 : i64}) -> () attributes {tf.entry_function = {control_outputs = "stateful_normal/RngReadAndSkip,stateful_uniform/RngReadAndSkip,stateful_uniform_full_int/RngReadAndSkip", inputs = "stateful_normal_rngreadandskip_resource", outputs = "identity_RetVal,identity_1_RetVal,identity_2_RetVal"}} {
%0:3 = "tf.Unpack"(%arg0) {axis = 0 : i64} : (tensor<3xi64>) -> (tensor<i64>, tensor<i64>, tensor<i64>)
return
}
})";
XlaComputation GetTestXlaComputation() {
XlaBuilder xla_builder("test");
auto param =
Parameter(&xla_builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a");
XlaOp add = xla::Add(param, xla::ConstantR0<float>(&xla_builder, 2.0));
std::vector<XlaOp> tuple_values;
tuple_values.push_back(add);
xla::Tuple(&xla_builder, tuple_values);
return xla_builder.Build().value();
}
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& other_builder)
: mlir::PatternRewriter(other_builder) {}
~EmptyPatternRewriter() override = default;
};
class Tf2XlaRewriterTestPeer {
public:
explicit Tf2XlaRewriterTestPeer() = delete;
explicit Tf2XlaRewriterTestPeer(mlir::Operation* op)
: op_builder_(op),
empty_rewriter_(op_builder_),
tf2xla_rewriter_(op, empty_rewriter_,
"XLA_CPU_JIT") {}
absl::StatusOr<TupleOp> ImportXlaComputationIntoModule(
XlaComputation& computation) {
return tf2xla_rewriter_.ImportXlaComputation(computation);
}
private:
OpBuilder op_builder_;
EmptyPatternRewriter empty_rewriter_;
Tf2XlaRewriter tf2xla_rewriter_;
};
class Tf2XlaRewriterTest : public ::testing::Test {
public:
void SetUp() override {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
}
Status CreateMlirModule(std::string module_string = kMlirModuleStr) {
TF_ASSIGN_OR_RETURN(
module_, test::GetMlirModuleFromString(module_string, &context_));
context_.loadAllAvailableDialects();
return absl::OkStatus();
}
Status LegalizeSingleOp(Operation& op) {
SourceMgrDiagnosticHandler sourceMgrHandler(source_manager_, &context_);
OpBuilder op_builder(&op);
EmptyPatternRewriter pattern_rewriter(op_builder);
LogicalResult result =
Tf2XlaRewriter::RewriteOp(&op, pattern_rewriter,
"XLA_CPU_JIT");
if (!result.succeeded()) {
return tsl::errors::Internal("Failed to rewrite op");
}
return absl::OkStatus();
}
Status LegalizeModule(std::string module_string = kMlirModuleStr) {
TF_EXPECT_OK(CreateMlirModule(module_string));
FuncOp main = module_->lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return tsl::errors::InvalidArgument("Could not find a main function");
}
WalkResult walk_result = main.walk([&](Operation* op) {
if (op->getDialect()->getNamespace() !=
TF::TensorFlowDialect::getDialectNamespace()) {
return WalkResult::advance();
}
if (!LegalizeSingleOp(*op).ok()) {
return WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
return tsl::errors::Internal("Could not legalize all ops");
}
return absl::OkStatus();
}
mlir::func::FuncOp GetMainFunc() {
func::FuncOp main_func = module_->lookupSymbol<mlir::func::FuncOp>("main");
EXPECT_TRUE(main_func);
return main_func;
}
mlir::Operation& GetFirstOpFromMain() {
mlir::func::FuncOp main_func = GetMainFunc();
return main_func.getBody().front().front();
}
absl::StatusOr<TupleOp> ImportXlaComputationIntoModule(
XlaComputation& computation) {
SourceMgrDiagnosticHandler sourceMgrHandler(source_manager_, &context_);
mlir::Operation& first_op = GetFirstOpFromMain();
Tf2XlaRewriterTestPeer test_peer(&first_op);
return test_peer.ImportXlaComputationIntoModule(computation);
}
protected:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
llvm::SourceMgr source_manager_;
};
TEST_F(Tf2XlaRewriterTest, LegalizesOpWithTf2xlaHloImporter) {
TF_EXPECT_OK(LegalizeModule());
int num_tuple_ops = 0;
module_->walk([&num_tuple_ops](TupleOp tuple_op) { num_tuple_ops += 1; });
EXPECT_EQ(num_tuple_ops, 0);
}
TEST_F(Tf2XlaRewriterTest, ImportsXlaComputationIntoModule) {
TF_ASSERT_OK(CreateMlirModule());
XlaComputation computation = GetTestXlaComputation();
TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple,
ImportXlaComputationIntoModule(computation));
ModuleOp parent_module =
root_tuple.getOperation()->getParentOfType<ModuleOp>();
EXPECT_EQ(parent_module, *module_);
}
TEST_F(Tf2XlaRewriterTest, FailsWithoutRootTuple) {
TF_ASSERT_OK(CreateMlirModule());
XlaBuilder xla_builder("test_fail");
xla::Add(xla::ConstantR0<float>(&xla_builder, 1.0),
xla::ConstantR0<float>(&xla_builder, 2.0));
XlaComputation bad_computation = xla_builder.Build().value();
EXPECT_FALSE(ImportXlaComputationIntoModule(bad_computation).ok());
}
TEST_F(Tf2XlaRewriterTest, ImportsSingleComputation) {
XlaBuilder builder("test_builder");
XlaComputation to_apply;
{
auto sub_builder = builder.CreateSubBuilder("add");
auto arg0 = Parameter(sub_builder.get(), 0,
ShapeUtil::MakeScalarShape(xla::F32), "x");
auto arg1 = Parameter(sub_builder.get(), 1,
ShapeUtil::MakeScalarShape(xla::F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(xla::F32, {4, 16}), "x");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
XlaOp reduce_scatter =
ReduceScatter(x, to_apply, 1, 2,
{group});
std::vector<XlaOp> tuple_values;
tuple_values.push_back(reduce_scatter);
xla::Tuple(&builder, tuple_values);
TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
EXPECT_EQ(computation.proto().computations_size(), 2);
TF_ASSERT_OK(CreateMlirModule());
TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple,
ImportXlaComputationIntoModule(computation));
EXPECT_TRUE(root_tuple);
int num_func_ops = 0;
module_->walk([&num_func_ops](func::FuncOp func_op) { num_func_ops++; });
EXPECT_EQ(num_func_ops, 1);
}
TEST_F(Tf2XlaRewriterTest, InsertsConstantParameters) {
static constexpr char kModuleWithConstParam[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = "tf.Const"() {value = dense<1.42> : tensor<2xf32>} : () -> tensor<2xf32>
%1 = "tf.Atan2"(%arg0, %0) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
func.return %0 : tensor<2xf32>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithConstParam));
}
TEST_F(Tf2XlaRewriterTest, DoesntEnforceCompileTimeConstantCheck) {
static constexpr char kModuleWithNonConstParam[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main(%arg0: tensor<3x3x10xbf16>, %arg1: tensor<3xi32>) -> tensor<1x?x4xbf16> attributes {allow_soft_placement = false, tf.entry_function = {control_outputs = "", inputs = "_arg0,_arg1,_arg2", outputs = "_retval0"}} {
%cst = "tf.Const"() {value = dense<[1, -1, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
%0 = "tf.Slice"(%arg0, %arg1, %cst) {_XlaHasReferenceVars = false, _xla_inferred_shapes = [#tf_type.shape<1x?x4>], device = "/job:localhost/replica:0/task:0/device:TPU:0"} : (tensor<3x3x10xbf16>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x?x4xbf16>
return %0 : tensor<1x?x4xbf16>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithNonConstParam));
}
TEST_F(Tf2XlaRewriterTest, ErrorsWithInvalidNumberOfParametersToArgs) {
XlaBuilder builder("test_builder");
XlaComputation to_apply;
{
auto sub_builder = builder.CreateSubBuilder("add");
auto arg0 = Parameter(sub_builder.get(), 0,
ShapeUtil::MakeScalarShape(xla::F32), "x");
auto arg1 = Parameter(sub_builder.get(), 1,
ShapeUtil::MakeScalarShape(xla::F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto a = Parameter(&builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a");
auto b = Parameter(&builder, 1, ShapeUtil::MakeScalarShape(xla::F32), "b");
XlaOp call_op = xla::Call(&builder, to_apply, {a, b});
std::vector<XlaOp> tuple_values;
tuple_values.push_back(call_op);
xla::Tuple(&builder, tuple_values);
TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
EXPECT_EQ(computation.proto().computations_size(), 2);
TF_ASSERT_OK(CreateMlirModule());
absl::StatusOr<TupleOp> status_or_tuple_op =
ImportXlaComputationIntoModule(computation);
EXPECT_FALSE(status_or_tuple_op.ok());
}
}
} |
1,197 | cpp | tensorflow/tensorflow | legalization_op_config | tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc | tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_TRANSFORMS_LEGALIZATION_OP_CONFIG_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_TRANSFORMS_LEGALIZATION_OP_CONFIG_H_
#include "mlir/IR/Operation.h"
#include "mlir/Support/TypeID.h"
namespace mlir {
namespace mhlo {
bool IsOpLegalizedWithMlir(Operation& op);
bool IsTypeLegalizedWithMlir(const TypeID& type_id);
bool IsDynamicPadderOp(const TypeID& type_id);
bool HasTf2XlaFallback(const TypeID& type_id);
bool IsOpAllowedTf2xlaFallback(const TypeID& type_id);
bool IsOpAllowedTf2xlaPreferred(const TypeID& type_id);
}
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "llvm/ADT/DenseSet.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tpu_embedding_ops_registry.h"
namespace mlir {
namespace mhlo {
namespace {
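// Ops that are always legalized with MLIR lowering patterns rather than the
// tf2xla kernel fallback.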
const llvm::DenseSet<mlir::TypeID>& MlirAlwaysOps() {
static const llvm::DenseSet<mlir::TypeID>* ops = new llvm::DenseSet<
mlir::TypeID>{
TypeID::get<TF::FusedBatchNormV3Op>(),
TypeID::get<TF::FusedBatchNormGradV3Op>(),
TypeID::get<TF::XlaReduceScatterOp>(),
TypeID::get<TF::ModOp>(),
TypeID::get<TF::MatrixDiagPartV3Op>(),
TypeID::get<TF::AbsOp>(),
TypeID::get<TF::AtanOp>(),
TypeID::get<TF::AvgPool3DOp>(),
TypeID::get<TF::BiasAddGradOp>(),
TypeID::get<TF::CeilOp>(),
TypeID::get<TF::CheckNumericsOp>(),
TypeID::get<TF::CosOp>(),
TypeID::get<TF::TanOp>(),
TypeID::get<TF::DiagPartOp>(),
TypeID::get<TF::EinsumOp>(),
TypeID::get<TF::ExpOp>(),
TypeID::get<TF::Expm1Op>(),
TypeID::get<TF::FakeQuantWithMinMaxArgsOp>(),
TypeID::get<TF::FloorOp>(),
TypeID::get<TF::IFFTOp>(),
TypeID::get<TF::ImagOp>(),
TypeID::get<TF::IsFiniteOp>(),
TypeID::get<TF::IsInfOp>(),
TypeID::get<TF::IsNanOp>(),
TypeID::get<TF::LgammaOp>(),
TypeID::get<TF::Log1pOp>(),
TypeID::get<TF::LogSoftmaxOp>(),
TypeID::get<TF::MatrixBandPartOp>(),
TypeID::get<TF::MaxPool3DGradOp>(),
TypeID::get<TF::PreventGradientOp>(),
TypeID::get<TF::RandomShuffleOp>(),
TypeID::get<TF::RealOp>(),
TypeID::get<TF::ReciprocalOp>(),
TypeID::get<TF::ReluOp>(),
TypeID::get<TF::Relu6Op>(),
TypeID::get<TF::ReluGradOp>(),
TypeID::get<TF::RsqrtOp>(),
TypeID::get<TF::SelectOp>(),
TypeID::get<TF::SigmoidOp>(),
TypeID::get<TF::SignOp>(),
TypeID::get<TF::SoftmaxOp>(),
TypeID::get<TF::SqrtOp>(),
TypeID::get<TF::TanhOp>(),
TypeID::get<TF::XlaConvV2Op>(),
TypeID::get<TF::XlaDotOp>(),
TypeID::get<TF::XlaDotV2Op>(),
TypeID::get<TF::XlaDynamicSliceOp>(),
TypeID::get<TF::XlaEinsumOp>(),
TypeID::get<TF::XlaReduceWindowOp>(),
TypeID::get<TF::XlaReplicaIdOp>(),
TypeID::get<TF::XlaRngBitGeneratorOp>(),
TypeID::get<TF::XlaSelectAndScatterOp>(),
TypeID::get<TF::XlaSortOp>(),
TypeID::get<TF::XlaVariadicReduceV2Op>(),
TypeID::get<TF::XlaVariadicSortOp>(),
TypeID::get<TF::RiscAddOp>(),
TypeID::get<TF::RiscDotOp>(),
TypeID::get<TF::ConstOp>(),
TypeID::get<TF::AssertOp>(),
TypeID::get<TF::CrossReplicaSumOp>(),
TypeID::get<TF::InfeedDequeueTupleOp>(),
TypeID::get<TF::OutfeedEnqueueTupleOp>(),
TypeID::get<TF::XlaShardingOp>(),
TypeID::get<TF::IfRegionOp>(),
TypeID::get<TF::WhileRegionOp>(),
TypeID::get<TF::CaseRegionOp>(),
TypeID::get<TF::YieldOp>(),
};
return *ops;
}
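// Ops that may be legalized through the tf2xla kernel fallback; registered TPU
// embedding ops are appended to the set.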
bool IsOpTypeAllowedTf2XlaFallback(const TypeID& type_id) {
static auto* ops = [] {
llvm::SmallDenseSet<mlir::TypeID, 512>* ops_set = new llvm::SmallDenseSet<
mlir::TypeID, 512>{
TypeID::get<TF::AcoshOp>(),
TypeID::get<TF::AcosOp>(),
TypeID::get<TF::AddNOp>(),
TypeID::get<TF::AddV2Op>(),
TypeID::get<TF::AngleOp>(),
TypeID::get<TF::AdjustContrastv2Op>(),
TypeID::get<TF::AdjustHueOp>(),
TypeID::get<TF::AdjustSaturationOp>(),
TypeID::get<TF::ApproximateEqualOp>(),
TypeID::get<TF::ApproxTopKOp>(),
TypeID::get<TF::ArgMaxOp>(),
TypeID::get<TF::ArgMinOp>(),
TypeID::get<TF::AsinhOp>(),
TypeID::get<TF::AsinOp>(),
TypeID::get<TF::Atan2Op>(),
TypeID::get<TF::AtanhOp>(),
TypeID::get<TF::BatchMatMulV2Op>(),
TypeID::get<TF::BatchMatMulV3Op>(),
TypeID::get<TF::BatchToSpaceOp>(),
TypeID::get<TF::BesselI0eOp>(),
TypeID::get<TF::BesselI1eOp>(),
TypeID::get<TF::BetaincOp>(),
TypeID::get<TF::BiasAddOp>(),
TypeID::get<TF::BitwiseAndOp>(),
TypeID::get<TF::BitwiseOrOp>(),
TypeID::get<TF::BitwiseXorOp>(),
TypeID::get<TF::BucketizeOp>(),
TypeID::get<TF::CaseOp>(),
TypeID::get<TF::CastOp>(),
TypeID::get<TF::ClipByValueOp>(),
TypeID::get<TF::CholeskyOp>(),
TypeID::get<TF::CollectiveReduceV2Op>(),
TypeID::get<TF::ComplexAbsOp>(),
TypeID::get<TF::ConjugateTransposeOp>(),
TypeID::get<TF::ConcatV2Op>(),
TypeID::get<TF::ConvOp>(),
TypeID::get<TF::CoshOp>(),
TypeID::get<TF::CrossOp>(),
TypeID::get<TF::CumulativeLogsumexpOp>(),
TypeID::get<TF::DataFormatDimMapOp>(),
TypeID::get<TF::DataFormatVecPermuteOp>(),
TypeID::get<TF::DepthToSpaceOp>(),
TypeID::get<TF::DepthwiseConv2dNativeBackpropFilterOp>(),
TypeID::get<TF::DepthwiseConv2dNativeBackpropInputOp>(),
TypeID::get<TF::DiagOp>(),
TypeID::get<TF::DigammaOp>(),
TypeID::get<TF::DivNoNanOp>(),
TypeID::get<TF::DynamicPartitionOp>(),
TypeID::get<TF::EluGradOp>(),
TypeID::get<TF::EluOp>(),
TypeID::get<TF::EnsureShapeOp>(),
TypeID::get<TF::EqualOp>(),
TypeID::get<TF::ErfcOp>(),
TypeID::get<TF::ErfinvOp>(),
TypeID::get<TF::ErfOp>(),
TypeID::get<TF::ExtractImagePatchesOp>(),
TypeID::get<TF::FFT2DOp>(),
TypeID::get<TF::FFT3DOp>(),
TypeID::get<TF::FFTOp>(),
TypeID::get<TF::FakeParamOp>(),
TypeID::get<TF::FakeQuantWithMinMaxArgsGradientOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsGradientOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsPerChannelOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsPerChannelGradientOp>(),
TypeID::get<TF::FloorDivOp>(),
TypeID::get<TF::FloorModOp>(),
TypeID::get<TF::GetMinibatchesInCsrWithPhysicalReplicaOp>(),
TypeID::get<TF::GetMinibatchSplitsWithPhysicalReplicaOp>(),
TypeID::get<TF::GreaterOp>(),
TypeID::get<TF::HSVToRGBOp>(),
TypeID::get<TF::IFFT2DOp>(),
TypeID::get<TF::IFFT3DOp>(),
TypeID::get<TF::IRFFT2DOp>(),
TypeID::get<TF::IRFFT3DOp>(),
TypeID::get<TF::IgammaOp>(),
TypeID::get<TF::IgammacOp>(),
TypeID::get<TF::IgammaGradAOp>(),
TypeID::get<TF::InplaceAddOp>(),
TypeID::get<TF::InTopKV2Op>(),
TypeID::get<TF::InvertOp>(),
TypeID::get<TF::InvOp>(),
TypeID::get<TF::KthOrderStatisticOp>(),
TypeID::get<TF::LRNOp>(),
TypeID::get<TF::LRNGradOp>(),
TypeID::get<TF::LeakyReluGradOp>(),
TypeID::get<TF::LeakyReluOp>(),
TypeID::get<TF::LeftShiftOp>(),
TypeID::get<TF::LessOp>(),
TypeID::get<TF::ListDiffOp>(),
TypeID::get<TF::LogicalAndOp>(),
TypeID::get<TF::LogicalNotOp>(),
TypeID::get<TF::LogOp>(),
TypeID::get<TF::LowerBoundOp>(),
TypeID::get<TF::MakeUniqueOp>(),
TypeID::get<TF::MatMulOp>(),
TypeID::get<TF::MatrixDiagV3Op>(),
TypeID::get<TF::MatrixInverseOp>(),
TypeID::get<TF::MatrixSetDiagV3Op>(),
TypeID::get<TF::MatrixSolveOp>(),
TypeID::get<TF::MatrixTriangularSolveOp>(),
TypeID::get<TF::MaxPool3DGradGradOp>(),
TypeID::get<TF::MaxPoolGradOp>(),
TypeID::get<TF::MaxPoolGradGradOp>(),
TypeID::get<TF::MirrorPadOp>(),
TypeID::get<TF::MirrorPadGradOp>(),
TypeID::get<TF::MulOp>(),
TypeID::get<TF::MultinomialOp>(),
TypeID::get<TF::NdtriOp>(),
TypeID::get<TF::NegOp>(),
TypeID::get<TF::NextAfterOp>(),
TypeID::get<TF::NonMaxSuppressionV4Op>(),
TypeID::get<TF::NotEqualOp>(),
TypeID::get<TF::PadOp>(),
TypeID::get<TF::ParameterizedTruncatedNormalOp>(),
TypeID::get<TF::PlaceholderWithDefaultOp>(),
TypeID::get<TF::PolygammaOp>(),
TypeID::get<TF::PopulationCountOp>(),
TypeID::get<TF::PowOp>(),
TypeID::get<TF::QrOp>(),
TypeID::get<TF::QuantizeAndDequantizeOp>(),
TypeID::get<TF::QuantizeAndDequantizeV2Op>(),
TypeID::get<TF::QuantizeAndDequantizeV3Op>(),
TypeID::get<TF::QuantizeAndDequantizeV4Op>(),
TypeID::get<TF::RFFT2DOp>(),
TypeID::get<TF::RFFT3DOp>(),
TypeID::get<TF::RGBToHSVOp>(),
TypeID::get<TF::RandomUniformIntOp>(),
TypeID::get<TF::RandomUniformOp>(),
TypeID::get<TF::RealDivOp>(),
TypeID::get<TF::ReciprocalGradOp>(),
TypeID::get<TF::Relu6GradOp>(),
TypeID::get<TF::ResizeBilinearOp>(),
TypeID::get<TF::ResizeBilinearGradOp>(),
TypeID::get<TF::ResizeNearestNeighborOp>(),
TypeID::get<TF::ResizeNearestNeighborGradOp>(),
TypeID::get<TF::ReverseSequenceOp>(),
TypeID::get<TF::RightShiftOp>(),
TypeID::get<TF::RintOp>(),
TypeID::get<TF::RollOp>(),
TypeID::get<TF::RoundOp>(),
TypeID::get<TF::SegmentSumV2Op>(),
TypeID::get<TF::SegmentProdV2Op>(),
TypeID::get<TF::SegmentMinV2Op>(),
TypeID::get<TF::SegmentMaxV2Op>(),
TypeID::get<TF::SelectV2Op>(),
TypeID::get<TF::SelfAdjointEigV2Op>(),
TypeID::get<TF::SeluGradOp>(),
TypeID::get<TF::SeluOp>(),
TypeID::get<TF::SigmoidGradOp>(),
TypeID::get<TF::SinOp>(),
TypeID::get<TF::SliceOp>(),
TypeID::get<TF::SoftplusGradOp>(),
TypeID::get<TF::SoftsignGradOp>(),
TypeID::get<TF::SoftsignOp>(),
TypeID::get<TF::SpaceToBatchNDOp>(),
TypeID::get<TF::SpaceToBatchOp>(),
TypeID::get<TF::SpaceToDepthOp>(),
TypeID::get<TF::SparseToDenseOp>(),
TypeID::get<TF::SquareOp>(),
TypeID::get<TF::StatelessMultinomialOp>(),
TypeID::get<TF::StatelessParameterizedTruncatedNormalOp>(),
TypeID::get<TF::StatelessRandomGetAlgOp>(),
TypeID::get<TF::StatelessRandomGetKeyCounterOp>(),
TypeID::get<TF::StatelessRandomGetKeyCounterAlgOp>(),
TypeID::get<TF::StatelessRandomNormalOp>(),
TypeID::get<TF::StatelessRandomNormalV2Op>(),
TypeID::get<TF::StatelessRandomUniformOp>(),
TypeID::get<TF::StatelessRandomUniformFullIntOp>(),
TypeID::get<TF::StatelessRandomUniformFullIntV2Op>(),
TypeID::get<TF::StatelessRandomUniformV2Op>(),
TypeID::get<TF::StatelessRandomUniformIntOp>(),
TypeID::get<TF::StatelessRandomUniformIntV2Op>(),
TypeID::get<TF::StatelessTruncatedNormalOp>(),
TypeID::get<TF::StatelessTruncatedNormalV2Op>(),
TypeID::get<TF::StoreMinibatchStatisticsInFdoOp>(),
TypeID::get<TF::StridedSliceOp>(),
TypeID::get<TF::SubOp>(),
TypeID::get<TF::SvdOp>(),
TypeID::get<TF::TanOp>(),
TypeID::get<TF::TensorScatterAddOp>(),
TypeID::get<TF::TensorScatterSubOp>(),
TypeID::get<TF::TPUEmbeddingActivationsOp>(),
TypeID::get<TF::TopKUniqueOp>(),
TypeID::get<TF::TopKWithUniqueOp>(),
TypeID::get<TF::TransposeOp>(),
TypeID::get<TF::TridiagonalSolveOp>(),
TypeID::get<TF::TridiagonalMatMulOp>(),
TypeID::get<TF::TruncateDivOp>(),
TypeID::get<TF::TruncatedNormalOp>(),
TypeID::get<TF::TruncateModOp>(),
TypeID::get<TF::UniqueOp>(),
TypeID::get<TF::UnpackOp>(),
TypeID::get<TF::UpperBoundOp>(),
TypeID::get<TF::WhereOp>(),
TypeID::get<TF::XlaSendTPUEmbeddingGradientsOp>(),
TypeID::get<TF::XlaBroadcastHelperOp>(),
TypeID::get<TF::XlaCallModuleOp>(),
TypeID::get<TF::XlaCustomCallV2Op>(),
TypeID::get<TF::XlaDynamicUpdateSliceOp>(),
TypeID::get<TF::XlaKeyValueSortOp>(),
TypeID::get<TF::XlaPadOp>(),
TypeID::get<TF::XlaSetBoundOp>(),
TypeID::get<TF::XlaSetDynamicDimensionSizeOp>(),
TypeID::get<TF::XlaSparseCoreAdagradMomentumOp>(),
TypeID::get<TF::XlaSparseCoreAdagradOp>(),
TypeID::get<TF::XlaSparseCoreAdamOp>(),
TypeID::get<TF::XlaSparseCoreFtrlOp>(),
TypeID::get<TF::XlaSparseCoreSgdOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithAdagradAndCsrInputOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithAdamAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithFtrlAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithSgdAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulWithCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulWithStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeOp>(),
TypeID::get<TF::XlaSpmdFullToShardShapeOp>(),
TypeID::get<TF::XlaSpmdShardToFullShapeOp>(),
TypeID::get<TF::XlaSvdOp>(),
};
for (auto op_type_id :
TF::TPUEmbeddingOpsRegistry::Global().GetOpsTypeIds()) {
ops_set->insert(op_type_id);
}
return ops_set;
}();
return ops->count(type_id);
}
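// Ops for which, as the name suggests, lowering through the tf2xla fallback
// is preferred over the MLIR-native patterns. Like the set above, this is a
// hand-maintained allowlist.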
bool IsOpTypeAllowedTf2XlaPreferred(const TypeID& type_id) {
static auto* ops =
new llvm::SmallDenseSet<mlir::TypeID, 512>{
TypeID::get<TF::AllOp>(),
TypeID::get<TF::AllToAllOp>(),
TypeID::get<TF::AnyOp>(),
TypeID::get<TF::AvgPoolOp>(),
TypeID::get<TF::AvgPool3DGradOp>(),
TypeID::get<TF::AvgPoolGradOp>(),
TypeID::get<TF::BatchToSpaceNDOp>(),
TypeID::get<TF::BitcastOp>(),
TypeID::get<TF::BroadcastToOp>(),
TypeID::get<TF::CollectivePermuteOp>(),
TypeID::get<TF::ComplexOp>(),
TypeID::get<TF::ConcatV2Op>(),
TypeID::get<TF::ConjOp>(),
TypeID::get<TF::Conv2DOp>(),
TypeID::get<TF::Conv2DBackpropFilterOp>(),
TypeID::get<TF::Conv2DBackpropInputOp>(),
TypeID::get<TF::Conv3DOp>(),
TypeID::get<TF::Conv3DBackpropFilterV2Op>(),
TypeID::get<TF::Conv3DBackpropInputV2Op>(),
TypeID::get<TF::CumprodOp>(),
TypeID::get<TF::CumsumOp>(),
TypeID::get<TF::DepthwiseConv2dNativeOp>(),
TypeID::get<TF::DivOp>(),
TypeID::get<TF::DynamicStitchOp>(),
TypeID::get<TF::_EagerConstOp>(),
TypeID::get<TF::EmptyOp>(),
TypeID::get<TF::ExpandDimsOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsOp>(),
TypeID::get<TF::FillOp>(),
TypeID::get<TF::FusedBatchNormOp>(),
TypeID::get<TF::FusedBatchNormGradOp>(),
TypeID::get<TF::FusedBatchNormGradV2Op>(),
TypeID::get<TF::FusedBatchNormV2Op>(),
TypeID::get<TF::_FusedConv2DOp>(),
TypeID::get<TF::GatherNdOp>(),
TypeID::get<TF::GatherV2Op>(),
TypeID::get<TF::GreaterEqualOp>(),
TypeID::get<TF::IdentityOp>(),
TypeID::get<TF::IdentityNOp>(),
TypeID::get<TF::InplaceUpdateOp>(),
TypeID::get<TF::InvertPermutationOp>(),
TypeID::get<TF::IRFFTOp>(),
TypeID::get<TF::L2LossOp>(),
TypeID::get<TF::LegacyCallOp>(),
TypeID::get<TF::LessEqualOp>(),
TypeID::get<TF::LinSpaceOp>(),
TypeID::get<TF::LogicalOrOp>(),
TypeID::get<TF::MaxOp>(),
TypeID::get<TF::MaximumOp>(),
TypeID::get<TF::MaxPoolOp>(),
TypeID::get<TF::MaxPool3DOp>(),
TypeID::get<TF::MeanOp>(),
TypeID::get<TF::MinOp>(),
TypeID::get<TF::MinimumOp>(),
TypeID::get<TF::MulNoNanOp>(),
TypeID::get<TF::OneHotOp>(),
TypeID::get<TF::OnesLikeOp>(),
TypeID::get<TF::PackOp>(),
TypeID::get<TF::PadV2Op>(),
TypeID::get<TF::ParallelDynamicStitchOp>(),
TypeID::get<TF::PartitionedCallOp>(),
TypeID::get<TF::ProdOp>(),
TypeID::get<TF::QrOp>(),
TypeID::get<TF::RandomStandardNormalOp>(),
TypeID::get<TF::RandomUniformOp>(),
TypeID::get<TF::RangeOp>(),
TypeID::get<TF::ReshapeOp>(),
TypeID::get<TF::ReverseV2Op>(),
TypeID::get<TF::RFFTOp>(),
TypeID::get<TF::RsqrtGradOp>(),
TypeID::get<TF::ScatterNdOp>(),
TypeID::get<TF::ShapeOp>(),
TypeID::get<TF::SinhOp>(),
TypeID::get<TF::SizeOp>(),
TypeID::get<TF::SliceOp>(),
TypeID::get<TF::SoftmaxCrossEntropyWithLogitsOp>(),
TypeID::get<TF::SoftplusOp>(),
TypeID::get<TF::SparseMatMulOp>(),
TypeID::get<TF::SparseSoftmaxCrossEntropyWithLogitsOp>(),
TypeID::get<TF::SplitOp>(),
TypeID::get<TF::SplitVOp>(),
TypeID::get<TF::SqrtGradOp>(),
TypeID::get<TF::SquaredDifferenceOp>(),
TypeID::get<TF::SqueezeOp>(),
TypeID::get<TF::StatelessParameterizedTruncatedNormalOp>(),
TypeID::get<TF::StatefulPartitionedCallOp>(),
TypeID::get<TF::StopGradientOp>(),
TypeID::get<TF::StridedSliceOp>(),
TypeID::get<TF::StridedSliceGradOp>(),
TypeID::get<TF::SumOp>(),
TypeID::get<TF::TanhGradOp>(),
TypeID::get<TF::TensorScatterUpdateOp>(),
TypeID::get<TF::TileOp>(),
TypeID::get<TF::TopKV2Op>(),
TypeID::get<TF::_UnaryOpsCompositionOp>(),
TypeID::get<TF::UnsortedSegmentMaxOp>(),
TypeID::get<TF::UnsortedSegmentMinOp>(),
TypeID::get<TF::UnsortedSegmentProdOp>(),
TypeID::get<TF::UnsortedSegmentSumOp>(),
TypeID::get<TF::XdivyOp>(),
TypeID::get<TF::XlaSendTPUEmbeddingGradientsOp>(),
TypeID::get<TF::XlaAllReduceOp>(),
TypeID::get<TF::XlaGatherOp>(),
TypeID::get<TF::Xlog1pyOp>(),
TypeID::get<TF::XlogyOp>(),
TypeID::get<TF::ZerosLikeOp>(),
TypeID::get<TF::ZetaOp>(),
};
return ops->contains(type_id);
}
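// Ops whose results may have shapes that are only known at run time;
// IsDynamicPadderOp() below reports membership in this set.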
const llvm::DenseSet<mlir::TypeID>& DynamicTensorflowOps() {
static const llvm::DenseSet<mlir::TypeID>* ops =
new llvm::DenseSet<mlir::TypeID>{
TypeID::get<mlir::TF::DynamicPartitionOp>(),
TypeID::get<mlir::TF::UniqueOp>(),
TypeID::get<mlir::TF::WhereOp>(),
TypeID::get<mlir::TF::XlaSetDynamicDimensionSizeOp>(),
};
return *ops;
}
}
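// Public query API. A typical caller resolves an op to its TypeID and then
// consults these predicates, e.g. (sketch):
//
//   const TypeID id = op->getRegisteredInfo()->getTypeID();
//   if (IsTypeLegalizedWithMlir(id) || HasTf2XlaFallback(id)) {
//     // The op has at least one legalization path.
//   }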
bool HasTf2XlaFallback(const TypeID& type_id) {
return IsOpTypeAllowedTf2XlaFallback(type_id) ||
IsOpTypeAllowedTf2XlaPreferred(type_id);
}
bool IsOpLegalizedWithMlir(Operation& op) {
auto abstractOp = op.getRegisteredInfo();
if (!abstractOp) return false;
return IsTypeLegalizedWithMlir(abstractOp->getTypeID());
}
bool IsTypeLegalizedWithMlir(const TypeID& type_id) {
return MlirAlwaysOps().contains(type_id);
}
bool IsOpAllowedTf2xlaFallback(const TypeID& type_id) {
return IsOpTypeAllowedTf2XlaFallback(type_id);
}
bool IsOpAllowedTf2xlaPreferred(const TypeID& type_id) {
return IsOpTypeAllowedTf2XlaPreferred(type_id);
}
bool IsDynamicPadderOp(const TypeID& type_id) {
return DynamicTensorflowOps().contains(type_id);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include <optional>
#include <set>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
using func::FuncOp;
using mlir::ModuleOp;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<3xi64> {tf._user_specified_name = "resource", tf.aliasing_output = 3 : i64}) -> () attributes {tf.entry_function = {control_outputs = "stateful_normal/RngReadAndSkip,stateful_uniform/RngReadAndSkip,stateful_uniform_full_int/RngReadAndSkip", inputs = "stateful_normal_rngreadandskip_resource", outputs = "identity_RetVal,identity_1_RetVal,identity_2_RetVal"}} {
%0:3 = "tf.Unpack"(%arg0) {axis = 0 : i64} : (tensor<3xi64>) -> (tensor<i64>, tensor<i64>, tensor<i64>)
return
}
})";
class LegalizationOpConfigTest : public ::testing::Test {
public:
absl::Status CreateMlirModule(std::string module_string = kMlirModuleStr) {
TF_ASSIGN_OR_RETURN(
module_, test::GetMlirModuleFromString(module_string, &context_));
context_.loadAllAvailableDialects();
return absl::OkStatus();
}
absl::StatusOr<FuncOp> GetMain() {
func::FuncOp main = module_->lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return absl::NotFoundError("Could not find main function");
}
return main;
}
protected:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
};
TEST_F(LegalizationOpConfigTest, FailsWithExpectsLegalizationWithMlir) {
TF_EXPECT_OK(CreateMlirModule());
EXPECT_FALSE(IsOpLegalizedWithMlir(*module_->getOperation()));
}
TEST_F(LegalizationOpConfigTest, ExpectsFalseForNonMlirOps) {
TF_EXPECT_OK(CreateMlirModule());
TF_ASSERT_OK_AND_ASSIGN(FuncOp main, GetMain());
main.walk([&](Operation* op) { EXPECT_FALSE(IsOpLegalizedWithMlir(*op)); });
}
TEST_F(LegalizationOpConfigTest, ExpectsTrueForMlirTypeID) {
EXPECT_TRUE(IsTypeLegalizedWithMlir(TypeID::get<TF::ModOp>()));
EXPECT_FALSE(HasTf2XlaFallback(TypeID::get<TF::ModOp>()));
EXPECT_FALSE(IsOpAllowedTf2xlaFallback(TypeID::get<TF::ModOp>()));
EXPECT_FALSE(IsOpAllowedTf2xlaPreferred(TypeID::get<TF::ModOp>()));
}
TEST_F(LegalizationOpConfigTest, ExpectsTrueForTF2XLATypeID) {
EXPECT_TRUE(HasTf2XlaFallback(TypeID::get<TF::AllOp>()));
EXPECT_TRUE(IsOpAllowedTf2xlaPreferred(TypeID::get<TF::AllOp>()));
EXPECT_FALSE(IsTypeLegalizedWithMlir(TypeID::get<TF::AllOp>()));
}
TEST_F(LegalizationOpConfigTest, ChecksDynamicPadderOps) {
EXPECT_TRUE(
IsDynamicPadderOp(TypeID::get<TF::XlaSetDynamicDimensionSizeOp>()));
EXPECT_FALSE(IsDynamicPadderOp(TypeID::get<TF::ConstOp>()));
}
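// The exact counts asserted below are tied to the allowlists in
// legalization_op_config.cc and need updating whenever ops are added to or
// removed from those sets.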
TEST_F(LegalizationOpConfigTest, CountLoweringsSet) {
int mlir_lowering_count = 0;
int tf2xla_fallback_count = 0;
int non_categorized_count = 0;
DialectRegistry dialect_registry;
dialect_registry.insert<mlir::TF::TensorFlowDialect>();
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
for (auto operation : context.getRegisteredOperations()) {
if (IsTypeLegalizedWithMlir(operation.getTypeID())) {
mlir_lowering_count++;
} else if (HasTf2XlaFallback(operation.getTypeID())) {
tf2xla_fallback_count++;
} else {
non_categorized_count++;
}
}
EXPECT_EQ(mlir_lowering_count, 67);
EXPECT_EQ(tf2xla_fallback_count, 322);
EXPECT_EQ(non_categorized_count, 430);
}
TEST_F(LegalizationOpConfigTest, CountTypesWhichHaveBothMlirAndTf2xlaFallback) {
int double_lowering_count = 0;
DialectRegistry dialect_registry;
dialect_registry.insert<mlir::TF::TensorFlowDialect>();
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
for (auto operation : context.getRegisteredOperations()) {
if (IsTypeLegalizedWithMlir(operation.getTypeID()) &&
HasTf2XlaFallback(operation.getTypeID())) {
double_lowering_count++;
}
}
EXPECT_EQ(double_lowering_count, 1);
}
TEST_F(LegalizationOpConfigTest, CountAllMlirLoweringPatterns) {
DialectRegistry dialect_registry;
mlir::RegisterCommonToolingDialects(dialect_registry);
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
RewritePatternSet mlir_legalize_lower_patterns(&context);
PopulateLegalizeTfPatterns(&context, &mlir_legalize_lower_patterns);
int mlir_only_patterns = 0;
for (auto& pattern : mlir_legalize_lower_patterns.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
if (!pat_op_name) {
continue;
}
if (!HasTf2XlaFallback(pat_op_name->getRegisteredInfo()->getTypeID())) {
mlir_only_patterns++;
}
}
EXPECT_EQ(mlir_only_patterns, 63);
}
TEST_F(LegalizationOpConfigTest, MlirLoweringWithoutXlaKernel) {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
std::vector<const tensorflow::KernelDef*> kernel_defs =
tensorflow::XlaOpRegistry::DeviceKernels(
tensorflow::DEVICE_CPU_XLA_JIT,
true);
std::set<std::string> xla_op_kernels;
for (auto kernel_def : kernel_defs) {
std::string tf_name = "tf." + kernel_def->op();
xla_op_kernels.insert(tf_name);
}
DialectRegistry dialect_registry;
mlir::RegisterCommonToolingDialects(dialect_registry);
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
RewritePatternSet mlir_legalize_lower_patterns(&context);
PopulateLegalizeTfPatterns(&context, &mlir_legalize_lower_patterns);
int mlir_without_xla_count = 0;
for (auto& pattern : mlir_legalize_lower_patterns.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
if (!pat_op_name) {
continue;
}
if (xla_op_kernels.find(pat_op_name->getStringRef().str()) ==
xla_op_kernels.end()) {
mlir_without_xla_count++;
}
}
EXPECT_EQ(mlir_without_xla_count, 13);
}
}
} |
1,198 | cpp | tensorflow/tensorflow | attrs_and_constraints | tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc | tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_ATTRS_AND_CONSTRAINTS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_ATTRS_AND_CONSTRAINTS_H_
#include <array>
#include <cstdint>
#include <optional>
#include <type_traits>
#include "absl/status/statusor.h"
#include "llvm/Support/Debug.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h"
namespace mlir::quant {
constexpr char kAttrMapAttribute[] = "attr_map";
inline constexpr StringRef kQuantizationMethodAttr = "_quantization_method";
inline constexpr std::array<int64_t, 4> kNhwcToNchwPermutation = {0, 3, 1, 2};
inline constexpr std::array<int64_t, 4> kNchwToNhwcPermutation = {0, 2, 3, 1};
inline constexpr std::array<int64_t, 4> kOihwToHwioPermutation = {2, 3, 1, 0};
bool HasStaticShape(Value value);
bool HasStaticShapeAtDims(Value value, ArrayRef<int> dims);
inline bool HasRankOf(Value value, const int64_t rank) {
auto shaped_type = mlir::dyn_cast_or_null<ShapedType>(value.getType());
return shaped_type && shaped_type.hasRank() && shaped_type.getRank() == rank;
}
Type CloneTypeWithNewElementType(Type old_type, Type element_type);
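// Creates a constant op holding `values` of the given `shape`. Integral T
// yields an integer constant of width 8 * sizeof(T); float yields an f32
// constant. Example (sketch):
//
//   Value scale = CreateConstValue<float>(builder, loc, /*shape=*/{2},
//                                         /*values=*/{0.5f, 0.25f});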
template <typename T,
typename = std::enable_if_t<
(std::is_integral_v<T> || std::is_same_v<T, float>), void>>
Value CreateConstValue(OpBuilder& builder, const Location loc,
const SmallVector<int64_t>& shape,
const SmallVector<T>& values) {
if constexpr (std::is_integral_v<T>) {
auto shape_type =
RankedTensorType::get(shape, builder.getIntegerType(sizeof(T) * 8));
const auto attr = DenseIntElementsAttr::get(shape_type, values);
return builder.create<TF::ConstOp>(loc, attr);
}
const auto type = RankedTensorType::get(shape, builder.getF32Type());
const auto value_attr = DenseFPElementsAttr::get(type, values);
return builder.create<TF::ConstOp>(loc, value_attr);
}
template <typename T>
Value Create1DConstValue(OpBuilder& builder, const Location loc,
const SmallVector<T>& values) {
return CreateConstValue<T>(builder, loc,
{static_cast<int64_t>(values.size())}, values);
}
template <typename T>
Value CreateScalarConstValue(OpBuilder& builder, const Location loc,
const T value) {
return CreateConstValue<T>(builder, loc, {}, {value});
}
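// Extracts the splat value of a constant into `splat_value`. Returns false
// when `value` is not a constant or the constant is not a splat.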
template <typename T,
typename = std::enable_if_t<
(std::is_integral_v<T> || std::is_same_v<T, float>), void>>
bool GetSplatValue(Value value, T& splat_value) {
if constexpr (std::is_integral_v<T>) {
DenseIntElementsAttr value_attr;
if (!matchPattern(value, m_Constant(&value_attr)) ||
!value_attr.isSplat()) {
return false;
}
splat_value = value_attr.getSplatValue<T>();
return true;
}
DenseFPElementsAttr value_attr;
if (!matchPattern(value, m_Constant(&value_attr)) || !value_attr.isSplat()) {
return false;
}
splat_value = value_attr.getSplatValue<T>();
return true;
}
template <typename T>
bool IsSplatValueEqual(Value value, const T x) {
T splat_value;
if (!GetSplatValue(value, splat_value)) return false;
return splat_value == x;
}
template <typename T>
bool AreSplatValuesEqual(Value x, Value y) {
T splat_x, splat_y;
if (!GetSplatValue(x, splat_x) || !GetSplatValue(y, splat_y)) {
return false;
}
return splat_x == splat_y;
}
SmallVector<Value> CloneOpWithReplacedOperands(OpBuilder& builder,
Operation* op,
ArrayRef<Value> new_operands);
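// Casts `op` to T, emitting a debug message tagged with `name` when the cast
// fails. Typical use (sketch):
//
//   auto dot_op = TryCast<mlir::stablehlo::DotGeneralOp>(op, "dot_op");
//   if (failed(dot_op)) return failure();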
template <typename T>
FailureOr<T> TryCast(Operation* op, const StringRef name) {
auto cast_op = dyn_cast_or_null<T>(op);
if (cast_op) {
return cast_op;
} else {
DEBUG_WITH_TYPE("mlir-quant-attrs-and-constraints",
llvm::dbgs() << "Failed to match " << name << " ("
<< T::getOperationName() << ").\n");
return failure();
}
}
FailureOr<int32_t> CastI64ToI32(int64_t value);
FailureOr<SmallVector<int32_t>> CastI64ArrayToI32(
ArrayRef<int64_t> int64_array);
template <typename OpType>
OpType FindOperationOfType(func::FuncOp function) {
for (auto op : function.getBody().getOps<OpType>()) {
return op;
}
return nullptr;
}
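// FindUserOfType / FindOperandOfType return the first user of `op` (resp. the
// defining op of the first operand) that has the requested type, or nullptr
// if there is none. The default template argument matches any op.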
template <typename T = Operation*>
Operation* FindUserOfType(Operation* op) {
for (Operation* user : op->getUsers()) {
if (isa<T>(user)) {
return user;
}
}
return nullptr;
}
template <typename T = Operation*>
Operation* FindOperandOfType(Operation* op) {
for (Value operand_value : op->getOperands()) {
if (isa<T>(operand_value.getDefiningOp())) {
return operand_value.getDefiningOp();
}
}
return nullptr;
}
inline FlatSymbolRefAttr GetFuncAttr(TF::PartitionedCallOp call_op) {
return mlir::dyn_cast<FlatSymbolRefAttr>(call_op.getFAttr());
}
inline FlatSymbolRefAttr GetFuncAttr(TF::XlaCallModuleOp call_op) {
return call_op->getAttrOfType<FlatSymbolRefAttr>(
TF::kStablehloEntryFunctionAttrName);
}
StringRef GetEntryFunctionName(TF::XlaCallModuleOp op);
inline bool HasQuantizableTrait(Operation* op) {
return op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) &&
op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() ==
QuantTraitValues[QuantizationTrait::FullyQuantizable];
}
bool IsHybridQuantizedOp(Operation* op);
absl::StatusOr<bool> IsDotGeneralFullyConnected(
::mlir::stablehlo::DotGeneralOp dot_general_op);
std::optional<int64_t> GetDotGeneralQuantizationDim(
::mlir::stablehlo::DotGeneralOp dot_general_op);
bool ContainsConvOrDot(StringRef str);
}
#endif
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include <cstdint>
#include <optional>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h"
namespace mlir::quant {
using ::mlir::stablehlo::DotGeneralOp;
bool HasStaticShape(Value value) {
auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType());
if (!shaped_type) return false;
return shaped_type.hasStaticShape();
}
bool HasStaticShapeAtDims(Value value, const ArrayRef<int> dims) {
auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType());
if (!shaped_type || !shaped_type.hasRank()) return false;
for (auto dim : dims) {
if (shaped_type.isDynamicDim(dim)) return false;
}
return true;
}
Type CloneTypeWithNewElementType(Type old_type, Type element_type) {
if (!mlir::isa<ShapedType>(old_type)) return {};
return mlir::cast<ShapedType>(old_type).clone(element_type);
}
SmallVector<Value> CloneOpWithReplacedOperands(
OpBuilder& builder, Operation* op, const ArrayRef<Value> new_operands) {
IRMapping mapping;
for (const auto& arg : enumerate(new_operands)) {
mapping.map(op->getOperand(arg.index()), arg.value());
}
return builder.clone(*op, mapping)->getResults();
}
FailureOr<int32_t> CastI64ToI32(const int64_t value) {
if (!llvm::isInt<32>(value)) {
DEBUG_WITH_TYPE(
"mlir-quant-attrs-and-constraints",
llvm::dbgs()
<< "Tried to cast " << value
<< "from int64 to int32, but lies out of range of int32.\n");
return failure();
}
return static_cast<int32_t>(value);
}
FailureOr<SmallVector<int32_t>> CastI64ArrayToI32(
const ArrayRef<int64_t> int64_array) {
SmallVector<int32_t> int32_array{};
int32_array.reserve(int64_array.size());
for (const int64_t i64 : int64_array) {
FailureOr<int32_t> cast_i32 = CastI64ToI32(i64);
if (failed(cast_i32)) return failure();
int32_array.push_back(*cast_i32);
}
return int32_array;
}
StringRef GetEntryFunctionName(TF::XlaCallModuleOp op) {
if (!op->hasAttrOfType<FlatSymbolRefAttr>(
TF::kStablehloEntryFunctionAttrName)) {
return StringRef();
}
return op
->getAttrOfType<FlatSymbolRefAttr>(TF::kStablehloEntryFunctionAttrName)
.getValue();
}
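// A hybrid-quantized op keeps float inputs and results but carries a
// quantized weight operand (operand 1), the pattern produced by weight-only
// quantization.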
bool IsHybridQuantizedOp(Operation* op) {
if ((op->getNumOperands() != 2 && op->getNumOperands() != 3) ||
op->getResultTypes().size() != 1) {
return false;
}
Type lhs_type = op->getOperand(0).getType();
Type rhs_type = op->getOperand(1).getType();
Type result_type = op->getResult(0).getType();
return !IsQuantizedTensorType(lhs_type) && IsQuantizedTensorType(rhs_type) &&
!IsQuantizedTensorType(result_type);
}
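// A dot_general is treated as fully connected when the input is rank 1 or 2,
// the filter is rank 2, each side has exactly one contracting dimension, and
// there are no batching dimensions.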
absl::StatusOr<bool> IsDotGeneralFullyConnected(DotGeneralOp dot_general_op) {
if (dot_general_op == nullptr)
    return absl::InvalidArgumentError(
        "Given dot_general op cannot be null when checking "
        "`IsDotGeneralFullyConnected`.");
const ::mlir::stablehlo::DotDimensionNumbersAttr dot_dimension_numbers =
dot_general_op.getDotDimensionNumbers();
const ArrayRef<int64_t> lhs_contracting_dims =
dot_dimension_numbers.getLhsContractingDimensions();
const ArrayRef<int64_t> rhs_contracting_dims =
dot_dimension_numbers.getRhsContractingDimensions();
const int64_t input_rank =
mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(0).getType())
.getRank();
const int64_t filter_rank =
mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType())
.getRank();
const bool has_proper_rank =
(input_rank == 1 || input_rank == 2) && filter_rank == 2;
const bool has_proper_contracting_dim =
lhs_contracting_dims.size() == 1 && rhs_contracting_dims.size() == 1 &&
lhs_contracting_dims[0] == input_rank - 1;
const bool is_not_batch_op =
dot_dimension_numbers.getLhsBatchingDimensions().empty();
const bool has_proper_quantization_dimension =
absl::c_find(rhs_contracting_dims, filter_rank) ==
rhs_contracting_dims.end();
return has_proper_rank && has_proper_contracting_dim && is_not_batch_op &&
has_proper_quantization_dimension;
}
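// For a fully connected dot_general the per-channel quantization dimension is
// the last dimension of the filter; anything else yields std::nullopt.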
std::optional<int64_t> GetDotGeneralQuantizationDim(
DotGeneralOp dot_general_op) {
if (dot_general_op == nullptr) return std::nullopt;
const int64_t filter_rank =
mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType())
.getRank();
const bool is_per_axis_quantizable =
IsDotGeneralFullyConnected(dot_general_op).value();
if (!is_per_axis_quantizable) return std::nullopt;
return filter_rank - 1;
}
bool ContainsConvOrDot(StringRef str) {
return str.contains("_conv") || str.contains("_dot_general");
}
} | #include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include <cstdint>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant {
namespace {
using ::mlir::stablehlo::AddOp;
using ::mlir::stablehlo::ConstantOp;
using ::mlir::stablehlo::ConvolutionOp;
using ::mlir::stablehlo::DotGeneralOp;
using ::mlir::stablehlo::SubtractOp;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::Optional;
using ::tsl::testing::StatusIs;
using AttrsAndConstraintsTest = ::mlir::quant::QuantizationTestBase;
constexpr absl::string_view kModuleStatic = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleDynamic = R"mlir(
module {
func.func @main(%arg0: tensor<?x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<?x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<?x1024xf32>, tensor<1024x3xf32>) -> tensor<?x3xf32>
return %0 : tensor<?x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleMultipleUses = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%cst = stablehlo.constant dense<1.0> : tensor<1x3xf32>
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
%1 = stablehlo.subtract %cst, %0 : tensor<1x3xf32>
%2 = stablehlo.add %0, %cst : tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleDotWeightOnlyPtq = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleXlaCallModuleNoEntryNoQuantTrait = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_original_entry_function = "composite_fn_1"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
constexpr absl::string_view kModulePartitionedCall = R"mlir(
module {
func.func @main(%arg0: tensor<2x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<2x2xf32>) {
%cst = "tf.Const"() {device = "", value = dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>} : () -> tensor<2x2xf32>
%0 = "tf.PartitionedCall"(%arg0, %cst) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_fn_1} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32> loc(callsite("test@main"("MatMul") at "QuantizationUnit(\12\06MatMul\1a\07main)"))
return %0 : tensor<2x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} {
%0 = "tf.MatMul"(%arg0, %arg1) {attr_map = "0:transpose_a,1:transpose_b", device = "", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
return %0 : tensor<2x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleHybridQuantized = R"mlir(
module {
func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03:0>> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<1x3xf32>) {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03:0>>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(AttrsAndConstraintsTest, HasStaticShapeSucceedsWithStaticShapes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Value dot_general_result =
FindOperationOfType<DotGeneralOp>(main_fn)->getResult(0);
EXPECT_TRUE(HasStaticShape(dot_general_result));
EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {0}));
EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {1}));
}
TEST_F(AttrsAndConstraintsTest, HasStaticShapeFailsWithDynamicShapes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDynamic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Value dot_general_result =
FindOperationOfType<DotGeneralOp>(main_fn)->getResult(0);
EXPECT_FALSE(HasStaticShape(dot_general_result));
EXPECT_FALSE(HasStaticShapeAtDims(dot_general_result, {0}));
EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {1}));
}
TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsTrueForMatchingRank) {
constexpr absl::string_view kConstantOpWithRankFour =
R"mlir(%0 = stablehlo.constant dense<0> : tensor<1x1x1x1xi8>)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kConstantOpWithRankFour);
ASSERT_TRUE(module_op);
ASSERT_FALSE(module_op->getBodyRegion().empty());
ASSERT_FALSE(module_op->getBodyRegion().front().empty());
auto constant_op = dyn_cast_or_null<mlir::stablehlo::ConstantOp>(
module_op->getBodyRegion().front().front());
ASSERT_THAT(constant_op, NotNull());
EXPECT_TRUE(HasRankOf(constant_op, 4));
}
TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsFalseForNonMatchingRank) {
constexpr absl::string_view kConstantOpWithRankFour =
R"mlir(%0 = stablehlo.constant dense<0> : tensor<1x1x1x1xi8>)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kConstantOpWithRankFour);
ASSERT_TRUE(module_op);
ASSERT_FALSE(module_op->getBodyRegion().empty());
ASSERT_FALSE(module_op->getBodyRegion().front().empty());
auto constant_op = dyn_cast_or_null<mlir::stablehlo::ConstantOp>(
module_op->getBodyRegion().front().front());
ASSERT_THAT(constant_op, NotNull());
EXPECT_FALSE(HasRankOf(constant_op, 3));
}
TEST_F(AttrsAndConstraintsTest,
HasRankOfReturnsTrueForMatchingRankWithUnknownDimensions) {
constexpr absl::string_view kArgumentWithUnknownDims = R"mlir(
func.func @unknown_dims_arg(%arg: tensor<?x?xi8>) -> tensor<?x?xi8> {
return %arg : tensor<?x?xi8>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kArgumentWithUnknownDims);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("unknown_dims_arg");
ASSERT_THAT(func_op, NotNull());
ASSERT_THAT(func_op.getNumArguments(), Eq(1));
EXPECT_TRUE(HasRankOf(func_op.getArgument(0), 2));
}
TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsFalseForUnknownRank) {
constexpr absl::string_view kArgumentWithUnknownRank = R"mlir(
func.func @unknown_rank_arg(%arg: tensor<*xi8>) -> tensor<*xi8> {
return %arg : tensor<*xi8>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kArgumentWithUnknownRank);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("unknown_rank_arg");
ASSERT_THAT(func_op, NotNull());
ASSERT_THAT(func_op.getNumArguments(), Eq(1));
EXPECT_FALSE(HasRankOf(func_op.getArgument(0), 1));
}
TEST_F(AttrsAndConstraintsTest, TryCastSucceeds) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn);
ASSERT_THAT(dot_general_op, NotNull());
EXPECT_TRUE(succeeded(
TryCast<DotGeneralOp>(dot_general_op, "dot_general_op")));
}
TEST_F(AttrsAndConstraintsTest, TryCastFailsOnWrongType) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn);
ASSERT_THAT(dot_general_op, NotNull());
EXPECT_TRUE(
failed(TryCast<AddOp>(dot_general_op, "dot_general_op")));
}
TEST_F(AttrsAndConstraintsTest, TryCastFailsOnNullPtr) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto op_nullptr =
FindOperationOfType<DotGeneralOp>(main_fn)->getNextNode()->getNextNode();
EXPECT_THAT(op_nullptr, IsNull());
EXPECT_TRUE(failed(TryCast<DotGeneralOp>(op_nullptr, "op_nullptr")));
EXPECT_TRUE(failed(TryCast<DotGeneralOp>(nullptr, "nullptr")));
}
TEST_F(AttrsAndConstraintsTest, I64ValueInI32RangeAreCastedCorrectly) {
EXPECT_TRUE(succeeded(CastI64ToI32(llvm::minIntN(32))));
EXPECT_TRUE(succeeded(CastI64ToI32(llvm::maxIntN(32))));
}
TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ValueOutOfI32Range) {
EXPECT_TRUE(failed(CastI64ToI32(llvm::minIntN(32) - 10)));
EXPECT_TRUE(failed(CastI64ToI32(llvm::maxIntN(32) + 10)));
}
TEST_F(AttrsAndConstraintsTest, I64ArrayInI32RangeAreCastedCorrectly) {
const SmallVector<int64_t> array_i64 = {llvm::minIntN(32), -2, -1, 0, 1, 2,
llvm::maxIntN(32)};
FailureOr<SmallVector<int32_t>> array_i32 = CastI64ArrayToI32(array_i64);
EXPECT_TRUE(succeeded(array_i32));
EXPECT_THAT(
*array_i32,
ElementsAreArray({static_cast<int32_t>(llvm::minIntN(32)), -2, -1, 0, 1,
2, static_cast<int32_t>(llvm::maxIntN(32))}));
}
TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ArrayUnderI32Range) {
const int64_t under_min_i32 = -2147483658;
ArrayRef<int64_t> array_i64{under_min_i32};
EXPECT_EQ(under_min_i32, llvm::minIntN(32) - 10);
EXPECT_TRUE(failed(CastI64ArrayToI32(array_i64)));
}
TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ArrayAboveI32Range) {
const int64_t below_max_i32 = 2147483657;
ArrayRef<int64_t> array_i64{below_max_i32};
EXPECT_EQ(below_max_i32, llvm::maxIntN(32) + 10);
EXPECT_TRUE(failed(CastI64ArrayToI32(array_i64)));
}
TEST_F(AttrsAndConstraintsTest, FindUserOfDifferentTypes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleMultipleUses);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn);
ASSERT_THAT(dot_general_op, NotNull());
EXPECT_THAT(FindUserOfType<AddOp>(dot_general_op), NotNull());
EXPECT_THAT(FindUserOfType<SubtractOp>(dot_general_op), NotNull());
EXPECT_THAT(FindUserOfType<>(dot_general_op), NotNull());
EXPECT_THAT(FindUserOfType<ConvolutionOp>(dot_general_op), IsNull());
}
TEST_F(AttrsAndConstraintsTest, FindOperandOfDifferentTypes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleMultipleUses);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto subtract_op = FindOperationOfType<SubtractOp>(main_fn);
ASSERT_THAT(subtract_op, NotNull());
EXPECT_THAT(FindOperandOfType<DotGeneralOp>(subtract_op), NotNull());
EXPECT_THAT(FindOperandOfType<ConstantOp>(subtract_op), NotNull());
EXPECT_THAT(FindOperandOfType<>(subtract_op), NotNull());
EXPECT_THAT(FindOperandOfType<AddOp>(subtract_op), IsNull());
}
TEST_F(AttrsAndConstraintsTest, XlaCallModuleOpGetFuncAttr) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
FlatSymbolRefAttr xla_call_op_attr = GetFuncAttr(xla_call_module_op);
EXPECT_EQ(xla_call_op_attr.getValue(), "composite_fn_1");
}
TEST_F(AttrsAndConstraintsTest, PartitionedCallGetFuncAttr) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModulePartitionedCall);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto partitioned_call_op =
FindOperationOfType<TF::PartitionedCallOp>(main_fn);
ASSERT_THAT(partitioned_call_op, NotNull());
FlatSymbolRefAttr partitioned_call_op_attr = GetFuncAttr(partitioned_call_op);
EXPECT_EQ(partitioned_call_op_attr.getValue(), "composite_fn_1");
}
TEST_F(AttrsAndConstraintsTest, GetEntryFunctionNameCorrectly) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_EQ(GetEntryFunctionName(xla_call_module_op),
StringRef("composite_fn_1"));
}
TEST_F(AttrsAndConstraintsTest, GetEntryFunctionNameWhenNotSet) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_THAT(GetEntryFunctionName(xla_call_module_op), IsEmpty());
}
TEST_F(AttrsAndConstraintsTest, HasQuantizableTraitTrue) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_TRUE(HasQuantizableTrait(xla_call_module_op));
}
TEST_F(AttrsAndConstraintsTest, HasQuantizableTraitFalse) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_FALSE(HasQuantizableTrait(xla_call_module_op));
}
TEST_F(AttrsAndConstraintsTest, IsHybridQuantizedOpTrue) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleHybridQuantized);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* dot_general = FindOperationOfType<DotGeneralOp>(main_fn);
EXPECT_TRUE(IsHybridQuantizedOp(dot_general));
}
TEST_F(AttrsAndConstraintsTest, IsHybridQuantizedOpFalse) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* call_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
EXPECT_FALSE(IsHybridQuantizedOp(call_op));
}
constexpr absl::string_view kModuleDotGeneralFullyConnected = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleDotGeneralBatchMatmul = R"mlir(
module {
func.func @main(%arg0: tensor<2x2x2xf32>, %arg1: tensor<2x2x2xf32>) -> tensor<2x2x2xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1,
batching_dims = [0] x [0],
contracting_dims = [2] x [1],
precision = [DEFAULT, DEFAULT]
: (tensor<2x2x2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32>
return %0 : tensor<2x2x2xf32>
}
}
)mlir";
TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsError) {
  // The matcher must be applied to the result; constructing it on its own
  // verifies nothing.
  DotGeneralOp dot_general_op = nullptr;
  EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op),
              StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsTrue) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralFullyConnected);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op), true);
}
TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsFalse) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralBatchMatmul);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op), false);
}
TEST_F(AttrsAndConstraintsTest, DotGeneralFullyConnectedReturnsQuantDim) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralFullyConnected);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(GetDotGeneralQuantizationDim(dot_general_op), Optional(1));
}
TEST_F(AttrsAndConstraintsTest, DotGeneralBatchMatmulReturnsNullQuantDim) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralBatchMatmul);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(GetDotGeneralQuantizationDim(dot_general_op), Eq(std::nullopt));
}
TEST_F(AttrsAndConstraintsTest, ContainsConvOrDotTrue) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotWeightOnlyPtq);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
const StringRef function_name = GetEntryFunctionName(call_op);
EXPECT_TRUE(ContainsConvOrDot(function_name));
}
TEST_F(AttrsAndConstraintsTest, ContainsConvOrDotFalse) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
const StringRef function_name = GetEntryFunctionName(call_op);
EXPECT_FALSE(ContainsConvOrDot(function_name));
}
}
} |
1,199 | cpp | tensorflow/tensorflow | uniform_quantized_types | tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc | tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_UNIFORM_QUANTIZED_TYPES_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_UNIFORM_QUANTIZED_TYPES_H_
#include <cstdint>
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace quant {
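// Factory helpers for the uniform quantized types used by the quantization
// passes: i8 or i32 storage, f32 expressed type, per-tensor and per-axis
// variants. Example (sketch):
//
//   UniformQuantizedType qtype = CreateI8F32UniformQuantizedType(
//       loc, ctx, /*scale=*/0.02, /*zero_point=*/-5);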
UniformQuantizedType CreateI8F32UniformQuantizedType(Location loc,
MLIRContext& context,
double scale,
int64_t zero_point,
bool narrow_range = false);
UniformQuantizedType CreateI32F32UniformQuantizedType(Location loc,
MLIRContext& context,
double scale,
int64_t zero_point);
UniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType(
Location loc, MLIRContext& context, ArrayRef<double> scales,
ArrayRef<int64_t> zero_points, int quantization_dimension,
bool narrow_range = false);
UniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType(
Location loc, MLIRContext& context, ArrayRef<double> scales,
ArrayRef<int64_t> zero_points, int quantization_dimension);
bool IsStorageTypeI8(QuantizedType quantized_type);
bool IsStorageTypeI32(QuantizedType quantized_type);
bool IsExpressedTypeF32(QuantizedType quantized_type);
inline Type GetElementType(const Value value) {
return mlir::cast<TensorType>(value.getType()).getElementType();
}
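// Predicates over quantized types. The IsI*F32* checks require both the
// storage width and the f32 expressed type to match; the per-axis variants
// additionally require a UniformQuantizedPerAxisType.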
bool IsI8F32UniformQuantizedType(Type type);
bool IsI8F32UniformQuantizedPerAxisType(Type type);
bool IsI32F32UniformQuantizedType(Type type);
bool IsI32F32UniformQuantizedPerAxisType(Type type);
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type);
bool IsQuantizedTensorType(Type type);
bool IsOpFullyQuantized(Operation* op);
bool IsOpNotQuantized(Operation* op);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#define DEBUG_TYPE "uniform-quantized-types"
namespace mlir {
namespace quant {
UniformQuantizedType CreateI8F32UniformQuantizedType(const Location loc,
MLIRContext& context,
const double scale,
const int64_t zero_point,
const bool narrow_range) {
return UniformQuantizedType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 8),
FloatType::getF32(&context), scale, zero_point,
llvm::minIntN(8) + (narrow_range ? 1 : 0),
llvm::maxIntN(8));
}
UniformQuantizedType CreateI32F32UniformQuantizedType(
const Location loc, MLIRContext& context, const double scale,
const int64_t zero_point) {
return UniformQuantizedType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 32),
FloatType::getF32(&context), scale, zero_point,
llvm::minIntN(32),
llvm::maxIntN(32));
}
UniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType(
const Location loc, MLIRContext& context, const ArrayRef<double> scales,
const ArrayRef<int64_t> zero_points, const int quantization_dimension,
const bool narrow_range) {
return UniformQuantizedPerAxisType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 8),
FloatType::getF32(&context),
SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
quantization_dimension,
llvm::minIntN(8) + (narrow_range ? 1 : 0),
llvm::maxIntN(8));
}
UniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType(
const Location loc, MLIRContext& context, const ArrayRef<double> scales,
const ArrayRef<int64_t> zero_points, const int quantization_dimension) {
return UniformQuantizedPerAxisType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 32),
FloatType::getF32(&context),
SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
quantization_dimension, llvm::minIntN(32),
llvm::maxIntN(32));
}
bool IsStorageTypeI8(const QuantizedType quantized_type) {
const Type storage_type = quantized_type.getStorageType();
return storage_type.isInteger(8);
}
bool IsStorageTypeI32(const QuantizedType quantized_type) {
const Type storage_type = quantized_type.getStorageType();
return storage_type.isInteger(32);
}
bool IsExpressedTypeF32(const QuantizedType quantized_type) {
const Type expressed_type = quantized_type.getExpressedType();
return mlir::isa<Float32Type>(expressed_type);
}
bool IsI8F32UniformQuantizedType(const Type type) {
const UniformQuantizedType quantized_type =
mlir::dyn_cast_or_null<UniformQuantizedType>(type);
if (!quantized_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI8(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: "
<< quantized_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_type << ".\n");
return false;
}
return true;
}
bool IsI8F32UniformQuantizedPerAxisType(const Type type) {
const UniformQuantizedPerAxisType quantized_per_axis_type =
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);
if (!quantized_per_axis_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI8(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
return true;
}
bool IsI32F32UniformQuantizedType(const Type type) {
const UniformQuantizedType quantized_type =
mlir::dyn_cast_or_null<UniformQuantizedType>(type);
if (!quantized_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: "
<< quantized_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_type << ".\n");
return false;
}
return true;
}
bool IsI32F32UniformQuantizedPerAxisType(const Type type) {
const UniformQuantizedPerAxisType quantized_per_axis_type =
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);
if (!quantized_per_axis_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
return true;
}
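// The TFLite quantize / dequantize kernels only handle 8-bit storage (signed
// or unsigned) and signed 16-bit storage, which is what this check encodes.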
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
if (storage_type.getWidth() == 8 ||
(storage_type.isSigned() && storage_type.getWidth() == 16)) {
return true;
}
LLVM_DEBUG(llvm::dbgs()
<< "Uniform quantize / dequantize op only supports ui8, i8 or "
"i16 for the storage type of uniform quantized type. Got: "
<< storage_type << ".\n");
return false;
}
bool IsQuantizedTensorType(Type type) {
if (!mlir::isa<TensorType>(type)) {
return false;
}
Type element_type = mlir::cast<TensorType>(type).getElementType();
return mlir::isa<QuantizedType>(element_type);
}
bool IsOpFullyQuantized(Operation* op) {
return llvm::all_of(op->getOperandTypes(), IsQuantizedTensorType) &&
llvm::all_of(op->getResultTypes(), IsQuantizedTensorType);
}
bool IsOpNotQuantized(Operation* op) {
return !llvm::any_of(op->getOperandTypes(), IsQuantizedTensorType) &&
!llvm::any_of(op->getResultTypes(), IsQuantizedTensorType);
}
}
} | #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsNull;
using ::testing::Ne;
using ::testing::NotNull;
using ::testing::Test;
class CreateI8F32UniformQuantizedTypeTest : public Test {
protected:
CreateI8F32UniformQuantizedTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI8F32UniformQuantizedTypeTest, I8StorageTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxEqualToI8MinMax) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxNarrowRange) {
const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(
UnknownLoc::get(&ctx_), ctx_,
1.0, 0, true);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
8.0, 99);
EXPECT_EQ(quantized_type.getScale(), 8.0);
EXPECT_EQ(quantized_type.getZeroPoint(), 99);
}
class CreateI32F32UniformQuantizedTypeTest : public Test {
protected:
CreateI32F32UniformQuantizedTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI32F32UniformQuantizedTypeTest, I32StorageTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest,
StorageTypeMinMaxEqualToI32MinMax) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_EQ(quantized_type.getStorageTypeMin(),
std::numeric_limits<int32_t>::min());
EXPECT_EQ(quantized_type.getStorageTypeMax(),
std::numeric_limits<int32_t>::max());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
8.0, 1111);
EXPECT_EQ(quantized_type.getScale(), 8.0);
EXPECT_EQ(quantized_type.getZeroPoint(), 1111);
}
class CreateI8F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
CreateI8F32UniformQuantizedPerAxisTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, I8StorageTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
SignedQuantizedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxEqualToI8MinMax) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxNarrowRange) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
          /*quantization_dimension=*/0, /*narrow_range=*/true);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
HasQuantizationDimensionProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
3);
EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
HasScaleAndZeroPointProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{8.0, 9.0},
SmallVector<int64_t, 2>{98, 99},
0);
EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));
EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));
}
class CreateI32F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
CreateI32F32UniformQuantizedPerAxisTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, I32StorageTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxEqualToI32MinMax) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_EQ(quantized_type.getStorageTypeMin(),
std::numeric_limits<int32_t>::min());
EXPECT_EQ(quantized_type.getStorageTypeMax(),
std::numeric_limits<int32_t>::max());
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
HasQuantizationDimensionProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
3);
EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
HasScaleAndZeroPointProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{8.0, 9.0},
SmallVector<int64_t, 2>{98, 99},
0);
EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));
EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));
}
class IsI8F32UniformQuantizedTypeTest : public Test {
protected:
IsI8F32UniformQuantizedTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI8F32UniformQuantizedTypeTest, I8F32UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsI8F32UniformQuantizedType(qi8_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi8_type),
NotNull());
}
TEST_F(IsI8F32UniformQuantizedTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsStorageTypeI8(qi8_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsExpressedTypeF32(qi8_type));
}
class IsI8F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
IsI8F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest,
I8F32UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsI8F32UniformQuantizedPerAxisType(qi8_per_axis_type));
EXPECT_FALSE(IsI8F32UniformQuantizedType(qi8_per_axis_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_THAT(
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_per_axis_type),
NotNull());
}
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsStorageTypeI8(qi8_per_axis_type));
}
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsExpressedTypeF32(qi8_per_axis_type));
}
class IsI32F32UniformQuantizedTypeTest : public Test {
protected:
IsI32F32UniformQuantizedTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI32F32UniformQuantizedTypeTest, I32F32UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
}
TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi32_type),
NotNull());
}
TEST_F(IsI32F32UniformQuantizedTypeTest, StorageTypeI32Succeeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
EXPECT_TRUE(IsStorageTypeI32(qi32_type));
}
TEST_F(IsI32F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {
  const UniformQuantizedType qi32_type =
quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
  EXPECT_TRUE(IsExpressedTypeF32(qi32_type));
}
class IsI32F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
IsI32F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,
I32F32UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedPerAxisType(qi32_per_axis_type));
EXPECT_FALSE(IsI32F32UniformQuantizedType(qi32_per_axis_type));
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,
I8F32UniformQuantizedTypeFails) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0, 0, -128,
127);
EXPECT_FALSE(IsI32F32UniformQuantizedPerAxisType(qi8_type));
EXPECT_FALSE(IsStorageTypeI32(qi8_type));
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_type),
IsNull());
}
TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_THAT(
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi32_per_axis_type),
NotNull());
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, StorageTypeI32Succeeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsStorageTypeI32(qi32_per_axis_type));
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));
}
class IsSupportedByTfliteQuantizeOrDequantizeOpsTest : public Test {
protected:
IsSupportedByTfliteQuantizeOrDequantizeOpsTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantizationDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI8Succeeds) {
auto qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0,
0, -128, 127);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi8_type.getStorageType())));
}
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI16Succeeds) {
  // i16 storage type, as indicated by the test name.
  auto qi16_type = quant::UniformQuantizedType::get(
      /*flags=*/QuantizationFlags::Signed,
      /*storageType=*/builder_.getI16Type(),
      /*expressedType=*/builder_.getF32Type(),
      /*scale=*/1.0,
      /*zeroPoint=*/0, /*storageTypeMin=*/-32768, /*storageTypeMax=*/32767);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi16_type.getStorageType())));
}
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeUI8Succeeds) {
  // Unsigned 8-bit storage (flags = 0), as indicated by the test name.
  auto qu8_type = quant::UniformQuantizedType::get(
      /*flags=*/0, /*storageType=*/builder_.getI8Type(),
      /*expressedType=*/builder_.getF32Type(),
      /*scale=*/1.0,
      /*zeroPoint=*/0, /*storageTypeMin=*/0, /*storageTypeMax=*/255);
  EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
      dyn_cast_or_null<IntegerType>(qu8_type.getStorageType())));
}
using IsOpFullyQuantizedTest = QuantizationTestBase;
TEST_F(IsOpFullyQuantizedTest, TrueIfOpFullyQuantized) {
constexpr absl::string_view kFullyQuantizedAdd = R"mlir(
func.func @fully_quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kFullyQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("fully_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_TRUE(IsOpFullyQuantized(*add_op_itr));
}
TEST_F(IsOpFullyQuantizedTest, FalseIfOpNotQuantized) {
constexpr absl::string_view kNotQuantizedAdd = R"mlir(
func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
return %0 : tensor<2xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
}
TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op_itr =
func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
ASSERT_THAT(
uniform_quantize_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
EXPECT_FALSE(IsOpFullyQuantized(*uniform_quantize_op_itr));
}
using IsOpNotQuantizedTest = QuantizationTestBase;
TEST_F(IsOpNotQuantizedTest, TrueIfOpNotQuantized) {
constexpr absl::string_view kNotQuantizedAdd = R"mlir(
func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
return %0 : tensor<2xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_TRUE(IsOpNotQuantized(*add_op_itr));
}
TEST_F(IsOpNotQuantizedTest, FalseIfOpQuantized) {
constexpr absl::string_view kQuantizedAdd = R"mlir(
func.func @quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_FALSE(IsOpNotQuantized(*add_op_itr));
}
TEST_F(IsOpNotQuantizedTest, FalseIfOpPartiallyQuantized) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op_itr =
func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
ASSERT_THAT(
uniform_quantize_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
EXPECT_FALSE(IsOpNotQuantized(*uniform_quantize_op_itr));
}
using UniformQuantizedTypeTest = QuantizationTestBase;
TEST_F(UniformQuantizedTypeTest, GetElementTypeSucceeds) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op =
*func_op.getOps<::mlir::stablehlo::UniformQuantizeOp>().begin();
Value result = uniform_quantize_op.getResult();
EXPECT_THAT(GetElementType(result), NotNull());
}
}
}
} |
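As a rough, self-contained usage sketch of the helpers exercised by the tests above (header paths, dialect name, and signatures follow the includes and call sites in the test file; treat it as an illustration rather than part of the original sources):

#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"

int main() {
  mlir::MLIRContext ctx;
  ctx.loadDialect<mlir::quant::QuantizationDialect>();
  // Build a per-tensor i8 quantized type with scale 0.5 and zero point 10.
  const mlir::quant::UniformQuantizedType qtype =
      mlir::quant::CreateI8F32UniformQuantizedType(
          mlir::UnknownLoc::get(&ctx), ctx, /*scale=*/0.5, /*zero_point=*/10);
  // The same predicates checked by the tests hold for this type: i8 storage,
  // f32 expressed type, and the combined i8/f32 check.
  const bool ok = mlir::quant::IsStorageTypeI8(qtype) &&
                  mlir::quant::IsExpressedTypeF32(qtype) &&
                  mlir::quant::IsI8F32UniformQuantizedType(qtype);
  return ok ? 0 : 1;
}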