| ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 36-36 | stringclasses 1 value | stringclasses 13 values | stringlengths 2-48 | stringlengths 11-111 | stringlengths 13-116 | stringlengths 0-278k | stringlengths 78-663k | stringlengths 91-198 | stringlengths 93-203 | stringclasses 13 values |
ee620caa-1725-45c5-9079-95411bac2806 | cpp | tensorflow/tensorflow | debug_io_utils | tensorflow/core/debug/debug_io_utils.cc | tensorflow/core/debug/debug_io_utils_test.cc | #include "tensorflow/core/debug/debug_io_utils.h"
#include <stddef.h>
#include <string.h>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <utility>
#include <vector>
#ifndef PLATFORM_WINDOWS
#include "grpcpp/create_channel.h"
#else
#endif
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/debug/debug_callback_registry.h"
#include "tensorflow/core/debug/debugger_event_metadata.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/util/event.pb.h"
#define GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR \
return errors::Unimplemented( \
kGrpcURLScheme, " debug URL scheme is not implemented on Windows yet.")
namespace tensorflow {
namespace {
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
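// Builds the Event proto for a single chunk of a debug tensor. The chunk
// metadata (device, output slot, num_chunks, chunk_index) is serialized to
// JSON and attached as plugin data under the debugger plugin name.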
Event PrepareChunkEventProto(const DebugNodeKey& debug_node_key,
const uint64 wall_time_us, const size_t num_chunks,
const size_t chunk_index,
const DataType& tensor_dtype,
const TensorShapeProto& tensor_shape) {
Event event;
event.set_wall_time(static_cast<double>(wall_time_us));
Summary::Value* value = event.mutable_summary()->add_value();
value->set_node_name(debug_node_key.debug_node_name);
value->set_tag(debug_node_key.node_name);
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
metadata.set_device(debug_node_key.device_name);
metadata.set_output_slot(debug_node_key.output_slot);
metadata.set_num_chunks(num_chunks);
metadata.set_chunk_index(chunk_index);
string json_output;
tensorflow::protobuf::util::JsonPrintOptions json_options;
json_options.always_print_primitive_fields = true;
auto status = tensorflow::protobuf::util::MessageToJsonString(
metadata, &json_output, json_options);
if (status.ok()) {
SummaryMetadata::PluginData* plugin_data =
value->mutable_metadata()->mutable_plugin_data();
plugin_data->set_plugin_name(DebugIO::kDebuggerPluginName);
plugin_data->set_content(json_output);
} else {
LOG(WARNING) << "Failed to convert DebuggerEventMetadata proto to JSON. "
<< "The debug_node_name is " << debug_node_key.debug_node_name
<< ".";
}
value->mutable_tensor()->set_dtype(tensor_dtype);
*value->mutable_tensor()->mutable_tensor_shape() = tensor_shape;
return event;
}
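// Upper bound on the wire size of one string_val entry; on Google-internal
// platforms this adds headroom for the proto varint length prefix.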
const size_t StringValMaxBytesInProto(const string& str) {
#if defined(PLATFORM_GOOGLE)
return str.size() + DebugGrpcIO::kGrpcMaxVarintLengthSize;
#else
return str.size();
#endif
}
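// Splits the string_val entries of a DT_STRING tensor proto into chunks that
// each fit within chunk_size_limit, wrapping every chunk in its own Event.
// A single string larger than the limit is a FailedPrecondition error.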
Status WrapStringTensorAsEvents(const DebugNodeKey& debug_node_key,
const uint64 wall_time_us,
const size_t chunk_size_limit,
TensorProto* tensor_proto,
std::vector<Event>* events) {
const protobuf::RepeatedPtrField<string>& strs = tensor_proto->string_val();
const size_t num_strs = strs.size();
const size_t chunk_size_ub = chunk_size_limit > 0
? chunk_size_limit
: std::numeric_limits<size_t>::max();
std::vector<size_t> cutoffs;
size_t chunk_size = 0;
for (size_t i = 0; i < num_strs; ++i) {
if (StringValMaxBytesInProto(strs[i]) > chunk_size_ub) {
return errors::FailedPrecondition(
"string value at index ", i, " from debug node ",
debug_node_key.debug_node_name,
" does not fit gRPC message size limit (", chunk_size_ub, ")");
}
if (chunk_size + StringValMaxBytesInProto(strs[i]) > chunk_size_ub) {
cutoffs.push_back(i);
chunk_size = 0;
}
chunk_size += StringValMaxBytesInProto(strs[i]);
}
cutoffs.push_back(num_strs);
const size_t num_chunks = cutoffs.size();
for (size_t i = 0; i < num_chunks; ++i) {
Event event = PrepareChunkEventProto(debug_node_key, wall_time_us,
num_chunks, i, tensor_proto->dtype(),
tensor_proto->tensor_shape());
Summary::Value* value = event.mutable_summary()->mutable_value(0);
if (cutoffs.size() == 1) {
value->mutable_tensor()->mutable_string_val()->Swap(
tensor_proto->mutable_string_val());
} else {
const size_t begin = (i == 0) ? 0 : cutoffs[i - 1];
const size_t end = cutoffs[i];
for (size_t j = begin; j < end; ++j) {
value->mutable_tensor()->add_string_val(strs[j]);
}
}
events->push_back(std::move(event));
}
return absl::OkStatus();
}
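// Wraps a tensor in one or more Event protos. Non-string tensors are chunked
// by slicing the serialized tensor_content into pieces of at most
// chunk_size_limit bytes (0 means no limit); string tensors are chunked per
// element by WrapStringTensorAsEvents.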
Status WrapTensorAsEvents(const DebugNodeKey& debug_node_key,
const Tensor& tensor, const uint64 wall_time_us,
const size_t chunk_size_limit,
std::vector<Event>* events) {
TensorProto tensor_proto;
if (tensor.dtype() == DT_STRING) {
tensor.AsProtoField(&tensor_proto);
TF_RETURN_IF_ERROR(WrapStringTensorAsEvents(
debug_node_key, wall_time_us, chunk_size_limit, &tensor_proto, events));
} else {
tensor.AsProtoTensorContent(&tensor_proto);
const size_t total_length = tensor_proto.tensor_content().size();
const size_t chunk_size_ub =
chunk_size_limit > 0 ? chunk_size_limit : total_length;
const size_t num_chunks =
(total_length == 0)
? 1
: (total_length + chunk_size_ub - 1) / chunk_size_ub;
for (size_t i = 0; i < num_chunks; ++i) {
const size_t pos = i * chunk_size_ub;
const size_t len =
(i == num_chunks - 1) ? (total_length - pos) : chunk_size_ub;
Event event = PrepareChunkEventProto(debug_node_key, wall_time_us,
num_chunks, i, tensor_proto.dtype(),
tensor_proto.tensor_shape());
event.mutable_summary()
->mutable_value(0)
->mutable_tensor()
->set_tensor_content(tensor_proto.tensor_content().substr(pos, len));
events->push_back(std::move(event));
}
}
return absl::OkStatus();
}
string AppendTimestampToFilePath(const string& in, const uint64 timestamp) {
string out = strings::StrCat(in, "_", timestamp);
uint64 i = 1;
while (Env::Default()->FileExists(out).ok()) {
out = strings::StrCat(in, "_", timestamp, "-", i);
++i;
}
return out;
}
#ifndef PLATFORM_WINDOWS
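// Streams a serialized GraphDef to a gRPC debug URL in chunks bounded by the
// gRPC message size limit. Each chunk's graph_def field is prefixed with
// "hash,device_name,wall_time|chunk_index|num_chunks|" so the receiver can
// reassemble the graph.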
Status PublishEncodedGraphDefInChunks(const string& encoded_graph_def,
const string& device_name,
const int64_t wall_time,
const string& debug_url) {
const uint64 hash = ::tensorflow::Hash64(encoded_graph_def);
const size_t total_length = encoded_graph_def.size();
const size_t num_chunks =
static_cast<size_t>(std::ceil(static_cast<float>(total_length) /
DebugGrpcIO::kGrpcMessageSizeLimitBytes));
for (size_t i = 0; i < num_chunks; ++i) {
const size_t pos = i * DebugGrpcIO::kGrpcMessageSizeLimitBytes;
const size_t len = (i == num_chunks - 1)
? (total_length - pos)
: DebugGrpcIO::kGrpcMessageSizeLimitBytes;
Event event;
event.set_wall_time(static_cast<double>(wall_time));
event.set_graph_def(strings::StrCat(hash, ",", device_name, ",", wall_time,
"|", i, "|", num_chunks, "|",
encoded_graph_def.substr(pos, len)));
const Status s = DebugGrpcIO::SendEventProtoThroughGrpcStream(
event, debug_url, num_chunks - 1 == i);
if (!s.ok()) {
return errors::FailedPrecondition(
"Failed to send chunk ", i, " of ", num_chunks,
" of encoded GraphDef of size ", encoded_graph_def.size(), " bytes, ",
"due to: ", s.message());
}
}
return absl::OkStatus();
}
#endif
}
const char* const DebugIO::kDebuggerPluginName = "debugger";
const char* const DebugIO::kCoreMetadataTag = "core_metadata_";
const char* const DebugIO::kGraphTag = "graph_";
const char* const DebugIO::kHashTag = "hash";
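// Reads a dump file written by DebugFileIO and parses it as an Event proto.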
Status ReadEventFromFile(const string& dump_file_path, Event* event) {
Env* env(Env::Default());
string content;
uint64 file_size = 0;
Status s = env->GetFileSize(dump_file_path, &file_size);
if (!s.ok()) {
return s;
}
content.resize(file_size);
std::unique_ptr<RandomAccessFile> file;
s = env->NewRandomAccessFile(dump_file_path, &file);
if (!s.ok()) {
return s;
}
StringPiece result;
s = file->Read(0, file_size, &result, &(content)[0]);
if (!s.ok()) {
return s;
}
event->ParseFromString(content);
return absl::OkStatus();
}
const char* const DebugIO::kFileURLScheme = "file://";
const char* const DebugIO::kGrpcURLScheme = "grpc://";
const char* const DebugIO::kMemoryURLScheme = "memcbk://";
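// Serializes the session-run metadata to JSON and publishes it as a
// LogMessage Event to each debug URL: through a gRPC stream for grpc:// URLs
// (with the stream path spliced into the JSON) or as a timestamped
// core-metadata dump file for file:// URLs.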
Status DebugIO::PublishDebugMetadata(
const int64_t global_step, const int64_t session_run_index,
const int64_t executor_step_index, const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
const std::unordered_set<string>& debug_urls) {
std::ostringstream oss;
oss << "{";
oss << "\"global_step\":" << global_step << ",";
oss << "\"session_run_index\":" << session_run_index << ",";
oss << "\"executor_step_index\":" << executor_step_index << ",";
oss << "\"input_names\":[";
for (size_t i = 0; i < input_names.size(); ++i) {
oss << "\"" << input_names[i] << "\"";
if (i < input_names.size() - 1) {
oss << ",";
}
}
oss << "],";
oss << "\"output_names\":[";
for (size_t i = 0; i < output_names.size(); ++i) {
oss << "\"" << output_names[i] << "\"";
if (i < output_names.size() - 1) {
oss << ",";
}
}
oss << "],";
oss << "\"target_nodes\":[";
for (size_t i = 0; i < target_nodes.size(); ++i) {
oss << "\"" << target_nodes[i] << "\"";
if (i < target_nodes.size() - 1) {
oss << ",";
}
}
oss << "]";
oss << "}";
const string json_metadata = oss.str();
Event event;
event.set_wall_time(static_cast<double>(Env::Default()->NowMicros()));
LogMessage* log_message = event.mutable_log_message();
log_message->set_message(json_metadata);
Status status;
for (const string& url : debug_urls) {
if (absl::StartsWith(absl::AsciiStrToLower(url), kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
Event grpc_event;
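// Note: kFileURLScheme appears to be used here only for its length, which
// happens to equal that of kGrpcURLScheme ("file://" vs. "grpc://").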
const string address = url.substr(strlen(DebugIO::kFileURLScheme));
const string path = address.find('/') == string::npos
? ""
: address.substr(address.find('/'));
grpc_event.set_wall_time(event.wall_time());
LogMessage* log_message_grpc = grpc_event.mutable_log_message();
log_message_grpc->set_message(
strings::StrCat(json_metadata.substr(0, json_metadata.size() - 1),
",\"grpc_path\":\"", path, "\"}"));
status.Update(
DebugGrpcIO::SendEventProtoThroughGrpcStream(grpc_event, url, true));
#else
GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
} else if (absl::StartsWith(absl::AsciiStrToLower(url), kFileURLScheme)) {
const string dump_root_dir = url.substr(strlen(kFileURLScheme));
const string core_metadata_path = AppendTimestampToFilePath(
io::JoinPath(dump_root_dir,
strings::StrCat(
DebugNodeKey::kMetadataFilePrefix,
DebugIO::kCoreMetadataTag, "sessionrun",
strings::Printf("%.14lld", static_cast<long long>(
session_run_index)))),
Env::Default()->NowMicros());
status.Update(DebugFileIO::DumpEventProtoToFile(
event, string(io::Dirname(core_metadata_path)),
string(io::Basename(core_metadata_path))));
}
}
return status;
}
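// Publishes a tensor to every debug URL. file:// targets are checked against
// the global disk-byte allowance before dumping, grpc:// targets stream the
// tensor (optionally gated), and memcbk:// targets invoke a callback
// registered under the URL's suffix. Per-URL failures are aggregated into a
// single Internal status.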
Status DebugIO::PublishDebugTensor(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const absl::Span<const string> debug_urls,
const bool gated_grpc,
const int64_t step_id) {
int32_t num_failed_urls = 0;
std::vector<Status> fail_statuses;
for (const string& url : debug_urls) {
if (absl::StartsWith(absl::AsciiStrToLower(url), kFileURLScheme)) {
const string dump_root_dir = url.substr(strlen(kFileURLScheme));
const int64_t tensorBytes =
tensor.IsInitialized() ? tensor.TotalBytes() : 0;
if (!DebugFileIO::requestDiskByteUsage(tensorBytes)) {
return errors::ResourceExhausted(
"TensorFlow Debugger has exhausted file-system byte-size "
"allowance (",
DebugFileIO::global_disk_bytes_limit_, "), therefore it cannot ",
"dump an additional ", tensorBytes, " byte(s) of tensor data ",
"for the debug tensor ", debug_node_key.node_name, ":",
debug_node_key.output_slot, ". You may use the environment ",
"variable TFDBG_DISK_BYTES_LIMIT to set a higher limit.");
}
Status s = debug_node_key.io_of_node.empty()
? DebugFileIO::DumpTensorToDir(debug_node_key, tensor,
wall_time_us, dump_root_dir,
nullptr)
: DebugFileIO::DumpTensorToDirForNodeDumping(
debug_node_key, tensor, wall_time_us, dump_root_dir,
nullptr, step_id);
if (!s.ok()) {
num_failed_urls++;
fail_statuses.push_back(s);
}
} else if (absl::StartsWith(absl::AsciiStrToLower(url), kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
Status s = DebugGrpcIO::SendTensorThroughGrpcStream(
debug_node_key, tensor, wall_time_us, url, gated_grpc);
if (!s.ok()) {
num_failed_urls++;
fail_statuses.push_back(s);
}
#else
GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
} else if (absl::StartsWith(absl::AsciiStrToLower(url), kMemoryURLScheme)) {
const string dump_root_dir = url.substr(strlen(kMemoryURLScheme));
auto* callback_registry = DebugCallbackRegistry::singleton();
auto* callback = callback_registry->GetCallback(dump_root_dir);
CHECK(callback) << "No callback registered for: " << dump_root_dir;
(*callback)(debug_node_key, tensor);
} else {
return Status(absl::StatusCode::kUnavailable,
strings::StrCat("Invalid debug target URL: ", url));
}
}
if (num_failed_urls == 0) {
return absl::OkStatus();
} else {
string error_message = strings::StrCat(
"Publishing to ", num_failed_urls, " of ", debug_urls.size(),
" debug target URLs failed, due to the following errors:");
for (Status& status : fail_statuses) {
error_message =
strings::StrCat(error_message, " ", status.message(), ";");
}
return Status(absl::StatusCode::kInternal, error_message);
}
}
Status DebugIO::PublishDebugTensor(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const absl::Span<const string> debug_urls) {
return PublishDebugTensor(debug_node_key, tensor, wall_time_us, debug_urls,
false);
}
Status DebugIO::PublishGraph(const Graph& graph, const string& device_name,
const std::unordered_set<string>& debug_urls) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
string buf;
graph_def.SerializeToString(&buf);
const int64_t now_micros = Env::Default()->NowMicros();
Event event;
event.set_wall_time(static_cast<double>(now_micros));
event.set_graph_def(buf);
Status status = absl::OkStatus();
for (const string& debug_url : debug_urls) {
if (absl::StartsWith(debug_url, kFileURLScheme)) {
const string dump_root_dir =
io::JoinPath(debug_url.substr(strlen(kFileURLScheme)),
DebugNodeKey::DeviceNameToDevicePath(device_name));
const uint64 graph_hash = ::tensorflow::Hash64(buf);
const string file_name =
strings::StrCat(DebugNodeKey::kMetadataFilePrefix, DebugIO::kGraphTag,
DebugIO::kHashTag, graph_hash, "_", now_micros);
status.Update(
DebugFileIO::DumpEventProtoToFile(event, dump_root_dir, file_name));
} else if (absl::StartsWith(debug_url, kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
status.Update(PublishEncodedGraphDefInChunks(buf, device_name, now_micros,
debug_url));
#else
GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
}
}
return status;
}
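// The gate is considered open if any watch spec is not gated, targets a
// non-gRPC URL, or has its gRPC read gate enabled.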
bool DebugIO::IsCopyNodeGateOpen(
const std::vector<DebugWatchAndURLSpec>& specs) {
#ifndef PLATFORM_WINDOWS
for (const DebugWatchAndURLSpec& spec : specs) {
if (!spec.gated_grpc || spec.url.compare(0, strlen(DebugIO::kGrpcURLScheme),
DebugIO::kGrpcURLScheme)) {
return true;
} else {
if (DebugGrpcIO::IsReadGateOpen(spec.url, spec.watch_key)) {
return true;
}
}
}
return false;
#else
return true;
#endif
}
bool DebugIO::IsDebugNodeGateOpen(const string& watch_key,
const std::vector<string>& debug_urls) {
#ifndef PLATFORM_WINDOWS
for (const string& debug_url : debug_urls) {
if (debug_url.compare(0, strlen(DebugIO::kGrpcURLScheme),
DebugIO::kGrpcURLScheme)) {
return true;
} else {
if (DebugGrpcIO::IsReadGateOpen(debug_url, watch_key)) {
return true;
}
}
}
return false;
#else
return true;
#endif
}
bool DebugIO::IsDebugURLGateOpen(const string& watch_key,
const string& debug_url) {
#ifndef PLATFORM_WINDOWS
if (debug_url != kGrpcURLScheme) {
return true;
} else {
return DebugGrpcIO::IsReadGateOpen(debug_url, watch_key);
}
#else
return true;
#endif
}
Status DebugIO::CloseDebugURL(const string& debug_url) {
if (absl::StartsWith(debug_url, DebugIO::kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
return DebugGrpcIO::CloseGrpcStream(debug_url);
#else
GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
} else {
return absl::OkStatus();
}
}
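// Dumps a tensor to a timestamped Event file under dump_root_dir; the chosen
// path is returned through dump_file_path when it is non-null.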
Status DebugFileIO::DumpTensorToDir(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const string& dump_root_dir,
string* dump_file_path) {
const string file_path =
GetDumpFilePath(dump_root_dir, debug_node_key, wall_time_us);
if (dump_file_path != nullptr) {
*dump_file_path = file_path;
}
return DumpTensorToEventFile(debug_node_key, tensor, wall_time_us, file_path);
}
Status DebugFileIO::DumpTensorToDirForNodeDumping(
const DebugNodeKey& debug_node_key, const Tensor& tensor,
const uint64 wall_time_us, const string& dump_root_dir,
string* dump_file_path, const int64_t step_id) {
const string file_path = GetDumpFilePathForNodeDumping(
dump_root_dir, debug_node_key, wall_time_us, step_id);
if (dump_file_path != nullptr) {
*dump_file_path = file_path;
}
return DumpTensorToEventFile(debug_node_key, tensor, wall_time_us, file_path);
}
string DebugFileIO::GetDumpFilePath(const string& dump_root_dir,
const DebugNodeKey& debug_node_key,
const uint64 wall_time_us) {
return AppendTimestampToFilePath(
io::JoinPath(dump_root_dir, debug_node_key.device_path,
strings::StrCat(debug_node_key.node_name, "_",
debug_node_key.output_slot, "_",
debug_node_key.debug_op)),
wall_time_us);
}
string DebugFileIO::GetDumpFilePathForNodeDumping(
const string& dump_root_dir, const DebugNodeKey& debug_node_key,
const uint64 wall_time_us, const int64_t step_id) {
return AppendTimestampToFilePath(
io::JoinPath(
dump_root_dir, kDumpSubDirName, strings::StrCat("step-", step_id),
strings::StrCat(
absl::StrReplaceAll(debug_node_key.io_of_node, {{"/", "-"}}), ":",
debug_node_key.is_input ? "in" : "out", ":",
debug_node_key.io_index)),
wall_time_us);
}
Status DebugFileIO::DumpEventProtoToFile(const Event& event_proto,
const string& dir_name,
const string& file_name) {
Env* env(Env::Default());
Status s = RecursiveCreateDir(env, dir_name);
if (!s.ok()) {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create directory ", dir_name,
", due to: ", s.message()));
}
const string file_path = io::JoinPath(dir_name, file_name);
string event_str;
event_proto.SerializeToString(&event_str);
std::unique_ptr<WritableFile> f = nullptr;
TF_CHECK_OK(env->NewWritableFile(file_path, &f));
f->Append(event_str).IgnoreError();
TF_CHECK_OK(f->Close());
return absl::OkStatus();
}
Status DebugFileIO::DumpTensorToEventFile(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const string& file_path) {
std::vector<Event> events;
TF_RETURN_IF_ERROR(
WrapTensorAsEvents(debug_node_key, tensor, wall_time_us, 0, &events));
return DumpEventProtoToFile(events[0], string(io::Dirname(file_path)),
string(io::Basename(file_path)));
}
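// Recursively creates the directory and any missing ancestors, in the spirit
// of `mkdir -p`.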
Status DebugFileIO::RecursiveCreateDir(Env* env, const string& dir) {
if (env->FileExists(dir).ok() && env->IsDirectory(dir).ok()) {
return absl::OkStatus();
}
string parent_dir(io::Dirname(dir));
if (!env->FileExists(parent_dir).ok()) {
Status s = RecursiveCreateDir(env, parent_dir);
if (!s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create directory ", parent_dir));
}
} else if (env->FileExists(parent_dir).ok() &&
!env->IsDirectory(parent_dir).ok()) {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create directory ", parent_dir,
" because the path exists as a file "));
}
env->CreateDir(dir).IgnoreError();
if (env->FileExists(dir).ok() && env->IsDirectory(dir).ok()) {
return absl::OkStatus();
} else {
return Status(absl::StatusCode::kAborted,
strings::StrCat("Failed to create directory ", parent_dir));
}
}
const uint64 DebugFileIO::kDefaultGlobalDiskBytesLimit = 107374182400L;
uint64 DebugFileIO::global_disk_bytes_limit_ = 0;
uint64 DebugFileIO::disk_bytes_used_ = 0;
mutex DebugFileIO::bytes_mu_(LINKER_INITIALIZED);
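// Accounts `bytes` against the global disk usage allowance, lazily reading
// the limit from the TFDBG_DISK_BYTES_LIMIT environment variable (default
// 107374182400 bytes, i.e. 100 GiB). Returns false when granting the request
// would reach or exceed the limit.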
bool DebugFileIO::requestDiskByteUsage(uint64 bytes) {
mutex_lock l(bytes_mu_);
if (global_disk_bytes_limit_ == 0) {
const char* env_tfdbg_disk_bytes_limit = getenv("TFDBG_DISK_BYTES_LIMIT");
if (env_tfdbg_disk_bytes_limit == nullptr ||
strlen(env_tfdbg_disk_bytes_limit) == 0) {
global_disk_bytes_limit_ = kDefaultGlobalDiskBytesLimit;
} else {
strings::safe_strtou64(string(env_tfdbg_disk_bytes_limit),
&global_disk_bytes_limit_);
}
}
if (bytes == 0) {
return true;
}
if (disk_bytes_used_ + bytes < global_disk_bytes_limit_) {
disk_bytes_used_ += bytes;
return true;
} else {
return false;
}
}
void DebugFileIO::resetDiskByteUsage() {
mutex_lock l(bytes_mu_);
disk_bytes_used_ = 0;
}
#ifndef PLATFORM_WINDOWS
DebugGrpcChannel::DebugGrpcChannel(const string& server_stream_addr)
: server_stream_addr_(server_stream_addr),
url_(strings::StrCat(DebugIO::kGrpcURLScheme, server_stream_addr)) {}
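// Opens an insecure gRPC channel with the maximum allowed message length and
// waits up to timeout_micros for it to connect before opening the
// bidirectional SendEvents stream.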
Status DebugGrpcChannel::Connect(const int64_t timeout_micros) {
::grpc::ChannelArguments args;
args.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 1000);
channel_ = ::grpc::CreateCustomChannel(
server_stream_addr_, ::grpc::InsecureChannelCredentials(), args);
if (!channel_->WaitForConnected(
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(timeout_micros, GPR_TIMESPAN)))) {
return errors::FailedPrecondition(
"Failed to connect to gRPC channel at ", server_stream_addr_,
" within a timeout of ", timeout_micros / 1e6, " s.");
}
stub_ = grpc::EventListener::NewStub(channel_);
reader_writer_ = stub_->SendEvents(&ctx_);
return absl::OkStatus();
}
bool DebugGrpcChannel::WriteEvent(const Event& event) {
mutex_lock l(mu_);
return reader_writer_->Write(event);
}
bool DebugGrpcChannel::ReadEventReply(EventReply* event_reply) {
mutex_lock l(mu_);
return reader_writer_->Read(event_reply);
}
void DebugGrpcChannel::ReceiveAndProcessEventReplies(const size_t max_replies) {
EventReply event_reply;
size_t num_replies = 0;
while ((max_replies == 0 || ++num_replies <= max_replies) &&
ReadEventReply(&event_reply)) {
for (const EventReply::DebugOpStateChange& debug_op_state_change :
event_reply.debug_op_state_changes()) {
string watch_key = strings::StrCat(debug_op_state_change.node_name(), ":",
debug_op_state_change.output_slot(),
":", debug_op_state_change.debug_op());
DebugGrpcIO::SetDebugNodeKeyGrpcState(url_, watch_key,
debug_op_state_change.state());
}
}
}
Status DebugGrpcChannel::ReceiveServerRepliesAndClose() {
reader_writer_->WritesDone();
ReceiveAndProcessEventReplies(0);
if (reader_writer_->Finish().ok()) {
return absl::OkStatus();
} else {
return Status(absl::StatusCode::kFailedPrecondition,
"Failed to close debug GRPC stream.");
}
}
mutex DebugGrpcIO::streams_mu_(LINKER_INITIALIZED);
int64_t DebugGrpcIO::channel_connection_timeout_micros_ = 900 * 1000 * 1000;
const size_t DebugGrpcIO::kGrpcMessageSizeLimitBytes = 4000 * 1024;
const size_t DebugGrpcIO::kGrpcMaxVarintLengthSize = 6;
std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
DebugGrpcIO::GetStreamChannels() {
static std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
stream_channels =
new std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>();
return stream_channels;
}
Status DebugGrpcIO::SendTensorThroughGrpcStream(
const DebugNodeKey& debug_node_key, const Tensor& tensor,
const uint64 wall_time_us, const string& grpc_stream_url,
const bool gated) {
if (gated &&
!IsReadGateOpen(grpc_stream_url, debug_node_key.debug_node_name)) {
return absl::OkStatus();
} else {
std::vector<Event> events;
TF_RETURN_IF_ERROR(WrapTensorAsEvents(debug_node_key, tensor, wall_time_us,
kGrpcMessageSizeLimitBytes, &events));
for (const Event& event : events) {
TF_RETURN_IF_ERROR(
SendEventProtoThroughGrpcStream(event, grpc_stream_url));
}
if (IsWriteGateOpen(grpc_stream_url, debug_node_key.debug_node_name)) {
DebugGrpcChannel* debug_grpc_channel = nullptr;
TF_RETURN_IF_ERROR(
GetOrCreateDebugGrpcChannel(grpc_stream_url, &debug_grpc_channel));
debug_grpc_channel->ReceiveAndProcessEventReplies(1);
}
return absl::OkStatus();
}
}
Status DebugGrpcIO::ReceiveEventReplyProtoThroughGrpcStream(
EventReply* event_reply, const string& grpc_stream_url) {
DebugGrpcChannel* debug_grpc_channel = nullptr;
TF_RETURN_IF_ERROR(
GetOrCreateDebugGrpcChannel(grpc_stream_url, &debug_grpc_channel));
if (debug_grpc_channel->ReadEventReply(event_reply)) {
return absl::OkStatus();
} else {
return errors::Cancelled(strings::StrCat(
"Reading EventReply from stream URL ", grpc_stream_url, " failed."));
}
}
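// Returns a cached DebugGrpcChannel for the stream URL, creating and
// connecting one on first use. Channels are keyed by the full URL but connect
// only to the address portion (everything before the first '/').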
Status DebugGrpcIO::GetOrCreateDebugGrpcChannel(
const string& grpc_stream_url, DebugGrpcChannel** debug_grpc_channel) {
const string addr_with_path =
absl::StartsWith(grpc_stream_url, DebugIO::kGrpcURLScheme)
? grpc_stream_url.substr(strlen(DebugIO::kGrpcURLScheme))
: grpc_stream_url;
const string server_stream_addr =
addr_with_path.substr(0, addr_with_path.find('/'));
{
mutex_lock l(streams_mu_);
std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
stream_channels = GetStreamChannels();
if (stream_channels->find(grpc_stream_url) == stream_channels->end()) {
std::unique_ptr<DebugGrpcChannel> channel(
new DebugGrpcChannel(server_stream_addr));
TF_RETURN_IF_ERROR(channel->Connect(channel_connection_timeout_micros_));
stream_channels->insert(
std::make_pair(grpc_stream_url, std::move(channel)));
}
*debug_grpc_channel = (*stream_channels)[grpc_stream_url].get();
}
return absl::OkStatus();
}
Status DebugGrpcIO::SendEventProtoThroughGrpcStream(
const Event& event_proto, const string& grpc_stream_url,
const bool receive_reply) {
DebugGrpcChannel* debug_grpc_channel;
TF_RETURN_IF_ERROR(
GetOrCreateDebugGrpcChannel(grpc_stream_url, &debug_grpc_channel));
bool write_ok = debug_grpc_channel->WriteEvent(event_proto);
if (!write_ok) {
return errors::Cancelled(strings::StrCat("Write event to stream URL ",
grpc_stream_url, " failed."));
}
if (receive_reply) {
debug_grpc_channel->ReceiveAndProcessEventReplies(1);
}
return absl::OkStatus();
}
bool DebugGrpcIO::IsReadGateOpen(const string& grpc_debug_url,
const string& watch_key) {
const DebugNodeName2State* enabled_node_to_state =
GetEnabledDebugOpStatesAtUrl(grpc_debug_url);
return enabled_node_to_state->find(watch_key) != enabled_node_to_state->end();
}
bool DebugGrpcIO::IsWriteGateOpen(const string& grpc_debug_url,
const string& watch_key) {
const DebugNodeName2State* enabled_node_to_state =
GetEnabledDebugOpStatesAtUrl(grpc_debug_url);
auto it = enabled_node_to_state->find(watch_key);
if (it == enabled_node_to_state->end()) {
return false;
} else {
return it->second == EventReply::DebugOpStateChange::READ_WRITE;
}
}
Status DebugGrpcIO::CloseGrpcStream(const string& grpc_stream_url) {
mutex_lock l(streams_mu_);
std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
stream_channels = GetStreamChannels();
if (stream_channels->find(grpc_stream_url) != stream_channels->end()) {
Status s =
(*stream_channels)[grpc_stream_url]->ReceiveServerRepliesAndClose();
(*stream_channels).erase(grpc_stream_url);
return s;
} else {
return absl::OkStatus();
}
}
std::unordered_map<string, DebugGrpcIO::DebugNodeName2State>*
DebugGrpcIO::GetEnabledDebugOpStates() {
static std::unordered_map<string, DebugNodeName2State>*
enabled_debug_op_states =
new std::unordered_map<string, DebugNodeName2State>();
return enabled_debug_op_states;
}
DebugGrpcIO::DebugNodeName2State* DebugGrpcIO::GetEnabledDebugOpStatesAtUrl(
const string& grpc_debug_url) {
static mutex* debug_ops_state_mu = new mutex();
std::unordered_map<string, DebugNodeName2State>* states =
GetEnabledDebugOpStates();
mutex_lock l(*debug_ops_state_mu);
if (states->find(grpc_debug_url) == states->end()) {
DebugNodeName2State url_enabled_debug_op_states;
(*states)[grpc_debug_url] = url_enabled_debug_op_states;
}
return &(*states)[grpc_debug_url];
}
void DebugGrpcIO::SetDebugNodeKeyGrpcState(
const string& grpc_debug_url, const string& watch_key,
const EventReply::DebugOpStateChange::State new_state) {
DebugNodeName2State* states = GetEnabledDebugOpStatesAtUrl(grpc_debug_url);
if (new_state == EventReply::DebugOpStateChange::DISABLED) {
if (states->find(watch_key) == states->end()) {
LOG(ERROR) << "Attempt to disable a watch key that is not currently "
<< "enabled at " << grpc_debug_url << ": " << watch_key;
} else {
states->erase(watch_key);
}
} else if (new_state != EventReply::DebugOpStateChange::STATE_UNSPECIFIED) {
(*states)[watch_key] = new_state;
}
}
void DebugGrpcIO::ClearEnabledWatchKeys() {
GetEnabledDebugOpStates()->clear();
}
#endif
} | #include "tensorflow/core/debug/debug_io_utils.h"
#include <cstdlib>
#include <memory>
#include <unordered_set>
#include "tensorflow/core/debug/debug_callback_registry.h"
#include "tensorflow/core/debug/debug_node_key.h"
#include "tensorflow/core/debug/debugger_event_metadata.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
class DebugIOUtilsTest : public ::testing::Test {
public:
void Initialize() {
env_ = Env::Default();
tensor_a_ = std::make_unique<Tensor>(DT_FLOAT, TensorShape({2, 2}));
tensor_a_->flat<float>()(0) = 5.0;
tensor_a_->flat<float>()(1) = 3.0;
tensor_a_->flat<float>()(2) = -1.0;
tensor_a_->flat<float>()(3) = 0.0;
tensor_b_.reset(new Tensor(DT_STRING, TensorShape{2}));
tensor_b_->flat<tstring>()(0) = "corge";
tensor_b_->flat<tstring>()(1) = "garply";
}
Env* env_;
std::unique_ptr<Tensor> tensor_a_;
std::unique_ptr<Tensor> tensor_b_;
};
TEST_F(DebugIOUtilsTest, ConstructDebugNodeKey) {
DebugNodeKey debug_node_key("/job:worker/replica:1/task:0/device:GPU:2",
"hidden_1/MatMul", 0, "DebugIdentity");
EXPECT_EQ("/job:worker/replica:1/task:0/device:GPU:2",
debug_node_key.device_name);
EXPECT_EQ("hidden_1/MatMul", debug_node_key.node_name);
EXPECT_EQ(0, debug_node_key.output_slot);
EXPECT_EQ("DebugIdentity", debug_node_key.debug_op);
EXPECT_EQ("hidden_1/MatMul:0:DebugIdentity", debug_node_key.debug_node_name);
EXPECT_EQ("_tfdbg_device_,job_worker,replica_1,task_0,device_GPU_2",
debug_node_key.device_path);
}
TEST_F(DebugIOUtilsTest, EqualityOfDebugNodeKeys) {
const DebugNodeKey debug_node_key_1("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_2("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_3("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/BiasAdd", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_4("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0,
"DebugNumericSummary");
EXPECT_EQ(debug_node_key_1, debug_node_key_2);
EXPECT_NE(debug_node_key_1, debug_node_key_3);
EXPECT_NE(debug_node_key_1, debug_node_key_4);
EXPECT_NE(debug_node_key_3, debug_node_key_4);
}
TEST_F(DebugIOUtilsTest, DebugNodeKeysIsHashable) {
const DebugNodeKey debug_node_key_1("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_2("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_3("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/BiasAdd", 0, "DebugIdentity");
std::unordered_set<DebugNodeKey> keys;
keys.insert(debug_node_key_1);
ASSERT_EQ(1, keys.size());
keys.insert(debug_node_key_3);
ASSERT_EQ(2, keys.size());
keys.erase(debug_node_key_2);
ASSERT_EQ(1, keys.size());
}
TEST_F(DebugIOUtilsTest, DumpFloatTensorToFileSunnyDay) {
Initialize();
const string test_dir =
strings::StrCat(testing::TmpDir(), "/DumpFloatTensorToFileSunnyDay");
if (!env_->FileExists(test_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
}
const uint64 wall_time = env_->NowMicros();
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo/bar/qux/tensor_a", 0, "DebugIdentity");
string dump_file_path;
TF_ASSERT_OK(DebugFileIO::DumpTensorToDir(
kDebugNodeKey, *tensor_a_, wall_time, test_dir, &dump_file_path));
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_path, &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
Tensor a_prime(DT_FLOAT);
ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
}
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(
env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
TEST_F(DebugIOUtilsTest, DumpStringTensorToFileSunnyDay) {
Initialize();
const string test_dir =
strings::StrCat(testing::TmpDir(), "/DumpStringTensorToFileSunnyDay");
if (!env_->FileExists(test_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
}
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"quux/grault/tensor_b", 1, "DebugIdentity");
const uint64 wall_time = env_->NowMicros();
string dump_file_name;
Status s = DebugFileIO::DumpTensorToDir(kDebugNodeKey, *tensor_b_, wall_time,
test_dir, &dump_file_name);
ASSERT_TRUE(s.ok());
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_name, &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
auto status = tensorflow::protobuf::util::JsonStringToMessage(
event.summary().value(0).metadata().plugin_data().content(), &metadata);
ASSERT_TRUE(status.ok());
ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
Tensor b_prime(DT_STRING);
ASSERT_TRUE(b_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_b_->shape(), b_prime.shape());
for (int i = 0; i < b_prime.flat<tstring>().size(); ++i) {
ASSERT_EQ(tensor_b_->flat<tstring>()(i), b_prime.flat<tstring>()(i));
}
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(
env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
TEST_F(DebugIOUtilsTest, DumpTensorToFileCannotCreateDirectory) {
Initialize();
const string test_dir = strings::StrCat(
testing::TmpDir(), "/DumpTensorToFileCannotCreateDirectory");
if (!env_->FileExists(test_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
}
const string kDeviceName = "/job:localhost/replica:0/task:0/cpu:0";
const DebugNodeKey kDebugNodeKey(kDeviceName, "baz/tensor_a", 0,
"DebugIdentity");
const string txt_file_dir =
io::JoinPath(test_dir, DebugNodeKey::DeviceNameToDevicePath(kDeviceName));
const string txt_file_name = io::JoinPath(txt_file_dir, "baz");
if (!env_->FileExists(txt_file_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(txt_file_dir).ok());
}
ASSERT_EQ(error::Code::NOT_FOUND, env_->FileExists(txt_file_name).code());
std::unique_ptr<WritableFile> file;
ASSERT_TRUE(env_->NewWritableFile(txt_file_name, &file).ok());
TF_EXPECT_OK(file->Append("text in baz"));
TF_EXPECT_OK(file->Flush());
TF_ASSERT_OK(file->Close());
ASSERT_TRUE(env_->FileExists(txt_file_name).ok());
ASSERT_FALSE(env_->IsDirectory(txt_file_name).ok());
const uint64 wall_time = env_->NowMicros();
string dump_file_name;
Status s = DebugFileIO::DumpTensorToDir(kDebugNodeKey, *tensor_a_, wall_time,
test_dir, &dump_file_name);
ASSERT_FALSE(s.ok());
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(
env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
TEST_F(DebugIOUtilsTest, PublishTensorToMultipleFileURLs) {
Initialize();
const int kNumDumpRoots = 3;
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo/bar/qux/tensor_a", 0, "DebugIdentity");
const uint64 wall_time = env_->NowMicros();
std::vector<string> dump_roots;
std::vector<string> dump_file_paths;
std::vector<string> urls;
for (int i = 0; i < kNumDumpRoots; ++i) {
string dump_root = strings::StrCat(testing::TmpDir(),
"/PublicTensorToMultipleFileUrls_", i);
dump_roots.push_back(dump_root);
dump_file_paths.push_back(
DebugFileIO::GetDumpFilePath(dump_root, kDebugNodeKey, wall_time));
urls.push_back(strings::StrCat("file:
}
for (int i = 1; i < kNumDumpRoots; ++i) {
ASSERT_NE(dump_roots[0], dump_roots[i]);
}
Status s =
DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
ASSERT_TRUE(s.ok());
for (int i = 0; i < kNumDumpRoots; ++i) {
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_paths[i], &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
auto status = tensorflow::protobuf::util::JsonStringToMessage(
event.summary().value(0).metadata().plugin_data().content(), &metadata);
ASSERT_TRUE(status.ok());
ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
Tensor a_prime(DT_FLOAT);
ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
}
}
for (int i = 0; i < kNumDumpRoots; ++i) {
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(env_->DeleteRecursively(dump_roots[i], &undeleted_files,
&undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
TEST_F(DebugIOUtilsTest, PublishTensorToMemoryCallback) {
Initialize();
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo/bar/qux/tensor_a", 0, "DebugIdentity");
const uint64 wall_time = env_->NowMicros();
bool called = false;
std::vector<string> urls = {"memcbk:
;
auto* callback_registry = DebugCallbackRegistry::singleton();
callback_registry->RegisterCallback(
"test_callback", [this, &kDebugNodeKey, &called](const DebugNodeKey& key,
const Tensor& tensor) {
called = true;
ASSERT_EQ(kDebugNodeKey.device_name, key.device_name);
ASSERT_EQ(kDebugNodeKey.node_name, key.node_name);
ASSERT_EQ(tensor_a_->shape(), tensor.shape());
for (int i = 0; i < tensor.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), tensor.flat<float>()(i));
}
});
Status s =
DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(called);
callback_registry->UnregisterCallback("test_callback");
}
TEST_F(DebugIOUtilsTest, PublishTensorConcurrentlyToPartiallyOverlappingPaths) {
Initialize();
const int kConcurrentPubs = 3;
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"tensor_a", 0, "DebugIdentity");
thread::ThreadPool* tp =
new thread::ThreadPool(Env::Default(), "test", kConcurrentPubs);
const uint64 wall_time = env_->NowMicros();
const string dump_root_base =
strings::StrCat(testing::TmpDir(),
"/PublishTensorConcurrentlyToPartiallyOverlappingPaths");
if (!env_->FileExists(dump_root_base).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(dump_root_base).ok());
}
mutex mu;
std::vector<string> dump_roots TF_GUARDED_BY(mu);
std::vector<string> dump_file_paths TF_GUARDED_BY(mu);
int dump_count TF_GUARDED_BY(mu) = 0;
int done_count TF_GUARDED_BY(mu) = 0;
Notification all_done;
auto fn = [this, &dump_count, &done_count, &mu, &dump_root_base, &dump_roots,
&dump_file_paths, &wall_time, &kDebugNodeKey, &kConcurrentPubs,
&all_done]() {
string dump_root;
string debug_url;
{
mutex_lock l(mu);
dump_root =
strings::StrCat(dump_root_base, "grumpy/", "dump_", dump_count++);
dump_roots.push_back(dump_root);
dump_file_paths.push_back(
DebugFileIO::GetDumpFilePath(dump_root, kDebugNodeKey, wall_time));
debug_url = strings::StrCat("file:
}
std::vector<string> urls;
urls.push_back(debug_url);
Status s =
DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
ASSERT_TRUE(s.ok());
{
mutex_lock l(mu);
done_count++;
if (done_count == kConcurrentPubs) {
all_done.Notify();
}
}
};
for (int i = 0; i < kConcurrentPubs; ++i) {
tp->Schedule(fn);
}
all_done.WaitForNotification();
delete tp;
{
mutex_lock l(mu);
for (int i = 1; i < kConcurrentPubs; ++i) {
ASSERT_NE(dump_roots[0], dump_roots[i]);
}
for (int i = 0; i < kConcurrentPubs; ++i) {
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_paths[i], &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
auto status = tensorflow::protobuf::util::JsonStringToMessage(
event.summary().value(0).metadata().plugin_data().content(),
&metadata);
ASSERT_TRUE(status.ok());
ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
Tensor a_prime(DT_FLOAT);
ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
}
}
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
auto delete_files = env_->DeleteRecursively(
dump_root_base, &undeleted_files, &undeleted_dirs);
ASSERT_TRUE(delete_files.ok()) << delete_files;
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
class DiskUsageLimitTest : public ::testing::Test {
public:
void Initialize() {
setenv("TFDBG_DISK_BYTES_LIMIT", "", 1);
DebugFileIO::resetDiskByteUsage();
DebugFileIO::global_disk_bytes_limit_ = 0;
}
};
TEST_F(DiskUsageLimitTest, RequestWithZeroByteIsOkay) {
Initialize();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(0L));
}
TEST_F(DiskUsageLimitTest, ExceedingLimitAfterOneCall) {
Initialize();
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(100L * 1024L * 1024L * 1024L));
}
TEST_F(DiskUsageLimitTest, ExceedingLimitAfterTwoCalls) {
Initialize();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1024L));
}
TEST_F(DiskUsageLimitTest, ResetDiskByteUsageWorks) {
Initialize();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
DebugFileIO::resetDiskByteUsage();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
}
TEST_F(DiskUsageLimitTest, CustomEnvVarIsObeyed) {
Initialize();
setenv("TFDBG_DISK_BYTES_LIMIT", "1024", 1);
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(1024L));
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1000L));
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(23L));
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(1L));
DebugFileIO::resetDiskByteUsage();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1023L));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_io_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_io_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5cf953bd-5814-4751-b579-123fcf7dfd8b | cpp | tensorflow/tensorflow | debug | tensorflow/compiler/mlir/lite/debug/debug.cc | tensorflow/compiler/mlir/lite/debug/debug_test.cc | #include "tensorflow/compiler/mlir/lite/debug/debug.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassInstrumentation.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Transforms/ViewOpGraph.h"
#include "re2/re2.h"
#include "tensorflow/compiler/mlir/lite/debug/debug_options.pb.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include "xla/tsl/lib/io/buffered_file.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
namespace {
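// llvm::raw_ostream adapter that forwards writes to a tsl::WritableFile;
// after the first failed append the file is dropped and further output is
// silently discarded.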
struct WritableFileRawStream : public llvm::raw_ostream {
explicit WritableFileRawStream(std::unique_ptr<tsl::WritableFile> file)
: file(std::move(file)) {
SetUnbuffered();
}
~WritableFileRawStream() override = default;
uint64_t current_pos() const override {
int64_t position;
if (file->Tell(&position).ok()) {
return position;
} else {
LOG(WARNING)
<< "Couldn't query file position. Stream might be malformed.\n";
return -1;
}
}
void write_impl(const char* ptr, size_t size) override {
if (file && !file->Append(absl::string_view(ptr, size)).ok()) {
file = nullptr;
}
}
std::unique_ptr<tsl::WritableFile> file;
};
class ReproducerStream : public mlir::ReproducerStream {
public:
ReproducerStream(std::string name, std::unique_ptr<llvm::raw_ostream> os)
: name_(std::move(name)), os_(std::move(os)) {}
llvm::StringRef description() override { return name_; }
llvm::raw_ostream& os() override { return *os_; }
private:
std::string name_;
std::unique_ptr<llvm::raw_ostream> os_;
};
mlir::ReproducerStreamFactory GetReproducerStreamFactory(
absl::string_view dump_dir) {
std::string path = tsl::io::JoinPath(dump_dir, "tfl_mlir_crash_repro.mlir");
return [path = std::move(path)](
std::string& error) -> std::unique_ptr<mlir::ReproducerStream> {
std::unique_ptr<tsl::WritableFile> file;
if (auto status = tsl::Env::Default()->NewWritableFile(path, &file);
!status.ok()) {
error = status.ToString();
absl::StrAppend(&error, "; failed to open '", path,
"' for writing an MLIR reproducer");
return nullptr;
}
file = std::make_unique<tsl::BufferedWritableFile>(std::move(file));
return std::make_unique<ReproducerStream>(
path, std::make_unique<WritableFileRawStream>(std::move(file)));
};
}
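// Collapses each run of characters that are unsafe in file names into a
// single underscore; a trailing run is dropped entirely.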
std::string Sanitize(absl::string_view string) {
static const auto& kUnwantedChars = *new absl::flat_hash_set<char>{
'<', '>', ':', '\"', '/', '\\', '|', '?', '*', ' ', '(', ')'};
std::string sanitized;
sanitized.reserve(string.size());
bool skip = false;
for (const char& c : string) {
if (auto it = kUnwantedChars.find(c); it != kUnwantedChars.end()) {
skip = true;
continue;
}
if (skip) {
skip = false;
sanitized.push_back('_');
}
sanitized.push_back(c);
}
return sanitized;
}
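// Pass instrumentation that writes the module to disk before and after each
// pass whose name matches dump_pass_regex, provided some public function
// name matches dump_func_regex (or the module has no named public
// functions). File names carry a zero-padded hex pass counter so dumps sort
// in pipeline order.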
class DumpInstrumentation : public mlir::PassInstrumentation {
public:
explicit DumpInstrumentation(absl::string_view dump_dir,
absl::string_view dump_pass_regex,
absl::string_view dump_func_regex)
: dump_dir_(dump_dir),
dump_pass_re_(std::make_unique<RE2>(dump_pass_regex)),
dump_func_re_(std::make_unique<RE2>(dump_func_regex)) {}
DumpInstrumentation(const DumpInstrumentation& other) = delete;
DumpInstrumentation& operator=(const DumpInstrumentation& other) = delete;
void runBeforePass(mlir::Pass* pass, mlir::Operation* op) override {
if (!printed_) {
Dump("before_all", op);
printed_ = true;
}
if (RE2::FullMatch(pass->getName(), *dump_pass_re_)) {
Dump(absl::StrCat(absl::string_view(pass->getName()), "_before"), op,
absl::StrCat(absl::Hex(pass_counter_, absl::kZeroPad8)));
}
}
void runAfterPass(mlir::Pass* pass, mlir::Operation* op) override {
if (RE2::FullMatch(pass->getName(), *dump_pass_re_)) {
Dump(absl::StrCat(absl::string_view(pass->getName()), "_after"), op,
absl::StrCat(absl::Hex(pass_counter_++, absl::kZeroPad8)));
}
}
private:
void Dump(absl::string_view name, mlir::Operation* op,
std::string prefix = "") {
static constexpr char kFiletypeSuffix[] = "mlir";
llvm::SmallVector<absl::string_view> func_names;
bool match = false;
op->walk([&](mlir::func::FuncOp func) {
if (func.isPublic()) {
const absl::string_view name = func.getSymName();
if (name.empty()) {
return;
}
func_names.push_back(name);
if (RE2::FullMatch(name, *dump_func_re_)) {
match = true;
}
}
});
if (!func_names.empty() && !match) {
return;
}
llvm::sort(func_names);
std::string joined_func_names = Sanitize(absl::StrJoin(func_names, "-"));
std::string sanitized_name = Sanitize(name);
std::vector<absl::string_view> name_parts;
if (!prefix.empty()) {
name_parts.emplace_back(prefix);
}
if (!joined_func_names.empty()) {
name_parts.emplace_back(joined_func_names);
}
name_parts.emplace_back(sanitized_name);
name_parts.emplace_back(kFiletypeSuffix);
const std::string filename = tsl::io::JoinPath(
dump_dir_, absl::StrJoin(name_parts.begin(), name_parts.end(), "."));
std::unique_ptr<tsl::WritableFile> file;
if (auto status = tsl::Env::Default()->NewWritableFile(filename, &file);
!status.ok()) {
LOG(ERROR) << "Unable to open '" << filename
<< "' for dumping TFLite MLIR output: " << status;
return;
}
file = std::make_unique<tsl::BufferedWritableFile>(std::move(file));
WritableFileRawStream os(std::move(file));
op->print(os);
}
const std::string dump_dir_;
const std::unique_ptr<RE2> dump_pass_re_;
const std::unique_ptr<RE2> dump_func_re_;
int pass_counter_ = 0;
bool printed_ = false;
};
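// Builds a predicate for mlir::PassManager::enableIRPrinting that matches
// pass names against the given regex; an empty regex yields a null function,
// which disables printing for that direction.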
std::function<bool(mlir::Pass*, mlir::Operation*)> CreatePrintIRFun(
const std::string& pass_regex) {
std::function<bool(mlir::Pass*, mlir::Operation*)> fun;
if (pass_regex.empty()) {
return fun;
}
return [pr = pass_regex](mlir::Pass* p, mlir::Operation*) {
static const RE2* const re = new RE2(pr);
if (RE2::FullMatch(p->getName(), *re)) {
return true;
}
return false;
};
}
}
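// Configures the pass manager according to DebugOptions: crash-reproducer
// generation and per-pass IR dumps under a timestamped subdirectory of
// ir_dump_dir, IR printing to `out` for passes matching the print regexes,
// optional pass timing, and TFLite error collection. Multithreading is
// disabled whenever IR is dumped or printed.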
void InitPassManager(mlir::PassManager& pm,
const converter::DebugOptions& options,
llvm::raw_ostream& out) {
std::string dump_dir = options.ir_dump_dir();
bool dump_to_dir = !dump_dir.empty();
bool print_to_stdout =
!options.print_ir_before().empty() || !options.print_ir_after().empty();
if (dump_to_dir || print_to_stdout) {
pm.getContext()->disableMultithreading();
}
if (dump_to_dir) {
dump_dir = tsl::io::JoinPath(
dump_dir, absl::FormatTime("%E4Y%m%d_%H%M%E6S", absl::Now(),
absl::LocalTimeZone()));
tsl::Env* env = tsl::Env::Default();
if (auto status = env->RecursivelyCreateDir(dump_dir); !status.ok()) {
LOG(WARNING) << "Failed to create '" << dump_dir
<< "' directory for dumping: " << status;
return;
}
if (auto reproducer_stream_factory = GetReproducerStreamFactory(dump_dir)) {
pm.enableCrashReproducerGeneration(std::move(reproducer_stream_factory));
}
pm.addInstrumentation(std::make_unique<DumpInstrumentation>(
dump_dir, options.ir_dump_pass_regex(), options.ir_dump_func_regex()));
}
if (print_to_stdout) {
std::function<bool(mlir::Pass*, mlir::Operation*)>
should_print_ir_before_pass(
CreatePrintIRFun(options.print_ir_before()));
std::function<bool(mlir::Pass*, mlir::Operation*)>
should_print_ir_after_pass(CreatePrintIRFun(options.print_ir_after()));
mlir::OpPrintingFlags opPrintingFlags = mlir::OpPrintingFlags();
if (options.has_elide_elementsattrs_if_larger()) {
opPrintingFlags.elideLargeElementsAttrs(
options.elide_elementsattrs_if_larger());
}
pm.enableIRPrinting(should_print_ir_before_pass, should_print_ir_after_pass,
options.print_ir_module_scope(),
true,
false, out,
opPrintingFlags);
}
if (options.enable_timing()) {
pm.enableTiming();
}
pm.addInstrumentation(
std::make_unique<mlir::TFL::ErrorCollectorInstrumentation>(
pm.getContext()));
}
} | #include "tensorflow/compiler/mlir/lite/debug/debug.h"
#include <stdint.h>
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/lite/debug/debug_options.pb.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
namespace tensorflow {
namespace debug_test {
class NopPass : public mlir::PassWrapper<NopPass, mlir::OperationPass<>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(NopPass)
void runOnOperation() override {}
};
class MutatePass : public mlir::PassWrapper<MutatePass, mlir::OperationPass<>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MutatePass)
void runOnOperation() override {
mlir::OpBuilder builder(&getContext());
getOperation()->setAttr("tfl.random_attr", builder.getUnitAttr());
}
};
class AlwaysFailPass
: public mlir::PassWrapper<AlwaysFailPass, mlir::OperationPass<>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AlwaysFailPass)
void runOnOperation() override { signalPassFailure(); }
};
}
namespace {
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using namespace tensorflow::debug_test;
class InitPassManagerTest : public testing::Test {
protected:
InitPassManagerTest()
: path_(GetOutputPath()), context_([]() {
mlir::registerPassManagerCLOptions();
mlir::DialectRegistry registry;
registry.insert<mlir::BuiltinDialect>();
registry.insert<mlir::arith::ArithDialect>();
registry.insert<mlir::func::FuncDialect>();
registry.insert<mlir::TFL::TensorFlowLiteDialect>();
return registry;
}()) {
context_.loadAllAvailableDialects();
mlir::OpBuilder builder(&context_);
module_ = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
builder.setInsertionPointToStart(module_->getBody());
auto func = builder.create<mlir::func::FuncOp>(
builder.getUnknownLoc(), "main", builder.getFunctionType({}, {}));
func->setAttr("tfl.func", builder.getUnitAttr());
builder.setInsertionPointToStart(func.addEntryBlock());
llvm::SmallVector<int> shape{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
builder.create<mlir::arith::ConstantOp>(
builder.getUnknownLoc(),
mlir::DenseIntElementsAttr::get(
mlir::RankedTensorType::get(shape.size(), builder.getI32Type()),
shape));
builder.create<mlir::func::ReturnOp>(builder.getUnknownLoc());
}
absl::Status GetDumpDir(std::string* dump_dir) {
std::vector<string> files;
if (auto status = tsl::Env::Default()->GetChildren(path_, &files);
!status.ok()) {
return status;
}
if (files.size() != 1) {
return absl::FailedPreconditionError(
"Expecting directory to have one child.");
}
*dump_dir = tsl::io::JoinPath(path_, files[0]);
return absl::OkStatus();
}
std::string path_;
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
private:
std::string GetOutputPath() {
const auto* const test_info =
testing::UnitTest::GetInstance()->current_test_info();
return tsl::io::JoinPath(
getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
absl::StrCat(test_info->test_suite_name(), ".", test_info->name()));
}
};
TEST_F(InitPassManagerTest, CrashReproducer) {
converter::DebugOptions debug_options;
*debug_options.mutable_ir_dump_dir() = path_;
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options);
pm.addPass(std::make_unique<AlwaysFailPass>());
ASSERT_TRUE(mlir::failed(pm.run(*module_)));
std::string dump_dir;
TF_ASSERT_OK(GetDumpDir(&dump_dir));
std::string mlir_dump;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(dump_dir, "tfl_mlir_crash_repro.mlir"), &mlir_dump));
EXPECT_THAT(mlir_dump, Not(IsEmpty()));
}
TEST_F(InitPassManagerTest, DumpToDir) {
converter::DebugOptions debug_options;
*debug_options.mutable_ir_dump_dir() = path_;
*debug_options.mutable_ir_dump_pass_regex() = R"(.*NopPass)";
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options);
pm.addPass(std::make_unique<NopPass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
std::string dump_dir;
TF_ASSERT_OK(GetDumpDir(&dump_dir));
{
std::string mlir_dump;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
dump_dir, "00000000.main.tensorflow_debug_test_NopPass_after.mlir"),
&mlir_dump));
EXPECT_THAT(mlir_dump, Not(IsEmpty()));
}
{
std::string mlir_dump;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
dump_dir,
"00000000.main.tensorflow_debug_test_NopPass_before.mlir"),
&mlir_dump));
EXPECT_THAT(mlir_dump, Not(IsEmpty()));
}
}
TEST_F(InitPassManagerTest, PrintIRBeforeEverything) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<NopPass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump Before tensorflow::debug_test::NopPass"));
EXPECT_THAT(captured_out,
Not(HasSubstr("IR Dump After tensorflow::debug_test::NopPass")));
}
TEST_F(InitPassManagerTest, PrintIRAfterEverything) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_after() = R"(.*)";
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump After tensorflow::debug_test::MutatePass"));
EXPECT_THAT(
captured_out,
Not(HasSubstr("IR Dump Before tensorflow::debug_test::MutatePass")));
}
TEST_F(InitPassManagerTest, PrintIRBeforeAndAfterEverything) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
*debug_options.mutable_print_ir_after() = R"(.*)";
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump After tensorflow::debug_test::MutatePass"));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump Before tensorflow::debug_test::MutatePass"));
}
TEST_F(InitPassManagerTest, ElideLargeElementAttrs) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
debug_options.set_elide_elementsattrs_if_larger(5);
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out, HasSubstr("dense_resource<__elided__>"));
}
TEST_F(InitPassManagerTest, DontElideSmallerElementAttrs) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
debug_options.set_elide_elementsattrs_if_larger(11);
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]>"));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/debug/debug.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/debug/debug_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cc97400-cf2f-490d-94fe-ed7357993393 | cpp | tensorflow/tensorflow | python_op_gen | tensorflow/python/framework/python_op_gen.cc | tensorflow/python/framework/python_op_gen_test.cc | #include "tensorflow/python/framework/python_op_gen.h"
#include <float.h>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iomanip>
#include <locale>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/python/framework/python_op_gen_annotator.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
const int kLatestAPIExportVersion = 2;
const int kRightMargin = 78;
constexpr char kEagerFallbackSuffix[] = "_eager_fallback";
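// Maps the generated "_dtypes.*" names (see DataTypeToPython) to the
// "_atypes.*" classes used in type annotations.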
const std::unordered_map<string, string> dtype_type{
{"_dtypes.float16", "_atypes.Float16"},
{"_dtypes.half", "_atypes.Half"},
{"_dtypes.float32", "_atypes.Float32"},
{"_dtypes.float64", "_atypes.Float64"},
{"_dtypes.bfloat16", "_atypes.BFloat16"},
{"_dtypes.complex64", "_atypes.Complex64"},
{"_dtypes.complex128", "_atypes.Complex128"},
{"_dtypes.int8", "_atypes.Int8"},
{"_dtypes.uint8", "_atypes.UInt8"},
{"_dtypes.uint16", "_atypes.UInt16"},
{"_dtypes.uint32", "_atypes.UInt32"},
{"_dtypes.uint64", "_atypes.UInt64"},
{"_dtypes.int16", "_atypes.Int16"},
{"_dtypes.int32", "_atypes.Int32"},
{"_dtypes.int64", "_atypes.Int64"},
{"_dtypes.bool", "_atypes.Bool"},
{"_dtypes.string", "_atypes.String"},
{"_dtypes.qint8", "_atypes.QInt8"},
{"_dtypes.quint8", "_atypes.QUInt8"},
{"_dtypes.qint16", "_atypes.QInt16"},
{"_dtypes.quint16", "_atypes.QUInt16"},
{"_dtypes.qint32", "_atypes.QInt32"},
{"_dtypes.resource", "_atypes.Resource"},
{"_dtypes.variant", "_atypes.Variant"},
{"_dtypes.float8_e4m3fn", "_atypes.Float8e4m3fn"},
{"_dtypes.float8_e5m2", "_atypes.Float8e5m2"},
{"_dtypes.int4", "_atypes.Int4"},
{"_dtypes.uint4", "_atypes.UInt4"},
};
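// Returns the Python variable name used for the inferred value of
// `attr_name` ("_attr_<name>") and records it in `attr_expressions` when
// that map is provided.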
string AttrVarName(const string& attr_name,
std::unordered_map<string, string>* attr_expressions) {
const string var = strings::StrCat("_attr_", attr_name);
if (attr_expressions != nullptr) (*attr_expressions)[attr_name] = var;
return var;
}
void AddInferredAttr(const string& indentation, const string& attr_name,
const string& value_expression, string* result,
std::unordered_map<string, string>* attr_expressions) {
strings::StrAppend(result, indentation,
AttrVarName(attr_name, attr_expressions), " = ",
value_expression, "\n");
}
string VectorToTuple(const std::vector<string>& l) {
if (l.size() == 1) return strings::StrCat("(", l.front(), ",)");
string ret = "(";
for (int i = 0, end = l.size(); i < end; ++i) {
if (i > 0) {
strings::StrAppend(&ret, ", ");
}
strings::StrAppend(&ret, l[i]);
}
strings::StrAppend(&ret, ")");
return ret;
}
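// Emits Python code that regroups the flat list `var` into per-output
// sublists; each non-empty entry of `output_sizes` is a Python expression
// for the length of the corresponding list output.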
void Unflatten(const string& prefix, const std::vector<string>& output_sizes,
const string& var, string* result) {
for (int i = 0, end = output_sizes.size(); i < end; ++i) {
if (!output_sizes[i].empty()) {
strings::StrAppend(result, prefix, var, " = ");
if (i > 0) strings::StrAppend(result, var, "[:", i, "] + ");
if (i + 1 < end) {
if (i == 0) {
strings::StrAppend(result, "[", var, "[:", output_sizes[i], "]] + ",
var, "[", output_sizes[i], ":]");
} else {
strings::StrAppend(result, "[", var, "[", i, ":", i, " + ",
output_sizes[i], "]] + ", var, "[", i, " + ",
output_sizes[i], ":]");
}
} else {
strings::StrAppend(result, "[", var, "[", i, ":]]");
}
strings::StrAppend(result, "\n");
}
}
}
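// Renders `pb` as a single-line text proto wrapped in a Python
// triple-quoted string, suitable for embedding in generated code.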
string TensorPBString(const TensorProto& pb) {
std::string message_short_text;
::tensorflow::protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
printer.SetExpandAny(true);
printer.PrintToString(pb, &message_short_text);
return strings::StrCat("\"\"\"", message_short_text, "\"\"\"");
}
bool IsPythonReserved(const string& s);
bool IsOpWithUnderscorePrefix(const string& s);
string AvoidPythonReserved(const string& s);
string AttrValueToPython(const string& type, const AttrValue& value,
const string& dtype_module = "tf.");
void GenerateLowerCaseOpName(const string& str, string* result);
string DataTypeToPython(DataType dtype, const string& dtype_module);
class ParamNames {
public:
ParamNames(const string& name, const string& rename_to) : name_(name) {
rename_to_ = AvoidPythonReserved(rename_to);
}
string GetName() const { return name_; }
string GetRenameTo() const { return rename_to_; }
private:
string name_;
string rename_to_;
};
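// Generates the Python wrapper for a single op from its OpDef/ApiDef:
// the public function (eager fast path plus graph mode) and its
// "*_eager_fallback" companion.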
class GenPythonOp {
public:
GenPythonOp(
const OpDef& op_def, const ApiDef& api_def, const string& function_name,
python_op_gen_internal::GeneratedCodeAnnotator* annotator = nullptr)
: op_def_(op_def),
api_def_(api_def),
function_name_(function_name),
num_outs_(op_def.output_arg_size()),
annotator_(annotator) {
op_name_ = function_name_;
absl::ConsumePrefix(&op_name_, "_");
}
~GenPythonOp() = default;
string Code();
protected:
void AddDefLine(const string& function_name, const string& parameters);
void AddDefLine(const string& parameters);
void AddDocStringDescription();
void AddDocStringArgs();
void AddDocStringInputs();
void AddDocStringAttrs();
void AddDocStringNameArg();
void AddOutputGlobals();
void AddDocStringOutputs();
void AddBody(const string& prefix);
void AddBodyNoReturn(const string& apply_prefix);
void AddExport();
void HandleGraphMode(const string& function_setup,
const std::vector<string>& output_sizes);
string GetEagerNotAllowedError();
void ExpectListArg(const string& indentation, const string& arg_name,
string* output);
bool GetEagerFunctionSetup(const string& indentation, string* function_setup);
void GetOutputSizesAndNumOutputsExpr(std::vector<string>* output_sizes,
string* num_outputs_expr);
void AddEagerFunctionTeardown(const string& indentation,
const std::vector<string>& output_sizes,
bool execute_record_gradient);
bool AddEagerFastPathAndGraphCode(
const string& parameters, const std::vector<string>& output_sizes,
const string& eager_not_allowed_error,
const std::unordered_map<string, string>& type_annotations);
bool AddEagerFallbackCode(
const string& parameters, const std::vector<string>& output_sizes,
const string& num_outputs_expr, const string& eager_not_allowed_error,
const std::unordered_map<string, string>& type_annotations);
void AddEagerFastPathExecute();
void AddEagerInferredAttrs(const string& indentation);
void AddEagerInputCasts(const string& indentation);
void AddEagerAttrs(const string& indentation);
void AddEagerExecute(const string& indentation,
const string& num_outputs_expr);
void AddFallbackDispatch(const string& prefix);
void AddTypeBasedDispatch(const string& prefix);
void AddTypeBasedDispatcherAlias();
void AddRawOpExport(const string& parameters);
std::unordered_map<string, string> GetTypeAnnotations();
void GenerateTypeVars(
const std::unordered_map<string, string>& type_annotations);
void AddReturnTypeAnnotation(
const std::unordered_map<string, string>& type_annotations);
void AddAttrForArg(const string& attr, int arg_index) {
gtl::InsertIfNotPresent(&inferred_attrs_, attr,
op_def_.input_arg(arg_index).name());
auto iter = attr_to_args_.find(attr);
if (iter == attr_to_args_.end()) {
attr_to_args_.insert(AttrToArgMap::value_type(attr, {arg_index}));
} else {
iter->second.push_back(arg_index);
}
}
string FlattenInputs(const std::vector<int>* input_indices,
std::vector<string>* output_sizes) const;
const OpDef& op_def_;
const ApiDef& api_def_;
const string function_name_;
const int num_outs_;
python_op_gen_internal::GeneratedCodeAnnotator* annotator_ = nullptr;
uint32_t def_offset_start_ = 0;
string prelude_;
string result_;
std::unordered_map<string, string> inferred_attrs_;
std::vector<string> attrs_;
std::vector<ParamNames> param_names_;
StringPiece op_name_;
typedef std::unordered_map<string, std::vector<int>> AttrToArgMap;
AttrToArgMap attr_to_args_;
std::unordered_map<string, string> attr_expressions_;
std::vector<ParamNames> params_no_default_;
std::vector<std::pair<ParamNames, string>> params_with_default_;
};
string GetEagerPythonOp(
const OpDef& op_def, const ApiDef& api_def, const string& function_name,
python_op_gen_internal::GeneratedCodeAnnotator* annotator = nullptr) {
return GenPythonOp(op_def, api_def, function_name, annotator).Code();
}
bool IsPythonReserved(const string& s) {
static const std::set<string>* const kPythonReserved = new std::set<string>(
{
"and", "as", "assert", "break", "class", "continue", "def", "del",
"elif", "else", "except", "exec", "finally", "for", "from", "global",
"if", "import", "in", "is", "lambda", "not", "or", "pass", "print",
"raise", "return", "try", "while", "with", "yield",
"ArithmeticError", "AssertionError", "AttributeError", "BaseException",
"BufferError", "BytesWarning", "DeprecationWarning", "EOFError",
"Ellipsis", "EnvironmentError", "Exception", "False",
"FloatingPointError", "FutureWarning", "GeneratorExit", "IOError",
"ImportError", "ImportWarning", "IndentationError", "IndexError",
"KeyError", "KeyboardInterrupt", "LookupError", "MemoryError",
"NameError", "None", "NotImplemented", "NotImplementedError", "OSError",
"OverflowError", "PendingDeprecationWarning", "ReferenceError",
"RuntimeError", "RuntimeWarning", "StandardError", "StopIteration",
"SyntaxError", "SyntaxWarning", "SystemError", "SystemExit", "TabError",
"True", "TypeError", "UnboundLocalError", "UnicodeDecodeError",
"UnicodeEncodeError", "UnicodeError", "UnicodeTranslateError",
"UnicodeWarning", "UserWarning", "ValueError", "Warning",
"ZeroDivisionError", "__debug__", "__doc__", "__import__", "__name__",
"__package__"});
return kPythonReserved->count(s) > 0;
}
bool IsOpWithUnderscorePrefix(const string& s) {
static const std::set<string>* const kUnderscoreOps = new std::set<string>(
{
"abs", "all", "any", "apply", "bin", "bool", "buffer", "bytearray",
"bytes", "callable", "chr", "classmethod", "cmp", "coerce", "compile",
"complex", "copyright", "credits", "delattr", "dict", "dir", "divmod",
"enumerate", "eval", "execfile", "exit", "file", "filter", "float",
"format", "frozenset", "getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "input", "int", "intern", "isinstance", "issubclass",
"iter", "len", "license", "list", "locals", "long", "map", "max",
"memoryview", "min", "next", "object", "oct", "open", "ord", "pow",
"print", "property", "quit", "range", "raw_input", "reduce", "reload",
"repr", "reversed", "set", "setattr", "slice", "sorted", "staticmethod",
"str", "sum", "super", "tuple", "type", "unichr", "unicode", "vars",
"xrange", "zip",
"fused_batch_norm", "histogram_fixed_width", "stack",
"batch_norm_with_global_normalization", "clip_by_value"});
return kUnderscoreOps->count(s) > 0;
}
string AvoidPythonReserved(const string& s) {
string result = absl::StrReplaceAll(s, {{">", "_"}});
if (IsPythonReserved(result)) return strings::StrCat(result, "_");
return result;
}
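// Indents the first line of `in` by `initial` spaces and each subsequent
// non-empty line by `rest` spaces, stripping trailing whitespace.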
string Indent(int initial, int rest, StringPiece in) {
string copy(in.data(), in.size());
  absl::StripTrailingAsciiWhitespace(&copy);
std::vector<string> v = str_util::Split(copy, '\n');
string result;
bool first = true;
for (const string& line : v) {
if (first) {
result = strings::StrCat(Spaces(initial), line, "\n");
first = false;
} else {
if (line.empty()) {
strings::StrAppend(&result, "\n");
} else {
strings::StrAppend(&result, Spaces(rest), line, "\n");
}
}
}
return result;
}
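// Appends `append` to `dest`, separated by a space if the first line of
// `append` still fits within `width` characters, and by a newline
// otherwise.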
void AppendWithinWidth(string* dest, StringPiece append, int width) {
auto first_line = append.find('\n');
if (first_line == string::npos) first_line = append.size();
if (dest->size() + first_line + 1 > static_cast<size_t>(width)) {
strings::StrAppend(dest, "\n", append);
} else {
strings::StrAppend(dest, " ", append);
}
}
string PythonDataTypeString(DataType dtype) {
switch (dtype) {
case DT_FLOAT:
return "float32";
case DT_DOUBLE:
return "float64";
default:
return DataTypeString(dtype);
}
}
string TypeString(DataType dtype, bool ref) {
if (ref) {
return strings::StrCat("mutable `", PythonDataTypeString(dtype), "`");
} else {
return strings::StrCat("`", PythonDataTypeString(dtype), "`");
}
}
string TypeListString(const AttrValue& value) {
string ret;
for (int t : value.list().type()) {
if (!ret.empty()) strings::StrAppend(&ret, ", ");
DataType dtype = static_cast<DataType>(t);
if (IsRefType(dtype)) {
strings::StrAppend(&ret, PythonDataTypeString(RemoveRefType(dtype)),
" mutable");
} else {
strings::StrAppend(&ret, "`", PythonDataTypeString(dtype), "`");
}
}
return ret;
}
string SingleTensorName(DataType dtype, bool is_ref) {
const string type_str = TypeString(dtype, is_ref);
return strings::StrCat("A `Tensor` of type ", type_str, ".");
}
const char kUnknownTensorType[] = {"A `Tensor`."};
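// Describes an input or output argument for the doc string, reusing attrs
// inferred from earlier arguments ("same type as ...") where possible.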
string ArgTypeName(const OpDef& op_def, const OpDef::ArgDef& arg,
const std::unordered_map<string, string>& inferred_attrs,
bool is_output) {
if (!arg.number_attr().empty()) {
const string* original_arg =
gtl::FindOrNull(inferred_attrs, arg.number_attr());
string prefix;
if (original_arg == nullptr) {
prefix = strings::StrCat("A list of `", arg.number_attr(), "`");
} else if (*original_arg == arg.name()) {
const OpDef::AttrDef* attr = FindAttr(arg.number_attr(), op_def);
if (attr->has_minimum() && attr->minimum() > 0) {
prefix = strings::StrCat("A list of at least ", attr->minimum());
} else {
prefix = "A list of";
}
} else {
prefix = strings::StrCat("A list with the same length as `",
AvoidPythonReserved(*original_arg), "` of");
}
if (arg.type() != DT_INVALID) {
return strings::StrCat(prefix, " `Tensor` objects with type ",
TypeString(arg.type(), arg.is_ref()), ".");
} else {
original_arg = gtl::FindOrNull(inferred_attrs, arg.type_attr());
if (arg.is_ref()) {
strings::StrAppend(&prefix, " mutable");
}
if (original_arg == nullptr) {
return strings::StrCat(prefix, " `Tensor` objects with type `",
arg.type_attr(), "`.");
} else if (*original_arg == arg.name()) {
const OpDef::AttrDef* attr = FindAttr(arg.type_attr(), op_def);
if (attr->has_allowed_values()) {
return strings::StrCat(prefix,
" `Tensor` objects with the same type in: ",
TypeListString(attr->allowed_values()), ".");
} else {
return strings::StrCat(prefix,
" `Tensor` objects with the same type.");
}
} else {
return strings::StrCat(prefix,
" `Tensor` objects with the same type as `",
AvoidPythonReserved(*original_arg), "`.");
}
}
} else if (!arg.type_attr().empty() || !arg.type_list_attr().empty()) {
const bool is_list = !arg.type_list_attr().empty();
const string attr_name = is_list ? arg.type_list_attr() : arg.type_attr();
const OpDef::AttrDef* attr = FindAttr(attr_name, op_def);
const string mutable_str = arg.is_ref() ? "mutable " : "";
const string prefix =
is_list ? strings::StrCat("A list of ", mutable_str, "`Tensor` objects")
: strings::StrCat("A ", mutable_str, "`Tensor`");
const string* original_arg = gtl::FindOrNull(inferred_attrs, attr_name);
if (original_arg == nullptr) {
return strings::StrCat(prefix, " of type `", attr_name, "`.");
} else if (*original_arg == arg.name()) {
if (attr->has_allowed_values()) {
if (is_list) {
return strings::StrCat(prefix, " with types from: ",
TypeListString(attr->allowed_values()), ".");
} else {
return strings::StrCat(prefix,
is_output
? ". Has one of the following types: "
: ". Must be one of the following types: ",
TypeListString(attr->allowed_values()), ".");
}
} else {
return strings::StrCat(prefix, ".");
}
} else {
return strings::StrCat(prefix,
is_output ? ". Has the same type as `"
: ". Must have the same type as `",
AvoidPythonReserved(*original_arg), "`.");
}
} else {
return SingleTensorName(arg.type(), arg.is_ref());
}
}
string GetReturns(const OpDef& op_def,
const std::vector<string>& output_type_string) {
string result;
DCHECK_EQ(op_def.output_arg_size(), output_type_string.size());
const int num_outs = op_def.output_arg_size();
strings::StrAppend(&result, "\n Returns:\n");
if (num_outs == 0) {
strings::StrAppend(&result, " The created Operation.\n");
} else {
if (num_outs == 1) {
StringPiece description = op_def.output_arg(0).description();
if (ConsumeEquals(&description)) {
strings::StrAppend(&result, Indent(4, 4, description));
} else {
string desc = output_type_string.empty() ? kUnknownTensorType
: output_type_string[0];
if (desc == kUnknownTensorType) {
if (!description.empty()) {
desc = op_def.output_arg(0).description();
} else if (!op_def.output_arg(0).name().empty()) {
desc = strings::StrCat(" The ", op_def.output_arg(0).name(),
" `Tensor`.");
}
} else if (!description.empty()) {
          AppendWithinWidth(&desc, description, kRightMargin - 4);
}
strings::StrAppend(&result, Indent(4, 4, desc));
}
} else {
std::vector<string> out_names(num_outs);
for (int i = 0; i < num_outs; ++i) {
if (!op_def.output_arg(i).name().empty()) {
out_names[i] = op_def.output_arg(i).name();
} else {
out_names[i] = strings::StrCat("output", i);
}
}
strings::StrAppend(&result, " A tuple of `Tensor` objects (",
absl::StrJoin(out_names, ", "), ").\n\n");
for (int i = 0; i < num_outs; ++i) {
string desc = strings::StrCat(out_names[i], ": ");
StringPiece description = op_def.output_arg(i).description();
if (ConsumeEquals(&description)) {
strings::StrAppend(&desc, description);
} else {
const string type = static_cast<size_t>(i) < output_type_string.size()
? output_type_string[i]
: kUnknownTensorType;
if (!description.empty()) {
if (type == kUnknownTensorType) {
strings::StrAppend(&desc, description);
} else {
strings::StrAppend(&desc, type, " ", description);
}
} else {
strings::StrAppend(&desc, type);
}
}
strings::StrAppend(&result, Indent(4, 6, desc));
}
}
}
return result;
}
string StringToPython(const string& str) {
return strings::StrCat("\"", absl::CEscape(str), "\"");
}
string DataTypeToPython(DataType dtype, const string& dtype_module) {
return strings::StrCat(dtype_module, PythonDataTypeString(dtype));
}
string ShapeToPython(const TensorShapeProto& shape) {
if (shape.unknown_rank()) {
return "None";
}
string python = "[";
for (const auto& dim : shape.dim()) {
if (python.size() > 1) strings::StrAppend(&python, ", ");
if (!dim.name().empty()) {
strings::StrAppend(&python, "(", StringToPython(dim.name()), ", ",
dim.size(), ")");
} else {
strings::StrAppend(&python, dim.size());
}
}
strings::StrAppend(&python, "]");
return python;
}
string TensorToPython(const TensorProto& proto) {
return tsl::LegacyUnredactedShortDebugString(proto);
}
string AttrListToPython(const AttrValue& value,
const string& dtype_module = "tf.") {
string ret;
if (value.list().s_size() > 0) {
for (int i = 0; i < value.list().s_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, StringToPython(value.list().s(i)));
}
} else if (value.list().i_size() > 0) {
for (int i = 0; i < value.list().i_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, value.list().i(i));
}
} else if (value.list().f_size() > 0) {
for (int i = 0; i < value.list().f_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, value.list().f(i));
}
} else if (value.list().b_size() > 0) {
for (int i = 0; i < value.list().b_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, value.list().b(i) ? "True" : "False");
}
} else if (value.list().type_size() > 0) {
for (int i = 0; i < value.list().type_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret,
DataTypeToPython(value.list().type(i), dtype_module));
}
} else if (value.list().shape_size() > 0) {
for (int i = 0; i < value.list().shape_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, ShapeToPython(value.list().shape(i)));
}
} else if (value.list().tensor_size() > 0) {
for (int i = 0; i < value.list().tensor_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, TensorToPython(value.list().tensor(i)));
}
} else if (value.list().func_size() > 0) {
for (int i = 0; i < value.list().func_size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, StringToPython(value.list().func(i).name()));
}
}
return ret;
}
string AttrValueToPython(const string& type, const AttrValue& value,
const string& dtype_module) {
if (type == "string") {
return StringToPython(value.s());
} else if (type == "int") {
return strings::StrCat(value.i());
} else if (type == "float") {
if (std::isnan(value.f()) || std::isinf(value.f())) {
return strings::StrCat("float('", value.f(), "')");
} else {
static_assert(FLT_DIG < 10, "FLT_DIG is too big");
std::ostringstream s;
s.imbue(std::locale::classic());
s << std::setprecision(FLT_DIG) << value.f();
if (s.good()) {
return s.str();
}
return strings::StrCat(value.f());
}
} else if (type == "bool") {
return value.b() ? "True" : "False";
} else if (type == "type") {
return DataTypeToPython(value.type(), dtype_module);
} else if (type == "shape") {
return ShapeToPython(value.shape());
} else if (type == "tensor") {
return TensorToPython(value.tensor());
} else if (type == "func") {
return StringToPython(value.func().name());
} else if (absl::StartsWith(type, "list(")) {
return strings::StrCat("[", AttrListToPython(value, dtype_module), "]");
} else {
return "?";
}
}
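// Converts a CamelCase op name to snake_case, with '>' acting as a
// namespace separator; e.g. "FusedBatchNorm" -> "fused_batch_norm" and
// "Experimental>Foo" -> "experimental_foo".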
void GenerateLowerCaseOpName(const string& str, string* result) {
const char joiner = '_';
const char namespace_separator = '>';
const int last_index = str.size() - 1;
for (int i = 0; i <= last_index; ++i) {
const char c = str[i];
if (c == namespace_separator) {
result->push_back(joiner);
continue;
}
if (isupper(c) && (i > 0)) {
if (islower(str[i - 1]) || ((i < last_index) && islower(str[i + 1]))) {
if (!(str[i - 1] == namespace_separator)) {
result->push_back(joiner);
}
}
}
result->push_back(tolower(c));
}
}
static void AddDelimiter(string* append_to, const string& delim) {
if (!append_to->empty()) strings::StrAppend(append_to, delim);
}
const ApiDef::Attr* FindAttr(StringPiece name, const ApiDef& api_def) {
for (int i = 0; i < api_def.attr_size(); ++i) {
if (api_def.attr(i).name() == name) {
return &api_def.attr(i);
}
}
return nullptr;
}
void GenPythonOp::AddExport() {
if (api_def_.visibility() != ApiDef::VISIBLE) {
return;
}
bool op_available_in_latest =
!api_def_.deprecation_version() ||
api_def_.deprecation_version() > kLatestAPIExportVersion;
string names;
string names_v1;
string deprecated_endpoints;
for (const auto& endpoint : api_def_.endpoint()) {
string endpoint_name;
GenerateLowerCaseOpName(endpoint.name(), &endpoint_name);
if (endpoint.deprecated() || endpoint.deprecation_version() > 0) {
AddDelimiter(&deprecated_endpoints, ", ");
strings::StrAppend(&deprecated_endpoints, "'", endpoint_name, "'");
}
AddDelimiter(&names_v1, ", ");
strings::StrAppend(&names_v1, "'", endpoint_name, "'");
if (op_available_in_latest &&
(!endpoint.deprecation_version() ||
endpoint.deprecation_version() > kLatestAPIExportVersion)) {
AddDelimiter(&names, ", ");
strings::StrAppend(&names, "'", endpoint_name, "'");
}
}
if (names != names_v1) {
AddDelimiter(&names, ", ");
strings::StrAppend(&names, "v1=[", names_v1, "]");
}
strings::StrAppend(&result_, "@tf_export(", names, ")\n");
if (!api_def_.deprecation_message().empty()) {
const string instructions = api_def_.deprecation_message();
strings::StrAppend(&result_, "@deprecated(None, '", instructions, "')\n");
}
if (!deprecated_endpoints.empty()) {
strings::StrAppend(&result_, "@deprecated_endpoints(", deprecated_endpoints,
")\n");
}
}
void GenPythonOp::AddDefLine(const string& function_name,
const string& parameters) {
strings::StrAppend(&result_, "def ", function_name, "(", parameters, "):\n");
}
void GenPythonOp::AddDefLine(const string& parameters) {
AddDefLine(function_name_, parameters);
}
void GenPythonOp::AddDocStringDescription() {
string comment;
if (api_def_.summary().empty()) {
comment = "TODO: add doc.\n";
} else {
comment = strings::StrCat(api_def_.summary(), "\n");
if (!api_def_.description().empty()) {
strings::StrAppend(&comment, "\n", Indent(2, 2, api_def_.description()));
}
}
strings::StrAppend(&result_, " r\"\"\"", comment, "\n");
}
void GenPythonOp::AddDocStringArgs() {
strings::StrAppend(&result_, " Args:\n");
}
void GenPythonOp::AddDocStringInputs() {
for (int i = 0; i < api_def_.arg_order_size(); ++i) {
const auto& arg = *FindInputArg(api_def_.arg_order(i), op_def_);
const auto& api_def_arg = *FindInputArg(api_def_.arg_order(i), api_def_);
StringPiece description = api_def_arg.description();
string desc;
if (ConsumeEquals(&description)) {
desc = strings::StrCat(param_names_[i].GetRenameTo(), ": ");
} else {
desc = strings::StrCat(param_names_[i].GetRenameTo(), ": ",
ArgTypeName(op_def_, arg, inferred_attrs_, false));
}
if (!description.empty()) {
      AppendWithinWidth(&desc, description, kRightMargin - 4);
}
strings::StrAppend(&result_, Indent(4, 6, desc));
}
}
void GenPythonOp::AddDocStringAttrs() {
for (const string& name : attrs_) {
const auto& attr = *FindAttr(name, op_def_);
const auto& api_def_attr = *FindAttr(name, api_def_);
string desc =
strings::StrCat(AvoidPythonReserved(api_def_attr.rename_to()), ": ");
static const char* const kAttrTypeName[][2] = {
{"string", "`string`"},
{"list(string)", "list of `strings`"},
{"int", "`int`"},
{"list(int)", "list of `ints`"},
{"float", "`float`"},
{"list(float)", "list of `floats`"},
{"bool", "`bool`"},
{"list(bool)", "list of `bools`"},
{"type", "`tf.DType`"},
{"list(type)", "list of `tf.DTypes`"},
{"shape", "`tf.TensorShape` or list of `ints`"},
{"list(shape)",
"list of shapes (each a `tf.TensorShape` or list of `ints`)"},
{"tensor", "`tf.TensorProto`"},
{"list(tensor)", "list of `tf.TensorProto` objects"},
{"func", "function decorated with @Defun"},
{"list(func)", "list of functions decorated with @Defun"},
};
for (size_t i = 0; i < TF_ARRAYSIZE(kAttrTypeName); ++i) {
if (attr.type() == kAttrTypeName[i][0]) {
string s;
if (api_def_attr.has_default_value()) {
s = strings::StrCat("optional ", kAttrTypeName[i][1]);
} else {
s = kAttrTypeName[i][1];
}
if (s[0] == 'o' || (s[0] == '`' && (s[1] == 'i' || s[1] == 'o'))) {
strings::StrAppend(&desc, "An ", s);
} else {
strings::StrAppend(&desc, "A ", s);
}
break;
}
}
if (attr.has_allowed_values()) {
strings::StrAppend(&desc, " from: `",
AttrListToPython(attr.allowed_values()), "`");
}
if (attr.has_minimum()) {
if (attr.type() == "int") {
strings::StrAppend(&desc, " that is `>= ", attr.minimum(), "`");
} else if (attr.minimum() > 0) {
strings::StrAppend(&desc, " that has length `>= ", attr.minimum(), "`");
}
}
strings::StrAppend(&desc, ".");
if (api_def_attr.has_default_value()) {
strings::StrAppend(
&desc, " Defaults to `",
AttrValueToPython(attr.type(), api_def_attr.default_value()), "`.");
}
if (!api_def_attr.description().empty()) {
AppendWithinWidth(&desc, api_def_attr.description(),
                        kRightMargin - 4);
}
strings::StrAppend(&result_, Indent(4, 6, desc));
}
}
void GenPythonOp::AddDocStringNameArg() {
strings::StrAppend(&result_,
" name: A name for the operation (optional).\n");
}
void GenPythonOp::AddOutputGlobals() {
if (num_outs_ > 1) {
std::vector<string> out_names;
out_names.reserve(num_outs_);
for (int i = 0; i < num_outs_; ++i) {
const string out_name = !api_def_.out_arg(i).rename_to().empty()
? api_def_.out_arg(i).rename_to()
: strings::StrCat("output", i);
out_names.push_back(strings::StrCat("\"", out_name, "\""));
}
strings::StrAppend(&prelude_, "_", AvoidPythonReserved(op_def_.name()),
"Output = collections.namedtuple(\n");
strings::StrAppend(&prelude_, " \"", AvoidPythonReserved(op_def_.name()),
"\",\n");
strings::StrAppend(&prelude_, " [", absl::StrJoin(out_names, ", "),
"])");
strings::StrAppend(&prelude_, "\n\n");
}
strings::StrAppend(&prelude_, "\n");
}
void GenPythonOp::AddDocStringOutputs() {
std::vector<string> output_type_string;
output_type_string.reserve(num_outs_);
for (int i = 0; i < num_outs_; ++i) {
output_type_string.push_back(
ArgTypeName(op_def_, op_def_.output_arg(i), inferred_attrs_, true));
}
strings::StrAppend(&result_, GetReturns(op_def_, output_type_string));
}
void GenPythonOp::AddBody(const string& prefix) {
const string apply_prefix = strings::StrCat(
prefix, "_result = _op_def_lib.apply_op(\"", op_def_.name(), "\", ");
AddBodyNoReturn(apply_prefix);
if (num_outs_ > 1) {
strings::StrAppend(&result_, prefix, "_result = _",
AvoidPythonReserved(op_def_.name()),
"Output._make(_result)\n");
}
strings::StrAppend(&result_, prefix, "return _result\n");
}
void GenPythonOp::AddBodyNoReturn(const string& apply_prefix) {
string args;
for (size_t i = 0; i < param_names_.size(); ++i) {
strings::StrAppend(&args, AvoidPythonReserved(param_names_[i].GetName()),
"=", param_names_[i].GetRenameTo(), ", ");
}
strings::StrAppend(&args, "name=name)");
strings::StrAppend(&result_,
WordWrap(apply_prefix, args, kRightMargin), "\n");
}
string GenPythonOp::FlattenInputs(const std::vector<int>* input_indices,
std::vector<string>* output_sizes) const {
string inputs;
enum { STARTING, WAS_LIST_INPUT, WAS_SOLO_INPUT } inputs_state = STARTING;
const int n = input_indices != nullptr ? input_indices->size()
: op_def_.input_arg_size();
for (int j = 0; j < n; ++j) {
const int i = input_indices ? (*input_indices)[j] : j;
const auto& arg(op_def_.input_arg(i));
const bool is_list =
!arg.type_list_attr().empty() || !arg.number_attr().empty();
if (is_list) {
if (inputs_state == WAS_SOLO_INPUT) {
strings::StrAppend(&inputs, "] + ");
} else if (inputs_state == WAS_LIST_INPUT) {
strings::StrAppend(&inputs, " + ");
}
strings::StrAppend(&inputs, "list(", param_names_[i].GetRenameTo(), ")");
inputs_state = WAS_LIST_INPUT;
if (output_sizes != nullptr) {
if (!arg.number_attr().empty()) {
output_sizes->emplace_back(AttrVarName(arg.number_attr(), nullptr));
} else {
output_sizes->emplace_back(
strings::StrCat("len(", param_names_[i].GetRenameTo(), ")"));
}
}
} else {
if (inputs_state == WAS_SOLO_INPUT) {
strings::StrAppend(&inputs, ", ");
} else if (inputs_state == WAS_LIST_INPUT) {
strings::StrAppend(&inputs, " + [");
} else {
strings::StrAppend(&inputs, "[");
}
strings::StrAppend(&inputs, param_names_[i].GetRenameTo());
inputs_state = WAS_SOLO_INPUT;
if (output_sizes != nullptr) output_sizes->emplace_back();
}
}
if (inputs_state == STARTING) return "[]";
if (inputs_state == WAS_SOLO_INPUT) {
strings::StrAppend(&inputs, "]");
}
return inputs;
}
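// Entry point: collects parameters, inferred attrs, and type annotations,
// then emits the fast-path/graph-mode function followed by the
// "*_eager_fallback" function.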
string GenPythonOp::Code() {
if (api_def_.visibility() == ApiDef::SKIP) {
return "";
}
for (int i = 0; i < api_def_.arg_order_size(); ++i) {
const auto& arg = *FindInputArg(api_def_.arg_order(i), op_def_);
const auto& api_def_arg = *FindInputArg(api_def_.arg_order(i), api_def_);
params_no_default_.emplace_back(api_def_arg.name(),
api_def_arg.rename_to());
if (!arg.type_attr().empty()) {
AddAttrForArg(arg.type_attr(), i);
} else if (!arg.type_list_attr().empty()) {
AddAttrForArg(arg.type_list_attr(), i);
}
if (!arg.number_attr().empty()) {
AddAttrForArg(arg.number_attr(), i);
}
}
for (int i = 0; i < op_def_.attr_size(); ++i) {
const auto& attr(op_def_.attr(i));
const auto& api_def_attr(api_def_.attr(i));
if (inferred_attrs_.find(attr.name()) == inferred_attrs_.end()) {
if (api_def_attr.has_default_value()) {
if (attr.type() == "tensor") {
params_with_default_.emplace_back(
ParamNames(api_def_attr.name(), api_def_attr.rename_to()),
strings::StrCat(
"_execute.make_tensor(",
TensorPBString(api_def_attr.default_value().tensor()), ", \"",
api_def_attr.rename_to(), "\")"));
} else if (attr.type() == "list(tensor)") {
std::vector<string> pbtxt;
for (const auto& pb : api_def_attr.default_value().list().tensor()) {
pbtxt.emplace_back(TensorPBString(pb));
}
params_with_default_.emplace_back(
ParamNames(api_def_attr.name(), api_def_attr.rename_to()),
strings::StrCat("[_execute.make_tensor(_pb, \"",
api_def_attr.rename_to(), "\") for _pb in ",
VectorToTuple(pbtxt), "]"));
} else {
params_with_default_.emplace_back(
ParamNames(api_def_attr.name(), api_def_attr.rename_to()),
AttrValueToPython(attr.type(), api_def_attr.default_value(),
"_dtypes."));
}
} else {
params_no_default_.emplace_back(api_def_attr.name(),
api_def_attr.rename_to());
}
}
}
attrs_.reserve(params_no_default_.size() - op_def_.input_arg_size() +
params_with_default_.size());
for (int i = op_def_.input_arg_size(), end = params_no_default_.size();
i < end; ++i) {
attrs_.push_back(params_no_default_[i].GetName());
}
for (const auto& p : params_with_default_) {
attrs_.push_back(p.first.GetName());
}
param_names_.reserve(params_no_default_.size() + params_with_default_.size());
param_names_.insert(param_names_.begin(), params_no_default_.begin(),
params_no_default_.end());
for (const auto& param_and_default : params_with_default_) {
param_names_.push_back(param_and_default.first);
}
std::unordered_map<string, string> type_annotations = GetTypeAnnotations();
string parameters;
  for (const auto& param : params_no_default_) {
    if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
    strings::StrAppend(&parameters, param.GetRenameTo());
    if (type_annotations.find(param.GetName()) != type_annotations.end()) {
      strings::StrAppend(&parameters, ": ",
                         type_annotations.at(param.GetName()));
    }
  }
  string parameters_with_defaults = parameters;
  for (const auto& param_and_default : params_with_default_) {
    if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
    if (!parameters_with_defaults.empty())
      strings::StrAppend(&parameters_with_defaults, ", ");
    strings::StrAppend(&parameters, param_and_default.first.GetRenameTo());
    strings::StrAppend(&parameters_with_defaults,
                       param_and_default.first.GetRenameTo());
    if (type_annotations.find(param_and_default.first.GetName()) !=
        type_annotations.end()) {
      const string param_type =
          type_annotations.at(param_and_default.first.GetName());
      strings::StrAppend(&parameters, ": ", param_type);
      strings::StrAppend(&parameters_with_defaults, ": ", param_type);
    }
    strings::StrAppend(&parameters_with_defaults, "=",
                       param_and_default.second);
  }
  strings::StrAppend(&parameters, parameters.empty() ? "" : ", ", "name");
  strings::StrAppend(&parameters_with_defaults,
                     parameters_with_defaults.empty() ? "" : ", ", "name=None");
for (int i = 0, end = attrs_.size(); i < end; ++i) {
const string& attr_name = attrs_[i];
const string& attr_api_name =
param_names_[i + op_def_.input_arg_size()].GetRenameTo();
attr_expressions_[attr_name] = attr_api_name;
}
for (int i = 0; i < op_def_.attr_size(); ++i) {
const auto& attr(op_def_.attr(i));
if (attr.type() == "int") {
auto arg_list = attr_to_args_.find(attr.name());
if (arg_list != attr_to_args_.end()) {
AttrVarName(attr.name(), &attr_expressions_);
}
}
}
string num_outputs_expr;
std::vector<string> output_sizes(num_outs_);
GetOutputSizesAndNumOutputsExpr(&output_sizes, &num_outputs_expr);
string eager_not_allowed_error = GetEagerNotAllowedError();
if (!AddEagerFastPathAndGraphCode(parameters_with_defaults, output_sizes,
eager_not_allowed_error,
type_annotations)) {
return result_;
}
if (!AddEagerFallbackCode(parameters, output_sizes, num_outputs_expr,
eager_not_allowed_error, type_annotations)) {
return result_;
}
if (annotator_ != nullptr) {
def_offset_start_ += prelude_.length();
annotator_->AddAnnotation(op_def_, function_name_, def_offset_start_);
}
return prelude_ + result_;
}
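// Builds a map from attr/arg names to Python type annotations; "type"
// attrs become per-op TypeVar names, and a single plain output gets an
// annotation derived from its type attr.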
std::unordered_map<string, string> GenPythonOp::GetTypeAnnotations() {
std::unordered_map<string, string> type_annotations;
for (const auto& attr : op_def_.attr()) {
if (attr.type() == "type") {
const string type_var_name =
AvoidPythonReserved("TV_" + op_def_.name() + "_" + attr.name());
type_annotations[attr.name()] = type_var_name;
} else if (attr.type() == "bool" || attr.type() == "float" ||
attr.type() == "int" || attr.type() == "bytes") {
type_annotations[attr.name()] = attr.type();
} else if (attr.type() == "string") {
type_annotations[attr.name()] = "str";
}
}
for (const auto& arg : op_def_.input_arg()) {
if (!arg.type_list_attr().empty()) continue;
type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations);
}
if (op_def_.output_arg_size() == 1) {
const auto& arg = op_def_.output_arg(0);
if (arg.number_attr().empty() && arg.type_list_attr().empty())
type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations);
}
return type_annotations;
}
void GenPythonOp::GenerateTypeVars(
const std::unordered_map<string, string>& type_annotations) {
bool added_typevar = false;
for (const auto& attr : op_def_.attr()) {
if (attr.type() == "type") {
std::vector<string> allowed_types;
for (int t : attr.allowed_values().list().type()) {
DataType dtype = static_cast<DataType>(t);
const string py_dtype = DataTypeToPython(dtype, "_dtypes.");
allowed_types.emplace_back(dtype_type.at(py_dtype));
}
if (allowed_types.empty()) {
for (std::pair<string, string> map_dtype : dtype_type) {
allowed_types.emplace_back(map_dtype.second);
}
}
std::sort(allowed_types.begin(), allowed_types.end());
string typevar_dtypes;
if (allowed_types.size() == 1) {
strings::StrAppend(&typevar_dtypes, "bound=", allowed_types[0]);
} else {
for (std::vector<string>::iterator it = allowed_types.begin();
it != allowed_types.end(); ++it) {
if (!typevar_dtypes.empty())
strings::StrAppend(&typevar_dtypes, ", ");
strings::StrAppend(&typevar_dtypes, *it);
}
}
const string type_var_name = type_annotations.at(attr.name());
strings::StrAppend(&result_, type_var_name, " = TypeVar(\"",
type_var_name, "\", ", typevar_dtypes, ")\n");
added_typevar = true;
}
}
if (added_typevar) strings::StrAppend(&result_, "\n");
}
void GenPythonOp::AddReturnTypeAnnotation(
const std::unordered_map<string, string>& type_annotations) {
if (op_def_.output_arg_size() == 1) {
const auto& arg = op_def_.output_arg(0);
if (arg.number_attr().empty() && arg.type_list_attr().empty()) {
const string return_type = type_annotations.at(arg.name());
result_.erase(result_.length() - 2);
strings::StrAppend(&result_, " -> ", return_type, ":\n");
}
}
}
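// Emits the graph-mode branch: applies the op via _apply_op_helper,
// records gradients when required, and unflattens/returns the results.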
void GenPythonOp::HandleGraphMode(const string& function_setup,
const std::vector<string>& output_sizes) {
if (api_def_.visibility() == ApiDef::VISIBLE) {
strings::StrAppend(&result_, " else:\n");
AddTypeBasedDispatch(" ");
}
strings::StrAppend(&result_, " # Add nodes to the TensorFlow graph.\n");
strings::StrAppend(&result_, function_setup);
if (api_def_.visibility() == ApiDef::VISIBLE) {
strings::StrAppend(&result_, " try:\n ");
}
strings::StrAppend(
&result_, " _, _, _op, _outputs = _op_def_library._apply_op_helper(\n");
AddBodyNoReturn(strings::StrCat(" \"", op_def_.name(), "\", "));
AddFallbackDispatch(" ");
if (num_outs_ > 0) {
strings::StrAppend(&result_, " _result = _outputs[:]\n");
if (num_outs_ == 1 && op_def_.is_stateful() &&
(!op_def_.output_arg(0).number_attr().empty() ||
!op_def_.output_arg(0).type_list_attr().empty())) {
strings::StrAppend(&result_,
" if not _result:\n"
" return _op\n");
}
strings::StrAppend(&result_, " if _execute.must_record_gradient():\n");
if (op_def_.attr_size() > 0) {
string attr_values;
for (int i = 0; i < op_def_.attr_size(); ++i) {
if (i > 0) strings::StrAppend(&attr_values, ", ");
const auto& attr_name(op_def_.attr(i).name());
if (op_def_.attr(i).type() == "type") {
strings::StrAppend(&attr_values, "\"", attr_name,
"\", _op._get_attr_type(\"", attr_name, "\")");
} else if (op_def_.attr(i).type() == "bool") {
strings::StrAppend(&attr_values, "\"", attr_name,
"\", _op._get_attr_bool(\"", attr_name, "\")");
} else if (op_def_.attr(i).type() == "int") {
strings::StrAppend(&attr_values, "\"", attr_name,
"\", _op._get_attr_int(\"", attr_name, "\")");
} else {
strings::StrAppend(&attr_values, "\"", attr_name,
"\", _op.get_attr(\"", attr_name, "\")");
}
}
strings::StrAppend(&attr_values, ")");
strings::StrAppend(&result_,
WordWrap(" _attrs = (", attr_values, kRightMargin),
"\n");
} else {
strings::StrAppend(&result_, " _attrs = ()\n");
}
strings::StrAppend(&result_, " _inputs_flat = _op.inputs\n");
strings::StrAppend(&result_, " _execute.record_gradient(\n",
" \"", op_def_.name(),
"\", _inputs_flat, _attrs, _result)\n");
    if (num_outs_ == 1 && !output_sizes[0].empty()) {
      // Single list result: leave the flat list unchanged.
    } else if (num_outs_ == 1) {
strings::StrAppend(&result_, " ", "_result, = _result\n");
} else {
Unflatten(" ", output_sizes, "_result", &result_);
strings::StrAppend(&result_, " _result = _",
AvoidPythonReserved(op_def_.name()),
"Output._make(_result)\n");
}
strings::StrAppend(&result_, " return _result\n\n");
} else {
strings::StrAppend(&result_, " return _op\n");
}
}
string GenPythonOp::GetEagerNotAllowedError() {
bool eager_allowed = true;
string ref_arg;
for (int i = 0; i < op_def_.input_arg_size(); ++i) {
const auto& arg = op_def_.input_arg(i);
if (arg.is_ref()) {
eager_allowed = false;
DCHECK_EQ(op_def_.input_arg(i).name(), api_def_.in_arg(i).name());
ref_arg = api_def_.in_arg(i).rename_to();
}
}
for (int i = 0; i < op_def_.output_arg_size(); ++i) {
const auto& arg = op_def_.output_arg(i);
if (arg.is_ref()) {
eager_allowed = false;
DCHECK_EQ(op_def_.output_arg(i).name(), api_def_.out_arg(i).name());
ref_arg = api_def_.out_arg(i).rename_to();
}
}
if (eager_allowed) return "";
return strings::StrCat("raise RuntimeError(\"", op_name_,
" op does not support eager execution. ", "Arg '",
ref_arg, "' is a ref.\")\n");
}
void GenPythonOp::ExpectListArg(const string& indentation,
const string& arg_name, string* output) {
strings::StrAppend(output, indentation, "if not isinstance(", arg_name,
", (list, tuple)):\n", indentation, " raise TypeError(\n",
indentation, " \"Expected list for '", arg_name,
"' argument to \"\n", indentation, " \"'", op_name_,
"' Op, not %r.\" % ", arg_name, ")\n");
}
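// Emits the shared prologue: validates list-valued arguments, applies attr
// defaults, and converts attrs with _execute.make_*; returns false (with a
// "# No definition ..." stub in `function_setup`) for unsupported attr
// types.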
bool GenPythonOp::GetEagerFunctionSetup(const string& indentation,
string* function_setup) {
for (int i = 0; i < op_def_.attr_size(); ++i) {
const auto& attr(op_def_.attr(i));
if (attr.type() == "int") {
auto arg_list = attr_to_args_.find(attr.name());
if (arg_list != attr_to_args_.end()) {
for (auto iter = arg_list->second.begin();
iter != arg_list->second.end(); ++iter) {
const string& arg_api_name = param_names_[*iter].GetRenameTo();
ExpectListArg(indentation, arg_api_name, function_setup);
if (iter == arg_list->second.begin()) {
AddInferredAttr(indentation, attr.name(),
strings::StrCat("len(", arg_api_name, ")"),
function_setup, &attr_expressions_);
} else {
const auto& attr_var = attr_expressions_[attr.name()];
strings::StrAppend(
function_setup, indentation, "if len(", arg_api_name,
") != ", attr_var, ":\n", indentation, " raise ValueError(\n",
indentation, " \"List argument '", arg_api_name, "' to '",
op_name_, "' Op with length %d \"\n", indentation,
" \"must match length %d of argument '",
inferred_attrs_[attr.name()], "'.\" %\n", indentation,
" (len(", arg_api_name, "), ", attr_var, "))\n");
}
}
}
}
}
for (int i = 0, end = attrs_.size(); i < end; ++i) {
const string& attr_name = attrs_[i];
const auto& param = param_names_[i + op_def_.input_arg_size()];
const auto& attr = *FindAttr(attr_name, op_def_);
const string& attr_api_name = param.GetRenameTo();
StringPiece attr_type = attr.type();
attr_expressions_[attr_name] = attr_api_name;
const int default_index = i - (attrs_.size() - params_with_default_.size());
if (default_index >= 0) {
const string& default_value = params_with_default_[default_index].second;
strings::StrAppend(function_setup, indentation, "if ", attr_api_name,
" is None:\n");
strings::StrAppend(function_setup, indentation, " ", attr_api_name,
" = ", default_value, "\n");
}
if (absl::StartsWith(attr_type, "list(")) {
ExpectListArg(indentation, attr_api_name, function_setup);
}
if (attr_type == "string") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_str(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(string)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_str(_s, \"", attr_api_name,
"\") for _s in ", attr_api_name, "]\n");
} else if (attr_type == "int") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_int(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(int)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_int(_i, \"", attr_api_name,
"\") for _i in ", attr_api_name, "]\n");
} else if (attr_type == "float") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_float(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(float)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_float(_f, \"", attr_api_name,
"\") for _f in ", attr_api_name, "]\n");
} else if (attr_type == "bool") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_bool(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(bool)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_bool(_b, \"", attr_api_name,
"\") for _b in ", attr_api_name, "]\n");
} else if (attr_type == "type") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_type(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(type)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_type(_t, \"", attr_api_name,
"\") for _t in ", attr_api_name, "]\n");
} else if (attr_type == "shape") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_shape(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(shape)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_shape(_s, \"", attr_api_name,
"\") for _s in ", attr_api_name, "]\n");
} else if (attr_type == "tensor") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = _execute.make_tensor(", attr_api_name, ", \"",
attr_api_name, "\")\n");
} else if (attr_type == "list(tensor)") {
strings::StrAppend(function_setup, indentation, attr_api_name,
" = [_execute.make_tensor(_t, \"", attr_api_name,
"\") for _t in ", attr_api_name, "]\n");
} else if (attr_type != "func" && attr_type != "list(func)") {
*function_setup =
strings::StrCat("# No definition for ", function_name_,
" since we don't support attrs with type\n"
"# '",
attr_type, "' right now.\n\n");
return false;
}
}
return true;
}
void GenPythonOp::GetOutputSizesAndNumOutputsExpr(
std::vector<string>* output_sizes, string* num_outputs_expr) {
int num_fixed_outputs = 0;
for (int i = 0; i < num_outs_; ++i) {
const auto& arg(op_def_.output_arg(i));
if (!arg.number_attr().empty()) {
if (!num_outputs_expr->empty()) {
strings::StrAppend(num_outputs_expr, " + ");
}
(*output_sizes)[i] = attr_expressions_[arg.number_attr()];
strings::StrAppend(num_outputs_expr, (*output_sizes)[i]);
} else if (!arg.type_list_attr().empty()) {
if (!num_outputs_expr->empty()) {
strings::StrAppend(num_outputs_expr, " + ");
}
const auto iter = inferred_attrs_.find(arg.type_list_attr());
if (iter == inferred_attrs_.end()) {
(*output_sizes)[i] = strings::StrCat(
"len(", attr_expressions_[arg.type_list_attr()], ")");
} else {
(*output_sizes)[i] = strings::StrCat("len(", iter->second, ")");
}
strings::StrAppend(num_outputs_expr, (*output_sizes)[i]);
} else {
++num_fixed_outputs;
}
}
if (num_fixed_outputs > 0) {
if (!num_outputs_expr->empty()) {
strings::StrAppend(num_outputs_expr, " + ");
}
strings::StrAppend(num_outputs_expr, num_fixed_outputs);
} else if (num_outputs_expr->empty()) {
*num_outputs_expr = "0";
}
}
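// Emits the eager-fallback epilogue: optional gradient recording, result
// unflattening/namedtuple packing, and the final return.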
void GenPythonOp::AddEagerFunctionTeardown(
const string& indentation, const std::vector<string>& output_sizes,
bool execute_record_gradient) {
if (num_outs_ > 0) {
if (execute_record_gradient) {
strings::StrAppend(&result_, indentation,
"if _execute.must_record_gradient():\n");
strings::StrAppend(&result_, indentation, " _execute.record_gradient(\n",
" \"", op_def_.name(),
"\", _inputs_flat, _attrs, _result)\n");
}
    if (num_outs_ == 1 && !output_sizes[0].empty()) {
      // Single list result: leave the flat list unchanged.
    } else if (num_outs_ == 1) {
strings::StrAppend(&result_, indentation, "_result, = _result\n");
} else {
Unflatten(indentation, output_sizes, "_result", &result_);
strings::StrAppend(&result_, indentation, "_result = _",
AvoidPythonReserved(op_def_.name()),
"Output._make(_result)\n");
}
} else {
strings::StrAppend(&result_, indentation, "_result = None\n");
}
strings::StrAppend(&result_, indentation, "return _result\n\n");
}
bool GenPythonOp::AddEagerFastPathAndGraphCode(
const string& parameters, const std::vector<string>& output_sizes,
const string& eager_not_allowed_error,
const std::unordered_map<string, string>& type_annotations) {
GenerateTypeVars(type_annotations);
if (api_def_.visibility() == ApiDef::VISIBLE) {
strings::StrAppend(&result_, "@_dispatch.add_fallback_dispatch_list\n");
strings::StrAppend(&result_, "@_dispatch.add_type_based_api_dispatcher\n");
}
AddExport();
if (annotator_ != nullptr) {
def_offset_start_ = result_.length() + 4;
}
AddDefLine(function_name_, parameters);
AddReturnTypeAnnotation(type_annotations);
AddDocStringDescription();
AddDocStringArgs();
AddDocStringInputs();
AddDocStringAttrs();
AddDocStringNameArg();
AddOutputGlobals();
AddDocStringOutputs();
strings::StrAppend(&result_, " \"\"\"\n");
strings::StrAppend(&result_,
" _ctx = _context._context or _context.context()\n"
" tld = _ctx._thread_local_data\n",
" if tld.is_eager:", "\n");
if (eager_not_allowed_error.empty()) {
AddEagerFastPathExecute();
} else {
strings::StrAppend(&result_, " ", eager_not_allowed_error);
}
string function_setup;
if (!GetEagerFunctionSetup(" ", &function_setup)) {
result_ = function_setup;
return false;
}
HandleGraphMode(function_setup, output_sizes);
AddRawOpExport(parameters);
AddTypeBasedDispatcherAlias();
strings::StrAppend(&result_, "\n\n");
return true;
}
bool GenPythonOp::AddEagerFallbackCode(
const string& parameters, const std::vector<string>& output_sizes,
const string& num_outputs_expr, const string& eager_not_allowed_error,
const std::unordered_map<string, string>& type_annotations) {
AddDefLine(
strings::StrCat(function_name_, kEagerFallbackSuffix),
strings::StrCat(parameters, parameters.empty() ? "" : ", ", "ctx"));
AddReturnTypeAnnotation(type_annotations);
if (!eager_not_allowed_error.empty()) {
strings::StrAppend(&result_, " ", eager_not_allowed_error);
return true;
}
string function_setup;
if (!GetEagerFunctionSetup(" ", &function_setup)) {
result_ = function_setup;
return false;
}
strings::StrAppend(&result_, function_setup);
AddEagerInferredAttrs(" ");
AddEagerInputCasts(" ");
strings::StrAppend(
&result_, " _inputs_flat = ", FlattenInputs(nullptr, nullptr), "\n");
AddEagerAttrs(" ");
AddEagerExecute(" ", num_outputs_expr);
AddEagerFunctionTeardown(" ", output_sizes,
                           true /* execute_record_gradient */);
return true;
}
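// Emits the TFE_Py_FastPathExecute call and the exception handlers that
// either surface the error or fall back to the "*_eager_fallback" function
// (and, symbolically, to graph mode).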
void GenPythonOp::AddEagerFastPathExecute() {
string fastpath_execute_params =
strings::StrCat("_ctx, \"", op_def_.name(), "\", ", "name");
string fallback_params;
for (int i = 0; i < api_def_.in_arg_size(); i++) {
const string param_name = param_names_[i].GetRenameTo();
strings::StrAppend(&fastpath_execute_params, ", ", param_name);
if (!fallback_params.empty()) strings::StrAppend(&fallback_params, ", ");
strings::StrAppend(&fallback_params, param_name);
}
for (const auto& attr : api_def_.attr()) {
if (inferred_attrs_.find(attr.name()) == inferred_attrs_.end()) {
strings::StrAppend(&fastpath_execute_params, ", \"", attr.name(), "\", ",
attr.rename_to());
if (!fallback_params.empty()) strings::StrAppend(&fallback_params, ", ");
strings::StrAppend(&fallback_params, attr.rename_to(), "=",
attr.rename_to());
}
}
if (!fallback_params.empty()) strings::StrAppend(&fallback_params, ", ");
strings::StrAppend(&fallback_params, "name=name");
strings::StrAppend(&result_, " try:\n");
strings::StrAppend(
&result_, " ", "_result = pywrap_tfe.TFE_Py_FastPathExecute(\n",
WordWrap(strings::StrCat(" "),
strings::StrCat(fastpath_execute_params, ")"), kRightMargin),
"\n");
if (op_def_.output_arg_size() > 1) {
const string output_tuple_name =
strings::StrCat("_", AvoidPythonReserved(op_def_.name()), "Output");
strings::StrAppend(&result_, " ", "_result = ", output_tuple_name,
"._make(_result)\n");
}
strings::StrAppend(&result_, " ", "return _result\n");
if (!fallback_params.empty()) strings::StrAppend(&fallback_params, ", ");
strings::StrAppend(&fallback_params, "ctx=_ctx");
strings::StrAppend(&result_, " ",
"except _core._NotOkStatusException as e:\n");
strings::StrAppend(&result_, " ",
"_ops.raise_from_not_ok_status(e, name)\n");
strings::StrAppend(&result_, " ", "except _core._FallbackException:\n");
strings::StrAppend(&result_, " pass\n");
strings::StrAppend(&result_, " try:\n");
AddTypeBasedDispatch(" ");
strings::StrAppend(
&result_, " ", "return ", function_name_, kEagerFallbackSuffix,
"(\n",
WordWrap(strings::StrCat(" "),
strings::StrCat(fallback_params, ")"), kRightMargin),
"\n");
strings::StrAppend(&result_, " except _core._SymbolicException:\n");
strings::StrAppend(&result_,
" pass # Add nodes to the TensorFlow graph.\n");
AddFallbackDispatch(" ");
}
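// Emits code that infers "type" and "list(type)" attrs from the actual eager
// inputs (_execute.args_to_matching_eager and friends), rebinding the input
// variables to the converted tensors.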
void GenPythonOp::AddEagerInferredAttrs(const string& indentation) {
for (int i = 0; i < op_def_.attr_size(); ++i) {
const auto& attr(op_def_.attr(i));
const auto& api_def_attr(api_def_.attr(i));
auto arg_list = attr_to_args_.find(attr.name());
if (arg_list != attr_to_args_.end()) {
if (attr.type() == "type") {
std::vector<string> output_sizes;
const string flattened =
FlattenInputs(&arg_list->second, &output_sizes);
string conversion = strings::StrCat("_execute.args_to_matching_eager(",
flattened, ", ctx");
strings::StrAppend(&conversion, ", [");
for (int t : attr.allowed_values().list().type()) {
DataType dtype = static_cast<DataType>(t);
const string py_dtype = DataTypeToPython(dtype, "_dtypes.");
strings::StrAppend(&conversion, py_dtype, ", ");
}
strings::StrAppend(&conversion, "]");
if (attr.has_default_value()) {
strings::StrAppend(
&conversion, ", ",
AttrValueToPython(attr.type(), api_def_attr.default_value(),
"_dtypes."));
}
strings::StrAppend(&conversion, ")");
const string var_name = AttrVarName(attr.name(), &attr_expressions_);
if (output_sizes.size() == 1) {
const string inputs_var =
param_names_[arg_list->second.front()].GetRenameTo();
if (output_sizes.front().empty()) {
strings::StrAppend(&result_, indentation, var_name, ", (",
inputs_var, ",) = ", conversion, "\n");
} else {
strings::StrAppend(&result_, indentation, var_name, ", ",
inputs_var, " = ", conversion, "\n");
}
} else {
const string inputs_var = strings::StrCat("_inputs_", attr.name());
strings::StrAppend(&result_, indentation, var_name, ", ", inputs_var,
" = ", conversion, "\n");
Unflatten(indentation, output_sizes, inputs_var, &result_);
std::vector<string> p;
for (int j : arg_list->second) {
p.emplace_back(param_names_[j].GetRenameTo());
}
strings::StrAppend(&result_, indentation, VectorToTuple(p), " = ",
inputs_var, "\n");
}
} else if (attr.type() == "list(type)") {
const string var_name = AttrVarName(attr.name(), &attr_expressions_);
string inputs_var;
string conversion;
if (arg_list->second.size() > 1) {
std::vector<string> lists;
for (auto iter = arg_list->second.begin();
iter != arg_list->second.end(); ++iter) {
lists.push_back(param_names_[*iter].GetRenameTo());
}
inputs_var = VectorToTuple(lists);
conversion = "_execute.args_to_mixed_eager_tensors";
} else {
inputs_var = param_names_[arg_list->second.front()].GetRenameTo();
conversion = "_execute.convert_to_mixed_eager_tensors";
}
strings::StrAppend(&result_, indentation, var_name, ", ", inputs_var,
" = ", conversion, "(", inputs_var, ", ctx)\n");
}
}
}
}
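// Emits _ops.convert_to_tensor / convert_n_to_tensor casts for inputs whose
// dtype is fixed by the op def rather than driven by a type attr.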
void GenPythonOp::AddEagerInputCasts(const string& indentation) {
for (int i = 0; i < op_def_.input_arg_size(); ++i) {
const auto& arg(op_def_.input_arg(i));
if (!arg.type_attr().empty() || !arg.type_list_attr().empty()) continue;
const string& param = param_names_[i].GetRenameTo();
const string fn = arg.number_attr().empty() ? "" : "n_";
const string dtype = DataTypeToPython(arg.type(), "_dtypes.");
strings::StrAppend(&result_, indentation, param, " = _ops.convert_", fn,
"to_tensor(", param, ", ", dtype, ")\n");
}
}
void GenPythonOp::AddEagerAttrs(const string& indentation) {
if (op_def_.attr_size() > 0) {
string attr_values;
for (int i = 0; i < op_def_.attr_size(); ++i) {
if (i > 0) strings::StrAppend(&attr_values, ", ");
const auto& attr_name(op_def_.attr(i).name());
strings::StrAppend(&attr_values, "\"", attr_name, "\", ",
attr_expressions_[attr_name]);
}
strings::StrAppend(&attr_values, ")");
strings::StrAppend(
&result_,
WordWrap(indentation, strings::StrCat("_attrs = (", attr_values),
kRightMargin),
"\n");
} else {
strings::StrAppend(&result_, indentation, "_attrs = None\n");
}
}
void GenPythonOp::AddEagerExecute(const string& indentation,
const string& num_outputs_expr) {
const string return_prefix =
strings::StrCat(indentation, "_result = _execute.execute(");
const string return_args = strings::StrCat(
"b\"", op_def_.name(), "\", ", num_outputs_expr,
", inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name)");
strings::StrAppend(&result_,
WordWrap(return_prefix, return_args, kRightMargin), "\n");
}
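// Emits the TypeError/ValueError handler that retries the call through
// _dispatch.dispatch and re-raises when no dispatcher claims it.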
void GenPythonOp::AddFallbackDispatch(const string& prefix) {
if (api_def_.visibility() != ApiDef::VISIBLE) return;
strings::StrAppend(&result_, prefix, "except (TypeError, ValueError):\n");
strings::StrAppend(&result_, prefix, " _result = _dispatch.dispatch(\n");
AddBodyNoReturn(strings::StrCat(prefix, " ", function_name_,
", "
"(), dict("));
strings::StrAppend(&result_, prefix, " )\n");
strings::StrAppend(&result_, prefix,
" if _result is not "
"_dispatch.OpDispatcher.NOT_SUPPORTED:\n");
strings::StrAppend(&result_, prefix, " return _result\n");
strings::StrAppend(&result_, prefix, " raise\n");
}
void GenPythonOp::AddTypeBasedDispatcherAlias() {
if (api_def_.visibility() == ApiDef::VISIBLE) {
strings::StrAppend(&result_, "_dispatcher_for_", function_name_, " = ",
function_name_, "._tf_type_based_dispatcher.Dispatch\n");
}
}
void GenPythonOp::AddTypeBasedDispatch(const string& prefix) {
if (api_def_.visibility() != ApiDef::VISIBLE) return;
std::string args("(");
for (const auto& name : param_names_) {
strings::StrAppend(&args, name.GetRenameTo(), ", ");
}
strings::StrAppend(&args, "name,), None");
strings::StrAppend(
&result_, prefix, "_result = ", "_dispatcher_for_", function_name_, "(\n",
WordWrap(strings::StrCat(prefix, " "), args, kRightMargin), ")\n");
strings::StrAppend(&result_, prefix, "if _result is not NotImplemented:\n",
prefix, " return _result\n");
}
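// Exports the unmodified op under tf.raw_ops by wrapping the generated
// function with _ops.to_raw_op.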
void GenPythonOp::AddRawOpExport(const string& parameters) {
const string raw_function_name = AvoidPythonReserved(op_def_.name());
strings::StrAppend(&result_, raw_function_name, " = tf_export(\"raw_ops.",
raw_function_name, "\")", "(_ops.to_raw_op(",
function_name_, "))\n");
}
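// Assembles the full generated module: module docstring, imports, one wrapper
// per op (SKIP ops are dropped; hidden ops may get a leading underscore), and
// a trailing Kythe metadata comment when registration offsets are available.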
string GetPythonOpsImpl(const OpList& ops, const ApiDefMap& api_defs,
const OpRegOffsets& op_reg_offsets,
absl::Span<const string> hidden_ops,
absl::Span<const string> source_file_list) {
python_op_gen_internal::GeneratedCodeAnnotator annotator;
bool annotate = !op_reg_offsets.offsets().empty();
string result;
strings::StrAppend(&result, R"("""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
)");
if (!source_file_list.empty()) {
strings::StrAppend(&result, "Original C++ source file: ");
strings::StrAppend(&result, absl::StrJoin(source_file_list, ", "));
strings::StrAppend(&result, "\n");
}
strings::StrAppend(&result, R"("""
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.security.fuzzing.py import annotation_types as _atypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from typing import TypeVar, List, Any
from typing_extensions import Annotated
)");
for (const auto& op_def : ops.op()) {
const auto* api_def = api_defs.GetApiDef(op_def.name());
if (api_def->visibility() == ApiDef::SKIP) {
continue;
}
bool is_hidden = api_def->visibility() == ApiDef::HIDDEN;
bool hidden_by_api_def = is_hidden;
if (!is_hidden) {
for (const string& hidden : hidden_ops) {
if (op_def.name() == hidden) {
is_hidden = true;
break;
}
}
}
string function_name;
GenerateLowerCaseOpName(op_def.name(), &function_name);
bool is_reserved = IsPythonReserved(function_name);
if (is_hidden) {
if (!hidden_by_api_def || is_reserved ||
IsOpWithUnderscorePrefix(function_name)) {
function_name = strings::StrCat("_", function_name);
}
} else if (is_reserved) {
continue;
}
if (annotate) {
annotator.SetBase(result.length());
}
strings::StrAppend(&result,
GetEagerPythonOp(op_def, *api_def, function_name,
annotate ? &annotator : nullptr));
}
if (annotate) {
annotator.FillSourceOffsets(op_reg_offsets);
strings::StrAppend(&result, annotator.BuildKytheMetadata());
}
return result;
}
}
string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs,
const OpRegOffsets& op_reg_offsets,
absl::Span<const string> hidden_ops,
absl::Span<const string> source_file_list) {
return GetPythonOpsImpl(ops, api_defs, op_reg_offsets, hidden_ops,
source_file_list);
}
void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs,
const OpRegOffsets& op_reg_offsets,
absl::Span<const string> hidden_ops,
absl::Span<const string> source_file_list) {
printf("%s", GetPythonOpsImpl(ops, api_defs, op_reg_offsets, hidden_ops,
source_file_list)
.c_str());
}
string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) {
OpList ops;
ops.ParseFromArray(op_list_buf, op_list_len);
ApiDefMap api_def_map(ops);
return GetPythonOpsImpl(ops, api_def_map, OpRegOffsets(), {}, {});
}
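// Returns the annotation for a single-tensor arg: the attr's TypeVar when the
// dtype is attr-driven, otherwise the fixed _atypes name looked up via the
// dtype_type table defined earlier in this file.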
string GetSingleTensorArgAnnotation(
const OpDef::ArgDef& arg,
const std::unordered_map<string, string>& type_annotations) {
if (!arg.type_attr().empty()) {
return type_annotations.at(arg.type_attr());
} else {
const string py_dtype = DataTypeToPython(arg.type(), "_dtypes.");
return dtype_type.at(py_dtype);
}
}
string GetArgAnnotation(
const OpDef::ArgDef& arg,
const std::unordered_map<string, string>& type_annotations) {
if (!arg.number_attr().empty()) {
return strings::StrCat("Annotated[List[Any], ",
GetSingleTensorArgAnnotation(arg, type_annotations),
"]");
}
return strings::StrCat("Annotated[Any, ",
GetSingleTensorArgAnnotation(arg, type_annotations),
"]");
}
} | #include "tensorflow/python/framework/python_op_gen.h"
#include <unordered_set>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/python/framework/kythe_metadata.pb.h"
#include "tensorflow/python/framework/op_reg_offset.pb.h"
namespace tensorflow {
namespace {
void ExpectHasSubstr(const string& s, const string& expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'Generated ops "
<< " does not contain '" << expected << "'";
}
void ExpectDoesNotHaveSubstr(const string& s, const string& expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'Generated ops contains '" << expected << "'";
}
void ExpectSubstrOrder(const string& s, const string& before,
const string& after) {
  size_t before_pos = s.find(before);
  size_t after_pos = s.find(after);
  ASSERT_NE(std::string::npos, before_pos);
  ASSERT_NE(std::string::npos, after_pos);
  EXPECT_LT(before_pos, after_pos)
      << "'" << before << "' is not before '" << after << "'";
}
TEST(PythonOpGen, TypeAnnotateAllOps) {
OpList ops;
OpRegistry::Global()->Export(false, &ops);
ApiDefMap api_def_map(ops);
string code =
GetPythonOps(ops, api_def_map, OpRegOffsets(), {},
{});
const string all_types =
", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, "
"_atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, "
"_atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, "
"_atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, "
"_atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, "
"_atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, "
"_atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, "
"_atypes.Variant)";
const string fake_param_typevar =
"TV_FakeParam_dtype = TypeVar(\"TV_FakeParam_dtype\"" + all_types;
const string fake_param =
"def fake_param_eager_fallback(dtype: TV_FakeParam_dtype, shape, name, "
"ctx) -> Annotated[Any, TV_FakeParam_dtype]:";
const string fake_param_fallback =
"def fake_param_eager_fallback(dtype: TV_FakeParam_dtype, shape, name, "
"ctx) -> Annotated[Any, TV_FakeParam_dtype]:";
ExpectHasSubstr(code, fake_param_typevar);
ExpectHasSubstr(code, fake_param);
ExpectHasSubstr(code, fake_param_fallback);
const string to_bool_typevar =
"TV_ToBool_T = TypeVar(\"TV_ToBool_T\"" + all_types;
const string to_bool_ =
"def to_bool(input: Annotated[Any, TV_ToBool_T], "
"name=None) -> "
"Annotated[Any, _atypes.Bool]:";
const string to_bool_fallback =
"def to_bool_eager_fallback(input: "
"Annotated[Any, TV_ToBool_T], name, ctx) "
"-> Annotated[Any, _atypes.Bool]:";
ExpectHasSubstr(code, to_bool_typevar);
ExpectHasSubstr(code, to_bool_);
ExpectHasSubstr(code, to_bool_fallback);
}
TEST(PythonOpGen, TypeAnnotateSingleTypeTensor) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Bar"
input_arg {
name: "x"
type: DT_STRING
}
input_arg {
name: "y"
type: DT_QINT8
}
output_arg {
name: "output"
type: DT_BOOL
}
summary: "Summary for op Bar."
description: "Description for op Bar."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string typed_bar =
"def bar(x: Annotated[Any, _atypes.String], y: "
"Annotated[Any, _atypes.QInt8], "
"name=None) -> Annotated[Any, _atypes.Bool]:";
ExpectHasSubstr(code, typed_bar);
const string untyped_bar = "def bar(x, y, name=None):";
ExpectDoesNotHaveSubstr(code, untyped_bar);
}
TEST(PythonOpGen, TypeAnnotateMultiTypeTensor) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "y"
type_attr: "T2"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
}
attr {
name: "T2"
type: "type"
allowed_values {
list {
type: DT_STRING
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string typed_foo =
"def foo(x: Annotated[Any, TV_Foo_T], y: "
"Annotated[Any, TV_Foo_T2], name=None) "
"-> Annotated[Any, TV_Foo_T]:";
ExpectHasSubstr(code, typed_foo);
}
TEST(PythonOpGen, GenerateCorrectTypeVars) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "y"
type_attr: "T2"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
}
attr {
name: "T2"
type: "type"
allowed_values {
list {
type: DT_STRING
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string typevars_foo = R"(
TV_Foo_T = TypeVar("TV_Foo_T", _atypes.Int8, _atypes.UInt8)
TV_Foo_T2 = TypeVar("TV_Foo_T2", _atypes.Float32, _atypes.Float64, _atypes.String)
)";
ExpectHasSubstr(code, typevars_foo);
}
TEST(PythonOpGen, TypeAnnotateFallback) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "y"
type_attr: "T2"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
}
attr {
name: "T2"
type: "type"
allowed_values {
list {
type: DT_STRING
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string typed_foo_fallback =
"def foo_eager_fallback(x: Annotated[Any, TV_Foo_T], y: "
"Annotated[Any, TV_Foo_T2], name, ctx) -> "
"Annotated[Any, TV_Foo_T]:";
ExpectHasSubstr(code, typed_foo_fallback);
}
TEST(PythonOpGen, GenerateTypeVarAboveOp) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "y"
type_attr: "T2"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
}
attr {
name: "T2"
type: "type"
allowed_values {
list {
type: DT_STRING
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string typevar_foo = "TV_Foo_";
const string def_foo = "def foo";
ExpectSubstrOrder(code, typevar_foo, def_foo);
}
TEST(PythonOpGen, TypeAnnotateDefaultParams) {
constexpr char kBaseOpDef[] = R"(
op {
name: "FooBar"
input_arg {
name: "x"
type: DT_FLOAT
}
output_arg {
name: "output"
type: DT_BOOL
}
attr {
name: "t"
type: "type"
allowed_values {
list {
type: DT_HALF
type: DT_INT8
}
}
}
attr {
name: "var1"
type: "bool"
default_value {
b: false
}
}
attr {
name: "var2"
type: "int"
default_value {
i: 0
}
}
summary: "Summary for op FooBar."
description: "Description for op FooBar."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string params =
"def foo_bar(x: Annotated[Any, _atypes.Float32], t: "
"TV_FooBar_t, "
"var1:bool=False, var2:int=0, name=None)";
const string params_fallback =
"def foo_bar_eager_fallback(x: "
"Annotated[Any, _atypes.Float32], t: "
"TV_FooBar_t, var1: bool, var2: int, name, ctx)";
ExpectHasSubstr(code, params);
ExpectHasSubstr(code, params_fallback);
}
TEST(PythonOpGen, NoTypingSequenceTensors) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Baz"
input_arg {
name: "inputs"
number_attr: "N"
type_list_attr: "T"
}
output_arg {
name: "output1"
type: DT_BOOL
}
output_arg {
name: "output2"
type: DT_BOOL
}
attr {
name: "T"
type: "bool"
}
attr {
name: "N"
type: "int"
}
summary: "Summary for op Baz."
description: "Description for op Baz."
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string code =
GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {},
{});
const string baz_def_line = "def baz(inputs, name=None):";
ExpectHasSubstr(code, baz_def_line);
}
TEST(PythonOpGen, InsertCommentsForSourceFileLocation) {
std::vector<string> source_file_list{"some_ops.cc", "another_ops.cc"};
OpList op_defs;
ApiDefMap api_def_map(op_defs);
string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(),
{}, source_file_list);
ExpectHasSubstr(code,
"Original C++ source file: some_ops.cc, another_ops.cc");
}
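// Splits the "# kythe.proto.metadata.GeneratedCodeInfo:<base64>" comment and
// decodes the proto for assertions.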
GeneratedCodeInfo DecodeAnnotation(string anno) {
std::vector<string> sp = absl::StrSplit(anno, ':');
string gci_str;
absl::Base64Unescape(sp[1], &gci_str);
GeneratedCodeInfo gci;
gci.ParseFromString(gci_str);
return gci;
}
TEST(PythonOpGen, GenerateMetadataWhenOpRegOffsetsIsPresent) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Baz"
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
OpRegOffsets offsets;
auto* offset = offsets.add_offsets();
offset->set_name("Baz");
offset->set_filepath("some_ops.cc");
offset->set_start(0);
offset->set_end(3);
string code = GetPythonOps(op_defs, api_def_map, offsets, {}, {});
  int target_begin = code.find("def baz") + 4;
int target_end = target_begin + 3;
std::vector<string> sp = absl::StrSplit(code, '\n');
string last_line = sp.back();
ASSERT_TRUE(absl::StrContains(last_line,
"# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo gci = DecodeAnnotation(last_line);
EXPECT_EQ(gci.meta_size(), 1);
EXPECT_EQ(gci.meta(0).source_begin(), 0);
EXPECT_EQ(gci.meta(0).source_end(), 3);
EXPECT_EQ(gci.meta(0).target_begin(), target_begin);
EXPECT_EQ(gci.meta(0).target_end(), target_end);
}
TEST(PythonOpGen, GenerateMetadataForMultipleOutputOp) {
constexpr char kBaseOpDef[] = R"(
op {
name: "Baz"
output_arg {
name: "output1"
type: DT_BOOL
}
output_arg {
name: "output2"
type: DT_BOOL
}
}
)";
OpList op_defs;
OpRegistry::Global()->Export(false, &op_defs);
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
OpRegOffsets offsets;
auto* offset = offsets.add_offsets();
offset->set_name("Baz");
offset->set_filepath("some_ops.cc");
offset->set_start(0);
offset->set_end(3);
string code = GetPythonOps(op_defs, api_def_map, offsets, {}, {});
  int target_begin = code.find("def baz") + 4;
int target_end = target_begin + 3;
std::vector<string> sp = absl::StrSplit(code, '\n');
string last_line = sp.back();
ASSERT_TRUE(absl::StrContains(last_line,
"# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo gci = DecodeAnnotation(last_line);
EXPECT_EQ(gci.meta_size(), 1);
EXPECT_EQ(gci.meta(0).source_begin(), 0);
EXPECT_EQ(gci.meta(0).source_end(), 3);
EXPECT_EQ(gci.meta(0).target_begin(), target_begin);
EXPECT_EQ(gci.meta(0).target_end(), target_end);
}
TEST(PythonOpGen, NotGenerateMetadataWhenOpRegOffsetsIsEmpty) {
OpList op_defs;
ApiDefMap api_def_map(op_defs);
string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {});
ExpectDoesNotHaveSubstr(code, "# kythe.proto.metadata.GeneratedCodeInfo:");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0a1cf107-125e-40dd-85f7-591c64563636 | cpp | tensorflow/tensorflow | python_op_gen_annotator | tensorflow/python/framework/python_op_gen_annotator.cc | tensorflow/python/framework/python_op_gen_annotator_test.cc | #include "tensorflow/python/framework/python_op_gen_annotator.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/python/framework/kythe_metadata.pb.h"
#include "tensorflow/python/framework/op_reg_offset.pb.h"
namespace tensorflow {
namespace python_op_gen_internal {
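// Records the byte range of a generated function name (relative to the
// current base position) so it can later be linked to the op's C++
// registration site.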
void GeneratedCodeAnnotator::AddAnnotation(const OpDef& op_def,
absl::string_view function_name,
uint32_t offset_start) {
const uint32_t start_byte = base_pos_ + offset_start;
const uint32_t end_byte = start_byte + function_name.size();
byte_offsets_map_[op_def.name()].generated_start = start_byte;
byte_offsets_map_[op_def.name()].generated_end = end_byte;
}
void GeneratedCodeAnnotator::FillSourceOffsets(
const OpRegOffsets& op_reg_offsets) {
for (const OpRegOffset& offset : op_reg_offsets.offsets()) {
if (byte_offsets_map_.find(offset.name()) != byte_offsets_map_.end()) {
byte_offsets_map_[offset.name()].file_path = offset.filepath();
byte_offsets_map_[offset.name()].source_start = offset.start();
byte_offsets_map_[offset.name()].source_end = offset.end();
}
}
}
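// Serializes the collected source<->generated mappings into a base64-encoded
// Kythe GeneratedCodeInfo comment; ops without a known source file path are
// skipped.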
string GeneratedCodeAnnotator::BuildKytheMetadata() {
GeneratedCodeInfo generated_code_info;
generated_code_info.set_type(GeneratedCodeInfo::KYTHE0);
for (const auto& [name, offsets] : byte_offsets_map_) {
if (offsets.file_path.empty()) {
continue;
}
MappingRule* meta = generated_code_info.add_meta();
meta->set_type(MappingRule::ANCHOR_ANCHOR);
meta->set_edge("/kythe/edge/imputes");
meta->set_source_begin(offsets.source_start);
meta->set_source_end(offsets.source_end);
meta->set_target_begin(offsets.generated_start);
meta->set_target_end(offsets.generated_end);
VName* vname = meta->mutable_source_vname();
vname->set_signature(absl::StrFormat(
"@%d:%d@tensorflow_op#%s#%s#%s", offsets.source_start,
offsets.source_end, name, kKytheCorpus, offsets.file_path));
vname->set_corpus(std::string(kKytheCorpus));
vname->set_path(offsets.file_path);
vname->set_language("c++");
}
return "# kythe.proto.metadata.GeneratedCodeInfo:" +
absl::Base64Escape(generated_code_info.SerializeAsString());
}
}
} | #include "tensorflow/python/framework/python_op_gen_annotator.h"
#include <utility>
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/python/framework/kythe_metadata.pb.h"
namespace tensorflow {
namespace python_op_gen_internal {
namespace {
using ::testing::StartsWith;
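// Decodes the base64 payload after the "...GeneratedCodeInfo:" prefix back
// into a proto.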
GeneratedCodeInfo ParseMetadata(string metadata) {
GeneratedCodeInfo generated_code_info;
std::pair<string, string> p = absl::StrSplit(metadata, ':');
string serialized_generated_code_info;
absl::Base64Unescape(p.second, &serialized_generated_code_info);
generated_code_info.ParseFromString(serialized_generated_code_info);
return generated_code_info;
}
TEST(PythonOpGenAnnotatorTest, AddAnnotationWithoutSourceOffsets) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
fakeOpDef.set_name("fake_op");
annotator.AddAnnotation(fakeOpDef, "fake_op", 0);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
GeneratedCodeInfo expected;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString("type: KYTHE0", &expected));
EXPECT_EQ(actual.SerializeAsString(), expected.SerializeAsString());
}
TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsets) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
fakeOpDef.set_name("fake_op");
OpRegOffsets fakeOffsets;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
R"pb(
offsets {
name: "fake_op",
filepath: "file/path/to/fake_op.cc",
start: 7,
end: 11,
}
)pb",
&fakeOffsets));
annotator.AddAnnotation(fakeOpDef, "fake_op", 100);
annotator.FillSourceOffsets(fakeOffsets);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR);
EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes");
EXPECT_EQ(
actual.meta(0).source_vname().signature(),
absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc",
kKytheCorpus));
EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc");
EXPECT_EQ(actual.meta(0).source_begin(), 7);
EXPECT_EQ(actual.meta(0).source_end(), 11);
EXPECT_EQ(actual.meta(0).target_begin(), 100);
EXPECT_EQ(actual.meta(0).target_end(), 107);
}
TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsetsAndNonZeroBase) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
fakeOpDef.set_name("fake_op");
OpRegOffsets fakeOffsets;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
R"pb(
offsets {
name: "fake_op",
filepath: "file/path/to/fake_op.cc",
start: 7,
end: 11,
}
)pb",
&fakeOffsets));
annotator.SetBase(10);
annotator.AddAnnotation(fakeOpDef, "fake_op", 100);
annotator.FillSourceOffsets(fakeOffsets);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR);
EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes");
EXPECT_EQ(
actual.meta(0).source_vname().signature(),
absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc",
kKytheCorpus));
EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc");
EXPECT_EQ(actual.meta(0).source_begin(), 7);
EXPECT_EQ(actual.meta(0).source_end(), 11);
EXPECT_EQ(actual.meta(0).target_begin(), 110);
EXPECT_EQ(actual.meta(0).target_end(), 117);
}
TEST(PythonOpGenAnnotatorTest, AddMultipleAnnotation) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
OpRegOffsets fakeOffsets;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
R"pb(
offsets {
name: "fake_op_1",
filepath: "file/path/to/fake_op.cc",
start: 7,
end: 11,
}
offsets {
name: "fake_op_2",
filepath: "file/path/to/fake_op.cc",
start: 101,
end: 103,
}
)pb",
&fakeOffsets));
fakeOpDef.set_name("fake_op_1");
annotator.AddAnnotation(fakeOpDef, "fake_op_1", 10);
fakeOpDef.set_name("fake_op_2");
annotator.AddAnnotation(fakeOpDef, "fake_op_2", 100);
annotator.FillSourceOffsets(fakeOffsets);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
EXPECT_EQ(actual.meta_size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5d262aa-e339-407f-8095-0640e0dd0715 | cpp | tensorflow/tensorflow | offset_counter_helper | tensorflow/python/framework/offset_counter_helper.cc | tensorflow/python/framework/offset_counter_helper_test.cc | #include "tensorflow/python/framework/offset_counter_helper.h"
#include <cstdint>
#include <fstream>
#include <string>
#include "absl/strings/string_view.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/strcat.h"
namespace tensorflow {
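// Scans a C++ source file line by line for REGISTER_OP("Name") and records
// the byte offsets of the quoted op name (including the quotes).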
absl::Status FindOpRegistationFromFile(absl::string_view filename,
OpRegOffsets& op_reg_offsets) {
static constexpr LazyRE2 reg_pattern = {
R"regex((REGISTER_OP)\("([\w>]+)"\))regex"};
std::ifstream f(std::string{filename});
  if (!f) {
return tsl::errors::IOError(
tsl::strings::StrCat("Cannot open file: ", filename), errno);
}
std::string line;
absl::string_view reg_keyword, op_name;
uint32_t offsets = 0;
while (std::getline(f, line)) {
if (RE2::PartialMatch(line, *reg_pattern, ®_keyword, &op_name)) {
uint32_t offset_start = offsets + (op_name.data() - line.data() - 1);
uint32_t offset_end = offset_start + op_name.size() + 2;
auto op_reg_offset = op_reg_offsets.add_offsets();
op_reg_offset->set_name(std::string{op_name});
op_reg_offset->set_filepath(std::string{filename});
op_reg_offset->set_start(offset_start);
op_reg_offset->set_end(offset_end);
}
offsets += line.size() + 1;
}
f.close();
return absl::OkStatus();
}
} | #include "tensorflow/python/framework/offset_counter_helper.h"
#include <string>
#include "absl/strings/str_format.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/python/framework/op_reg_offset.pb.h"
namespace tensorflow {
namespace {
TEST(OffsetCounterHelper, FindOpRegistationFromFile) {
std::string content = R"code(
REGISTER_OP("Test>Op1");
REGISTER_OP("Test>Op2")
.Input("input: int32")
.Output("output: int32");
)code";
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, content));
OpRegOffsets actual;
TF_CHECK_OK(FindOpRegistationFromFile(fname, actual));
EXPECT_EQ(actual.offsets(0).name(), "Test>Op1");
EXPECT_EQ(actual.offsets(0).filepath(), fname);
EXPECT_EQ(actual.offsets(0).start(), 13);
EXPECT_EQ(actual.offsets(0).end(), 23);
EXPECT_EQ(actual.offsets(1).name(), "Test>Op2");
EXPECT_EQ(actual.offsets(1).filepath(), fname);
EXPECT_EQ(actual.offsets(1).start(), 38);
EXPECT_EQ(actual.offsets(1).end(), 48);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/offset_counter_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/offset_counter_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a60fb0ae-01f5-4908-b4fc-335e70e7759f | cpp | tensorflow/tensorflow | unified_api | tensorflow/python/framework/experimental/unified_api.cc | tensorflow/c/eager/unified_api_test.cc | #include <pybind11/stl.h>
#include <memory>
#include "pybind11/pybind11.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/safe_ptr.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/python/eager/pywrap_tensor.h"
#include "tensorflow/python/lib/core/pybind11_lib.h"
#include "tensorflow/python/lib/core/pybind11_status.h"
#include "tensorflow/python/lib/core/safe_pyobject_ptr.h"
namespace py = pybind11;
using tensorflow::AbstractContext;
using tensorflow::AbstractContextPtr;
using tensorflow::AbstractFunction;
using tensorflow::AbstractOperation;
using tensorflow::AbstractOperationPtr;
using tensorflow::AbstractTensorHandle;
using tensorflow::AbstractTensorHandlePtr;
using tensorflow::OutputList;
using tensorflow::tracing::TracingContext;
using tensorflow::tracing::TracingOperation;
using tensorflow::tracing::TracingTensorHandle;
using tensorflow::ImmediateContextPtr;
using tensorflow::ImmediateExecutionContext;
using tensorflow::ImmediateExecutionTensorHandle;
using tensorflow::dyn_cast;
using tensorflow::isa;
using tensorflow::unwrap;
using tensorflow::wrap;
using tensorflow::DataType;
using tensorflow::make_safe;
using tensorflow::MaybeRaiseRegisteredFromStatus;
using tensorflow::MaybeRaiseRegisteredFromTFStatus;
using tensorflow::Pyo;
using tensorflow::Safe_TF_StatusPtr;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::TFE_TensorHandleToNumpy;
using tensorflow::core::RefCountPtr;
using tensorflow::errors::Internal;
using tensorflow::errors::InvalidArgument;
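// Python bindings for the unified execution API: tracing/eager contexts,
// abstract operations and tensor handles, plus helpers that unwrap eager
// objects into their abstract counterparts.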
PYBIND11_MODULE(_unified_api, m) {
m.def("SetTracingImplementation", [](const char* impl) {
Safe_TF_StatusPtr status = make_safe(TF_NewStatus());
TF_SetTracingImplementation(impl, status.get());
MaybeRaiseRegisteredFromStatus(status->status);
});
m.def("NewTracingContext", [](const char* fn_name) {
Safe_TF_StatusPtr status = make_safe(TF_NewStatus());
auto* ctx = unwrap(TF_CreateFunction(fn_name, status.get()));
MaybeRaiseRegisteredFromTFStatus(status.get());
if (!ctx) {
MaybeRaiseRegisteredFromStatus(
Internal("TF_CreateFunction returned nullptr"));
}
if (!isa<TracingContext>(ctx)) {
MaybeRaiseRegisteredFromStatus(
Internal("TF_CreateFunction must return a TracingContext, found ",
ctx->getKind()));
}
return dyn_cast<TracingContext>(ctx);
});
m.def("EagerContextToImmediateExecutionContext", [](py::handle& obj) {
TFE_Context* ctx =
static_cast<TFE_Context*>(PyCapsule_GetPointer(obj.ptr(), nullptr));
if (!ctx) {
MaybeRaiseRegisteredFromStatus(InvalidArgument("TFE_Context is nullptr"));
}
return unwrap(ctx);
});
py::class_<AbstractContext, AbstractContextPtr>(m, "AbstractContext")
.def("CreateOperation",
[](AbstractContext* self, const char* op,
const char* raw_device_name) {
auto operation = self->CreateOperation();
(void)operation->Reset(op, raw_device_name);
return operation;
})
.def("RegisterFunction",
[](AbstractContext* self, AbstractFunction* f) {
Status s = self->RegisterFunction(f);
MaybeRaiseRegisteredFromStatus(s);
})
.def("RemoveFunction", [](AbstractContext* self, const string& func) {
Status s = self->RemoveFunction(func);
MaybeRaiseRegisteredFromStatus(s);
});
py::class_<TracingContext, AbstractContext>(m, "TracingContext")
.def("AddParameter",
[](TracingContext* self, DataType dtype) {
TracingTensorHandle* handle = nullptr;
tensorflow::PartialTensorShape shape;
Status s = self->AddParameter(dtype, shape, &handle);
MaybeRaiseRegisteredFromStatus(s);
return static_cast<AbstractTensorHandle*>(handle);
})
.def("Finalize", [](TracingContext* self, py::handle& outputs) {
OutputList output_list;
if (outputs.ptr() != Py_None) {
if (!PyList_Check(outputs.ptr())) {
MaybeRaiseRegisteredFromStatus(
InvalidArgument("must provide a list of Tensors as inputs"));
}
Py_ssize_t len = PyList_Size(outputs.ptr());
output_list.outputs.resize(len);
for (Py_ssize_t i = 0; i < len; ++i) {
PyObject* elem = PyList_GetItem(outputs.ptr(), i);
if (!elem) {
MaybeRaiseRegisteredFromStatus(
InvalidArgument("Tensor at index ", i, " is None."));
}
py::handle elem_h = elem;
AbstractTensorHandle* handle = elem_h.cast<AbstractTensorHandle*>();
if (!isa<TracingTensorHandle>(handle)) {
MaybeRaiseRegisteredFromStatus(InvalidArgument(
"Tensor at index ", i, " is not a graph tensor."));
}
output_list.outputs[i] = handle;
}
}
AbstractFunction* f = nullptr;
Status s = self->Finalize(&output_list, &f);
MaybeRaiseRegisteredFromStatus(s);
return f;
});
py::class_<ImmediateExecutionContext, AbstractContext,
std::unique_ptr<ImmediateExecutionContext, py::nodelete>>
ImmediateExecutionContext(m, "ImmediateExecutionContext");
py::class_<AbstractOperation, AbstractOperationPtr>(m, "AbstractOperation")
.def("Reset",
[](AbstractOperation* self, const char* op,
const char* raw_device_name) {
Status s = self->Reset(op, raw_device_name);
MaybeRaiseRegisteredFromStatus(s);
})
.def("SetOpName",
[](AbstractOperation* self, const char* op_name) {
if (isa<TracingOperation>(self)) {
auto tracing_op = reinterpret_cast<TracingOperation*>(self);
Status s = tracing_op->SetOpName(op_name);
MaybeRaiseRegisteredFromStatus(s);
}
})
.def("Name", &AbstractOperation::Name)
.def("DeviceName", &AbstractOperation::DeviceName)
.def("SetDeviceName",
[](AbstractOperation* self, const char* name) {
Status s = self->SetDeviceName(name);
MaybeRaiseRegisteredFromStatus(s);
})
.def("AddInput",
[](AbstractOperation* self, AbstractTensorHandle* input) {
Status s = self->AddInput(input);
MaybeRaiseRegisteredFromStatus(s);
})
.def("SetAttrType",
[](AbstractOperation* self, const char* attr_name, DataType value) {
Status s = self->SetAttrType(attr_name, value);
MaybeRaiseRegisteredFromStatus(s);
})
.def("Execute", [](AbstractOperation* self, int num_outputs) {
std::vector<AbstractTensorHandle*> outputs(num_outputs);
MaybeRaiseRegisteredFromStatus(
self->Execute(absl::MakeSpan(outputs), &num_outputs));
return outputs;
});
py::class_<AbstractTensorHandle, AbstractTensorHandlePtr>(
m, "AbstractTensorHandle")
.def("DataType", &AbstractTensorHandle::DataType)
.def("numpy", [](AbstractTensorHandle* self) {
if (!isa<ImmediateExecutionTensorHandle>(self)) {
MaybeRaiseRegisteredFromStatus(Internal(
"AbstractTensorHandle.numpy() must be called with an ",
"ImmediateExecutionTensorHandle found type: ", self->getKind()));
}
TF_Status s;
TFE_TensorHandle* handle =
wrap(dyn_cast<ImmediateExecutionTensorHandle>(self));
auto result = TFE_TensorHandleToNumpy(handle, &s);
MaybeRaiseRegisteredFromStatus(s.status);
return Pyo(result);
});
m.def("EagerTensorToImmediateExecutionTensorHandle", [](py::object handle) {
if (!EagerTensor_CheckExact(handle.ptr())) {
MaybeRaiseRegisteredFromStatus(
InvalidArgument("EagerTensorToImmediateExecutionTensorHandle called "
"with non-EagerTensor."));
}
TFE_TensorHandle* eager_tensor = EagerTensor_Handle(handle.ptr());
auto t = static_cast<AbstractTensorHandle*>(unwrap(eager_tensor));
t->Ref();
return t;
});
py::class_<AbstractFunction,
std::unique_ptr<AbstractFunction, tsl::core::RefCountDeleter>>
AbstractFunction(m, "AbstractFunction");
} | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class UnifiedAPI
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
Status TestScalarShape(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(inputs[0]->Shape(&shape));
if (shape.dims() != 0) {
return errors::InvalidArgument(
"Tensor expected to have scalar shape found rank: ", shape.dims());
}
return absl::OkStatus();
}
TEST_P(UnifiedAPI, TestTensorShapeScalar) {
if (UseFunction() && UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
Status s = RunModel(TestScalarShape, ctx.get(),
{x.get()},
{},
UseFunction());
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
Status TestTensorShape2x4(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(inputs[0]->Shape(&shape));
if (shape.dims() != 2) {
return errors::InvalidArgument(
"Tensor expected to have rank 2 found rank: ", shape.dims());
}
int64_t dim_sizes[] = {2, 4};
for (int i = 0; i < shape.dims(); i++) {
if (shape.dim_size(i) != dim_sizes[i]) {
return errors::InvalidArgument("Dim ", i, " expected to be of size ",
dim_sizes[i],
" found: ", shape.dim_size(i));
}
}
return absl::OkStatus();
}
TEST_P(UnifiedAPI, TestTensorShape2x4) {
if (UseFunction() && UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
float data[] = {0., 0., 0., 0., 0., 0., 0., 0};
int64_t dim_sizes[] = {2, 4};
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx.get(), data,
dim_sizes, 2, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
Status s = RunModel(TestTensorShape2x4, ctx.get(),
{x.get()},
{},
UseFunction());
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
TEST_P(UnifiedAPI, TestUnknownShapeTracing) {
if (!UseFunction()) {
GTEST_SKIP() << "Tracing only test.";
}
if (UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx(BuildFunction("test_fn"));
AbstractTensorHandlePtr x;
{
tracing::TracingTensorHandle* x_raw = nullptr;
PartialTensorShape shape;
Status s = dyn_cast<tracing::TracingContext>(ctx.get())->AddParameter(
DT_FLOAT, shape, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
PartialTensorShape shape;
Status s = x->Shape(&shape);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_TRUE(shape.unknown_rank());
}
TEST_P(UnifiedAPI, TestPartialShapeTracing) {
if (!UseFunction()) {
GTEST_SKIP() << "Tracing only test.";
}
if (UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx(BuildFunction("test_fn"));
AbstractTensorHandlePtr x;
{
tracing::TracingTensorHandle* x_raw = nullptr;
PartialTensorShape shape;
int64_t dim_sizes[] = {2, -1};
Status s = PartialTensorShape::MakePartialShape(dim_sizes, 2, &shape);
ASSERT_EQ(errors::OK, s.code()) << s.message();
s = dyn_cast<tracing::TracingContext>(ctx.get())->AddParameter(
DT_FLOAT, shape, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
PartialTensorShape shape;
Status s = x->Shape(&shape);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_FALSE(shape.unknown_rank());
ASSERT_EQ(2, shape.dim_size(0));
ASSERT_EQ(-1, shape.dim_size(1));
}
INSTANTIATE_TEST_SUITE_P(
UnifiedCppAPI, UnifiedAPI,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/experimental/unified_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/unified_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21a1a09a-18da-4af4-abe7-661d9ba85800 | cpp | tensorflow/tensorflow | ts_op_gen | tensorflow/js/ops/ts_op_gen.cc | tensorflow/js/ops/ts_op_gen_test.cc | #include "tensorflow/js/ops/ts_op_gen.h"
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
static bool IsListAttr(const OpDef_ArgDef& arg) {
return !arg.type_list_attr().empty() || !arg.number_attr().empty();
}
struct ArgDefs {
ArgDefs(const OpDef::ArgDef& op_def_arg, const ApiDef::Arg& api_def_arg)
: op_def_arg(op_def_arg), api_def_arg(api_def_arg) {}
const OpDef::ArgDef& op_def_arg;
const ApiDef::Arg& api_def_arg;
};
struct OpAttrs {
OpAttrs(const OpDef::AttrDef& op_def_attr, const ApiDef::Attr& api_def_attr)
: op_def_attr(op_def_attr), api_def_attr(api_def_attr) {}
const OpDef::AttrDef& op_def_attr;
const ApiDef::Attr& api_def_attr;
};
class GenTypeScriptOp {
public:
GenTypeScriptOp(const OpDef& op_def, const ApiDef& api_def);
~GenTypeScriptOp();
string Code();
private:
void ProcessArgs();
void ProcessAttrs();
void AddAttrForArg(const string& attr, int arg_index);
string InputForAttr(const OpDef::AttrDef& op_def_attr);
void AddMethodSignature();
void AddOpAttrs();
void AddMethodReturnAndClose();
const OpDef& op_def_;
const ApiDef& api_def_;
string result_;
std::vector<ArgDefs> input_op_args_;
std::vector<OpAttrs> op_attrs_;
typedef std::unordered_map<string, std::vector<int>> AttrArgIdxMap;
AttrArgIdxMap attr_arg_idx_map_;
int num_outputs_;
};
GenTypeScriptOp::GenTypeScriptOp(const OpDef& op_def, const ApiDef& api_def)
: op_def_(op_def), api_def_(api_def), num_outputs_(0) {}
GenTypeScriptOp::~GenTypeScriptOp() = default;
string GenTypeScriptOp::Code() {
ProcessArgs();
ProcessAttrs();
AddMethodSignature();
AddOpAttrs();
AddMethodReturnAndClose();
strings::StrAppend(&result_, "\n");
return result_;
}
void GenTypeScriptOp::ProcessArgs() {
for (int i = 0; i < api_def_.arg_order_size(); i++) {
auto op_def_arg = FindInputArg(api_def_.arg_order(i), op_def_);
if (op_def_arg == nullptr) {
LOG(WARNING) << "Could not find OpDef::ArgDef for "
<< api_def_.arg_order(i);
continue;
}
auto api_def_arg = FindInputArg(api_def_.arg_order(i), api_def_);
if (api_def_arg == nullptr) {
LOG(WARNING) << "Could not find ApiDef::Arg for "
<< api_def_.arg_order(i);
continue;
}
if (!op_def_arg->type_attr().empty()) {
AddAttrForArg(op_def_arg->type_attr(), i);
} else if (!op_def_arg->type_list_attr().empty()) {
AddAttrForArg(op_def_arg->type_list_attr(), i);
}
if (!op_def_arg->number_attr().empty()) {
AddAttrForArg(op_def_arg->number_attr(), i);
}
input_op_args_.push_back(ArgDefs(*op_def_arg, *api_def_arg));
}
num_outputs_ = api_def_.out_arg_size();
}
void GenTypeScriptOp::ProcessAttrs() {
for (int i = 0; i < op_def_.attr_size(); i++) {
op_attrs_.push_back(OpAttrs(op_def_.attr(i), api_def_.attr(i)));
}
}
void GenTypeScriptOp::AddAttrForArg(const string& attr, int arg_index) {
auto iter = attr_arg_idx_map_.find(attr);
if (iter == attr_arg_idx_map_.end()) {
attr_arg_idx_map_.insert(AttrArgIdxMap::value_type(attr, {arg_index}));
} else {
iter->second.push_back(arg_index);
}
}
string GenTypeScriptOp::InputForAttr(const OpDef::AttrDef& op_def_attr) {
string inputs;
auto arg_list = attr_arg_idx_map_.find(op_def_attr.name());
if (arg_list != attr_arg_idx_map_.end()) {
for (auto iter = arg_list->second.begin(); iter != arg_list->second.end();
++iter) {
strings::StrAppend(&inputs, input_op_args_[*iter].op_def_arg.name());
}
}
return inputs;
}
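// Emits the exported TypeScript signature; list-typed args become
// tfc.Tensor[] and ops with multiple outputs return tfc.Tensor[].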
void GenTypeScriptOp::AddMethodSignature() {
strings::StrAppend(&result_, "export function ", api_def_.endpoint(0).name(),
"(");
bool is_first = true;
for (auto& in_arg : input_op_args_) {
if (is_first) {
is_first = false;
} else {
strings::StrAppend(&result_, ", ");
}
auto op_def_arg = in_arg.op_def_arg;
strings::StrAppend(&result_, op_def_arg.name(), ": ");
if (IsListAttr(op_def_arg)) {
strings::StrAppend(&result_, "tfc.Tensor[]");
} else {
strings::StrAppend(&result_, "tfc.Tensor");
}
}
if (num_outputs_ == 1) {
strings::StrAppend(&result_, "): tfc.Tensor {\n");
} else {
strings::StrAppend(&result_, "): tfc.Tensor[] {\n");
}
}
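// Emits the opAttrs array; only "type" attrs (via createTensorsTypeOpAttr)
// and "int" attrs (the length of the corresponding input list) are handled.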
void GenTypeScriptOp::AddOpAttrs() {
strings::StrAppend(&result_, " const opAttrs = [\n");
bool is_first = true;
for (auto& attr : op_attrs_) {
if (is_first) {
is_first = false;
} else {
strings::StrAppend(&result_, ",\n");
}
strings::StrAppend(&result_, " ");
if (attr.op_def_attr.type() == "type") {
strings::StrAppend(&result_, "createTensorsTypeOpAttr('",
attr.op_def_attr.name(), "', ",
InputForAttr(attr.op_def_attr), ")");
} else if (attr.op_def_attr.type() == "int") {
strings::StrAppend(&result_, "{name: '", attr.op_def_attr.name(), "', ");
strings::StrAppend(&result_, "type: nodeBackend().binding.TF_ATTR_INT, ");
strings::StrAppend(&result_, "value: ", InputForAttr(attr.op_def_attr),
".length}");
}
}
strings::StrAppend(&result_, "\n ];\n");
}
void GenTypeScriptOp::AddMethodReturnAndClose() {
strings::StrAppend(&result_, " return null;\n}\n");
}
void WriteTSOp(const OpDef& op_def, const ApiDef& api_def, WritableFile* ts) {
GenTypeScriptOp ts_op(op_def, api_def);
TF_CHECK_OK(ts->Append(GenTypeScriptOp(op_def, api_def).Code()));
}
void StartFile(WritableFile* ts_file) {
const string header =
R"header(
import * as tfc from '@tensorflow/tfjs-core';
import {createTensorsTypeOpAttr, nodeBackend} from './op_utils';
)header";
TF_CHECK_OK(ts_file->Append(header));
}
}
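// Writes TypeScript stubs for every visible, non-deprecated op in `ops` to
// ts_filename, prefixed by the shared import header.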
void WriteTSOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& ts_filename) {
Env* env = Env::Default();
std::unique_ptr<WritableFile> ts_file = nullptr;
TF_CHECK_OK(env->NewWritableFile(ts_filename, &ts_file));
StartFile(ts_file.get());
for (const auto& op_def : ops.op()) {
if (op_def.has_deprecation() &&
op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) {
continue;
}
const auto* api_def = api_def_map.GetApiDef(op_def.name());
if (api_def->visibility() == ApiDef::VISIBLE) {
WriteTSOp(op_def, *api_def, ts_file.get());
}
}
TF_CHECK_OK(ts_file->Close());
}
} | #include "tensorflow/js/ops/ts_op_gen.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
void ExpectContainsStr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectDoesNotContainStr(StringPiece s, StringPiece expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "images"
type_attr: "T"
number_attr: "N"
description: "Images to process."
}
input_arg {
name: "dim"
description: "Description for dim."
type: DT_FLOAT
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for images"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
default_value {
i: 1
}
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
void GenerateTsOpFileText(const string& op_def_str, const string& api_def_str,
string* ts_file_text) {
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(
op_def_str.empty() ? kBaseOpDef : op_def_str, &op_defs);
ApiDefMap api_def_map(op_defs);
if (!api_def_str.empty()) {
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def_str));
}
const string& tmpdir = testing::TmpDir();
const auto ts_file_path = io::JoinPath(tmpdir, "test.ts");
WriteTSOps(op_defs, api_def_map, ts_file_path);
TF_ASSERT_OK(ReadFileToString(env, ts_file_path, ts_file_text));
}
TEST(TsOpGenTest, TestImports) {
string ts_file_text;
GenerateTsOpFileText("", "", &ts_file_text);
const string expected = R"(
import * as tfc from '@tensorflow/tfjs-core';
import {createTensorsTypeOpAttr, nodeBackend} from './op_utils';
)";
ExpectContainsStr(ts_file_text, expected);
}
TEST(TsOpGenTest, InputSingleAndList) {
const string api_def = R"pb(
op { graph_op_name: "Foo" arg_order: "dim" arg_order: "images" }
)pb";
string ts_file_text;
GenerateTsOpFileText("", api_def, &ts_file_text);
const string expected = R"(
export function Foo(dim: tfc.Tensor, images: tfc.Tensor[]): tfc.Tensor {
)";
ExpectContainsStr(ts_file_text, expected);
}
TEST(TsOpGenTest, TestVisibility) {
const string api_def = R"(
op {
graph_op_name: "Foo"
visibility: HIDDEN
}
)";
string ts_file_text;
GenerateTsOpFileText("", api_def, &ts_file_text);
const string expected = R"(
export function Foo(images: tfc.Tensor[], dim: tfc.Tensor): tfc.Tensor {
)";
ExpectDoesNotContainStr(ts_file_text, expected);
}
TEST(TsOpGenTest, SkipDeprecated) {
const string op_def = R"(
op {
name: "DeprecatedFoo"
input_arg {
name: "input"
type_attr: "T"
description: "Description for input."
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for input"
allowed_values {
list {
type: DT_FLOAT
}
}
}
deprecation {
explanation: "Deprecated."
}
}
)";
string ts_file_text;
GenerateTsOpFileText(op_def, "", &ts_file_text);
ExpectDoesNotContainStr(ts_file_text, "DeprecatedFoo");
}
TEST(TsOpGenTest, MultiOutput) {
const string op_def = R"(
op {
name: "MultiOutputFoo"
input_arg {
name: "input"
description: "Description for input."
type_attr: "T"
}
output_arg {
name: "output1"
description: "Description for output 1."
type: DT_FLOAT
}
output_arg {
name: "output2"
description: "Description for output 2."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for input"
allowed_values {
list {
type: DT_FLOAT
}
}
}
summary: "Summary for op MultiOutputFoo."
description: "Description for op MultiOutputFoo."
}
)";
string ts_file_text;
GenerateTsOpFileText(op_def, "", &ts_file_text);
const string expected = R"(
export function MultiOutputFoo(input: tfc.Tensor): tfc.Tensor[] {
)";
ExpectContainsStr(ts_file_text, expected);
}
TEST(TsOpGenTest, OpAttrs) {
string ts_file_text;
GenerateTsOpFileText("", "", &ts_file_text);
const string expectedFooAttrs = R"(
const opAttrs = [
createTensorsTypeOpAttr('T', images),
{name: 'N', type: nodeBackend().binding.TF_ATTR_INT, value: images.length}
];
)";
ExpectContainsStr(ts_file_text, expectedFooAttrs);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/js/ops/ts_op_gen.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/js/ops/ts_op_gen_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3426a9d7-9880-4064-a001-25e5ea338033 | cpp | tensorflow/tensorflow | stderr_reporter | tensorflow/lite/stderr_reporter.cc | tensorflow/lite/stderr_reporter_test.cc | #include "tensorflow/lite/stderr_reporter.h"
#include <stdarg.h>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
int StderrReporter::Report(const char* format, va_list args) {
logging_internal::MinimalLogger::LogFormatted(TFLITE_LOG_ERROR, format, args);
return 0;
}
ErrorReporter* DefaultErrorReporter() {
static StderrReporter* error_reporter = new StderrReporter;
return error_reporter;
}
} | #include "tensorflow/lite/stderr_reporter.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/error_reporter.h"
namespace tflite {
namespace {
void CheckWritesToStderr(ErrorReporter *error_reporter) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
testing::internal::CaptureStderr();
#endif
TF_LITE_REPORT_ERROR(error_reporter, "Test: %d", 42);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ("ERROR: Test: 42\n", testing::internal::GetCapturedStderr());
#endif
}
TEST(StderrReporterTest, DefaultErrorReporter_WritesToStderr) {
CheckWritesToStderr(DefaultErrorReporter());
}
TEST(StderrReporterTest, StderrReporter_WritesToStderr) {
StderrReporter stderr_reporter;
CheckWritesToStderr(&stderr_reporter);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/stderr_reporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/stderr_reporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c4d93f8c-26b9-4357-95af-c244f2433a63 | cpp | tensorflow/tensorflow | simple_planner | tensorflow/lite/simple_planner.cc | tensorflow/lite/simple_planner_test.cc | #include "tensorflow/lite/simple_planner.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
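// Sentinel meaning "no node assigned yet" in the alloc/dealloc plans.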
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
}
SimplePlanner::SimplePlanner(TfLiteContext* context,
std::unique_ptr<GraphInfo> graph_info)
: context_(context), graph_info_(std::move(graph_info)) {}
SimplePlanner::~SimplePlanner() { FreeAllAllocations(); }
void SimplePlanner::FreeAllAllocations() {
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
allocs_[i].free();
}
}
TfLiteStatus SimplePlanner::ResetAllocations() {
FreeAllAllocations();
allocs_.clear();
allocs_.resize(graph_info_->num_tensors());
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ResetAllocationsAfter(int node) {
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
if (allocs_[i].node > node && allocs_[i].size > 0) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
}
return kTfLiteOk;
}
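// Plans tensor lifetimes by reference counting: a tensor is allocated at the
// node that produces it and deallocated after its last consumer runs. Graph
// inputs, outputs, and variables receive an extra reference so they stay
// alive for the entire invocation.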
TfLiteStatus SimplePlanner::PlanAllocations() {
TF_LITE_ENSURE_STATUS(ResetAllocations());
alloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
dealloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
std::vector<int> refcounts(graph_info_->num_tensors(), 0);
auto allocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] != kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
alloc_node_[tensor] = node;
return kTfLiteOk;
};
auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] == kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
dealloc_node_[tensor] = node;
return kTfLiteOk;
};
for (int tensor_index : graph_info_->outputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
}
}
for (int tensor_index : graph_info_->variables()) {
refcounts[tensor_index]++;
TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
}
const size_t num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
}
}
}
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_outputs = node.outputs;
for (int j = 0; j < node_outputs->size; ++j) {
int tensor_index = node_outputs->data[j];
TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
}
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]--;
if (refcounts[tensor_index] == 0) {
TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
}
}
}
}
return kTfLiteOk;
}
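// Materializes the plan for nodes [first_node, last_node]: node temporaries
// live only within their own node, arena-rw tensors are (re)allocated, and
// persistent tensors are allocated once and then kept.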
TfLiteStatus SimplePlanner::ExecuteAllocations(int first_node, int last_node) {
alloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
dealloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
allocs_.resize(graph_info_->num_tensors());
const size_t num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = first_node;
i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_temporaries = node.temporaries;
for (int j = 0; j < node_temporaries->size; ++j) {
int tensor_index = node_temporaries->data[j];
alloc_node_[tensor_index] = i;
dealloc_node_[tensor_index] = i;
}
}
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
bool allocated = false;
if (alloc_node_[i] >= first_node && alloc_node_[i] <= last_node) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[i].size != 0) {
allocs_[i].free();
}
allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
} else if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
allocs_[i].size == 0) {
allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
}
}
if (allocated) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ReleaseNonPersistentMemory() {
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::AcquireNonPersistentMemory() {
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
}
}
return kTfLiteOk;
}
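// Points the tensor's data pointer at the buffer backing its allocation.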
TfLiteStatus SimplePlanner::ResolveTensorAllocation(int tensor_index) {
TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size != 0) {
tensor.data.raw = allocs_[tensor_index].ptr;
}
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
tensor.data.raw = allocs_[tensor_index].ptr;
}
return kTfLiteOk;
}
} | #include "tensorflow/lite/simple_planner.h"
#include <algorithm>
#include <cstdarg>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
class TestOp {
public:
TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
std::initializer_list<int> temporaries)
: inputs_(inputs), outputs_(outputs), temporaries_(temporaries) {}
const std::vector<int>& inputs() const { return inputs_; }
const std::vector<int>& outputs() const { return outputs_; }
const std::vector<int>& temporaries() const { return temporaries_; }
const TfLiteRegistration& registration() const { return registration_; }
private:
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> temporaries_;
TfLiteRegistration registration_{};
};
class TestGraph {
public:
TestGraph(std::initializer_list<int> inputs,
std::initializer_list<TestOp> nodes,
std::initializer_list<int> outputs)
: inputs_(inputs), outputs_(outputs) {
int max_tensor_index = 0;
for (int t : inputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (int t : outputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (const auto& node : nodes) {
auto int_array = [](const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
};
registrations_.push_back(node.registration());
nodes_.push_back(TfLiteNode());
nodes_.back().inputs = int_array(node.inputs());
for (int t : node.inputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().outputs = int_array(node.outputs());
for (int t : node.outputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().temporaries = int_array(node.temporaries());
for (int t : node.temporaries()) {
max_tensor_index = std::max(max_tensor_index, t);
}
}
for (int i = 0; i <= max_tensor_index; ++i) {
tensors_.push_back(TfLiteTensor());
tensors_.back().allocation_type = kTfLiteArenaRw;
tensors_.back().bytes = (i + 1) * 3;
}
}
~TestGraph() {
for (auto node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
TfLiteIntArrayFree(node.temporaries);
}
}
const std::vector<TfLiteNode>& nodes() { return nodes_; }
std::vector<TfLiteTensor>* tensors() { return &tensors_; }
const std::vector<int>& inputs() { return inputs_; }
const std::vector<int>& outputs() { return outputs_; }
const std::vector<int>& variables() { return variables_; }
const std::vector<TfLiteRegistration>& registrations() {
return registrations_;
}
void SetVariables(const std::vector<int>& variables) {
variables_ = variables;
}
void Swap(TestGraph* other) {
std::swap(nodes_, other->nodes_);
std::swap(tensors_, other->tensors_);
std::swap(inputs_, other->inputs_);
std::swap(outputs_, other->outputs_);
std::swap(variables_, other->variables_);
}
private:
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<TfLiteRegistration> registrations_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
};
class TestGraphInfo : public GraphInfo {
public:
explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
size_t num_tensors() const override { return graph_->tensors()->size(); }
const TfLiteRegistration& registration(size_t index) const override {
return graph_->registrations()[index];
}
TfLiteTensor* tensor(size_t index) override {
return &graph_->tensors()->at(index);
}
TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
size_t num_execution_nodes() const override { return graph_->nodes().size(); }
size_t num_total_nodes() const override { return graph_->nodes().size(); }
const TfLiteNode& node(size_t index) const override {
return graph_->nodes()[index];
}
size_t node_index(size_t index) const override { return index; }
const std::vector<int>& inputs() const override { return graph_->inputs(); }
const std::vector<int>& outputs() const override { return graph_->outputs(); }
const std::vector<int>& variables() const override {
return graph_->variables();
}
private:
TestGraph* graph_;
};
void ReportError(TfLiteContext* context, const char* format, ...) {
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
LOG(INFO) << temp_buffer;
}
class SimplePlannerTest : public ::testing::Test {
protected:
void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
graph_ = graph;
context_.ReportError = ReportError;
planner_ = std::make_unique<SimplePlanner>(
&context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)));
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void SwapGraph(TestGraph* graph) {
graph_->Swap(graph);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void Execute(int start, int end) {
CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
}
void ReleaseNonPersistentMemory() {
CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
}
void AcquireNonPersistentMemory() {
CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
}
void ResetAllocationsAfter(int node) {
CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
}
bool HasNonPersistentMemory() {
return planner_ && planner_->HasNonPersistentMemory();
}
bool IsAllocated(int tensor_index) {
return (*graph_->tensors())[tensor_index].data.raw != nullptr;
}
TfLiteContext context_;
TestGraph* graph_;
std::unique_ptr<SimplePlanner> planner_;
};
TEST_F(SimplePlannerTest, EmptyGraph) {
TestGraph graph({}, {}, {});
SetGraph(&graph);
Execute(0, 10);
}
TEST_F(SimplePlannerTest, GraphWithNoOps) {
TestGraph graph({0, 10}, {}, {5, 11});
SetGraph(&graph);
Execute(0, 10);
EXPECT_FALSE(IsAllocated(5));
EXPECT_FALSE(IsAllocated(11));
}
TEST_F(SimplePlannerTest, ZeroSizedTensors) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
(*graph.tensors())[1].bytes = 0;
SetGraph(&graph);
ASSERT_EQ(planner_->ExecuteAllocations(0, 10), kTfLiteOk);
EXPECT_FALSE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
}
TEST_F(SimplePlannerTest, SimpleGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphInputsPreserved) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithTemporary) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_FALSE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
(*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
SetGraph(&graph);
Execute(0, 10);
void* tensor5_ptr = (*graph.tensors())[5].data.raw;
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
Execute(0, 10);
EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
TEST_F(SimplePlannerTest, SimpleGraphOptionalOutput) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{-1, 3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_planner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_planner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
219f0a76-c451-412b-a998-8677ee7adc06 | cpp | tensorflow/tensorflow | mutable_op_resolver_utils | tensorflow/lite/mutable_op_resolver_utils.cc | tensorflow/lite/mutable_op_resolver_utils_test.cc | #include "tensorflow/lite/mutable_op_resolver_utils.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
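// Bridges a C-API TfLiteOperator into a MutableOpResolver: ops carrying a
// custom name are registered as custom ops, all others as builtins.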
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op,
int min_version, int max_version) {
TfLiteRegistration registration{};
registration.builtin_code = TfLiteOperatorGetBuiltInCode(op);
registration.custom_name = TfLiteOperatorGetCustomName(op);
registration.version = TfLiteOperatorGetVersion(op);
registration.registration_external = const_cast<TfLiteOperator*>(op);
if (registration.custom_name != nullptr) {
mutable_op_resolver->AddCustom(registration.custom_name, ®istration,
min_version, max_version);
} else {
mutable_op_resolver->AddBuiltin(BuiltinOperator(registration.builtin_code),
®istration, min_version, max_version);
}
}
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op) {
int version = TfLiteOperatorGetVersion(op);
AddOp(mutable_op_resolver, op, version, version);
}
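// Minimal usage sketch (MyOpRegistration() is a hypothetical factory
// returning a TfLiteOperator*, not part of this file):
//   MutableOpResolver resolver;
//   AddOp(&resolver, MyOpRegistration());  // registered at its own version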
} | #include "tensorflow/lite/mutable_op_resolver_utils.h"
#include <stddef.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/common_internal.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/test_util.h"
namespace tflite {
namespace {
TfLiteStatus DummyInvoke(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node) {
return kTfLiteOk;
}
TfLiteStatus DummyPrepare(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node) {
return kTfLiteOk;
}
TfLiteOperator* GetDummyRegistration() {
static TfLiteOperator* registration = []() {
auto* op = TfLiteOperatorCreate(kTfLiteBuiltinCustom, "dummy",
1, nullptr);
TfLiteOperatorSetPrepareWithData(op, DummyPrepare);
TfLiteOperatorSetInvokeWithData(op, DummyInvoke);
return op;
}();
return registration;
}
TfLiteOperator* GetAdditionOpRegistration() {
static TfLiteOperator* registration = []() {
auto* r = TfLiteOperatorCreate(kTfLiteBuiltinAdd, nullptr,
1, nullptr);
TfLiteOperatorSetInvokeWithData(r, DummyInvoke);
return r;
}();
return registration;
}
using MutableOpResolverTest = tflite::testing::Test;
TEST_F(MutableOpResolverTest, FindOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(
TfLiteOperatorGetBuiltInCode(found_registration->registration_external),
kTfLiteBuiltinAdd);
EXPECT_EQ(TfLiteOperatorGetVersion(found_registration->registration_external),
1);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_ADD);
EXPECT_EQ(found_registration->version, 1);
}
TEST_F(MutableOpResolverTest, FindMissingOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, RegisterOpWithSingleVersion) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration(), 2, 2);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, RegisterOpWithMultipleVersions) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 3);
}
TEST_F(MutableOpResolverTest, FindOpWithUnsupportedVersions) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 4);
EXPECT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, FindCustomOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("dummy", 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 1);
}
TEST_F(MutableOpResolverTest, FindMissingCustomOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("whatever", 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, FindCustomOpWithUnsupportedVersion) {
MutableOpResolver resolver;
AddOp(&resolver, GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("dummy", 2);
EXPECT_EQ(found_registration, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b5e79173-7ffb-4558-b0cf-0e4bb23d8675 | cpp | tensorflow/tensorflow | arena_planner | tensorflow/lite/arena_planner.cc | tensorflow/lite/arena_planner_test.cc | #include "tensorflow/lite/arena_planner.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/simple_memory_arena.h"
namespace tflite {
constexpr int32_t kLastActiveNodeUndefined =
std::numeric_limits<int32_t>::max();
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
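// Sharing a buffer is not worthwhile for tensors at or below scalar size.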
constexpr int32_t kScalarTensorBytes = 4;
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
std::unique_ptr<GraphInfo> graph_info,
bool preserve_all_tensors, int tensor_alignment,
int subgraph_index)
: context_(context),
graph_info_(std::move(graph_info)),
arena_(kDefaultArenaAlignment, subgraph_index),
has_nonpersistent_memory_(false),
persistent_arena_(kDefaultArenaAlignment, subgraph_index),
preserve_all_tensors_(preserve_all_tensors),
tensor_alignment_(tensor_alignment),
last_active_node_(kLastActiveNodeUndefined) {}
ArenaPlanner::~ArenaPlanner() {
arena_.ReleaseBuffer();
persistent_arena_.ReleaseBuffer();
}
std::intptr_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
if (type == kTfLiteArenaRwPersistent) {
return persistent_arena_.BasePointer();
}
if (type == kTfLiteArenaRw) {
return arena_.BasePointer();
}
return 0;
}
TfLiteStatus ArenaPlanner::ResetAllocations() {
TF_LITE_ENSURE_STATUS(arena_.ClearPlan());
TF_LITE_ENSURE_STATUS(persistent_arena_.ClearPlan());
allocs_.clear();
allocs_.resize(graph_info_->num_tensors());
last_active_node_ = kLastActiveNodeUndefined;
return kTfLiteOk;
}
TfLiteStatus ArenaPlanner::ResetAllocationsAfter(int node) {
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
if (allocs_[i].first_node > node && allocs_[i].size > 0) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].reset();
tensor.data.raw = nullptr;
}
}
}
if (last_active_node_ > node) {
arena_.CalculateActiveAllocs(allocs_, node);
} else {
arena_.PurgeAfter(node);
}
last_active_node_ = node;
return kTfLiteOk;
}
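// Maps a tensor index to the root tensor whose buffer it shares, if any;
// returns the index unchanged when it shares nothing.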
int ArenaPlanner::FindSharedTensor(int tensor_index) {
auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
if (actual_tensor_it != actual_tensor_id_.end()) {
tensor_index = actual_tensor_it->second;
}
return tensor_index;
}
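// Decides whether an op's input buffer may be reused for its output. When the
// op modifies the data, the two tensors must have identical sizes larger than
// a scalar and the input must have no other consumers. Buffers of graph
// inputs and outputs are never shared, the allocation types must be
// compatible, and preserve_all_tensors_ (debugging) disables sharing.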
bool ArenaPlanner::InputTensorCanBeShared(const TfLiteTensor& input_tensor,
const TfLiteTensor& output_tensor,
int input_id, int output_id,
bool tensor_changed) {
if (tensor_changed) {
if (input_tensor.bytes != output_tensor.bytes ||
input_tensor.bytes <= kScalarTensorBytes) {
return false;
}
if (refcounts_[input_id] > 1) {
return false;
}
}
for (int input : graph_info_->inputs()) {
if (input == input_id) {
return false;
}
}
for (int output : graph_info_->outputs()) {
if (output == output_id) {
return false;
}
}
TfLiteAllocationType input_allocation_type = input_tensor.allocation_type;
TfLiteAllocationType output_allocation_type = output_tensor.allocation_type;
if (input_allocation_type != output_allocation_type &&
input_allocation_type != kTfLiteArenaRw) {
return false;
}
if (preserve_all_tensors_) {
return false;
}
return true;
}
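// Scans the execution plan for ops flagged as in-place capable and records in
// actual_tensor_id_ which outputs may reuse one of their inputs' buffers.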
void ArenaPlanner::IdentifyInPlaceTensors() {
actual_tensor_id_.clear();
const int num_execution_nodes = graph_info_->num_execution_nodes();
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_execution_nodes; ++i) {
const TfLiteRegistration& registration = graph_info_->registration(i);
const TfLiteNode& node = graph_info_->node(i);
if (node.outputs->size < 1) continue;
bool tensor_changed =
!(registration.inplace_operator & kTfLiteInplaceOpDataUnmodified);
if (registration.inplace_operator == kTfLiteInplaceOpNone) {
continue;
}
int32_t input_id = -1;
int32_t output_id = node.outputs->data[0];
const TfLiteTensor& output_tensor = tensors[output_id];
const int loop_end =
std::min(kTfLiteMaxSharableOpInputs, node.inputs->size);
for (int i = 0; i < loop_end; ++i) {
if (node.inputs->data[i] == kTfLiteOptionalTensor) {
continue;
}
const bool input_shareable =
registration.inplace_operator & (kTfLiteInplaceOpInput0Shared << i);
if (input_shareable) {
const TfLiteTensor& input_tensor = tensors[node.inputs->data[i]];
if (InputTensorCanBeShared(input_tensor, output_tensor,
node.inputs->data[i], output_id,
tensor_changed)) {
input_id = node.inputs->data[i];
break;
}
}
}
if (input_id == -1) {
continue;
}
int32_t actual_output_tensor_id = FindSharedTensor(input_id);
if (tensor_changed) {
if (refcounts_[actual_output_tensor_id] > 1) {
continue;
}
}
actual_tensor_id_[output_id] = actual_output_tensor_id;
}
}
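// Like SimplePlanner, plans lifetimes via reference counting, but first
// redirects shared (in-place) tensors to their root so a reused buffer stays
// alive until the last consumer of any of its aliases has executed.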
TfLiteStatus ArenaPlanner::PlanAllocations() {
const size_t num_tensors = graph_info_->num_tensors();
TF_LITE_ENSURE_STATUS(ResetAllocations());
alloc_node_.assign(num_tensors, kNodeNotAssigned);
dealloc_node_.assign(num_tensors, kNodeNotAssigned);
nodes_to_tensors_.clear();
nodes_to_tensors_.resize(
std::max(graph_info_->num_execution_nodes(), (size_t)1), {});
refcounts_.assign(num_tensors, 0);
auto allocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] != kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
alloc_node_[tensor] = node;
return kTfLiteOk;
};
auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] == kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
dealloc_node_[tensor] = node;
return kTfLiteOk;
};
for (int tensor_index : graph_info_->outputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
++refcounts_[tensor_index];
}
}
for (int tensor_index : graph_info_->variables()) {
++refcounts_[tensor_index];
TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
nodes_to_tensors_[0].insert(tensor_index);
}
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
++refcounts_[tensor_index];
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
nodes_to_tensors_[0].insert(tensor_index);
}
}
std::vector<int> refcounts = refcounts_;
const int num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
++refcounts_[tensor_index];
}
}
}
IdentifyInPlaceTensors();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
tensor_index = FindSharedTensor(tensor_index);
++refcounts[tensor_index];
}
}
}
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_outputs = node.outputs;
for (int j = 0; j < node_outputs->size; ++j) {
int tensor_index = node_outputs->data[j];
if (tensor_index == kTfLiteOptionalTensor) continue;
nodes_to_tensors_[i].insert(tensor_index);
TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
}
if (!preserve_all_tensors_) {
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
tensor_index = FindSharedTensor(tensor_index);
--refcounts[tensor_index];
if (refcounts[tensor_index] == 0) {
TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
}
}
}
}
}
return kTfLiteOk;
}
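// Extends the plan with node temporaries, computes any allocations that
// changed for [first_node, last_node], commits both arenas, and rebinds
// tensor data pointers (all of them if an arena buffer moved).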
TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
const size_t num_tensors = graph_info_->num_tensors();
TF_LITE_ENSURE(context_, num_tensors >= allocs_.size());
alloc_node_.resize(num_tensors, kNodeNotAssigned);
dealloc_node_.resize(num_tensors, kNodeNotAssigned);
allocs_.resize(num_tensors);
const int num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = first_node;
i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_temporaries = node.temporaries;
for (int j = 0; j < node_temporaries->size; ++j) {
int tensor_index = node_temporaries->data[j];
alloc_node_[tensor_index] = i;
nodes_to_tensors_[i].insert(tensor_index);
if (!preserve_all_tensors_) {
dealloc_node_[tensor_index] = i;
}
}
}
std::vector<int32_t> tensors_allocated;
TF_LITE_ENSURE_STATUS(
CalculateAllocations(first_node, last_node, &tensors_allocated));
bool arena_reallocated = false;
TF_LITE_ENSURE_STATUS(Commit(&arena_reallocated));
TfLiteTensor* tensors = graph_info_->tensors();
if (arena_reallocated) {
for (int i = 0; i < static_cast<int>(num_tensors); ++i) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
}
} else {
for (int i = 0; i < static_cast<int>(tensors_allocated.size()); ++i) {
TF_LITE_ENSURE_STATUS(
ResolveTensorAllocation(tensors_allocated[i], tensors));
}
}
return kTfLiteOk;
}
TfLiteStatus ArenaPlanner::ReleaseNonPersistentMemory() {
TF_LITE_ENSURE_STATUS(arena_.ReleaseBuffer());
has_nonpersistent_memory_ = false;
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
tensor.data.raw = nullptr;
}
}
return kTfLiteOk;
}
TfLiteStatus ArenaPlanner::AcquireNonPersistentMemory() {
bool reallocated;
TF_LITE_ENSURE_STATUS(arena_.Commit(&reallocated));
has_nonpersistent_memory_ = true;
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
}
}
return kTfLiteOk;
}
bool ArenaPlanner::HasNonPersistentMemory() {
return has_nonpersistent_memory_;
}
void ArenaPlanner::DumpDebugInfo(const std::vector<int>& execution_plan) const {
arena_.DumpDebugInfo("kTfLiteArenaRw Dump:", execution_plan);
persistent_arena_.DumpDebugInfo("kTfLiteArenaRwPersistent Dump:",
execution_plan);
}
void ArenaPlanner::GetAllocInfo(size_t* arena_size,
size_t* arena_persist_size) const {
*arena_size = arena_.GetBufferSize();
*arena_persist_size = persistent_arena_.GetBufferSize();
}
TfLiteStatus ArenaPlanner::Commit(bool* reallocated) {
bool arena_reallocated, persistent_arena_reallocated;
TF_LITE_ENSURE_STATUS(arena_.Commit(&arena_reallocated));
has_nonpersistent_memory_ = true;
TF_LITE_ENSURE_STATUS(
persistent_arena_.Commit(&persistent_arena_reallocated));
*reallocated = arena_reallocated;
*reallocated |= persistent_arena_reallocated;
return kTfLiteOk;
}
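// Orders tensors for greedy packing: tensors that stay alive for the whole
// invocation come first, then the rest by decreasing size, breaking ties by
// earliest allocation time.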
void ArenaPlanner::CreateTensorAllocationVector(
std::vector<int32_t>* tensors_to_allocate) {
const TfLiteTensor* tensors = this->graph_info_->tensors();
auto tensor_compare = [&](int idx1, int idx2) {
if (alloc_node_[idx1] == 0 && dealloc_node_[idx1] == kNodeNotAssigned) {
if (alloc_node_[idx2] == 0 && dealloc_node_[idx2] == kNodeNotAssigned) {
return idx1 < idx2;
}
return true;
}
if (alloc_node_[idx2] == 0 && dealloc_node_[idx2] == kNodeNotAssigned) {
return false;
}
auto size1 = tensors[idx1].bytes;
auto size2 = tensors[idx2].bytes;
if (size1 != size2) {
return size1 > size2;
}
return alloc_node_[idx1] < alloc_node_[idx2];
};
std::sort(tensors_to_allocate->begin(), tensors_to_allocate->end(),
tensor_compare);
}
std::vector<int32_t> ArenaPlanner::GetTensorsToAllocate(int first_node,
int last_node) {
int num_tensors = static_cast<int>(graph_info_->num_tensors());
std::vector<int32_t> tensors_to_allocate;
tensors_to_allocate.reserve(num_tensors);
for (int i = first_node; i <= last_node; ++i) {
tensors_to_allocate.insert(tensors_to_allocate.end(),
nodes_to_tensors_[i].begin(),
nodes_to_tensors_[i].end());
}
return tensors_to_allocate;
}
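// Computes fresh offsets for tensors whose buffers are missing or too small,
// resetting or purging the arena's active allocations depending on whether
// earlier nodes are being re-run.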
TfLiteStatus ArenaPlanner::CalculateAllocations(
int first_node, int last_node, std::vector<int32_t>* tensors_allocated) {
const std::vector<int32_t> tensors_to_allocate =
GetTensorsToAllocate(first_node, last_node);
tensors_allocated->reserve(tensors_to_allocate.size());
TfLiteTensor* tensors = graph_info_->tensors();
for (const auto& tensor_index : tensors_to_allocate) {
TfLiteTensor& tensor = tensors[tensor_index];
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size < tensor.bytes) {
tensors_allocated->push_back(tensor_index);
}
} else if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
tensors_allocated->push_back(tensor_index);
}
}
if (tensors_allocated->empty()) {
last_active_node_ = last_node;
return kTfLiteOk;
}
if (first_node < last_active_node_) {
arena_.ResetAllocs();
last_active_node_ = first_node;
} else {
arena_.PurgeActiveAllocs(first_node);
}
CreateTensorAllocationVector(tensors_allocated);
for (const auto& tensor_index : *tensors_allocated) {
TfLiteTensor& tensor = tensors[tensor_index];
auto it = actual_tensor_id_.find(tensor_index);
if (it != actual_tensor_id_.end()) {
TfLiteAllocationType allocation_type =
tensors[it->second].allocation_type;
if (allocation_type != kTfLiteArenaRw ||
tensors[it->second].bytes != tensors[it->first].bytes) {
actual_tensor_id_.erase(it);
} else {
continue;
}
}
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(
arena_.Allocate(context_, tensor_alignment_, tensor.bytes,
tensor_index, alloc_node_[tensor_index],
dealloc_node_[tensor_index], &allocs_[tensor_index]));
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
allocs_[tensor_index].size == 0) {
if (allocs_[tensor_index].size < tensor.bytes) {
TF_LITE_ENSURE_STATUS(persistent_arena_.Allocate(
context_, tensor_alignment_, tensor.bytes, tensor_index,
alloc_node_[tensor_index],
std::numeric_limits<int32_t>::max(),
&allocs_[tensor_index]));
}
}
}
last_active_node_ = last_node;
return kTfLiteOk;
}
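// Buffer sharing is only legal between tensors placed in the same arena.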
bool AreTensorsAllocatedInSameArena(int32_t root_tensor_index,
int32_t tensor_index,
const TfLiteTensor* tensors) {
if (tensors[root_tensor_index].allocation_type == kTfLiteArenaRw &&
tensors[tensor_index].allocation_type == kTfLiteArenaRw) {
return true;
}
if (tensors[root_tensor_index].allocation_type == kTfLiteArenaRwPersistent &&
tensors[tensor_index].allocation_type == kTfLiteArenaRwPersistent) {
return true;
}
return false;
}
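// Resolves the tensor's data pointer. A tensor sharing a root tensor's buffer
// in the same arena simply aliases the root's pointer; otherwise the pointer
// is resolved from the owning arena.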
TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int32_t tensor_index,
TfLiteTensor* tensors) {
auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
TfLiteTensor& tensor = tensors[tensor_index];
int32_t root_tensor_index = actual_tensor_it == actual_tensor_id_.end()
? tensor_index
: actual_tensor_it->second;
const TfLiteTensor& root_tensor = tensors[root_tensor_index];
if (root_tensor_index != tensor_index) {
if (AreTensorsAllocatedInSameArena(root_tensor_index, tensor_index,
tensors)) {
ResolveTensorAllocation(root_tensor_index, tensors);
tensor.data.data = root_tensor.data.data;
return kTfLiteOk;
}
}
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size != 0) {
return arena_.ResolveAlloc(context_, allocs_[tensor_index],
&tensor.data.raw);
}
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
return persistent_arena_.ResolveAlloc(context_, allocs_[tensor_index],
&tensor.data.raw);
}
return kTfLiteOk;
}
} | #include "tensorflow/lite/arena_planner.h"
#include <algorithm>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
int gNumAlloc = 0;
void OnTfLiteArenaAlloc(int subgraph_index, int arena_id, size_t num_bytes) {
gNumAlloc++;
}
int gNumDealloc = 0;
void OnTfLiteArenaDealloc(int subgraph_index, int arena_id, size_t num_bytes) {
gNumDealloc++;
}
namespace {
constexpr const int kTensorAlignment = 4;
class TestOp {
public:
TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
std::initializer_list<int> temporaries,
int builtin_code = kTfLiteBuiltinAdd,
int inplace_operator = kTfLiteInplaceOpInput0Shared)
: inputs_(inputs),
outputs_(outputs),
temporaries_(temporaries),
registration_{} {
registration_.builtin_code = builtin_code;
registration_.inplace_operator = inplace_operator;
}
const std::vector<int>& inputs() const { return inputs_; }
const std::vector<int>& outputs() const { return outputs_; }
const std::vector<int>& temporaries() const { return temporaries_; }
const TfLiteRegistration& registration() const { return registration_; }
private:
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> temporaries_;
TfLiteRegistration registration_;
};
class TestGraph {
public:
TestGraph(std::initializer_list<int> inputs,
std::initializer_list<TestOp> nodes,
std::initializer_list<int> outputs)
: inputs_(inputs), outputs_(outputs) {
int max_tensor_index = 0;
for (int t : inputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (int t : outputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (const auto& node : nodes) {
auto int_array = [](const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
};
registrations_.push_back(node.registration());
nodes_.push_back(TfLiteNode());
nodes_.back().inputs = int_array(node.inputs());
for (int t : node.inputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().outputs = int_array(node.outputs());
for (int t : node.outputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().temporaries = int_array(node.temporaries());
for (int t : node.temporaries()) {
max_tensor_index = std::max(max_tensor_index, t);
}
}
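// Give each tensor a distinct dummy size (3, 6, 9, ...) so offsets are
// easy to predict in the tests.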
for (int i = 0; i <= max_tensor_index; ++i) {
tensors_.push_back(TfLiteTensor());
tensors_.back().allocation_type = kTfLiteArenaRw;
tensors_.back().bytes = (i + 1) * 3;
}
}
~TestGraph() {
for (auto node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
TfLiteIntArrayFree(node.temporaries);
}
}
const std::vector<TfLiteNode>& nodes() { return nodes_; }
std::vector<TfLiteTensor>* tensors() { return &tensors_; }
const std::vector<int>& inputs() { return inputs_; }
const std::vector<int>& outputs() { return outputs_; }
const std::vector<int>& variables() { return variables_; }
const std::vector<TfLiteRegistration>& registrations() {
return registrations_;
}
void SetVariables(const std::vector<int>& variables) {
variables_ = variables;
}
void Swap(TestGraph* other) {
std::swap(nodes_, other->nodes_);
std::swap(tensors_, other->tensors_);
std::swap(inputs_, other->inputs_);
std::swap(outputs_, other->outputs_);
std::swap(variables_, other->variables_);
}
private:
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<TfLiteRegistration> registrations_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
};
class TestGraphInfo : public GraphInfo {
public:
explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
size_t num_tensors() const override { return graph_->tensors()->size(); }
TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
TfLiteTensor* tensor(size_t index) override {
return &graph_->tensors()->at(index);
}
size_t num_execution_nodes() const override { return graph_->nodes().size(); }
size_t num_total_nodes() const override { return graph_->nodes().size(); }
const TfLiteNode& node(size_t index) const override {
return graph_->nodes()[index];
}
const TfLiteRegistration& registration(size_t index) const override {
return graph_->registrations()[index];
}
size_t node_index(size_t index) const override { return index; }
const std::vector<int>& inputs() const override { return graph_->inputs(); }
const std::vector<int>& outputs() const override { return graph_->outputs(); }
const std::vector<int>& variables() const override {
return graph_->variables();
}
private:
TestGraph* graph_;
};
void ReportError(TfLiteContext* context, const char* format, ...) {
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
LOG(INFO) << temp_buffer;
}
class ArenaPlannerTest : public ::testing::Test {
protected:
void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
graph_ = graph;
context_.ReportError = ReportError;
planner_ = std::make_unique<ArenaPlanner>(
&context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)),
preserve_all_tensors, kTensorAlignment);
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void SwapGraph(TestGraph* graph) {
graph_->Swap(graph);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void Execute(int start, int end) {
CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
}
void ReleaseNonPersistentMemory() {
CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
}
void AcquireNonPersistentMemory() {
CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
}
void ResetAllocations() { CHECK(planner_->ResetAllocations() == kTfLiteOk); }
void ResetAllocationsAfter(int node) {
CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
}
bool HasNonPersistentMemory() {
return planner_ && planner_->HasNonPersistentMemory();
}
void Destroy() { planner_.reset(); }
std::ptrdiff_t GetOffset(int tensor_index) {
const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
return reinterpret_cast<std::intptr_t>(tensor.data.raw) -
planner_->BasePointer(tensor.allocation_type);
}
std::ptrdiff_t GetOffsetAfter(int tensor_index) {
const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
std::ptrdiff_t offset = GetOffset(tensor_index) + tensor.bytes;
if (offset % kTensorAlignment != 0) {
offset += kTensorAlignment - offset % kTensorAlignment;
}
return offset;
}
bool IsUnallocated(int tensor_index) {
return (*graph_->tensors())[tensor_index].data.raw == nullptr;
}
TfLiteContext context_;
TestGraph* graph_;
std::unique_ptr<ArenaPlanner> planner_;
};
TEST_F(ArenaPlannerTest, EmptyGraph) {
TestGraph graph({}, {}, {});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
}
TEST_F(ArenaPlannerTest, GraphWithOneOp) {
TestGraph graph({0, 10}, {{{0}, {}, {}}, {{10}, {}, {}}}, {5, 11});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(10), GetOffsetAfter(0));
EXPECT_TRUE((*graph.tensors())[5].data.raw == nullptr);
EXPECT_TRUE((*graph.tensors())[11].data.raw == nullptr);
}
TEST_F(ArenaPlannerTest, GraphWithOneOp2) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), 8);
EXPECT_EQ(GetOffsetAfter(2), 20);
}
TEST_F(ArenaPlannerTest, ZeroSizedTensors) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
(*graph.tensors())[1].bytes = 0;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ((*graph_->tensors())[1].data.raw, nullptr);
}
TEST_F(ArenaPlannerTest, SimpleGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
}
TEST_F(ArenaPlannerTest, AllocsCorrectlyReset) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
ResetAllocations();
std::vector<TfLiteTensor>& tensors = *graph.tensors();
tensors[0].bytes += 1;
tensors[1].bytes += 1;
tensors[2].bytes += 1;
tensors[3].bytes += 1;
tensors[4].bytes += 1;
tensors[5].bytes += 1;
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
}
TEST_F(ArenaPlannerTest, SimpleGraphInputsPreserved) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithTemporary) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithInplaceReshape) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{1}, {3}, {}},
{{2, 3},
{4},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{4}, {5}, {}}
},
{5});
(*graph.tensors())[2].bytes = 24;
(*graph.tensors())[4].bytes = 24;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), GetOffset(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithChainOfInplaceOps) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{2, 3},
{4},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{4, 3},
{5},
{},
kTfLiteBuiltinExpandDims,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{5, 3},
{6},
{},
kTfLiteBuiltinSqueeze,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{6, 3},
{7},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{7}, {8}, {}},
},
{8});
(*graph.tensors())[2].bytes = 24;
(*graph.tensors())[4].bytes = 24;
(*graph.tensors())[5].bytes = 24;
(*graph.tensors())[6].bytes = 24;
(*graph.tensors())[7].bytes = 24;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), GetOffset(4));
EXPECT_EQ(GetOffset(2), GetOffset(5));
EXPECT_EQ(GetOffset(2), GetOffset(6));
EXPECT_EQ(GetOffset(2), GetOffset(7));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeInputOutput) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{2, 1},
{3},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{3}, {4}, {}}},
{4});
(*graph.tensors())[2].bytes = 24;
(*graph.tensors())[3].bytes = 24;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), GetOffset(3));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeInputTensor) {
TestGraph graph({0, 1},
{
{{0, 1},
{2},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared |
kTfLiteInplaceOpDataUnmodified},
{{4}, {3}, {}}},
{3});
SetGraph(&graph);
(*graph.tensors())[0].allocation_type = kTfLiteDynamic;
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(0), GetOffset(2));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeOutputTensor) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{2, 1},
{3},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared |
kTfLiteInplaceOpDataUnmodified},
},
{3});
SetGraph(&graph);
(*graph.tensors())[0].allocation_type = kTfLiteDynamic;
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(2), GetOffset(3));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeDynamicInput) {
TestGraph graph({0, 1},
{
{{0, 1},
{2},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpDataUnmodified}
},
{2});
SetGraph(&graph);
(*graph.tensors())[0].allocation_type = kTfLiteDynamic;
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(0), GetOffset(2));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithBroadcastingAddInPlace) {
TestGraph graph(
{0, 1},
{
{{0, 1}, {3}, {}},
{{1, 2}, {4}, {}},
{{3, 4},
{5},
{},
kTfLiteBuiltinAdd,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared},
{{5}, {6}, {}},
},
{6});
(*graph.tensors())[3].bytes = 8;
(*graph.tensors())[4].bytes = 16;
(*graph.tensors())[5].bytes = 16;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(3), GetOffset(5));
EXPECT_EQ(GetOffset(4), GetOffset(5));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithBroadcastingAddNotInPlace) {
TestGraph graph(
{0, 1},
{
{{0, 1}, {3}, {}},
{{1, 2}, {4}, {}},
{{3, 4}, {5}, {}, kTfLiteBuiltinAdd, kTfLiteInplaceOpInput0Shared},
{{5}, {6}, {}},
},
{6});
(*graph.tensors())[3].bytes = 8;
(*graph.tensors())[4].bytes = 8;
(*graph.tensors())[5].bytes = 64;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(3), GetOffset(5));
EXPECT_NE(GetOffset(4), GetOffset(5));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
ResetAllocationsAfter(0);
EXPECT_FALSE(IsUnallocated(0));
EXPECT_FALSE(IsUnallocated(1));
EXPECT_FALSE(IsUnallocated(2));
EXPECT_TRUE(IsUnallocated(3));
EXPECT_TRUE(IsUnallocated(4));
EXPECT_TRUE(IsUnallocated(5));
(*graph.tensors())[4].bytes += 64;
Execute(1, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(2), 48);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
(*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
void* tensor5_ptr = (*graph.tensors())[5].data.raw;
ResetAllocationsAfter(0);
EXPECT_FALSE(IsUnallocated(0));
EXPECT_FALSE(IsUnallocated(1));
EXPECT_FALSE(IsUnallocated(2));
EXPECT_TRUE(IsUnallocated(3));
EXPECT_TRUE(IsUnallocated(4));
EXPECT_FALSE(IsUnallocated(5));
Execute(0, graph.nodes().size() - 1);
EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithOptionals) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, -1, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithOptionalOutput) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{-1, 3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithLargeTensor) {
TestGraph graph({0, -1},
{
{{0}, {1}, {}},
{{1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4, -1}, {3}, {}}
},
{3});
(*graph.tensors())[1].bytes = 40;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(1), 4);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(3), 4);
EXPECT_EQ(GetOffset(5), 4);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithPersistentTensor) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4, -1}, {3}, {}}
},
{3});
(*graph.tensors())[1].allocation_type = kTfLiteArenaRwPersistent;
graph.SetVariables({1});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE((*graph.tensors())[0].data.raw, (*graph.tensors())[1].data.raw);
EXPECT_EQ(GetOffset(5), 4);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), 4);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), 0);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithDynamicTensor) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4, -1}, {3}, {}}
},
{3});
(*graph.tensors())[1].allocation_type = kTfLiteDynamic;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ((*graph.tensors())[1].data.raw, nullptr);
EXPECT_EQ(GetOffset(5), 4);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), 4);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, LargerGraphAndStepwiseAllocation) {
TestGraph graph({0, 1},
{
{{0, 1}, {2, 3}, {}},
{{2, 0}, {4, 5}, {6}},
{{1, -1}, {7}, {}},
{{7, 3}, {8}, {9}},
{{4, 5, 8}, {10}, {}},
},
{10});
SetGraph(&graph);
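  // Execute the plan one node at a time; a tensor stays unallocated until its node runs.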
Execute(0, 0);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_TRUE(IsUnallocated(6));
EXPECT_TRUE(IsUnallocated(4));
EXPECT_TRUE(IsUnallocated(5));
EXPECT_TRUE(IsUnallocated(7));
EXPECT_TRUE(IsUnallocated(9));
EXPECT_TRUE(IsUnallocated(8));
EXPECT_TRUE(IsUnallocated(10));
Execute(1, 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_TRUE(IsUnallocated(7));
EXPECT_TRUE(IsUnallocated(9));
EXPECT_TRUE(IsUnallocated(8));
EXPECT_TRUE(IsUnallocated(10));
Execute(2, 2);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
EXPECT_TRUE(IsUnallocated(9));
EXPECT_TRUE(IsUnallocated(8));
EXPECT_TRUE(IsUnallocated(10));
Execute(3, 3);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(9), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
EXPECT_TRUE(IsUnallocated(10));
Execute(4, 4);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(9), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
EXPECT_EQ(GetOffset(10), 12);
}
TEST_F(ArenaPlannerTest, ModifiedGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
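  // Swap in a pruned graph to verify that the planner replans from scratch.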
TestGraph pruned_graph({0, 1},
{
{{0, 1}, {3}, {}},
},
{3});
SwapGraph(&pruned_graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
}
TEST_F(ArenaPlannerTest, ModifiedGraph_DeallocateNonPersistentArena) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
AcquireNonPersistentMemory();
AcquireNonPersistentMemory();
EXPECT_TRUE(HasNonPersistentMemory());
ReleaseNonPersistentMemory();
EXPECT_FALSE(HasNonPersistentMemory());
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), 0);
EXPECT_EQ(GetOffset(3), 0);
TestGraph pruned_graph({0, 1},
{
{{0, 1}, {3}, {}},
},
{3});
SwapGraph(&pruned_graph);
Execute(0, graph.nodes().size() - 1);
AcquireNonPersistentMemory();
EXPECT_TRUE(HasNonPersistentMemory());
ReleaseNonPersistentMemory();
AcquireNonPersistentMemory();
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
}
TEST_F(ArenaPlannerTest, ComplexGraph) {
TestGraph graph({0},
{
{{0}, {1}, {}},
{{1}, {2}, {}},
{{1}, {3}, {}},
{{1}, {4}, {}},
{{2, 3, 4}, {5}, {}},
{{5}, {6}, {}},
{{5}, {7}, {}},
{{6, 7}, {8}, {}},
},
{8});
(*graph.tensors())[0].bytes = 32;
(*graph.tensors())[1].bytes = 28;
(*graph.tensors())[2].bytes = 36;
(*graph.tensors())[3].bytes = 16;
(*graph.tensors())[4].bytes = 8;
(*graph.tensors())[5].bytes = 64;
(*graph.tensors())[6].bytes = 10;
(*graph.tensors())[7].bytes = 40;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 32);
EXPECT_EQ(GetOffset(7), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(7));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(8), 32);
}
TEST_F(ArenaPlannerTest, GraphWithIntermediates) {
TestGraph graph({0, 1},
{
{{0}, {2}, {3}},
{{1, 2}, {4, 5}, {}},
{{5}, {6, 7}, {8, 9, 10}},
{{4, 6}, {11}, {12}},
{{11}, {13}, {}},
{{7, 13}, {14}, {15}},
},
{11, 14});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(15), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(14), GetOffsetAfter(15));
EXPECT_EQ(GetOffset(13), GetOffsetAfter(14));
EXPECT_EQ(GetOffset(12), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(11), GetOffsetAfter(13));
EXPECT_EQ(GetOffset(10), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(9), GetOffsetAfter(10));
EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(11));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(8));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(7));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(5));
}
TEST_F(ArenaPlannerTest, DebugTensors) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {5}},
{{2, 0}, {4}, {6}},
{{4}, {3}, {7}}
},
{3});
SetGraph(&graph, false);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), GetOffset(6));
EXPECT_EQ(GetOffset(6), GetOffset(7));
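  // Re-plan with the second SetGraph() argument enabled; no two tensors may then share an offset.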
SetGraph(&graph, true);
Execute(0, graph.nodes().size() - 1);
  std::set<std::ptrdiff_t> tensor_offsets;
  for (int i = 0; i < 8; i++) {
    tensor_offsets.insert(GetOffset(i));
  }
  EXPECT_EQ(tensor_offsets.size(), 8);
}
TEST_F(ArenaPlannerTest, DebugTensorsInputReuse) {
TestGraph graph({0, 1},
{
{{0, 1}, {2, 3}, {}},
{{2, 3}, {4}, {}, kTfLiteBuiltinMul},
{{4, 2}, {5}, {}, kTfLiteBuiltinSub},
{{5}, {6}, {}},
},
{6});
(*graph.tensors())[4].bytes = 200;
(*graph.tensors())[5].bytes = 200;
SetGraph(&graph, false);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(4), GetOffset(5));
SetGraph(&graph, true);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(4), GetOffset(5));
}
TEST_F(ArenaPlannerTest, SimpleProfilerTest) {
gNumAlloc = 0;
gNumDealloc = 0;
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(gNumAlloc, 1);
EXPECT_EQ(gNumDealloc, 0);
Destroy();
EXPECT_EQ(gNumDealloc, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/arena_planner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/arena_planner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d8c52ca2-a890-4b74-982d-b4eca24afbf8 | cpp | tensorflow/tensorflow | mutable_op_resolver | tensorflow/lite/mutable_op_resolver.cc | tensorflow/lite/mutable_op_resolver_test.cc | #include "tensorflow/lite/mutable_op_resolver.h"
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/api/op_resolver_internal.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
const TfLiteRegistration* MutableOpResolver::FindOp(tflite::BuiltinOperator op,
int version) const {
auto it = builtins_.find(std::make_pair(op, version));
if (it != builtins_.end()) {
return &it->second;
}
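  // Not found locally; fall back to the chained op resolvers in chain order.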
for (const OpResolver* other : other_op_resolvers_) {
const TfLiteRegistration* result = other->FindOp(op, version);
if (result != nullptr) {
return result;
}
}
return nullptr;
}
const TfLiteRegistration* MutableOpResolver::FindOp(const char* op,
int version) const {
auto it = custom_ops_.find(std::make_pair(op, version));
if (it != custom_ops_.end()) {
return &it->second;
}
for (const OpResolver* other : other_op_resolvers_) {
const TfLiteRegistration* result = other->FindOp(op, version);
if (result != nullptr) {
return result;
}
}
return nullptr;
}
void MutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
const TfLiteRegistration* registration,
int version) {
if (registration == nullptr) {
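    // Registration factories may legitimately return null; silently ignore such calls.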
return;
}
TfLiteRegistration new_registration = *registration;
new_registration.custom_name = nullptr;
new_registration.builtin_code = op;
new_registration.version = version;
auto op_key = std::make_pair(op, version);
builtins_[op_key] = new_registration;
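  // A manually registered builtin may differ from the stock kernel, so treat it as user-defined.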
may_directly_contain_user_defined_ops_ = true;
}
void MutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
const TfLiteRegistration* registration,
int min_version, int max_version) {
for (int version = min_version; version <= max_version; ++version) {
AddBuiltin(op, registration, version);
}
}
void MutableOpResolver::AddCustom(const char* name,
const TfLiteRegistration* registration,
int version) {
TfLiteRegistration new_registration = *registration;
new_registration.builtin_code = BuiltinOperator_CUSTOM;
new_registration.custom_name = name;
new_registration.version = version;
auto op_key = std::make_pair(name, version);
custom_ops_[op_key] = new_registration;
may_directly_contain_user_defined_ops_ = true;
}
void MutableOpResolver::AddCustom(const char* name,
const TfLiteRegistration* registration,
int min_version, int max_version) {
for (int version = min_version; version <= max_version; ++version) {
AddCustom(name, registration, version);
}
}
void MutableOpResolver::AddAll(const MutableOpResolver& other) {
for (const auto& other_builtin : other.builtins_) {
builtins_[other_builtin.first] = other_builtin.second;
}
for (const auto& other_custom_op : other.custom_ops_) {
custom_ops_[other_custom_op.first] = other_custom_op.second;
}
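  // Place the other resolver's chained resolvers first so they take precedence in lookups.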
other_op_resolvers_.insert(other_op_resolvers_.begin(),
other.other_op_resolvers_.begin(),
other.other_op_resolvers_.end());
}
void MutableOpResolver::ChainOpResolver(const OpResolver* other) {
other_op_resolvers_.push_back(other);
}
bool MutableOpResolver::MayContainUserDefinedOps() const {
if (may_directly_contain_user_defined_ops_) {
return true;
}
for (const OpResolver* other : other_op_resolvers_) {
if (OpResolverInternal::MayContainUserDefinedOps(*other)) {
return true;
}
}
return false;
}
} | #include "tensorflow/lite/mutable_op_resolver.h"
#include <stddef.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
TfLiteStatus DummyInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteRegistration* GetDummyRegistration() {
static TfLiteRegistration registration = {
.init = nullptr,
.free = nullptr,
.prepare = nullptr,
.invoke = DummyInvoke,
};
return ®istration;
}
TfLiteStatus Dummy2Invoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus Dummy2Prepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
void* Dummy2Init(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void Dummy2free(TfLiteContext* context, void* buffer) {}
TfLiteRegistration* GetDummy2Registration() {
static TfLiteRegistration registration = {
.init = Dummy2Init,
.free = Dummy2free,
.prepare = Dummy2Prepare,
.invoke = Dummy2Invoke,
};
return ®istration;
}
TEST(MutableOpResolverTest, FindOp) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_ADD);
EXPECT_EQ(found_registration->version, 1);
}
TEST(MutableOpResolverTest, FindMissingOp) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, RegisterOpWithSingleVersion) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, RegisterOpWithMultipleVersions) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 3);
}
TEST(MutableOpResolverTest, FindOpWithUnsupportedVersions) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 4);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, FindCustomOp) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("AWESOME", 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 1);
}
TEST(MutableOpResolverTest, FindCustomName) {
MutableOpResolver resolver;
TfLiteRegistration* reg = GetDummyRegistration();
reg->custom_name = "UPDATED";
resolver.AddCustom(reg->custom_name, reg);
const TfLiteRegistration* found_registration =
resolver.FindOp(reg->custom_name, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_EQ(found_registration->invoke, GetDummyRegistration()->invoke);
EXPECT_EQ(found_registration->version, 1);
EXPECT_EQ(found_registration->custom_name, "UPDATED");
}
TEST(MutableOpResolverTest, FindBuiltinName) {
MutableOpResolver resolver1;
TfLiteRegistration* reg = GetDummy2Registration();
reg->custom_name = "UPDATED";
resolver1.AddBuiltin(BuiltinOperator_ADD, reg);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->prepare,
GetDummy2Registration()->prepare);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->init,
GetDummy2Registration()->init);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->free,
GetDummy2Registration()->free);
EXPECT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->custom_name, nullptr);
}
TEST(MutableOpResolverTest, FindMissingCustomOp) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp("EXCELLENT", 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, FindCustomOpWithUnsupportedVersion) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("AWESOME", 2);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, AddAll) {
MutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver1.AddBuiltin(BuiltinOperator_MUL, GetDummy2Registration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_SUB, GetDummyRegistration());
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
resolver1.AddAll(resolver2);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_MUL, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummyRegistration()->invoke);
}
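// Test-only subclass that exposes the protected ChainOpResolver() method.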
class ChainingMutableOpResolver : public MutableOpResolver {
public:
using MutableOpResolver::ChainOpResolver;
};
TEST(MutableOpResolverTest, ChainOpResolver) {
ChainingMutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver1.AddBuiltin(BuiltinOperator_MUL, GetDummy2Registration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_SUB, GetDummyRegistration());
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
resolver1.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_MUL, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, CopyConstructChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3(resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, AssignChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3;
resolver3 = resolver2;
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, AddAllChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3;
resolver3.AddAll(resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, ChainOpResolverCustomOpPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
MutableOpResolver resolver2;
resolver2.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver3;
resolver3.ChainOpResolver(&resolver1);
resolver3.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, ChainOpResolverBuiltinOpPrecedence) {
MutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
ChainingMutableOpResolver resolver3;
resolver3.ChainOpResolver(&resolver1);
resolver3.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, ChainOpResolverAddVersusChainPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver1);
MutableOpResolver resolver3;
resolver3.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver4;
resolver4.ChainOpResolver(&resolver2);
resolver4.ChainOpResolver(&resolver3);
ASSERT_EQ(resolver4.FindOp("MyCustom", 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, AddAllAddVersusChainPrecedence) {
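  // Ops added directly to a resolver win over ops reachable only through a chained resolver.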
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver1);
MutableOpResolver resolver3;
resolver3.AddCustom("MyCustom", GetDummy2Registration());
MutableOpResolver resolver4;
resolver4.AddAll(resolver2);
resolver4.AddAll(resolver3);
ASSERT_EQ(resolver4.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6068034-ee18-406b-bba3-544be2c8deaf | cpp | tensorflow/tensorflow | optional_debug_tools | tensorflow/lite/optional_debug_tools.cc | tensorflow/lite/optional_debug_tools_test.cc | #include "tensorflow/lite/optional_debug_tools.h"
#include <cassert>
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <limits>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
const char* AllocTypeName(TfLiteAllocationType type) {
switch (type) {
case kTfLiteMemNone:
return "kTfLiteMemNone";
case kTfLiteMmapRo:
return "kTfLiteMmapRo";
case kTfLiteDynamic:
return "kTfLiteDynamic";
case kTfLiteArenaRw:
return "kTfLiteArenaRw";
case kTfLiteArenaRwPersistent:
return "kTfLiteArenaRwPersistent";
case kTfLitePersistentRo:
return "kTfLitePersistentRo";
case kTfLiteCustom:
return "kTfLiteCustom";
case kTfLiteVariantObject:
return "kTfLiteVariantObject";
}
return "(invalid)";
}
SubgraphDelegationMetadata GetNodeDelegationMetadata(const Subgraph& subgraph) {
SubgraphDelegationMetadata metadata;
metadata.is_node_delegated.resize(subgraph.nodes_size());
metadata.replaced_by_node.resize(subgraph.nodes_size());
metadata.has_delegate_applied = false;
for (size_t node_index = 0; node_index < subgraph.nodes_size();
node_index++) {
metadata.is_node_delegated[node_index] = false;
metadata.replaced_by_node[node_index] = -1;
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(static_cast<int>(node_index));
const TfLiteNode& node = node_and_reg->first;
auto* const delegate = node.delegate;
if (delegate != nullptr) {
metadata.has_delegate_applied = true;
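      // A delegate kernel records the nodes it replaced in its TfLiteDelegateParams.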
auto* params = static_cast<TfLiteDelegateParams*>(node.builtin_data);
for (int nid : TfLiteIntArrayView(params->nodes_to_replace)) {
metadata.is_node_delegated[nid] = true;
metadata.replaced_by_node[nid] = node_index;
}
}
}
return metadata;
}
namespace {
void PrintIntVector(const std::vector<int>& v,
bool collapse_consecutives = true,
bool add_newline = false);
class MemoryArenaInfo {
public:
explicit MemoryArenaInfo(TfLiteAllocationType type)
: allocation_type_(type) {}
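  // Records the tensor if it belongs to this arena, tracking the largest
  // tensor and the arena's overall [start, end) address span.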
void Update(size_t tensor_index, const TfLiteTensor& tensor) {
if (tensor.allocation_type != allocation_type_) return;
if (tensor.data.data == nullptr) return;
if (tensor.bytes > max_tensor_mem_bytes_) {
max_tensor_mem_bytes_ = tensor.bytes;
max_tensor_id_ = tensor_index;
}
size_t current_start_addr = reinterpret_cast<size_t>(tensor.data.data);
size_t current_end_addr = current_start_addr + tensor.bytes;
if (current_start_addr < min_tensor_start_addr_) {
min_tensor_start_addr_ = current_start_addr;
}
if (current_end_addr > max_tensor_end_addr_) {
max_tensor_end_addr_ = current_end_addr;
}
TensorAllocInfo info;
info.tensor_id = tensor_index;
info.start_addr = current_start_addr;
info.bytes = tensor.bytes;
const auto result = alloc_info_.insert(info);
assert(result.second);
(void)result;
}
size_t GetArenaStartingAddress() const { return min_tensor_start_addr_; }
void Print() const {
printf("%s Info: ", AllocTypeName(allocation_type_));
if (max_tensor_end_addr_ == 0) {
printf("not holding any allocation.\n");
return;
}
printf("\nTensor %zu has the max size %zu bytes (%.3f MB).\n",
max_tensor_id_, max_tensor_mem_bytes_,
static_cast<float>(max_tensor_mem_bytes_) / (1 << 20));
const size_t arena_size = max_tensor_end_addr_ - min_tensor_start_addr_;
    printf(
        "This memory arena is estimated as [0x%zx, 0x%zx), taking %zu bytes "
        "(%.3f MB).\n",
        min_tensor_start_addr_, max_tensor_end_addr_, arena_size,
        static_cast<float>(arena_size) / (1 << 20));
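    // Walk allocations in start-address order, keeping each one that begins at
    // or after the previous pick's end: a chain of non-overlapping tensors.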
std::vector<const TensorAllocInfo*> arena_increase_trace;
size_t last_end_addr = 0;
for (const auto& info : alloc_info_) {
if (info.start_addr >= last_end_addr) {
arena_increase_trace.emplace_back(&info);
last_end_addr = info.start_addr + info.bytes;
}
}
printf(
"One possible set of tensors that have non-overlapping memory spaces "
"with each other, and they take up the whole arena:\n");
printf("Tensor ");
for (int i = 0; i < arena_increase_trace.size() - 1; ++i) {
printf("%zu -> ", arena_increase_trace[i]->tensor_id);
}
printf("%zu.\n", arena_increase_trace.back()->tensor_id);
}
private:
struct TensorAllocInfo {
size_t tensor_id;
size_t start_addr;
size_t bytes;
};
struct TensorAllocInfoCompare {
bool operator()(const TensorAllocInfo& lhs,
const TensorAllocInfo& rhs) const {
if (lhs.start_addr < rhs.start_addr) return true;
if (lhs.start_addr == rhs.start_addr) {
if (lhs.bytes > rhs.bytes) return true;
if (lhs.bytes == rhs.bytes) return lhs.tensor_id < rhs.tensor_id;
return false;
}
return false;
}
};
const TfLiteAllocationType allocation_type_;
size_t max_tensor_mem_bytes_ = 0;
size_t max_tensor_id_ = -1;
size_t min_tensor_start_addr_ = std::numeric_limits<size_t>::max();
size_t max_tensor_end_addr_ = 0;
std::set<TensorAllocInfo, TensorAllocInfoCompare> alloc_info_;
};
class DynamicMemoryInfo {
public:
void Update(size_t tensor_index, const TfLiteTensor& tensor) {
if (tensor.allocation_type != kTfLiteDynamic) return;
if (tensor.data.data == nullptr) return;
if (tensor.bytes > max_tensor_mem_bytes_) {
max_tensor_mem_bytes_ = tensor.bytes;
max_tensor_ids_.clear();
      max_tensor_ids_.push_back(static_cast<int>(tensor_index));
} else if (tensor.bytes == max_tensor_mem_bytes_) {
max_tensor_ids_.push_back(static_cast<int>(tensor_index));
}
total_mem_bytes_ += tensor.bytes;
num_total_tensors_++;
}
void Print() const {
printf("kTfLiteDynamic Info: ");
if (total_mem_bytes_ == 0) {
printf("not holding any allocation.\n");
return;
}
printf("\n%zu Tensors ", max_tensor_ids_.size());
PrintIntVector(max_tensor_ids_, false);
printf(" have the max size %zu bytes (%.3f MB).\n", max_tensor_mem_bytes_,
static_cast<float>(max_tensor_mem_bytes_) / (1 << 20));
printf("There are %d dynamic tensors, taking %zu bytes (%.3f MB).\n",
num_total_tensors_, total_mem_bytes_,
static_cast<float>(total_mem_bytes_) / (1 << 20));
}
private:
size_t max_tensor_mem_bytes_ = 0;
std::vector<int> max_tensor_ids_;
size_t total_mem_bytes_ = 0;
int num_total_tensors_ = 0;
};
class ModelTensorMemoryInfo {
public:
ModelTensorMemoryInfo()
: rw_info_(kTfLiteArenaRw),
rw_persistent_info_(kTfLiteArenaRwPersistent),
mmap_info_(kTfLiteMmapRo) {}
void Update(size_t tensor_index, const TfLiteTensor& tensor) {
rw_info_.Update(tensor_index, tensor);
rw_persistent_info_.Update(tensor_index, tensor);
mmap_info_.Update(tensor_index, tensor);
dynamic_info_.Update(tensor_index, tensor);
}
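  // Returns the tensor's byte offset from the start of its arena, or -1 if it
  // isn't backed by one of the tracked arenas.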
int64_t GetOffsetFromArenaStart(const TfLiteTensor& tensor) const {
if (tensor.data.data == nullptr) return -1;
size_t tensor_address = reinterpret_cast<size_t>(tensor.data.data);
if (tensor.allocation_type == kTfLiteArenaRw) {
return static_cast<int64_t>(tensor_address -
rw_info_.GetArenaStartingAddress());
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
return static_cast<int64_t>(
tensor_address - rw_persistent_info_.GetArenaStartingAddress());
}
if (tensor.allocation_type == kTfLiteMmapRo) {
return static_cast<int64_t>(tensor_address -
mmap_info_.GetArenaStartingAddress());
}
return -1;
}
void Print() const {
printf("\n");
rw_info_.Print();
printf("\n");
rw_persistent_info_.Print();
printf("\n");
mmap_info_.Print();
printf("\n");
dynamic_info_.Print();
printf("\n");
}
private:
MemoryArenaInfo rw_info_;
MemoryArenaInfo rw_persistent_info_;
MemoryArenaInfo mmap_info_;
DynamicMemoryInfo dynamic_info_;
};
template <typename T>
void PrintTotalBytesOfTensors(const Subgraph& subgraph, const T& tensor_ids,
const std::string& prefix = " -> ") {
size_t total = 0;
for (const auto id : tensor_ids) {
const TfLiteTensor* tensor = subgraph.tensor(id);
if (tensor == nullptr) continue;
total += tensor->bytes;
}
printf("%s%zuB (%.2fMB)\n", prefix.c_str(), total,
static_cast<float>(total) / (1 << 20));
}
void PrintIntVector(const std::vector<int>& v, bool collapse_consecutives,
bool add_newline) {
if (v.empty()) {
printf("(null)");
if (add_newline) {
printf("\n");
}
return;
}
int range_start = v[0];
int range_end = range_start;
std::function<void(const char*)> print_range = [&](const char* suffix) {
if (range_end == range_start) {
printf("%d%s", range_start, suffix);
} else if (range_end == range_start + 1) {
printf("%d,%d%s", range_start, range_end, suffix);
} else {
printf("%d-%d%s", range_start, range_end, suffix);
}
};
printf("[");
for (int i = 1; i < v.size(); ++i) {
int current = v[i];
if (collapse_consecutives && (current == range_end + 1)) {
range_end = current;
} else {
print_range(",");
range_start = range_end = current;
}
}
print_range("]");
if (add_newline) {
printf("\n");
}
}
void PrintTfLiteIntVector(const TfLiteIntArray* v,
bool collapse_consecutives = true,
bool add_newline = false) {
std::vector<int> tmp;
if (!v || v->size <= 0) {
PrintIntVector(tmp, collapse_consecutives, add_newline);
return;
}
tmp.insert(tmp.end(), v->data, v->data + v->size);
PrintIntVector(tmp, collapse_consecutives, add_newline);
}
const char* TensorTypeName(TfLiteType type) {
switch (type) {
case kTfLiteNoType:
return "kTfLiteNoType";
case kTfLiteFloat32:
return "kTfLiteFloat32";
case kTfLiteInt32:
return "kTfLiteInt32";
case kTfLiteUInt32:
return "kTfLiteUInt32";
case kTfLiteUInt8:
return "kTfLiteUInt8";
case kTfLiteInt8:
return "kTfLiteInt8";
case kTfLiteInt64:
return "kTfLiteInt64";
case kTfLiteUInt64:
return "kTfLiteUInt64";
case kTfLiteString:
return "kTfLiteString";
case kTfLiteBool:
return "kTfLiteBool";
case kTfLiteUInt16:
return "kTfLiteUInt16";
case kTfLiteInt16:
return "kTfLiteInt16";
case kTfLiteComplex64:
return "kTfLiteComplex64";
case kTfLiteComplex128:
return "kTfLiteComplex128";
case kTfLiteFloat16:
return "kTfLiteFloat16";
case kTfLiteBFloat16:
return "kTfLiteBFloat16";
case kTfLiteFloat64:
return "kTfLiteFloat64";
case kTfLiteResource:
return "kTfLiteResource";
case kTfLiteVariant:
return "kTfLiteVariant";
case kTfLiteInt4:
return "kTfLiteInt4";
}
return "(invalid)";
}
std::string TruncateString(const char* str, int size_limit,
bool truncate_at_end = false) {
if (str == nullptr) return "(nil)";
std::string truncated(str);
const size_t length = truncated.size();
if (length <= size_limit) return truncated;
if (size_limit <= 3) return std::string(size_limit, '.');
if (truncate_at_end) {
truncated.resize(size_limit);
truncated.replace(size_limit - 3, 3, "...");
} else {
truncated.erase(0, length - size_limit);
truncated.replace(0, 3, "...");
}
return truncated;
}
}
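// Prints, for each subgraph: the tensor table, per-node inputs/outputs, the
// execution plan, and memory arena statistics.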
void PrintInterpreterState(const Interpreter* interpreter,
const int32_t tensor_name_display_length,
const int32_t tensor_type_display_length,
const int32_t alloc_type_display_length) {
const size_t num_subgraphs = interpreter->subgraphs_size();
printf("Interpreter has %zu subgraphs.\n\n", num_subgraphs);
for (int i = 0; i < num_subgraphs; ++i) {
const Subgraph& subgraph = *(interpreter->subgraph(i));
printf("-----------Subgraph-%d has %zu tensors and %zu nodes------------\n",
i, subgraph.tensors_size(), subgraph.nodes_size());
printf("%zu Inputs: ", subgraph.inputs().size());
PrintIntVector(subgraph.inputs());
PrintTotalBytesOfTensors(subgraph, subgraph.inputs());
printf("%zu Outputs: ", subgraph.outputs().size());
PrintIntVector(subgraph.outputs());
PrintTotalBytesOfTensors(subgraph, subgraph.outputs());
printf("\n");
ModelTensorMemoryInfo tensor_mem_info;
for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
tensor_index++) {
const TfLiteTensor* tensor =
subgraph.tensor(static_cast<int>(tensor_index));
tensor_mem_info.Update(tensor_index, *tensor);
}
std::stringstream var_length_fs;
var_length_fs << "%-" << tensor_name_display_length << "s %-"
<< tensor_type_display_length << "s %-"
<< alloc_type_display_length << "s";
printf(
("Tensor %3s " + var_length_fs.str() + " %-18s %-10s %-16s\n").c_str(),
"ID", "Name", "Type", "AllocType", "Size (Bytes/MB)", "Shape",
"MemAddr-Offset");
for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
tensor_index++) {
const TfLiteTensor* tensor =
subgraph.tensor(static_cast<int>(tensor_index));
printf(("Tensor %3zu " + var_length_fs.str() + " %-8zu / %.2f ").c_str(),
tensor_index,
TruncateString(tensor->name, tensor_name_display_length,
true)
.c_str(),
TruncateString(TensorTypeName(tensor->type),
tensor_type_display_length)
.c_str(),
TruncateString(AllocTypeName(tensor->allocation_type),
alloc_type_display_length)
.c_str(),
tensor->bytes, (static_cast<float>(tensor->bytes) / (1 << 20)));
PrintTfLiteIntVector(tensor->dims, false);
const int64_t start_offset =
tensor_mem_info.GetOffsetFromArenaStart(*tensor);
const int64_t end_offset =
start_offset == -1
? -1
: start_offset + static_cast<int64_t>(tensor->bytes);
printf(" [%" PRId64 ", %" PRId64 ")\n", start_offset, end_offset);
}
tensor_mem_info.Print();
subgraph.DumpMemoryPlannerDebugInfo();
SubgraphDelegationMetadata delegation_metadata =
GetNodeDelegationMetadata(subgraph);
for (size_t node_index = 0; node_index < subgraph.nodes_size();
node_index++) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(static_cast<int>(node_index));
const TfLiteNode& node = node_and_reg->first;
const TfLiteRegistration& reg = node_and_reg->second;
std::string delegated_status;
bool is_node_delegated = false;
TfLiteIntArray empty_int_array;
empty_int_array.size = 0;
if (node.delegate == nullptr) {
if (delegation_metadata.is_node_delegated[node_index]) {
delegated_status = "(delegated by node ";
delegated_status.append(
std::to_string(delegation_metadata.replaced_by_node[node_index]));
delegated_status.append(")");
is_node_delegated = true;
} else {
delegated_status = "(not delegated)";
}
}
if (reg.custom_name != nullptr) {
printf("Node %3zu Operator Custom Name %s %s\n", node_index,
reg.custom_name, delegated_status.c_str());
} else {
printf("Node %3zu Operator Builtin Code %3d %s %s\n", node_index,
reg.builtin_code, EnumNamesBuiltinOperator()[reg.builtin_code],
delegated_status.c_str());
}
printf(" %d Input Tensors:",
node.inputs != nullptr ? node.inputs->size : 0);
if (node.inputs) {
PrintTfLiteIntVector(
node.inputs,
(node.delegate != nullptr));
PrintTotalBytesOfTensors(
subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.inputs));
}
printf(" %d Output Tensors:",
node.outputs != nullptr ? node.outputs->size : 0);
if (node.outputs) {
PrintTfLiteIntVector(node.outputs);
PrintTotalBytesOfTensors(
subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.outputs));
}
if (node.intermediates && node.intermediates->size) {
printf(" %d Intermediate Tensors:", node.intermediates->size);
PrintTfLiteIntVector(node.intermediates);
PrintTotalBytesOfTensors(subgraph,
is_node_delegated
? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.intermediates));
}
if (node.temporaries && node.temporaries->size) {
printf(" %d Temporary Tensors:", node.temporaries->size);
PrintTfLiteIntVector(node.temporaries);
PrintTotalBytesOfTensors(
subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.temporaries));
}
}
printf("\nExecution plan as the list of %zu nodes invoked in-order: ",
subgraph.execution_plan().size());
PrintIntVector(subgraph.execution_plan(), true,
true);
if (delegation_metadata.has_delegate_applied) {
printf("Among these nodes in the execution plan:\n");
for (int node_id : subgraph.execution_plan()) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(node_id);
const TfLiteNode& node = node_and_reg->first;
auto* const delegate = node.delegate;
if (delegate == nullptr) continue;
const char* delegate_name = node_and_reg->second.custom_name;
auto* delegate_params =
static_cast<TfLiteDelegateParams*>(node.builtin_data);
printf(" Node %d is a %s node (%p), which has delegated %d nodes: ",
node_id, delegate_name == nullptr ? "[n/a]" : delegate_name,
delegate, delegate_params->nodes_to_replace->size);
PrintTfLiteIntVector(delegate_params->nodes_to_replace,
true,
true);
}
}
printf("--------------Subgraph-%d dump has completed--------------\n\n", i);
}
printf("--------------Memory Arena Status Start--------------\n");
size_t total_arena_memory_bytes = 0;
size_t total_dynamic_memory_bytes = 0;
size_t total_resource_bytes = 0;
for (int i = 0; i < num_subgraphs; ++i) {
const Subgraph& subgraph = *(interpreter->subgraph(i));
Subgraph::SubgraphAllocInfo alloc_info;
subgraph.GetMemoryAllocInfo(&alloc_info);
total_arena_memory_bytes += alloc_info.arena_size;
total_arena_memory_bytes += alloc_info.arena_persist_size;
total_dynamic_memory_bytes += alloc_info.dynamic_size;
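    // Resource memory is shared across subgraphs, so count it only once.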
if (i == 0) {
total_resource_bytes = alloc_info.resource_size;
}
}
size_t total_memory_bytes = total_arena_memory_bytes +
total_dynamic_memory_bytes + total_resource_bytes;
printf("Total memory usage: %zu bytes (%.3f MB)\n", total_memory_bytes,
static_cast<float>(total_memory_bytes) / (1 << 20));
printf("- Total arena memory usage: %zu bytes (%.3f MB)\n",
total_arena_memory_bytes,
static_cast<float>(total_arena_memory_bytes) / (1 << 20));
printf("- Total dynamic memory usage: %zu bytes (%.3f MB)\n",
total_dynamic_memory_bytes,
static_cast<float>(total_dynamic_memory_bytes) / (1 << 20));
if (total_resource_bytes) {
printf("- Total resource memory usage: %zu bytes (%.3f MB)\n",
total_resource_bytes,
static_cast<float>(total_resource_bytes) / (1 << 20));
}
putchar('\n');
for (int i = 0; i < num_subgraphs; ++i) {
const Subgraph& subgraph = *(interpreter->subgraph(i));
Subgraph::SubgraphAllocInfo alloc_info;
subgraph.GetMemoryAllocInfo(&alloc_info);
if (alloc_info.arena_size) {
printf(
"Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Arena (Normal)",
alloc_info.arena_size,
static_cast<float>(alloc_info.arena_size * 100) / total_memory_bytes);
}
if (alloc_info.arena_persist_size) {
printf("Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Arena (Persistent)",
alloc_info.arena_persist_size,
static_cast<float>(alloc_info.arena_persist_size * 100) /
total_memory_bytes);
}
if (alloc_info.dynamic_size) {
printf("Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Dyanmic Tensors",
alloc_info.dynamic_size,
static_cast<float>(alloc_info.dynamic_size * 100) /
total_memory_bytes);
}
}
printf("--------------Memory Arena Status End--------------\n\n");
}
} | #include "tensorflow/lite/optional_debug_tools.h"
#include <algorithm>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace {
void InitInputTensorData(Interpreter* interpreter) {
ASSERT_EQ(interpreter->inputs().size(), 1);
TfLiteTensor* t = interpreter->input_tensor(0);
ASSERT_EQ(t->type, kTfLiteFloat32);
float* data = static_cast<float*>(t->data.data);
int num_elements = t->bytes / sizeof(float);
std::fill(data, data + num_elements, 1.0f);
}
}
TEST(OptionalDebugTools, PrintInterpreterState) {
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(
InterpreterBuilder(
*model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
&interpreter),
kTfLiteOk);
PrintInterpreterState(interpreter.get());
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
PrintInterpreterState(interpreter.get());
InitInputTensorData(interpreter.get());
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
PrintInterpreterState(interpreter.get());
}
TEST(OptionalDebugTools, PrintInterpreterStateWithDelegate) {
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(
InterpreterBuilder(
*model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
&interpreter),
kTfLiteOk);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
ASSERT_EQ(interpreter->ModifyGraphWithDelegate(xnnpack_delegate.get()),
kTfLiteOk);
InitInputTensorData(interpreter.get());
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
PrintInterpreterState(interpreter.get());
}
TEST(OptionalDebugTools, GetNodeDelegationMetadata) {
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(
InterpreterBuilder(
*model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
&interpreter),
kTfLiteOk);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
auto metadata = GetNodeDelegationMetadata(*interpreter->subgraph(0));
EXPECT_FALSE(metadata.has_delegate_applied);
for (int i = 0; i < metadata.is_node_delegated.size(); ++i) {
EXPECT_FALSE(metadata.is_node_delegated[i]);
EXPECT_EQ(metadata.replaced_by_node[i], -1);
}
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
ASSERT_EQ(interpreter->ModifyGraphWithDelegate(xnnpack_delegate.get()),
kTfLiteOk);
auto metadata_with_delegate =
GetNodeDelegationMetadata(*interpreter->subgraph(0));
EXPECT_TRUE(metadata_with_delegate.has_delegate_applied);
EXPECT_EQ(metadata_with_delegate.is_node_delegated[0], true);
EXPECT_EQ(metadata_with_delegate.replaced_by_node[0], 2);
EXPECT_EQ(metadata_with_delegate.is_node_delegated[1], true);
EXPECT_EQ(metadata_with_delegate.replaced_by_node[1], 2);
EXPECT_EQ(metadata_with_delegate.is_node_delegated[2], false);
EXPECT_EQ(metadata_with_delegate.replaced_by_node[2], -1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/optional_debug_tools.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/optional_debug_tools_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1dcf762-9ec7-475c-a7a3-29543231774f | cpp | tensorflow/tensorflow | minimal_logging | tensorflow/lite/minimal_logging.cc | tensorflow/lite/minimal_logging_test.cc | #include "tensorflow/lite/minimal_logging.h"
#include <cstdarg>
#include "tensorflow/lite/logger.h"
namespace tflite {
namespace logging_internal {
void MinimalLogger::Log(LogSeverity severity, const char* format, ...) {
va_list args;
va_start(args, format);
LogFormatted(severity, format, args);
va_end(args);
}
const char* MinimalLogger::GetSeverityName(LogSeverity severity) {
switch (severity) {
case TFLITE_LOG_VERBOSE:
return "VERBOSE";
case TFLITE_LOG_INFO:
return "INFO";
case TFLITE_LOG_WARNING:
return "WARNING";
case TFLITE_LOG_ERROR:
return "ERROR";
case TFLITE_LOG_SILENT:
return "SILENT";
}
return "<Unknown severity>";
}
LogSeverity MinimalLogger::GetMinimumLogSeverity() {
return MinimalLogger::minimum_log_severity_;
}
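// Sets the minimum severity and returns the previous value so callers can restore it.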
LogSeverity MinimalLogger::SetMinimumLogSeverity(LogSeverity new_severity) {
LogSeverity old_severity = MinimalLogger::minimum_log_severity_;
MinimalLogger::minimum_log_severity_ = new_severity;
return old_severity;
}
}
} | #include "tensorflow/lite/minimal_logging.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/logger.h"
namespace tflite {
TEST(MinimalLogging, Basic) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Foo");
EXPECT_EQ("INFO: Foo\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, BasicFormatted) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Foo %s %s", "Bar", "Baz");
EXPECT_EQ("INFO: Foo Bar Baz\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Warn) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "One", "");
EXPECT_EQ("WARNING: One\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Error) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Two");
EXPECT_EQ("ERROR: Two\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, UnknownSeverity) {
testing::internal::CaptureStderr();
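  // The default minimum severity is VERBOSE on Android and in debug builds, INFO otherwise.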
LogSeverity default_log_severity = TFLITE_LOG_INFO;
#if defined(__ANDROID__) || !defined(NDEBUG)
default_log_severity = TFLITE_LOG_VERBOSE;
#endif
EXPECT_EQ(tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
static_cast<LogSeverity>(-1)),
default_log_severity);
TFLITE_LOG_PROD(static_cast<LogSeverity>(-1), "Three");
EXPECT_EQ("<Unknown severity>: Three\n",
testing::internal::GetCapturedStderr());
tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
default_log_severity);
}
TEST(MinimalLogging, MinimumSeverity) {
testing::internal::CaptureStderr();
LogSeverity default_log_severity = TFLITE_LOG_INFO;
#if defined(__ANDROID__) || !defined(NDEBUG)
default_log_severity = TFLITE_LOG_VERBOSE;
#endif
EXPECT_EQ(tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
TFLITE_LOG_WARNING),
default_log_severity);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Foo");
TFLITE_LOG_PROD(default_log_severity, "Bar");
EXPECT_EQ("WARNING: Foo\n", testing::internal::GetCapturedStderr());
tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
default_log_severity);
}
TEST(MinimalLogging, Once) {
testing::internal::CaptureStderr();
for (int i = 0; i < 10; ++i) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO, "Count: %d", i);
}
EXPECT_EQ("INFO: Count: 0\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Debug) {
testing::internal::CaptureStderr();
TFLITE_LOG(TFLITE_LOG_INFO, "Foo");
TFLITE_LOG(TFLITE_LOG_WARNING, "Bar");
TFLITE_LOG(TFLITE_LOG_ERROR, "Baz");
#ifndef NDEBUG
EXPECT_EQ("INFO: Foo\nWARNING: Bar\nERROR: Baz\n",
testing::internal::GetCapturedStderr());
#else
EXPECT_TRUE(testing::internal::GetCapturedStderr().empty());
#endif
}
TEST(MinimalLogging, DebugOnce) {
testing::internal::CaptureStderr();
for (int i = 0; i < 10; ++i) {
TFLITE_LOG_ONCE(TFLITE_LOG_INFO, "Count: %d", i);
}
#ifndef NDEBUG
EXPECT_EQ("INFO: Count: 0\n", testing::internal::GetCapturedStderr());
#else
EXPECT_TRUE(testing::internal::GetCapturedStderr().empty());
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/minimal_logging.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/minimal_logging_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0680bc2-f060-431e-b3a4-16242667a3f8 | cpp | tensorflow/tensorflow | simple_memory_arena | tensorflow/lite/simple_memory_arena.cc | tensorflow/lite/simple_memory_arena_test.cc | #include "tensorflow/lite/simple_memory_arena.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif
#if defined(__ANDROID__)
#define TF_LITE_HAS_ALIGNED_ALLOC (__ANDROID_API__ >= 28)
#elif defined(__APPLE__)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif defined(_WIN32)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif __cplusplus >= 201703L || __STDC_VERSION__ >= 201112L
#define TF_LITE_HAS_ALIGNED_ALLOC 1
#endif
namespace {
template <typename T>
T AlignTo(size_t alignment, T offset) {
return offset % alignment == 0 ? offset
: offset + (alignment - offset % alignment);
}
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer);
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment);
#if defined(_WIN32)
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
char* pointer = reinterpret_cast<char*>(_aligned_malloc(size, alignment));
char* aligned_ptr = pointer;
return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
_aligned_free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment) {
char* pointer = reinterpret_cast<char*>(
_aligned_realloc(old_buffer.pointer, new_size, alignment));
char* aligned_ptr = pointer;
return {pointer, aligned_ptr};
}
#else
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
#if TF_LITE_HAS_ALIGNED_ALLOC
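  // aligned_alloc requires the allocation size to be a multiple of the alignment.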
const size_t allocation_size = AlignTo(alignment, size + alignment - 1);
char* pointer =
reinterpret_cast<char*>(::aligned_alloc(alignment, allocation_size));
char* aligned_ptr = pointer;
#else
const size_t allocation_size = size + alignment - 1;
char* pointer = reinterpret_cast<char*>(std::malloc(allocation_size));
char* aligned_ptr = reinterpret_cast<char*>(
AlignTo(alignment, reinterpret_cast<std::uintptr_t>(pointer)));
#endif
#if defined(__clang__)
#if __has_feature(memory_sanitizer)
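  // Under MemorySanitizer, zero-fill so the fresh buffer reads as initialized.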
std::memset(pointer, 0, allocation_size);
#endif
#endif
return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
std::free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment) {
tflite::PointerAlignedPointerPair new_buffer =
AlignedAlloc(new_size, alignment);
if (new_size > 0 && old_size > 0) {
const size_t copy_amount = std::min(new_size, old_size);
std::memcpy(new_buffer.aligned_pointer, old_buffer.aligned_pointer,
copy_amount);
}
AlignedFree(old_buffer);
return new_buffer;
}
#endif
}
namespace tflite {
bool ResizableAlignedBuffer::Resize(size_t new_size) {
if (new_size <= data_size_) {
return false;
}
#ifdef TF_LITE_TENSORFLOW_PROFILER
PauseHeapMonitoring(true);
OnTfLiteArenaAlloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
new_size);
if (data_size_ > 0) {
OnTfLiteArenaDealloc(subgraph_index_,
reinterpret_cast<std::uintptr_t>(this), data_size_);
}
#endif
auto new_buffer = AlignedRealloc(buffer_, data_size_, new_size, alignment_);
bool reallocated = (new_buffer.aligned_pointer != buffer_.aligned_pointer);
buffer_ = new_buffer;
data_size_ = new_size;
#ifdef TF_LITE_TENSORFLOW_PROFILER
PauseHeapMonitoring(false);
#endif
return reallocated;
}
void ResizableAlignedBuffer::Release() {
if (buffer_.pointer == nullptr) {
return;
}
#ifdef TF_LITE_TENSORFLOW_PROFILER
OnTfLiteArenaDealloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
data_size_);
#endif
AlignedFree(buffer_);
buffer_.pointer = nullptr;
buffer_.aligned_pointer = nullptr;
data_size_ = 0;
}
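// Discards allocations whose lifetime starts strictly after the given node.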
void SimpleMemoryArena::PurgeAfter(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].first_node > node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
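// Discards allocations whose lifetime ended strictly before the given node.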
void SimpleMemoryArena::PurgeActiveAllocs(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].last_node < node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
void SimpleMemoryArena::CalculateActiveAllocs(
const std::vector<ArenaAllocWithUsageInterval>& allocs, int32_t node) {
active_allocs_.clear();
for (int i = 0; i < allocs.size(); ++i) {
if (allocs[i].first_node <= node && allocs[i].last_node >= node) {
active_allocs_.push_back(allocs[i]);
}
}
std::sort(active_allocs_.begin(), active_allocs_.end());
}
void SimpleMemoryArena::ResetAllocs() { active_allocs_.clear(); }
TfLiteStatus SimpleMemoryArena::Allocate(
TfLiteContext* context, size_t alignment, size_t size, int32_t tensor,
int32_t first_node, int32_t last_node,
ArenaAllocWithUsageInterval* new_alloc) {
TF_LITE_ENSURE(context, alignment <= underlying_buffer_.GetAlignment());
new_alloc->tensor = tensor;
new_alloc->first_node = first_node;
new_alloc->last_node = last_node;
new_alloc->size = size;
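  // Zero-sized tensors need no arena space.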
if (size == 0) {
new_alloc->offset = 0;
return kTfLiteOk;
}
const size_t kOffsetNotAssigned = std::numeric_limits<size_t>::max();
size_t best_offset = kOffsetNotAssigned;
size_t best_offset_fit = kOffsetNotAssigned;
size_t current_offset = 0;
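  // Best-fit scan: allocations with disjoint lifetimes may share space;
  // otherwise slot into the smallest gap that fits.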
for (const auto& alloc : active_allocs_) {
if (alloc.last_node < first_node || alloc.first_node > last_node) {
continue;
}
size_t aligned_current_offset = AlignTo(alignment, current_offset);
if (aligned_current_offset + size <= alloc.offset &&
alloc.offset - aligned_current_offset < best_offset_fit) {
best_offset = aligned_current_offset;
      // Measure the fit from the aligned offset actually used.
      best_offset_fit = alloc.offset - aligned_current_offset;
}
current_offset = std::max(current_offset, alloc.offset + alloc.size);
if (best_offset_fit == 0) {
break;
}
}
if (best_offset == kOffsetNotAssigned) {
best_offset = AlignTo(alignment, current_offset);
}
high_water_mark_ = std::max(high_water_mark_, best_offset + size);
new_alloc->offset = best_offset;
auto insertion_it = std::upper_bound(active_allocs_.begin(),
active_allocs_.end(), *new_alloc);
active_allocs_.insert(insertion_it, *new_alloc);
return kTfLiteOk;
}
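// Grows the backing buffer to the planned high-water mark; *arena_reallocated
// reports whether the base pointer moved.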
TfLiteStatus SimpleMemoryArena::Commit(bool* arena_reallocated) {
*arena_reallocated = underlying_buffer_.Resize(high_water_mark_);
committed_ = true;
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ResolveAlloc(
TfLiteContext* context, const ArenaAllocWithUsageInterval& alloc,
char** output_ptr) {
TF_LITE_ENSURE(context, committed_);
TF_LITE_ENSURE(context, output_ptr != nullptr);
TF_LITE_ENSURE(context,
underlying_buffer_.GetSize() >= (alloc.offset + alloc.size));
if (alloc.size == 0) {
*output_ptr = nullptr;
} else {
*output_ptr = underlying_buffer_.GetPtr() + alloc.offset;
}
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ClearPlan() {
committed_ = false;
high_water_mark_ = 0;
active_allocs_.clear();
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ReleaseBuffer() {
committed_ = false;
underlying_buffer_.Release();
return kTfLiteOk;
}
TFLITE_ATTRIBUTE_WEAK void DumpArenaInfo(
const std::string& name, const std::vector<int>& execution_plan,
size_t arena_size, const std::vector<ArenaAllocWithUsageInterval>& allocs) {
}
void SimpleMemoryArena::DumpDebugInfo(
const std::string& name, const std::vector<int>& execution_plan) const {
tflite::DumpArenaInfo(name, execution_plan, underlying_buffer_.GetSize(),
active_allocs_);
}
} | #include "tensorflow/lite/simple_memory_arena.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
void ReportError(TfLiteContext* context, const char* format, ...) {}
TEST(SimpleMemoryArenaTest, BasicArenaOperations) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[6];
arena.Allocate(&context, 32, 2047, 0, 1, 3, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 2, 5, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2, 3, 6, &allocs[2]);
arena.Allocate(&context, 32, 2047, 3, 5, 6, &allocs[3]);
arena.Allocate(&context, 32, 1023, 4, 4, 6, &allocs[4]);
arena.Allocate(&context, 32, 1023, 5, 6, 6, &allocs[5]);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 2048);
EXPECT_EQ(allocs[2].offset, 4096);
EXPECT_EQ(allocs[3].offset, 0);
EXPECT_EQ(allocs[4].offset, 6144);
EXPECT_EQ(allocs[5].offset, 2048);
}
TEST(SimpleMemoryArenaTest, BasicZeroAlloc) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval alloc;
ASSERT_EQ(arena.Allocate(&context, 32, 0, 0, 1, 2, &alloc), kTfLiteOk);
EXPECT_EQ(alloc.offset, 0);
EXPECT_EQ(alloc.size, 0);
char* resolved_ptr = nullptr;
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
EXPECT_FALSE(reallocated);
EXPECT_EQ(resolved_ptr, nullptr);
}
TEST(SimpleMemoryArenaTest, InterleavedZeroAlloc) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[4];
ASSERT_EQ(arena.Allocate(&context, 32, 2047, 0, 0, 4, &allocs[0]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 0, 1, 1, 2, &allocs[1]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 1023, 2, 1, 2, &allocs[2]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 2047, 3, 3, 4, &allocs[3]), kTfLiteOk);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 0);
EXPECT_EQ(allocs[2].offset, 2048);
EXPECT_EQ(allocs[3].offset, 2048);
}
TEST(SimpleMemoryArenaTest, TestClearPlan) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2, 1, 2, &allocs[2]);
bool reallocated = false;
arena.Commit(&reallocated);
ASSERT_TRUE(reallocated);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 2048);
EXPECT_EQ(allocs[2].offset, 4096);
arena.ClearPlan();
arena.Allocate(&context, 32, 1023, 3, 0, 2, &allocs[3]);
arena.Allocate(&context, 32, 1023, 4, 1, 2, &allocs[4]);
arena.Allocate(&context, 32, 1023, 5, 1, 2, &allocs[5]);
arena.Commit(&reallocated);
ASSERT_FALSE(reallocated);
EXPECT_EQ(allocs[3].offset, 0);
EXPECT_EQ(allocs[4].offset, 1024);
EXPECT_EQ(allocs[5].offset, 2048);
arena.ClearPlan();
arena.Allocate(&context, 32, 4095, 6, 0, 2, &allocs[6]);
arena.Allocate(&context, 32, 4095, 7, 1, 2, &allocs[7]);
arena.Allocate(&context, 32, 4095, 8, 1, 2, &allocs[8]);
arena.Commit(&reallocated);
ASSERT_TRUE(reallocated);
EXPECT_EQ(allocs[6].offset, 0);
EXPECT_EQ(allocs[7].offset, 4096);
EXPECT_EQ(allocs[8].offset, 8192);
}
TEST(SimpleMemoryArenaTest, TestPurgeAllocs) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[5];
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1,
1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2,
2, 3, &allocs[2]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr0 = nullptr;
char* resolved_ptr1 = nullptr;
char* resolved_ptr2 = nullptr;
char* resolved_ptr3 = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
arena.PurgeActiveAllocs(4);
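  // Every allocation so far ends at or before node 3, so purging at node 4
  // empties the active list and the next allocation can reuse offset 0.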
arena.Allocate(&context, 32, 13, 3,
4, 5, &allocs[4]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[4], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[4].offset, 0);
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[0].offset, 0);
}
TEST(SimpleMemoryArenaTest, TestResetAllocs) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1,
1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2,
2, 3, &allocs[2]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr0 = nullptr;
char* resolved_ptr1 = nullptr;
char* resolved_ptr2 = nullptr;
char* resolved_ptr3 = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
arena.Allocate(&context, 32, 13, 0,
0, 3, &allocs[3]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
EXPECT_EQ(resolved_ptr3, resolved_ptr2 + 2048);
arena.ResetAllocs();
arena.Allocate(&context, 32, 13, 0,
0, 2, &allocs[3]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[3].offset, 0);
}
TEST(SimpleMemoryArenaTest, TestClearBuffer) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
ASSERT_EQ(arena.BasePointer(), 0);
ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
ASSERT_NE(arena.BasePointer(), 0);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
}
class BufferAndPlanClearingTest : public ::testing::Test,
public ::testing::WithParamInterface<bool> {};
TEST_P(BufferAndPlanClearingTest, TestClearBufferAndClearPlan) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
if (GetParam()) {
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
} else {
ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
}
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
EXPECT_FALSE(reallocated);
char* resolved_ptr = nullptr;
ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
}
INSTANTIATE_TEST_SUITE_P(BufferAndPlanClearingTest, BufferAndPlanClearingTest,
::testing::Values(true, false));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_memory_arena.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_memory_arena_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f27a8a3-8772-4af9-a79e-dda37cf9c8dc | cpp | tensorflow/tensorflow | graph_info | tensorflow/lite/graph_info.cc | tensorflow/lite/graph_info_test.cc | #include "tensorflow/lite/graph_info.h"
#include <algorithm>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
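// Sorts `items` and removes duplicates in place.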
template <class T>
void Uniquefy(std::vector<T>* items) {
std::sort(items->begin(), items->end());
items->erase(std::unique(items->begin(), items->end()), items->end());
}
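// Partitions the execution nodes into independent subsets whose nodes are
// either all marked for delegation (kTfPartition) or all left to the TF Lite
// runtime (kTfNonPartition). Each node and each produced tensor is assigned
// an "epoch" (the index of the subset that contains/produces it); graph
// inputs start at kEpochAlwaysReady. A node may join the subset currently
// being built only once all of its input tensors and incoming control edges
// have been satisfied.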
class PartitionGraphIntoIndependentNodeSubsetsImpl {
public:
PartitionGraphIntoIndependentNodeSubsetsImpl(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges& control_edges)
: info_(info),
node_subsets_(node_subsets),
node_type_(info_->num_total_nodes(), NodeSubset::kTfNonPartition),
greedily_(greedily),
control_edges_(control_edges),
num_incoming_control_edges_(info_->num_execution_nodes(), 0) {
for (auto node_index : TfLiteIntArrayView(nodes_to_partition)) {
node_type_[node_index] = NodeSubset::kTfPartition;
}
Uniquefy(&control_edges_);
}
void Partition() {
node_subsets_->clear();
tensor_epochs_.clear();
tensor_epochs_.resize(info_->num_tensors(), kEpochAlwaysReady);
node_epochs_.clear();
node_epochs_.resize(info_->num_execution_nodes(), kEpochNotReady);
num_incoming_control_edges_.clear();
num_incoming_control_edges_.resize(info_->num_execution_nodes(), 0);
for (const auto& edge : control_edges_) {
++num_incoming_control_edges_[edge.second];
}
for (int node_index = 0; node_index < info_->num_execution_nodes();
node_index++) {
const TfLiteNode& node = info_->node(node_index);
for (int output_tensor_index : TfLiteIntArrayView(node.outputs)) {
if (output_tensor_index == kTfLiteOptionalTensor) continue;
tensor_epochs_[output_tensor_index] = kEpochNotReady;
}
}
while (true) {
BuildNodeSubset();
if (node_subsets_->back().nodes.empty()) {
node_subsets_->pop_back();
break;
}
}
for (int output_index : info_->outputs()) {
int output_epoch = tensor_epochs_[output_index];
if (output_epoch == kEpochAlwaysReady) {
continue;
}
NodeSubset& output_subset = (*node_subsets_)[output_epoch];
output_subset.output_tensors.push_back(output_index);
}
for (NodeSubset& node_subset : *node_subsets_) {
Uniquefy(&node_subset.input_tensors);
Uniquefy(&node_subset.output_tensors);
}
}
private:
enum {
kEpochNotReady = -1,
kEpochAlwaysReady = -2
};
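  // Tries to add the node to the subset currently being built. Returns false
  // if the node is already assigned to an epoch, if any input tensor is not
  // yet ready, if incoming control edges are still pending, or if the node's
  // type does not match the current subset's type.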
bool UpdateNode(int node_index) {
const TfLiteNode& node = info_->node(node_index);
NodeSubset& current_subset = node_subsets_->back();
int current_epoch = node_subsets_->size() - 1;
if (node_epochs_[node_index] != kEpochNotReady) {
return false;
}
for (int input_tensor_index : TfLiteIntArrayView(node.inputs)) {
if (input_tensor_index != kTfLiteOptionalTensor &&
tensor_epochs_[input_tensor_index] == kEpochNotReady) {
return false;
}
}
if (num_incoming_control_edges_[node_index] != 0) {
return false;
}
int original_node_idx = info_->node_index(node_index);
if (current_subset.type == NodeSubset::kTfUnexplored) {
current_subset.type = node_type_[original_node_idx];
}
if (current_subset.type == node_type_[original_node_idx]) {
node_epochs_[node_index] = current_epoch;
current_subset.nodes.push_back(original_node_idx);
for (int output_tensor_index : TfLiteIntArrayView(node.outputs)) {
if (output_tensor_index == kTfLiteOptionalTensor) continue;
tensor_epochs_[output_tensor_index] = current_epoch;
}
for (int input_tensor_index : TfLiteIntArrayView(node.inputs)) {
if (input_tensor_index == kTfLiteOptionalTensor) {
continue;
}
int input_epoch = tensor_epochs_[input_tensor_index];
int node_epoch = current_epoch;
if (input_epoch != node_epoch) {
current_subset.input_tensors.push_back(input_tensor_index);
if (input_epoch >= 0) {
NodeSubset& input_subset = (*node_subsets_)[input_epoch];
input_subset.output_tensors.push_back(input_tensor_index);
}
}
}
for (auto edge_iter =
std::lower_bound(control_edges_.begin(), control_edges_.end(),
ControlEdge(node_index, 0));
edge_iter != control_edges_.end() && edge_iter->first == node_index;
++edge_iter) {
--num_incoming_control_edges_[edge_iter->second];
}
return true;
} else {
return false;
}
}
void BuildNodeSubset() {
node_subsets_->emplace_back(NodeSubset());
while (true) {
bool did_something = false;
for (int node_index = 0; node_index < info_->num_execution_nodes();
node_index++) {
if (UpdateNode(node_index)) {
did_something = true;
} else {
if (did_something && !greedily_) {
return;
}
}
}
if (!did_something) return;
}
}
const GraphInfo* info_;
std::vector<NodeSubset>* node_subsets_;
std::vector<NodeSubset::Type> node_type_;
std::vector<int> tensor_epochs_;
std::vector<int> node_epochs_;
const bool greedily_;
ControlEdges control_edges_;
std::vector<int> num_incoming_control_edges_;
};
}
TfLiteStatus PartitionGraphIntoIndependentNodeSubsets(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges* control_edges) {
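  // If the caller supplies no control edges, synthesize a chain between ops
  // that may have side effects so greedy partitioning cannot reorder them
  // relative to each other.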
ControlEdges my_control_edges;
if (control_edges == nullptr) {
control_edges = &my_control_edges;
if (greedily) {
for (int last_op_with_side_effect = -1, node_index = 0;
node_index < info->num_execution_nodes(); ++node_index) {
const auto& node = info->node(node_index);
if (node.might_have_side_effect) {
if (last_op_with_side_effect != -1) {
my_control_edges.emplace_back(last_op_with_side_effect, node_index);
}
last_op_with_side_effect = node_index;
}
}
}
}
PartitionGraphIntoIndependentNodeSubsetsImpl(
info, nodes_to_partition, node_subsets, greedily, *control_edges)
.Partition();
return kTfLiteOk;
}
} | #include "tensorflow/lite/graph_info.h"
#include <stddef.h>
#include <algorithm>
#include <memory>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
using ::testing::Eq;
using ::testing::ExplainMatchResult;
using ::testing::Pointwise;
using NodeSubsets = std::vector<NodeSubset>;
TfLiteIntArray* ConvertVector(const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
}
class SimpleTestGraph : public GraphInfo {
public:
SimpleTestGraph(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<std::tuple<std::vector<int>, std::vector<int>, bool>>&
nodes,
int node_index_offset = 0)
: inputs_(inputs),
outputs_(outputs),
node_index_offset_(node_index_offset) {
NeedsTensors(inputs_);
NeedsTensors(outputs_);
for (int i = 0; i < node_index_offset; ++i) AddNode({}, {}, false);
for (const auto& [inputs, outputs, might_have_side_effect] : nodes) {
AddNode(inputs, outputs, might_have_side_effect);
}
registrations_.resize(nodes.size());
}
~SimpleTestGraph() override {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
}
}
size_t num_total_nodes() const override { return nodes_.size(); }
size_t num_execution_nodes() const override {
return nodes_.size() - node_index_offset_;
}
const TfLiteNode& node(size_t index) const override {
return nodes_[index + node_index_offset_];
}
size_t node_index(size_t index) const override {
return index + node_index_offset_;
}
size_t num_tensors() const override { return tensors_.size(); }
const TfLiteRegistration& registration(size_t index) const override {
return registrations_[index + node_index_offset_];
}
TfLiteTensor* tensor(size_t index) override { return &tensors_[index]; }
TfLiteTensor* tensors() override { return tensors_.data(); }
const std::vector<int>& inputs() const override { return inputs_; }
const std::vector<int>& outputs() const override { return outputs_; }
const std::vector<int>& variables() const override { return variables_; }
private:
void AddNode(const std::vector<int>& inputs, const std::vector<int>& outputs,
bool might_have_side_effect) {
NeedsTensors(inputs);
NeedsTensors(outputs);
nodes_.push_back(TfLiteNode());
TfLiteNode& node = nodes_.back();
node.inputs = ConvertVector(inputs);
node.outputs = ConvertVector(outputs);
node.might_have_side_effect = might_have_side_effect;
}
void NeedsTensors(const std::vector<int>& tensors) {
for (const int tensor : tensors)
tensors_.resize(std::max<int>(tensor + 1, tensors_.size()));
}
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
std::vector<TfLiteRegistration> registrations_;
size_t node_index_offset_;
};
void PartitionGraphOrDie(const SimpleTestGraph& graph,
const std::vector<int>& nodes_to_partition,
NodeSubsets* subgraphs, const bool greedily,
const ControlEdges* control_edges) {
TfLiteIntArray* nodes_to_partition_int_array =
ConvertVector(nodes_to_partition);
ASSERT_EQ(PartitionGraphIntoIndependentNodeSubsets(
&graph, nodes_to_partition_int_array, subgraphs, greedily,
control_edges),
kTfLiteOk);
TfLiteIntArrayFree(nodes_to_partition_int_array);
}
NodeSubsets PartitionGraph(const SimpleTestGraph& graph,
const std::vector<int>& nodes_to_partition,
const bool greedily = true,
const ControlEdges* control_edges = nullptr) {
NodeSubsets subgraphs;
PartitionGraphOrDie(graph, nodes_to_partition, &subgraphs, greedily,
control_edges);
return subgraphs;
}
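// gMock matcher that compares two NodeSubsets field by field and reports the
// first mismatching field.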
MATCHER(EqNodeSubset, "") {
const NodeSubset& a = std::get<0>(arg);
const NodeSubset& b = std::get<1>(arg);
if (a.type != b.type) {
*result_listener << "mismatched .type ";
return ExplainMatchResult(Eq(b.type), a.type, result_listener);
}
if (a.nodes != b.nodes) {
*result_listener << "mismatched .nodes ";
return ExplainMatchResult(Pointwise(Eq(), b.nodes), a.nodes,
result_listener);
}
if (a.input_tensors != b.input_tensors) {
*result_listener << "mismatched .input_tensors ";
return ExplainMatchResult(Pointwise(Eq(), b.input_tensors), a.input_tensors,
result_listener);
}
if (a.output_tensors != b.output_tensors) {
*result_listener << "mismatched .output_tensors ";
return ExplainMatchResult(Pointwise(Eq(), b.output_tensors),
a.output_tensors, result_listener);
}
return true;
}
TEST(PartitionTest, Nodes0PartitionNodes0) {
EXPECT_THAT(PartitionGraph({
{},
{},
{},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({})));
}
TEST(PartitionTest, Nodes0PartitionNodes0Tensors1) {
EXPECT_THAT(PartitionGraph({
{0},
{0},
{},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({})));
}
TEST(PartitionTest, Nodes1PartitionNodes0) {
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{0},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes0_WithOffset) {
constexpr int node_index_offset = 17;
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
node_index_offset,
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{node_index_offset},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes0Inputs0) {
EXPECT_THAT(
PartitionGraph({
{},
{0},
{
{{}, {0}, false},
},
},
{0}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{},
{0},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes1) {
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
},
{0}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes1) {
EXPECT_THAT(
PartitionGraph({
{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
},
{1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfPartition,
{1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes1_WithOffset) {
constexpr int node_index_offset = 17;
EXPECT_THAT(
PartitionGraph({{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
node_index_offset},
{node_index_offset + 1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{node_index_offset + 0},
{0},
{1},
},
{
NodeSubset::kTfPartition,
{node_index_offset + 1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes2) {
EXPECT_THAT(
PartitionGraph({
{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
},
{0, 1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{2},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2) {
EXPECT_THAT(
PartitionGraph({
{0},
{3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1, 2}, {3}, false},
},
},
{0, 2}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
{
NodeSubset::kTfPartition,
{2},
{1, 2},
{3},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2Greedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{2, 3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1}, {3}, false},
},
},
{0, 2}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 2},
{0},
{1, 3},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2ClusteredNonGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{2, 3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1}, {3}, false},
},
},
{0, 2},
false),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
{
NodeSubset::kTfPartition,
{2},
{1},
{3},
},
})));
}
TEST(PartitionTest, Nodes4PartitionNodes3_WithControlDependency) {
EXPECT_THAT(
PartitionGraph({
{0},
{4},
{
{{0}, {1}, true},
{{1}, {2}, true},
{{2}, {3}, false},
{{1, 3}, {}, true},
{{1}, {4}, true},
},
},
{0, 1, 3, 4}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{1, 2},
},
{
NodeSubset::kTfNonPartition,
{2},
{2},
{3},
},
{
NodeSubset::kTfPartition,
{3, 4},
{1, 3},
{4},
},
})));
}
TEST(PartitionTest, Nodes4PartitionNodes3_WithExternalControlDependency) {
const ControlEdges control_edges = {
{0, 1},
{1, 3},
{3, 4},
};
EXPECT_THAT(
PartitionGraph({
{0},
{4},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{2}, {3}, false},
{{1, 3}, {}, false},
{{1}, {4}, false},
},
},
{0, 1, 3, 4},
true, &control_edges),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{1, 2},
},
{
NodeSubset::kTfNonPartition,
{2},
{2},
{3},
},
{
NodeSubset::kTfPartition,
{3, 4},
{1, 3},
{4},
},
})));
}
TEST(PartitionTest, ComplexGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{4, 7},
{
{{0}, {1}, false},
{{1}, {2, 5}, false},
{{2}, {3}, false},
{{3}, {4}, false},
{{5}, {6}, false},
{{6}, {7}, false},
},
},
{0, 1, 4, 5}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1, 4, 5},
{0},
{2, 7},
},
{
NodeSubset::kTfNonPartition,
{2, 3},
{2},
{4},
},
})));
}
TEST(PartitionTest, ComplexNonGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{4, 7},
{
{{0}, {1}, false},
{{1}, {2, 5}, false},
{{2}, {3}, false},
{{3}, {4}, false},
{{5}, {6}, false},
{{6}, {7}, false},
},
},
{0, 1, 4, 5},
false),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{2, 5},
},
{
NodeSubset::kTfNonPartition,
{2, 3},
{2},
{4},
},
{
NodeSubset::kTfPartition,
{4, 5},
{5},
{7},
},
})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/graph_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/graph_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d018e54-5730-4a78-9799-48cd0f7ff152 | cpp | tensorflow/tensorflow | gpu_plugin | tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/gpu_plugin_test.cc | #include "tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/gpu_plugin.h"
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#if TFLITE_SUPPORTS_GPU_DELEGATE
#include "tensorflow/lite/delegates/gpu/delegate.h"
#elif defined(REAL_IPHONE_DEVICE)
#include "tensorflow/lite/delegates/gpu/metal_delegate.h"
#endif
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
tflite::delegates::GpuPlugin gpu_plugin(*tflite_settings);
#if TFLITE_SUPPORTS_GPU_DELEGATE
return TfLiteGpuDelegateV2Create(&gpu_plugin.Options());
#elif defined(REAL_IPHONE_DEVICE)
return TFLGpuDelegateCreate(&gpu_plugin.Options());
#else
return nullptr;
#endif
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
#if TFLITE_SUPPORTS_GPU_DELEGATE
TfLiteGpuDelegateV2Delete(delegate);
#elif defined(REAL_IPHONE_DEVICE)
TFLGpuDelegateDelete(delegate);
#endif
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteGpuDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
class GpuTest : public testing::Test {
public:
void SetUp() override {
GPUSettingsBuilder gpu_settings_builder(flatbuffer_builder_);
flatbuffers::Offset<GPUSettings> gpu_settings =
gpu_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_gpu_settings(gpu_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~GpuTest() override {}
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
TEST_F(GpuTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate = TfLiteGpuDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteGpuDelegatePluginCApi()->destroy(delegate);
}
TEST_F(GpuTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate = TfLiteGpuDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteGpuDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteGpuDelegatePluginCApi()->destroy(delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/gpu_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bae5383e-93f1-41e3-961d-207c7d508cb7 | cpp | tensorflow/tensorflow | flatbuffer_to_proto | tensorflow/lite/acceleration/configuration/flatbuffer_to_proto.cc | tensorflow/lite/acceleration/configuration/flatbuffer_to_proto_test.cc | #include "tensorflow/lite/acceleration/configuration/flatbuffer_to_proto.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
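// Each Convert* helper below maps one flatbuffer enum or table to its
// protobuf counterpart. Unknown enum values are logged and mapped to a safe
// default; absent optional sub-tables are left unset.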
proto::ExecutionPreference ConvertExecutionPreference(
ExecutionPreference preference) {
switch (preference) {
case ExecutionPreference_ANY:
return proto::ExecutionPreference::ANY;
case ExecutionPreference_LOW_LATENCY:
return proto::ExecutionPreference::LOW_LATENCY;
case ExecutionPreference_LOW_POWER:
return proto::ExecutionPreference::LOW_POWER;
case ExecutionPreference_FORCE_CPU:
return proto::ExecutionPreference::FORCE_CPU;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for ExecutionPreference: %d", preference);
return proto::ExecutionPreference::ANY;
}
proto::Delegate ConvertDelegate(Delegate delegate) {
switch (delegate) {
case Delegate_NONE:
return proto::Delegate::NONE;
case Delegate_NNAPI:
return proto::Delegate::NNAPI;
case Delegate_GPU:
return proto::Delegate::GPU;
case Delegate_HEXAGON:
return proto::Delegate::HEXAGON;
case Delegate_XNNPACK:
return proto::Delegate::XNNPACK;
case Delegate_EDGETPU:
return proto::Delegate::EDGETPU;
case Delegate_EDGETPU_CORAL:
return proto::Delegate::EDGETPU_CORAL;
case Delegate_CORE_ML:
return proto::Delegate::CORE_ML;
case Delegate_ARMNN:
return proto::Delegate::ARMNN;
case Delegate_MTK_NEURON:
return proto::Delegate::MTK_NEURON;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for Delegate: %d",
delegate);
return proto::Delegate::NONE;
}
proto::NNAPIExecutionPreference ConvertNNAPIExecutionPreference(
NNAPIExecutionPreference preference) {
switch (preference) {
case NNAPIExecutionPreference_UNDEFINED:
return proto::NNAPIExecutionPreference::UNDEFINED;
case NNAPIExecutionPreference_NNAPI_LOW_POWER:
return proto::NNAPIExecutionPreference::NNAPI_LOW_POWER;
case NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER:
return proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER;
case NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED:
return proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPreference: %d",
preference);
return proto::NNAPIExecutionPreference::UNDEFINED;
}
proto::NNAPIExecutionPriority ConvertNNAPIExecutionPriority(
NNAPIExecutionPriority priority) {
switch (priority) {
case NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED;
case NNAPIExecutionPriority_NNAPI_PRIORITY_LOW:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW;
case NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM;
case NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPriority: %d", priority);
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED;
}
proto::GPUBackend ConvertGPUBackend(GPUBackend backend) {
switch (backend) {
case GPUBackend_UNSET:
return proto::GPUBackend::UNSET;
case GPUBackend_OPENCL:
return proto::GPUBackend::OPENCL;
case GPUBackend_OPENGL:
return proto::GPUBackend::OPENGL;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for GPUBackend: %d",
backend);
return proto::GPUBackend::UNSET;
}
proto::GPUInferenceUsage ConvertGPUInferenceUsage(
GPUInferenceUsage preference) {
switch (preference) {
case GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER:
return proto::GPUInferenceUsage::
GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
case GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED:
return proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferenceUsage: %d", preference);
return proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
}
proto::GPUInferencePriority ConvertGPUInferencePriority(
GPUInferencePriority priority) {
switch (priority) {
case GPUInferencePriority_GPU_PRIORITY_AUTO:
return proto::GPUInferencePriority::GPU_PRIORITY_AUTO;
case GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION:
return proto::GPUInferencePriority::GPU_PRIORITY_MAX_PRECISION;
case GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY:
return proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY;
case GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE:
return proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferencePriority: %d", priority);
return proto::GPUInferencePriority::GPU_PRIORITY_AUTO;
}
proto::EdgeTpuPowerState ConvertEdgeTpuPowerState(EdgeTpuPowerState state) {
switch (state) {
case EdgeTpuPowerState_UNDEFINED_POWERSTATE:
return proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE;
case EdgeTpuPowerState_TPU_CORE_OFF:
return proto::EdgeTpuPowerState::TPU_CORE_OFF;
case EdgeTpuPowerState_READY:
return proto::EdgeTpuPowerState::READY;
case EdgeTpuPowerState_ACTIVE_MIN_POWER:
return proto::EdgeTpuPowerState::ACTIVE_MIN_POWER;
case EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER:
return proto::EdgeTpuPowerState::ACTIVE_VERY_LOW_POWER;
case EdgeTpuPowerState_ACTIVE_LOW_POWER:
return proto::EdgeTpuPowerState::ACTIVE_LOW_POWER;
case EdgeTpuPowerState_ACTIVE:
return proto::EdgeTpuPowerState::ACTIVE;
case EdgeTpuPowerState_OVER_DRIVE:
return proto::EdgeTpuPowerState::OVER_DRIVE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for EdgeTpuSettings::PowerState: %d",
state);
return proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE;
}
proto::FallbackSettings ConvertFallbackSettings(
const FallbackSettings& settings) {
proto::FallbackSettings proto_settings;
proto_settings.set_allow_automatic_fallback_on_compilation_error(
settings.allow_automatic_fallback_on_compilation_error());
proto_settings.set_allow_automatic_fallback_on_execution_error(
settings.allow_automatic_fallback_on_execution_error());
return proto_settings;
}
proto::NNAPISettings ConvertNNAPISettings(const NNAPISettings& settings) {
proto::NNAPISettings proto_settings;
if (settings.accelerator_name() != nullptr) {
proto_settings.set_accelerator_name(settings.accelerator_name()->str());
}
if (settings.cache_directory() != nullptr) {
proto_settings.set_cache_directory(settings.cache_directory()->str());
}
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
proto_settings.set_execution_preference(
ConvertNNAPIExecutionPreference(settings.execution_preference()));
proto_settings.set_no_of_nnapi_instances_to_cache(
settings.no_of_nnapi_instances_to_cache());
if (settings.fallback_settings() != nullptr) {
*(proto_settings.mutable_fallback_settings()) =
ConvertFallbackSettings(*settings.fallback_settings());
}
proto_settings.set_allow_nnapi_cpu_on_android_10_plus(
settings.allow_nnapi_cpu_on_android_10_plus());
proto_settings.set_execution_priority(
ConvertNNAPIExecutionPriority(settings.execution_priority()));
proto_settings.set_allow_dynamic_dimensions(
settings.allow_dynamic_dimensions());
proto_settings.set_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
proto_settings.set_use_burst_computation(settings.use_burst_computation());
proto_settings.set_support_library_handle(settings.support_library_handle());
return proto_settings;
}
proto::GPUSettings ConvertGPUSettings(const GPUSettings& settings) {
proto::GPUSettings proto_settings;
proto_settings.set_is_precision_loss_allowed(
settings.is_precision_loss_allowed());
proto_settings.set_enable_quantized_inference(
settings.enable_quantized_inference());
proto_settings.set_force_backend(ConvertGPUBackend(settings.force_backend()));
proto_settings.set_inference_priority1(
ConvertGPUInferencePriority(settings.inference_priority1()));
proto_settings.set_inference_priority2(
ConvertGPUInferencePriority(settings.inference_priority2()));
proto_settings.set_inference_priority3(
ConvertGPUInferencePriority(settings.inference_priority3()));
proto_settings.set_inference_preference(
ConvertGPUInferenceUsage(settings.inference_preference()));
if (settings.cache_directory() != nullptr) {
proto_settings.set_cache_directory(settings.cache_directory()->str());
}
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
return proto_settings;
}
proto::HexagonSettings ConvertHexagonSettings(const HexagonSettings& settings) {
proto::HexagonSettings proto_settings;
proto_settings.set_debug_level(settings.debug_level());
proto_settings.set_powersave_level(settings.powersave_level());
proto_settings.set_print_graph_profile(settings.print_graph_profile());
proto_settings.set_print_graph_debug(settings.print_graph_debug());
return proto_settings;
}
proto::XNNPackSettings ConvertXNNPackSettings(const XNNPackSettings& settings) {
proto::XNNPackSettings proto_settings;
proto_settings.set_num_threads(settings.num_threads());
proto_settings.set_flags(::tflite::proto::XNNPackFlags(settings.flags()));
return proto_settings;
}
proto::CoreMLSettings ConvertCoreMLSettings(const CoreMLSettings& settings) {
proto::CoreMLSettings proto_settings;
switch (settings.enabled_devices()) {
case CoreMLSettings_::EnabledDevices_DEVICES_ALL:
proto_settings.set_enabled_devices(proto::CoreMLSettings::DEVICES_ALL);
break;
case CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE:
proto_settings.set_enabled_devices(
proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE);
break;
default:
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid devices enum: %d",
settings.enabled_devices());
}
proto_settings.set_coreml_version(settings.coreml_version());
proto_settings.set_max_delegated_partitions(
settings.max_delegated_partitions());
proto_settings.set_min_nodes_per_partition(
settings.min_nodes_per_partition());
return proto_settings;
}
proto::CPUSettings ConvertCPUSettings(const CPUSettings& settings) {
proto::CPUSettings proto_settings;
proto_settings.set_num_threads(settings.num_threads());
return proto_settings;
}
proto::EdgeTpuDeviceSpec ConvertEdgeTpuDeviceSpec(
const EdgeTpuDeviceSpec& device_spec) {
proto::EdgeTpuDeviceSpec proto_settings;
if (device_spec.device_paths() != nullptr) {
for (int i = 0; i < device_spec.device_paths()->size(); ++i) {
auto device_path = device_spec.device_paths()->Get(i);
proto_settings.add_device_paths(device_path->str());
}
}
proto_settings.set_platform_type(
static_cast<proto::EdgeTpuDeviceSpec::PlatformType>(
device_spec.platform_type()));
proto_settings.set_num_chips(device_spec.num_chips());
proto_settings.set_chip_family(device_spec.chip_family());
return proto_settings;
}
proto::EdgeTpuSettings ConvertEdgeTpuSettings(const EdgeTpuSettings& settings) {
proto::EdgeTpuSettings proto_settings;
proto_settings.set_inference_power_state(
ConvertEdgeTpuPowerState(settings.inference_power_state()));
proto_settings.set_inference_priority(settings.inference_priority());
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
if (settings.edgetpu_device_spec() != nullptr) {
*(proto_settings.mutable_edgetpu_device_spec()) =
ConvertEdgeTpuDeviceSpec(*settings.edgetpu_device_spec());
}
proto_settings.set_float_truncation_type(
static_cast<proto::EdgeTpuSettings::FloatTruncationType>(
settings.float_truncation_type()));
  auto inactive_power_configs = settings.inactive_power_configs();
  if (inactive_power_configs != nullptr) {
    for (int i = 0; i < inactive_power_configs->size(); ++i) {
      auto config = inactive_power_configs->Get(i);
auto proto_config = proto_settings.add_inactive_power_configs();
proto_config->set_inactive_power_state(
ConvertEdgeTpuPowerState(config->inactive_power_state()));
proto_config->set_inactive_timeout_us(config->inactive_timeout_us());
}
}
return proto_settings;
}
proto::StableDelegateLoaderSettings ConvertStableDelegateLoaderSettings(
const StableDelegateLoaderSettings& settings) {
proto::StableDelegateLoaderSettings proto_settings;
if (settings.delegate_path() != nullptr) {
proto_settings.set_delegate_path(settings.delegate_path()->str());
}
if (settings.delegate_name() != nullptr) {
proto_settings.set_delegate_name(settings.delegate_name()->str());
}
return proto_settings;
}
proto::CoralSettings ConvertCoralSettings(const CoralSettings& settings) {
proto::CoralSettings proto_settings;
if (settings.device() != nullptr) {
proto_settings.set_device(settings.device()->str());
}
proto_settings.set_performance(
static_cast<proto::CoralSettings::Performance>(settings.performance()));
proto_settings.set_usb_always_dfu(settings.usb_always_dfu());
proto_settings.set_usb_max_bulk_in_queue_length(
settings.usb_max_bulk_in_queue_length());
return proto_settings;
}
proto::GoogleEdgeTpuSettings::Priority ConvertGoogleEdgeTpuPriority(
GoogleEdgeTpuSettings_::Priority priority) {
switch (priority) {
case GoogleEdgeTpuSettings_::Priority_PRIORITY_UNDEFINED:
return proto::GoogleEdgeTpuSettings::PRIORITY_UNDEFINED;
case GoogleEdgeTpuSettings_::Priority_PRIORITY_LOW:
return proto::GoogleEdgeTpuSettings::PRIORITY_LOW;
case GoogleEdgeTpuSettings_::Priority_PRIORITY_MEDIUM:
return proto::GoogleEdgeTpuSettings::PRIORITY_MEDIUM;
case GoogleEdgeTpuSettings_::Priority_PRIORITY_HIGH:
return proto::GoogleEdgeTpuSettings::PRIORITY_HIGH;
}
}
proto::GoogleEdgeTpuSettings::TriState ConvertGoogleEdgeTpuTriState(
GoogleEdgeTpuSettings_::TriState tri_state) {
switch (tri_state) {
case GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED:
return proto::GoogleEdgeTpuSettings::TRISTATE_UNDEFINED;
case GoogleEdgeTpuSettings_::TriState_TRISTATE_FALSE:
return proto::GoogleEdgeTpuSettings::TRISTATE_FALSE;
case GoogleEdgeTpuSettings_::TriState_TRISTATE_TRUE:
return proto::GoogleEdgeTpuSettings::TRISTATE_TRUE;
}
}
proto::GoogleEdgeTpuSettings ConvertGoogleEdgetpuSettings(
const GoogleEdgeTpuSettings& settings) {
proto::GoogleEdgeTpuSettings proto_settings;
proto_settings.set_log_verbosity(settings.log_verbosity());
proto_settings.set_enable_tracing(settings.enable_tracing());
proto_settings.set_priority(
ConvertGoogleEdgeTpuPriority(settings.priority()));
if (settings.extension_data()) {
proto_settings.set_extension_data(settings.extension_data()->data(),
settings.extension_data()->size());
}
if (settings.model_identifier()) {
proto_settings.set_model_identifier(settings.model_identifier()->str());
}
proto_settings.set_use_async_api(settings.use_async_api());
proto_settings.set_delegate_should_manage_cache_for_inputs(
settings.delegate_should_manage_cache_for_inputs());
proto_settings.set_delegate_should_manage_cache_for_outputs(
settings.delegate_should_manage_cache_for_outputs());
proto_settings.set_prefer_cache_coherency_for_inputs(
ConvertGoogleEdgeTpuTriState(
settings.prefer_cache_coherency_for_inputs()));
proto_settings.set_prefer_cache_coherency_for_outputs(
ConvertGoogleEdgeTpuTriState(
settings.prefer_cache_coherency_for_outputs()));
proto_settings.set_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
return proto_settings;
}
proto::CompilationCachingSettings ConvertCompilationCachingSettings(
const CompilationCachingSettings& settings) {
proto::CompilationCachingSettings proto_settings;
if (settings.cache_dir() != nullptr) {
proto_settings.set_cache_dir(settings.cache_dir()->str());
}
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
return proto_settings;
}
proto::MtkNeuronSettings ConvertMtkNeuronSettings(
const MtkNeuronSettings& settings) {
proto::MtkNeuronSettings proto_settings;
proto_settings.set_execution_preference(
static_cast<proto::MtkNeuronSettings_ExecutionPreference>(
settings.execution_preference()));
proto_settings.set_execution_priority(
static_cast<proto::MtkNeuronSettings_ExecutionPriority>(
settings.execution_priority()));
auto optimization_hints = settings.optimization_hints();
if (optimization_hints != nullptr) {
for (auto hint : *optimization_hints) {
proto_settings.add_optimization_hints(
static_cast<proto::MtkNeuronSettings_OptimizationHint>(hint));
}
}
proto_settings.set_operation_check_mode(
static_cast<proto::MtkNeuronSettings_OperationCheckMode>(
settings.operation_check_mode()));
proto_settings.set_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
proto_settings.set_use_ahwb(settings.use_ahwb());
proto_settings.set_use_cacheable_buffer(settings.use_cacheable_buffer());
auto compile_options = settings.compile_options();
if (compile_options != nullptr) {
for (auto option : *compile_options) {
proto_settings.add_compile_options(option->str());
}
}
auto accelerator_names = settings.accelerator_names();
if (accelerator_names != nullptr) {
for (auto name : *accelerator_names) {
proto_settings.add_accelerator_names(name->str());
}
}
if (settings.neuron_config_path()) {
proto_settings.set_neuron_config_path(settings.neuron_config_path()->str());
}
return proto_settings;
}
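// Top-level per-acceleration settings: copies the delegate enum and every
// optional per-delegate sub-table that is present in the flatbuffer.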
proto::TFLiteSettings ConvertTfliteSettings(const TFLiteSettings& settings) {
proto::TFLiteSettings proto_settings;
proto_settings.set_delegate(ConvertDelegate(settings.delegate()));
if (settings.nnapi_settings() != nullptr) {
*proto_settings.mutable_nnapi_settings() =
ConvertNNAPISettings(*settings.nnapi_settings());
}
if (settings.gpu_settings() != nullptr) {
*proto_settings.mutable_gpu_settings() =
ConvertGPUSettings(*settings.gpu_settings());
}
if (settings.hexagon_settings() != nullptr) {
*proto_settings.mutable_hexagon_settings() =
ConvertHexagonSettings(*settings.hexagon_settings());
}
if (settings.xnnpack_settings() != nullptr) {
*proto_settings.mutable_xnnpack_settings() =
ConvertXNNPackSettings(*settings.xnnpack_settings());
}
if (settings.coreml_settings() != nullptr) {
*proto_settings.mutable_coreml_settings() =
ConvertCoreMLSettings(*settings.coreml_settings());
}
if (settings.cpu_settings() != nullptr) {
*proto_settings.mutable_cpu_settings() =
ConvertCPUSettings(*settings.cpu_settings());
}
proto_settings.set_max_delegated_partitions(
settings.max_delegated_partitions());
if (settings.edgetpu_settings() != nullptr) {
*proto_settings.mutable_edgetpu_settings() =
ConvertEdgeTpuSettings(*settings.edgetpu_settings());
}
if (settings.coral_settings() != nullptr) {
*proto_settings.mutable_coral_settings() =
ConvertCoralSettings(*settings.coral_settings());
}
if (settings.fallback_settings() != nullptr) {
*proto_settings.mutable_fallback_settings() =
ConvertFallbackSettings(*settings.fallback_settings());
}
proto_settings.set_disable_default_delegates(
settings.disable_default_delegates());
if (settings.stable_delegate_loader_settings() != nullptr) {
*proto_settings.mutable_stable_delegate_loader_settings() =
ConvertStableDelegateLoaderSettings(
*settings.stable_delegate_loader_settings());
}
if (settings.google_edgetpu_settings() != nullptr) {
*proto_settings.mutable_google_edgetpu_settings() =
ConvertGoogleEdgetpuSettings(*settings.google_edgetpu_settings());
}
if (settings.compilation_caching_settings() != nullptr) {
*proto_settings.mutable_compilation_caching_settings() =
ConvertCompilationCachingSettings(
*settings.compilation_caching_settings());
}
if (settings.mtk_neuron_settings() != nullptr) {
*proto_settings.mutable_mtk_neuron_settings() =
ConvertMtkNeuronSettings(*settings.mtk_neuron_settings());
}
return proto_settings;
}
proto::ModelFile ConvertModelFile(const ModelFile& model_file) {
proto::ModelFile proto_settings;
if (model_file.filename() != nullptr) {
proto_settings.set_filename(model_file.filename()->str());
}
proto_settings.set_fd(model_file.fd());
proto_settings.set_offset(model_file.offset());
proto_settings.set_length(model_file.length());
return proto_settings;
}
proto::BenchmarkStoragePaths ConvertBenchmarkStoragePaths(
const BenchmarkStoragePaths& storage_paths) {
proto::BenchmarkStoragePaths proto_settings;
if (storage_paths.storage_file_path() != nullptr) {
proto_settings.set_storage_file_path(
storage_paths.storage_file_path()->str());
}
if (storage_paths.data_directory_path() != nullptr) {
proto_settings.set_data_directory_path(
storage_paths.data_directory_path()->str());
}
return proto_settings;
}
proto::MinibenchmarkSettings ConvertMinibenchmarkSettings(
const MinibenchmarkSettings& settings) {
proto::MinibenchmarkSettings proto_settings;
if (settings.settings_to_test() != nullptr &&
settings.settings_to_test()->size() > 0) {
for (int i = 0; i < settings.settings_to_test()->size(); ++i) {
auto tflite_setting = settings.settings_to_test()->Get(i);
auto proto_tflite_setting = proto_settings.add_settings_to_test();
*proto_tflite_setting = ConvertTfliteSettings(*tflite_setting);
}
}
if (settings.model_file() != nullptr) {
*(proto_settings.mutable_model_file()) =
ConvertModelFile(*settings.model_file());
}
if (settings.storage_paths() != nullptr) {
*(proto_settings.mutable_storage_paths()) =
ConvertBenchmarkStoragePaths(*settings.storage_paths());
}
return proto_settings;
}
proto::BenchmarkEventType ConvertBenchmarkEventType(BenchmarkEventType type) {
switch (type) {
case BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE:
return proto::BenchmarkEventType::UNDEFINED_BENCHMARK_EVENT_TYPE;
case BenchmarkEventType_START:
return proto::BenchmarkEventType::START;
case BenchmarkEventType_END:
return proto::BenchmarkEventType::END;
case BenchmarkEventType_ERROR:
return proto::BenchmarkEventType::ERROR;
case BenchmarkEventType_LOGGED:
return proto::BenchmarkEventType::LOGGED;
case BenchmarkEventType_RECOVERED_ERROR:
return proto::BenchmarkEventType::RECOVERED_ERROR;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for BenchmarkEventType: %d", type);
return proto::BenchmarkEventType::UNDEFINED_BENCHMARK_EVENT_TYPE;
}
proto::BenchmarkMetric ConvertBenchmarkMetric(const BenchmarkMetric& metric) {
proto::BenchmarkMetric proto_metric;
if (metric.name() != nullptr) {
proto_metric.set_name(metric.name()->str());
}
auto values = metric.values();
if (values != nullptr) {
for (int i = 0; i < values->size(); ++i) {
proto_metric.add_values(values->Get(i));
}
}
return proto_metric;
}
proto::BenchmarkResult ConvertBenchmarkResult(const BenchmarkResult& result) {
proto::BenchmarkResult proto_result;
auto initialization_time_us = result.initialization_time_us();
if (initialization_time_us != nullptr) {
for (int i = 0; i < initialization_time_us->size(); ++i) {
proto_result.add_initialization_time_us(initialization_time_us->Get(i));
}
}
auto inference_time_us = result.inference_time_us();
if (inference_time_us != nullptr) {
for (int i = 0; i < inference_time_us->size(); ++i) {
proto_result.add_inference_time_us(inference_time_us->Get(i));
}
}
proto_result.set_max_memory_kb(result.max_memory_kb());
proto_result.set_ok(result.ok());
auto metrics = result.metrics();
if (metrics != nullptr) {
for (int i = 0; i < metrics->size(); ++i) {
*proto_result.add_metrics() = ConvertBenchmarkMetric(*metrics->Get(i));
}
}
return proto_result;
}
proto::BenchmarkStage ConvertBenchmarkStage(BenchmarkStage stage) {
switch (stage) {
case BenchmarkStage_UNKNOWN:
return proto::BenchmarkStage::UNKNOWN;
case BenchmarkStage_INITIALIZATION:
return proto::BenchmarkStage::INITIALIZATION;
case BenchmarkStage_INFERENCE:
return proto::BenchmarkStage::INFERENCE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for BenchmarkStage: %d",
stage);
return proto::BenchmarkStage::UNKNOWN;
}
proto::ErrorCode ConvertBenchmarkErrorCode(const ErrorCode& code) {
proto::ErrorCode proto_code;
proto_code.set_source(ConvertDelegate(code.source()));
proto_code.set_tflite_error(code.tflite_error());
proto_code.set_underlying_api_error(code.underlying_api_error());
return proto_code;
}
proto::BenchmarkError ConvertBenchmarkError(const BenchmarkError& error) {
proto::BenchmarkError proto_error;
proto_error.set_stage(ConvertBenchmarkStage(error.stage()));
proto_error.set_exit_code(error.exit_code());
proto_error.set_signal(error.signal());
auto error_codes = error.error_code();
if (error_codes != nullptr) {
for (int i = 0; i < error_codes->size(); ++i) {
*proto_error.add_error_code() =
ConvertBenchmarkErrorCode(*error_codes->Get(i));
}
}
proto_error.set_mini_benchmark_error_code(error.mini_benchmark_error_code());
return proto_error;
}
proto::BenchmarkEvent ConvertBenchmarkEvent(const BenchmarkEvent& event) {
proto::BenchmarkEvent proto_event;
if (event.tflite_settings() != nullptr) {
*proto_event.mutable_tflite_settings() =
ConvertTfliteSettings(*event.tflite_settings());
}
proto_event.set_event_type(ConvertBenchmarkEventType(event.event_type()));
if (event.result() != nullptr) {
*proto_event.mutable_result() = ConvertBenchmarkResult(*event.result());
}
if (event.error() != nullptr) {
*proto_event.mutable_error() = ConvertBenchmarkError(*event.error());
}
proto_event.set_boottime_us(event.boottime_us());
proto_event.set_wallclock_us(event.wallclock_us());
return proto_event;
}
proto::BestAccelerationDecision ConvertBestAccelerationDecision(
const BestAccelerationDecision& decision) {
proto::BestAccelerationDecision proto_decision;
proto_decision.set_number_of_source_events(
decision.number_of_source_events());
if (decision.min_latency_event() != nullptr) {
*proto_decision.mutable_min_latency_event() =
ConvertBenchmarkEvent(*decision.min_latency_event());
}
proto_decision.set_min_inference_time_us(decision.min_inference_time_us());
return proto_decision;
}
proto::BenchmarkInitializationFailure ConvertBenchmarkInitializationFailure(
const BenchmarkInitializationFailure& init_failure) {
proto::BenchmarkInitializationFailure proto_init_failure;
proto_init_failure.set_initialization_status(
init_failure.initialization_status());
return proto_init_failure;
}
}
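// Public entry points. The overloads taking the object API types
// (ComputeSettingsT / MiniBenchmarkEventT) first pack into a temporary
// flatbuffer and then reuse the table-based conversion.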
proto::ComputeSettings ConvertFromFlatbuffer(
const ComputeSettings& settings, bool skip_mini_benchmark_settings) {
proto::ComputeSettings proto_settings;
proto_settings.set_preference(
ConvertExecutionPreference(settings.preference()));
if (settings.tflite_settings() != nullptr) {
*(proto_settings.mutable_tflite_settings()) =
ConvertTfliteSettings(*settings.tflite_settings());
}
if (settings.model_namespace_for_statistics() != nullptr) {
proto_settings.set_model_namespace_for_statistics(
settings.model_namespace_for_statistics()->str());
}
if (settings.model_identifier_for_statistics() != nullptr) {
proto_settings.set_model_identifier_for_statistics(
settings.model_identifier_for_statistics()->str());
}
if (!skip_mini_benchmark_settings &&
settings.settings_to_test_locally() != nullptr) {
*(proto_settings.mutable_settings_to_test_locally()) =
ConvertMinibenchmarkSettings(*settings.settings_to_test_locally());
}
return proto_settings;
}
proto::ComputeSettings ConvertFromFlatbuffer(
const ComputeSettingsT& settings, bool skip_mini_benchmark_settings) {
flatbuffers::FlatBufferBuilder fbb;
fbb.Finish(ComputeSettings::Pack(fbb, &settings));
auto settings_fbb =
flatbuffers::GetRoot<ComputeSettings>(fbb.GetBufferPointer());
return ConvertFromFlatbuffer(*settings_fbb, skip_mini_benchmark_settings);
}
proto::MiniBenchmarkEvent ConvertFromFlatbuffer(
const MiniBenchmarkEvent& event) {
proto::MiniBenchmarkEvent proto_event;
proto_event.set_is_log_flushing_event(event.is_log_flushing_event());
if (event.best_acceleration_decision() != nullptr) {
*proto_event.mutable_best_acceleration_decision() =
ConvertBestAccelerationDecision(*event.best_acceleration_decision());
}
if (event.initialization_failure() != nullptr) {
*proto_event.mutable_initialization_failure() =
ConvertBenchmarkInitializationFailure(*event.initialization_failure());
}
if (event.benchmark_event() != nullptr) {
*proto_event.mutable_benchmark_event() =
ConvertBenchmarkEvent(*event.benchmark_event());
}
return proto_event;
}
proto::MiniBenchmarkEvent ConvertFromFlatbuffer(
const MiniBenchmarkEventT& event) {
flatbuffers::FlatBufferBuilder fbb;
fbb.Finish(MiniBenchmarkEvent::Pack(fbb, &event));
auto event_fbb =
flatbuffers::GetRoot<MiniBenchmarkEvent>(fbb.GetBufferPointer());
return ConvertFromFlatbuffer(*event_fbb);
}
} | #include "tensorflow/lite/acceleration/configuration/flatbuffer_to_proto.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
namespace acceleration {
namespace {
class ConversionTest : public ::testing::Test {
protected:
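  // Each Check* helper round-trips a single field through
  // ConvertFromFlatbuffer and verifies the resulting proto value.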
void CheckDelegateEnum(Delegate input, proto::Delegate output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->delegate = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output, compute.tflite_settings().delegate());
}
void CheckExecutionPreference(ExecutionPreference input,
proto::ExecutionPreference output) {
settings_.preference = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output, compute.preference());
}
void CheckNNAPIExecutionPreference(NNAPIExecutionPreference input,
proto::NNAPIExecutionPreference output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
settings_.tflite_settings->nnapi_settings->execution_preference = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(
output,
compute.tflite_settings().nnapi_settings().execution_preference());
}
void CheckNNAPIExecutionPriority(NNAPIExecutionPriority input,
proto::NNAPIExecutionPriority output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
settings_.tflite_settings->nnapi_settings->execution_priority = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output,
compute.tflite_settings().nnapi_settings().execution_priority());
}
void CheckGPUBackend(GPUBackend input, proto::GPUBackend output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
settings_.tflite_settings->gpu_settings->force_backend = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output, compute.tflite_settings().gpu_settings().force_backend());
}
ComputeSettingsT settings_;
MiniBenchmarkEventT event_;
};
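// The fixture helpers above share one pattern: mutate the reusable settings_
// (or event_) object-API struct, run it through ConvertFromFlatbuffer(), and
// compare the resulting proto field against the expected value. Because the
// ComputeSettingsT overload packs the struct into a fresh FlatBuffer on every
// call, each check exercises the full object-API -> FlatBuffer -> proto path.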
TEST_F(ConversionTest, Delegate) {
CheckDelegateEnum(Delegate_NONE, proto::Delegate::NONE);
CheckDelegateEnum(Delegate_NNAPI, proto::Delegate::NNAPI);
CheckDelegateEnum(Delegate_GPU, proto::Delegate::GPU);
CheckDelegateEnum(Delegate_HEXAGON, proto::Delegate::HEXAGON);
CheckDelegateEnum(Delegate_EDGETPU, proto::Delegate::EDGETPU);
CheckDelegateEnum(Delegate_EDGETPU_CORAL, proto::Delegate::EDGETPU_CORAL);
CheckDelegateEnum(Delegate_XNNPACK, proto::Delegate::XNNPACK);
CheckDelegateEnum(Delegate_CORE_ML, proto::Delegate::CORE_ML);
}
TEST_F(ConversionTest, ExecutionPreference) {
CheckExecutionPreference(ExecutionPreference_ANY,
proto::ExecutionPreference::ANY);
CheckExecutionPreference(ExecutionPreference_LOW_LATENCY,
proto::ExecutionPreference::LOW_LATENCY);
CheckExecutionPreference(ExecutionPreference_LOW_POWER,
proto::ExecutionPreference::LOW_POWER);
CheckExecutionPreference(ExecutionPreference_FORCE_CPU,
proto::ExecutionPreference::FORCE_CPU);
}
TEST_F(ConversionTest, ModelIdentifier) {
settings_.model_identifier_for_statistics = "id";
settings_.model_namespace_for_statistics = "ns";
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.model_namespace_for_statistics(), "ns");
EXPECT_EQ(compute.model_identifier_for_statistics(), "id");
}
TEST_F(ConversionTest, NNAPISettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
input_settings->accelerator_name = "a";
input_settings->cache_directory = "d";
input_settings->model_token = "t";
input_settings->allow_fp16_precision_for_fp32 = true;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_EQ(output_settings.accelerator_name(), "a");
EXPECT_EQ(output_settings.cache_directory(), "d");
EXPECT_EQ(output_settings.model_token(), "t");
EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
EXPECT_FALSE(output_settings.allow_nnapi_cpu_on_android_10_plus());
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_compilation_error());
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_execution_error());
input_settings->fallback_settings = std::make_unique<FallbackSettingsT>();
input_settings->fallback_settings
->allow_automatic_fallback_on_compilation_error = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_TRUE(output_settings.fallback_settings()
.allow_automatic_fallback_on_compilation_error());
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_execution_error());
input_settings->fallback_settings
->allow_automatic_fallback_on_compilation_error = false;
input_settings->fallback_settings
->allow_automatic_fallback_on_execution_error = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_compilation_error());
EXPECT_TRUE(output_settings.fallback_settings()
.allow_automatic_fallback_on_execution_error());
input_settings->allow_fp16_precision_for_fp32 = false;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.allow_fp16_precision_for_fp32());
}
TEST_F(ConversionTest, NNAPIAllowDynamicDimensions) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.allow_dynamic_dimensions());
input_settings->allow_dynamic_dimensions = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_TRUE(output_settings.allow_dynamic_dimensions());
}
TEST_F(ConversionTest, NNAPIBurstComputation) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.use_burst_computation());
input_settings->use_burst_computation = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_TRUE(output_settings.use_burst_computation());
}
TEST_F(ConversionTest, NNAPIExecutionPreference) {
CheckNNAPIExecutionPreference(
NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER);
CheckNNAPIExecutionPreference(
NNAPIExecutionPreference_NNAPI_LOW_POWER,
proto::NNAPIExecutionPreference::NNAPI_LOW_POWER);
CheckNNAPIExecutionPreference(
NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED,
proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED);
CheckNNAPIExecutionPreference(NNAPIExecutionPreference_UNDEFINED,
proto::NNAPIExecutionPreference::UNDEFINED);
}
TEST_F(ConversionTest, NNAPIExecutionPriority) {
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW);
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM);
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH);
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED);
}
TEST_F(ConversionTest, NNAPISupportLibraryHandle) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_EQ(output_settings.support_library_handle(), 0);
input_settings->support_library_handle = std::numeric_limits<int64_t>::max();
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_EQ(output_settings.support_library_handle(),
std::numeric_limits<int64_t>::max());
}
TEST_F(ConversionTest, GPUSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();
input_settings->is_precision_loss_allowed = true;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
EXPECT_TRUE(output_settings.is_precision_loss_allowed());
input_settings->is_precision_loss_allowed = false;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().gpu_settings();
EXPECT_FALSE(output_settings.is_precision_loss_allowed());
EXPECT_TRUE(output_settings.enable_quantized_inference());
input_settings->enable_quantized_inference = false;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().gpu_settings();
EXPECT_FALSE(output_settings.enable_quantized_inference());
}
TEST_F(ConversionTest, GPUBackend) {
CheckGPUBackend(GPUBackend_UNSET, proto::GPUBackend::UNSET);
CheckGPUBackend(GPUBackend_OPENCL, proto::GPUBackend::OPENCL);
CheckGPUBackend(GPUBackend_OPENGL, proto::GPUBackend::OPENGL);
}
TEST_F(ConversionTest, GPUInferencePriority) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();
input_settings->inference_priority1 =
GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE;
input_settings->inference_priority2 =
GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE,
output_settings.inference_priority1());
EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY,
output_settings.inference_priority2());
EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_AUTO,
output_settings.inference_priority3());
}
TEST_F(ConversionTest, GPUInferencePreference) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();
input_settings->inference_preference =
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
EXPECT_EQ(
proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
output_settings.inference_preference());
input_settings->inference_preference =
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().gpu_settings();
EXPECT_EQ(proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED,
output_settings.inference_preference());
}
TEST_F(ConversionTest, HexagonSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->hexagon_settings =
std::make_unique<HexagonSettingsT>();
HexagonSettingsT* input_settings =
settings_.tflite_settings->hexagon_settings.get();
input_settings->debug_level = 1;
input_settings->powersave_level = 2;
input_settings->print_graph_profile = true;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
const proto::HexagonSettings& output_settings =
compute.tflite_settings().hexagon_settings();
EXPECT_EQ(1, output_settings.debug_level());
EXPECT_EQ(2, output_settings.powersave_level());
EXPECT_TRUE(output_settings.print_graph_profile());
EXPECT_FALSE(output_settings.print_graph_debug());
}
TEST_F(ConversionTest, EdgeTpuSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->edgetpu_settings =
std::make_unique<EdgeTpuSettingsT>();
EdgeTpuSettingsT* input_settings =
settings_.tflite_settings->edgetpu_settings.get();
constexpr EdgeTpuPowerState kInferencePowerState = EdgeTpuPowerState_ACTIVE;
constexpr EdgeTpuPowerState kInactivePowerState =
EdgeTpuPowerState_ACTIVE_MIN_POWER;
constexpr int64_t kInactiveTimeoutUs = 300000;
constexpr int kInferencePriority = 2;
const std::string kModelToken = "model_token";
constexpr EdgeTpuSettings_::FloatTruncationType kFloatTruncationType =
EdgeTpuSettings_::FloatTruncationType_HALF;
input_settings->inference_power_state = kInferencePowerState;
input_settings->inference_priority = kInferencePriority;
input_settings->model_token = kModelToken;
input_settings->float_truncation_type = kFloatTruncationType;
  auto inactive_power_config = std::make_unique<EdgeTpuInactivePowerConfigT>();
inactive_power_config->inactive_power_state = kInactivePowerState;
inactive_power_config->inactive_timeout_us = kInactiveTimeoutUs;
input_settings->inactive_power_configs.emplace_back(
std::move(inactive_power_config));
constexpr EdgeTpuDeviceSpec_::PlatformType kPlatformType =
EdgeTpuDeviceSpec_::PlatformType_MMIO;
constexpr int kNumChips = 1;
const std::string kDevicePath = "/dev/abrolhos";
constexpr int kChipFamily = 1;
input_settings->edgetpu_device_spec = std::make_unique<EdgeTpuDeviceSpecT>();
EdgeTpuDeviceSpecT* input_spec = input_settings->edgetpu_device_spec.get();
input_spec->platform_type = kPlatformType;
input_spec->num_chips = kNumChips;
input_spec->chip_family = kChipFamily;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::EdgeTpuSettings output_settings =
compute.tflite_settings().edgetpu_settings();
EXPECT_EQ(
static_cast<EdgeTpuPowerState>(output_settings.inference_power_state()),
kInferencePowerState);
EXPECT_EQ(output_settings.inactive_power_configs().size(), 1);
EXPECT_EQ(
static_cast<EdgeTpuPowerState>(output_settings.inactive_power_configs()
.at(0)
.inactive_power_state()),
kInactivePowerState);
EXPECT_EQ(
output_settings.inactive_power_configs().at(0).inactive_timeout_us(),
kInactiveTimeoutUs);
EXPECT_EQ(output_settings.inference_priority(), kInferencePriority);
EXPECT_EQ(output_settings.model_token(), kModelToken);
EXPECT_EQ(static_cast<EdgeTpuSettings_::FloatTruncationType>(
output_settings.float_truncation_type()),
kFloatTruncationType);
EXPECT_EQ(static_cast<EdgeTpuDeviceSpec_::PlatformType>(
output_settings.edgetpu_device_spec().platform_type()),
kPlatformType);
EXPECT_EQ(output_settings.edgetpu_device_spec().num_chips(), kNumChips);
EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths_size(), 0);
EXPECT_EQ(output_settings.edgetpu_device_spec().chip_family(), kChipFamily);
input_spec->device_paths.push_back(kDevicePath);
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().edgetpu_settings();
EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths().size(), 1);
EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths()[0],
kDevicePath);
}
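// Note on the casts in the test above: comparing enums via static_cast
// between the proto and FlatBuffer types is only sound because the two
// schemas keep identical numeric values for each enumerator; a divergence
// would show up here as a failed expectation rather than a compile-time
// error.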
TEST_F(ConversionTest, XNNPackSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->xnnpack_settings =
std::make_unique<XNNPackSettingsT>();
XNNPackSettingsT* input_settings =
settings_.tflite_settings->xnnpack_settings.get();
input_settings->num_threads = 2;
input_settings->flags =
tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().xnnpack_settings().num_threads(), 2);
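  // XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8 combines the QS8 (1)
  // and QU8 (2) flag bits, which is why the converted value is expected to
  // be 3.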
EXPECT_EQ(compute.tflite_settings().xnnpack_settings().flags(), 3);
}
TEST_F(ConversionTest, CoreMLSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->coreml_settings =
std::make_unique<CoreMLSettingsT>();
CoreMLSettingsT* input_settings =
settings_.tflite_settings->coreml_settings.get();
input_settings->enabled_devices =
CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
input_settings->coreml_version = 3;
input_settings->max_delegated_partitions = 10;
input_settings->min_nodes_per_partition = 4;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().coreml_settings().enabled_devices(),
proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE);
EXPECT_EQ(compute.tflite_settings().coreml_settings().coreml_version(), 3);
EXPECT_EQ(
compute.tflite_settings().coreml_settings().max_delegated_partitions(),
10);
EXPECT_EQ(
compute.tflite_settings().coreml_settings().min_nodes_per_partition(), 4);
}
TEST_F(ConversionTest, CoralSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->coral_settings =
std::make_unique<CoralSettingsT>();
CoralSettingsT* input_settings =
settings_.tflite_settings->coral_settings.get();
input_settings->device = "test";
input_settings->performance = CoralSettings_::Performance_HIGH;
input_settings->usb_always_dfu = true;
input_settings->usb_max_bulk_in_queue_length = 768;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
const proto::CoralSettings& output_settings =
compute.tflite_settings().coral_settings();
EXPECT_EQ("test", output_settings.device());
EXPECT_TRUE(output_settings.usb_always_dfu());
EXPECT_EQ(proto::CoralSettings::HIGH, output_settings.performance());
EXPECT_EQ(768, output_settings.usb_max_bulk_in_queue_length());
}
TEST_F(ConversionTest, StableDelegateLoaderSettings) {
const std::string kDelegatePath = "TEST_DELEGATE_PATH";
const std::string kDelegateName = "TEST_DELEGATE_NAME";
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->stable_delegate_loader_settings =
std::make_unique<StableDelegateLoaderSettingsT>();
settings_.tflite_settings->stable_delegate_loader_settings->delegate_path =
kDelegatePath;
settings_.tflite_settings->stable_delegate_loader_settings->delegate_name =
kDelegateName;
const proto::StableDelegateLoaderSettings output_settings =
ConvertFromFlatbuffer(settings_)
.tflite_settings()
.stable_delegate_loader_settings();
EXPECT_EQ(output_settings.delegate_path(), kDelegatePath);
EXPECT_EQ(output_settings.delegate_name(), kDelegateName);
}
TEST_F(ConversionTest, CPUSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
settings_.tflite_settings->cpu_settings->num_threads = 2;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().cpu_settings().num_threads(), 2);
}
TEST_F(ConversionTest, MaxDelegatedPartitions) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->max_delegated_partitions = 2;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().max_delegated_partitions(), 2);
}
TEST_F(ConversionTest, GoogleEdgeTpuSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->google_edgetpu_settings =
std::make_unique<GoogleEdgeTpuSettingsT>();
GoogleEdgeTpuSettingsT* input_settings =
settings_.tflite_settings->google_edgetpu_settings.get();
input_settings->priority = GoogleEdgeTpuSettings_::Priority_PRIORITY_HIGH;
input_settings->allow_fp16_precision_for_fp32 = true;
std::vector<uint8_t> extension_data{1, 2, 3};
input_settings->extension_data = extension_data;
input_settings->model_identifier = "model";
input_settings->prefer_cache_coherency_for_inputs =
GoogleEdgeTpuSettings_::TriState_TRISTATE_TRUE;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GoogleEdgeTpuSettings output_settings =
compute.tflite_settings().google_edgetpu_settings();
EXPECT_EQ(output_settings.priority(),
proto::GoogleEdgeTpuSettings::PRIORITY_HIGH);
EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
EXPECT_EQ(output_settings.extension_data().size(), 3);
EXPECT_EQ(output_settings.model_identifier(), "model");
EXPECT_EQ(output_settings.prefer_cache_coherency_for_inputs(),
proto::GoogleEdgeTpuSettings::TRISTATE_TRUE);
}
TEST_F(ConversionTest, CompilationCachingSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->compilation_caching_settings =
std::make_unique<CompilationCachingSettingsT>();
CompilationCachingSettingsT* input_settings =
settings_.tflite_settings->compilation_caching_settings.get();
input_settings->cache_dir = "/tmp";
input_settings->model_token = "model";
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::CompilationCachingSettings output_settings =
compute.tflite_settings().compilation_caching_settings();
EXPECT_EQ(output_settings.cache_dir(), "/tmp");
EXPECT_EQ(output_settings.model_token(), "model");
}
TEST_F(ConversionTest, MtkNeuronSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->mtk_neuron_settings =
std::make_unique<MtkNeuronSettingsT>();
MtkNeuronSettingsT* input_settings =
settings_.tflite_settings->mtk_neuron_settings.get();
input_settings->execution_preference =
MtkNeuronSettings_::ExecutionPreference_PREFERENCE_UNDEFINED;
input_settings->execution_priority =
MtkNeuronSettings_::ExecutionPriority_PRIORITY_MEDIUM;
input_settings->optimization_hints = {
MtkNeuronSettings_::OptimizationHint_OPTIMIZATION_LOW_LATENCY,
MtkNeuronSettings_::OptimizationHint_OPTIMIZATION_BATCH_PROCESSING};
input_settings->operation_check_mode =
MtkNeuronSettings_::OperationCheckMode_PER_NODE_OPERATION_CHECK;
input_settings->allow_fp16_precision_for_fp32 = true;
input_settings->use_ahwb = false;
input_settings->use_cacheable_buffer = true;
input_settings->compile_options = {"TEST_COMPILE_OPTIONS"};
input_settings->accelerator_names = {"TEST_ACCELERATOR_NAME"};
input_settings->neuron_config_path = "TEST_NEURON_CONFIG_PATH";
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
const proto::MtkNeuronSettings& output_settings =
compute.tflite_settings().mtk_neuron_settings();
EXPECT_EQ(output_settings.execution_preference(),
proto::MtkNeuronSettings::PREFERENCE_UNDEFINED);
EXPECT_EQ(output_settings.execution_priority(),
proto::MtkNeuronSettings::PRIORITY_MEDIUM);
EXPECT_EQ(output_settings.optimization_hints().size(), 2);
EXPECT_EQ(output_settings.optimization_hints().at(0),
proto::MtkNeuronSettings::OPTIMIZATION_LOW_LATENCY);
EXPECT_EQ(output_settings.optimization_hints().at(1),
proto::MtkNeuronSettings::OPTIMIZATION_BATCH_PROCESSING);
EXPECT_EQ(output_settings.operation_check_mode(),
proto::MtkNeuronSettings::PER_NODE_OPERATION_CHECK);
EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
EXPECT_FALSE(output_settings.use_ahwb());
EXPECT_TRUE(output_settings.use_cacheable_buffer());
EXPECT_EQ(output_settings.compile_options().size(), 1);
EXPECT_EQ(output_settings.compile_options().at(0), "TEST_COMPILE_OPTIONS");
EXPECT_EQ(output_settings.accelerator_names().size(), 1);
EXPECT_EQ(output_settings.accelerator_names().at(0), "TEST_ACCELERATOR_NAME");
EXPECT_EQ(output_settings.neuron_config_path(), "TEST_NEURON_CONFIG_PATH");
}
TEST_F(ConversionTest, MiniBenchmarkSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
settings_.tflite_settings->cpu_settings->num_threads = 2;
settings_.model_identifier_for_statistics = "id";
settings_.model_namespace_for_statistics = "ns";
settings_.settings_to_test_locally =
std::make_unique<MinibenchmarkSettingsT>();
MinibenchmarkSettingsT* mini_settings =
settings_.settings_to_test_locally.get();
mini_settings->model_file = std::make_unique<ModelFileT>();
mini_settings->model_file->filename = "test_model";
mini_settings->storage_paths = std::make_unique<BenchmarkStoragePathsT>();
mini_settings->storage_paths->storage_file_path = "/data/local/tmp";
  auto xnnpack = std::make_unique<TFLiteSettingsT>();
xnnpack->xnnpack_settings = std::make_unique<XNNPackSettingsT>();
xnnpack->xnnpack_settings->num_threads = 2;
  auto hexagon = std::make_unique<TFLiteSettingsT>();
hexagon->hexagon_settings = std::make_unique<HexagonSettingsT>();
hexagon->hexagon_settings->powersave_level = 3;
  auto coreml = std::make_unique<TFLiteSettingsT>();
coreml->coreml_settings = std::make_unique<CoreMLSettingsT>();
coreml->coreml_settings->enabled_devices =
CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
coreml->coreml_settings->coreml_version = 3;
coreml->coreml_settings->max_delegated_partitions = 10;
coreml->coreml_settings->min_nodes_per_partition = 4;
mini_settings->settings_to_test.emplace_back(std::move(xnnpack));
mini_settings->settings_to_test.emplace_back(std::move(hexagon));
mini_settings->settings_to_test.emplace_back(std::move(coreml));
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
EXPECT_EQ("id", compute.model_identifier_for_statistics());
EXPECT_EQ("ns", compute.model_namespace_for_statistics());
EXPECT_TRUE(compute.has_settings_to_test_locally());
const proto::MinibenchmarkSettings& mini_output =
compute.settings_to_test_locally();
EXPECT_EQ("test_model", mini_output.model_file().filename());
EXPECT_EQ("/data/local/tmp", mini_output.storage_paths().storage_file_path());
EXPECT_EQ(3, mini_output.settings_to_test_size());
EXPECT_EQ(
2, mini_output.settings_to_test().at(0).xnnpack_settings().num_threads());
EXPECT_EQ(3, mini_output.settings_to_test()
.at(1)
.hexagon_settings()
.powersave_level());
EXPECT_EQ(
proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE,
mini_output.settings_to_test().at(2).coreml_settings().enabled_devices());
EXPECT_EQ(
3,
mini_output.settings_to_test().at(2).coreml_settings().coreml_version());
EXPECT_EQ(10, mini_output.settings_to_test()
.at(2)
.coreml_settings()
.max_delegated_partitions());
EXPECT_EQ(4, mini_output.settings_to_test()
.at(2)
.coreml_settings()
.min_nodes_per_partition());
  compute = ConvertFromFlatbuffer(settings_,
                                  /*skip_mini_benchmark_settings=*/true);
EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
EXPECT_EQ("id", compute.model_identifier_for_statistics());
EXPECT_EQ("ns", compute.model_namespace_for_statistics());
EXPECT_FALSE(compute.has_settings_to_test_locally());
}
TEST_F(ConversionTest, BestAccelerationDecisionEvent) {
event_.is_log_flushing_event = true;
event_.best_acceleration_decision =
std::make_unique<BestAccelerationDecisionT>();
event_.best_acceleration_decision->number_of_source_events = 4;
event_.best_acceleration_decision->min_inference_time_us = 3000;
proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
EXPECT_TRUE(proto_event.is_log_flushing_event());
const auto& best_decision = proto_event.best_acceleration_decision();
EXPECT_EQ(4, best_decision.number_of_source_events());
EXPECT_EQ(3000, best_decision.min_inference_time_us());
EXPECT_FALSE(best_decision.has_min_latency_event());
event_.best_acceleration_decision->min_latency_event =
std::make_unique<BenchmarkEventT>();
auto* min_event = event_.best_acceleration_decision->min_latency_event.get();
min_event->event_type = BenchmarkEventType_END;
min_event->tflite_settings = std::make_unique<TFLiteSettingsT>();
min_event->tflite_settings->delegate = Delegate_XNNPACK;
min_event->tflite_settings->xnnpack_settings =
std::make_unique<XNNPackSettingsT>();
min_event->tflite_settings->xnnpack_settings->num_threads = 2;
min_event->result = std::make_unique<BenchmarkResultT>();
min_event->result->initialization_time_us.push_back(100);
min_event->result->initialization_time_us.push_back(110);
min_event->result->inference_time_us.push_back(3000);
min_event->result->inference_time_us.push_back(3500);
min_event->result->max_memory_kb = 1234;
min_event->result->ok = true;
min_event->boottime_us = 1111;
min_event->wallclock_us = 2222;
proto_event = ConvertFromFlatbuffer(event_);
EXPECT_TRUE(proto_event.best_acceleration_decision().has_min_latency_event());
const auto& proto_min_event =
proto_event.best_acceleration_decision().min_latency_event();
EXPECT_EQ(proto::BenchmarkEventType::END, proto_min_event.event_type());
EXPECT_EQ(proto::Delegate::XNNPACK,
proto_min_event.tflite_settings().delegate());
EXPECT_EQ(2,
proto_min_event.tflite_settings().xnnpack_settings().num_threads());
EXPECT_TRUE(proto_min_event.has_result());
EXPECT_EQ(2, proto_min_event.result().initialization_time_us_size());
EXPECT_EQ(100, proto_min_event.result().initialization_time_us()[0]);
EXPECT_EQ(110, proto_min_event.result().initialization_time_us()[1]);
EXPECT_EQ(2, proto_min_event.result().inference_time_us_size());
EXPECT_EQ(3000, proto_min_event.result().inference_time_us()[0]);
EXPECT_EQ(3500, proto_min_event.result().inference_time_us()[1]);
EXPECT_EQ(1234, proto_min_event.result().max_memory_kb());
EXPECT_TRUE(proto_min_event.result().ok());
EXPECT_EQ(1111, proto_min_event.boottime_us());
EXPECT_EQ(2222, proto_min_event.wallclock_us());
}
TEST_F(ConversionTest, BenchmarkInitializationEvent) {
event_.initialization_failure =
std::make_unique<BenchmarkInitializationFailureT>();
event_.initialization_failure->initialization_status = 101;
proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
EXPECT_FALSE(proto_event.is_log_flushing_event());
EXPECT_EQ(101, proto_event.initialization_failure().initialization_status());
}
TEST_F(ConversionTest, BenchmarkError) {
event_.benchmark_event = std::make_unique<BenchmarkEventT>();
event_.benchmark_event->error = std::make_unique<BenchmarkErrorT>();
auto* error = event_.benchmark_event->error.get();
error->stage = BenchmarkStage_INITIALIZATION;
error->exit_code = 123;
error->signal = 321;
error->mini_benchmark_error_code = 456;
  auto code1 = std::make_unique<ErrorCodeT>();
code1->source = Delegate_EDGETPU;
code1->tflite_error = 3;
code1->underlying_api_error = 301;
error->error_code.emplace_back(std::move(code1));
  auto code2 = std::make_unique<ErrorCodeT>();
code2->source = Delegate_NNAPI;
code2->tflite_error = 4;
code2->underlying_api_error = 404;
error->error_code.emplace_back(std::move(code2));
const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
const auto& proto_error = proto_event.benchmark_event().error();
EXPECT_EQ(proto::BenchmarkStage::INITIALIZATION, proto_error.stage());
EXPECT_EQ(123, proto_error.exit_code());
EXPECT_EQ(321, proto_error.signal());
EXPECT_EQ(456, proto_error.mini_benchmark_error_code());
EXPECT_EQ(2, proto_error.error_code_size());
EXPECT_EQ(proto::Delegate::EDGETPU, proto_error.error_code()[0].source());
EXPECT_EQ(3, proto_error.error_code()[0].tflite_error());
EXPECT_EQ(301, proto_error.error_code()[0].underlying_api_error());
EXPECT_EQ(proto::Delegate::NNAPI, proto_error.error_code()[1].source());
EXPECT_EQ(4, proto_error.error_code()[1].tflite_error());
EXPECT_EQ(404, proto_error.error_code()[1].underlying_api_error());
}
TEST_F(ConversionTest, BenchmarkMetric) {
event_.benchmark_event = std::make_unique<BenchmarkEventT>();
event_.benchmark_event->result = std::make_unique<BenchmarkResultT>();
  auto metric = std::make_unique<BenchmarkMetricT>();
metric->name = "test";
metric->values.push_back(1.234);
metric->values.push_back(5.678);
event_.benchmark_event->result->metrics.emplace_back(std::move(metric));
const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
EXPECT_EQ(1, proto_event.benchmark_event().result().metrics_size());
const auto& proto_metric =
proto_event.benchmark_event().result().metrics()[0];
EXPECT_EQ("test", proto_metric.name());
EXPECT_EQ(2, proto_metric.values_size());
EXPECT_FLOAT_EQ(1.234, proto_metric.values()[0]);
EXPECT_FLOAT_EQ(5.678, proto_metric.values()[1]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/flatbuffer_to_proto.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/flatbuffer_to_proto_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
079d69b0-65c5-4dbf-b05a-cfe05100b0de | cpp | tensorflow/tensorflow | stable_delegate_plugin | tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc | tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc | #include "tensorflow/lite/acceleration/configuration/stable_delegate_plugin.h"
namespace tflite {
namespace delegates {
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(StableDelegatePlugin,
StableDelegatePlugin::New);
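// The macro above expands to a static registrar that adds the
// "StableDelegatePlugin" factory to the process-wide DelegatePluginRegistry
// at load time. Illustrative lookup sketch (mirrors the accompanying unit
// test; `tflite_settings` is assumed to be a populated TFLiteSettings table):
//
//   std::unique_ptr<DelegatePluginInterface> plugin =
//       DelegatePluginRegistry::CreateByName("StableDelegatePlugin",
//                                            tflite_settings);
//   TfLiteDelegatePtr delegate = plugin->Create();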
}
} | #include <memory>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
class StableDelegatePluginTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
static constexpr tflite::XNNPackFlags kFlagsForTest =
tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;
static constexpr char kDelegateBinaryPath[] =
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/libtensorflowlite_stable_xnnpack_delegate.so";
void SetUp() override {
flatbuffers::Offset<flatbuffers::String> stable_delegate_path_offset =
flatbuffer_builder_.CreateString(kDelegateBinaryPath);
StableDelegateLoaderSettingsBuilder stable_delegate_loader_settings_builder(
flatbuffer_builder_);
stable_delegate_loader_settings_builder.add_delegate_path(
stable_delegate_path_offset);
flatbuffers::Offset<StableDelegateLoaderSettings>
stable_delegate_loader_settings =
stable_delegate_loader_settings_builder.Finish();
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
xnnpack_settings_builder.add_flags(kFlagsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_stable_delegate_loader_settings(
stable_delegate_loader_settings);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
tflite_settings_builder.add_delegate(Delegate_XNNPACK);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
tflite_settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
delegate_plugin_ = delegates::DelegatePluginRegistry::CreateByName(
"StableDelegatePlugin", *tflite_settings_);
ASSERT_NE(delegate_plugin_, nullptr);
}
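  // Note: tflite_settings_ points into flatbuffer_builder_'s internal buffer,
  // so the builder member must remain alive and unmodified for as long as the
  // tests dereference the settings pointer.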
void TearDown() override { delegate_plugin_.reset(); }
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *tflite_settings_;
std::unique_ptr<delegates::DelegatePluginInterface> delegate_plugin_;
};
TEST_F(StableDelegatePluginTest, CanCreateAndDestroyDelegate) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
EXPECT_NE(delegate, nullptr);
}
TEST_F(StableDelegatePluginTest, CanGetDelegateErrno) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
EXPECT_EQ(delegate_plugin_->GetDelegateErrno(delegate.get()), 0);
}
TEST_F(StableDelegatePluginTest, SetsCorrectThreadCount) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
pthreadpool_t threadpool = static_cast<pthreadpool_t>(
TfLiteXNNPackDelegateGetThreadPool(delegate.get()));
EXPECT_EQ(pthreadpool_get_threads_count(threadpool), kNumThreadsForTest);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7de7115a-9dd8-4348-a90a-d90d03c013f0 | cpp | tensorflow/tensorflow | proto_to_flatbuffer | tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.cc | tensorflow/lite/acceleration/configuration/proto_to_flatbuffer_test.cc | #include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include <cstdint>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::Offset;
using ::flatbuffers::String;
using ::flatbuffers::Vector;
ExecutionPreference ConvertExecutionPreference(
proto::ExecutionPreference preference) {
switch (preference) {
case proto::ExecutionPreference::ANY:
return ExecutionPreference_ANY;
case proto::ExecutionPreference::LOW_LATENCY:
return ExecutionPreference_LOW_LATENCY;
case proto::ExecutionPreference::LOW_POWER:
return ExecutionPreference_LOW_POWER;
case proto::ExecutionPreference::FORCE_CPU:
return ExecutionPreference_FORCE_CPU;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for ExecutionPreference: %d", preference);
return ExecutionPreference_ANY;
}
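// The enum converters in this file follow the shape used above: an exhaustive
// switch over the proto enum with no default case, so adding a new proto
// value produces a compiler diagnostic here, plus a log-and-fallback return
// for values outside the known range (e.g. read from a newer serialization).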
Delegate ConvertDelegate(proto::Delegate delegate) {
switch (delegate) {
case proto::Delegate::NONE:
return Delegate_NONE;
case proto::Delegate::NNAPI:
return Delegate_NNAPI;
case proto::Delegate::GPU:
return Delegate_GPU;
case proto::Delegate::HEXAGON:
return Delegate_HEXAGON;
case proto::Delegate::XNNPACK:
return Delegate_XNNPACK;
case proto::Delegate::EDGETPU:
return Delegate_EDGETPU;
case proto::Delegate::EDGETPU_CORAL:
return Delegate_EDGETPU_CORAL;
case proto::Delegate::CORE_ML:
return Delegate_CORE_ML;
case proto::Delegate::ARMNN:
return Delegate_ARMNN;
case proto::Delegate::MTK_NEURON:
return Delegate_MTK_NEURON;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for Delegate: %d",
delegate);
return Delegate_NONE;
}
NNAPIExecutionPreference ConvertNNAPIExecutionPreference(
proto::NNAPIExecutionPreference preference) {
switch (preference) {
case proto::NNAPIExecutionPreference::UNDEFINED:
return NNAPIExecutionPreference_UNDEFINED;
case proto::NNAPIExecutionPreference::NNAPI_LOW_POWER:
return NNAPIExecutionPreference_NNAPI_LOW_POWER;
case proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER:
return NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER;
case proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED:
return NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPreference: %d",
preference);
return NNAPIExecutionPreference_UNDEFINED;
}
NNAPIExecutionPriority ConvertNNAPIExecutionPriority(
proto::NNAPIExecutionPriority priority) {
switch (priority) {
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED:
return NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW:
return NNAPIExecutionPriority_NNAPI_PRIORITY_LOW;
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM:
return NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM;
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH:
return NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPriority: %d", priority);
return NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
}
GPUBackend ConvertGPUBackend(proto::GPUBackend backend) {
switch (backend) {
case proto::GPUBackend::UNSET:
return GPUBackend_UNSET;
case proto::GPUBackend::OPENCL:
return GPUBackend_OPENCL;
case proto::GPUBackend::OPENGL:
return GPUBackend_OPENGL;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for GPUBackend: %d",
backend);
return GPUBackend_UNSET;
}
GPUInferenceUsage ConvertGPUInferenceUsage(
proto::GPUInferenceUsage preference) {
switch (preference) {
case proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER:
return GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
case proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED:
return GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferenceUsage: %d", preference);
return GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
}
GPUInferencePriority ConvertGPUInferencePriority(
proto::GPUInferencePriority priority) {
switch (priority) {
case proto::GPUInferencePriority::GPU_PRIORITY_AUTO:
return GPUInferencePriority_GPU_PRIORITY_AUTO;
case proto::GPUInferencePriority::GPU_PRIORITY_MAX_PRECISION:
return GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION;
case proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY:
return GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY;
case proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE:
return GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferencePriority: %d", priority);
return GPUInferencePriority_GPU_PRIORITY_AUTO;
}
EdgeTpuPowerState ConvertEdgeTpuPowerState(proto::EdgeTpuPowerState state) {
switch (state) {
case proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE:
return EdgeTpuPowerState_UNDEFINED_POWERSTATE;
case proto::EdgeTpuPowerState::TPU_CORE_OFF:
return EdgeTpuPowerState_TPU_CORE_OFF;
case proto::EdgeTpuPowerState::READY:
return EdgeTpuPowerState_READY;
case proto::EdgeTpuPowerState::ACTIVE_MIN_POWER:
return EdgeTpuPowerState_ACTIVE_MIN_POWER;
case proto::EdgeTpuPowerState::ACTIVE_VERY_LOW_POWER:
return EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER;
case proto::EdgeTpuPowerState::ACTIVE_LOW_POWER:
return EdgeTpuPowerState_ACTIVE_LOW_POWER;
case proto::EdgeTpuPowerState::ACTIVE:
return EdgeTpuPowerState_ACTIVE;
case proto::EdgeTpuPowerState::OVER_DRIVE:
return EdgeTpuPowerState_OVER_DRIVE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for EdgeTpuSettings::PowerState: %d",
state);
return EdgeTpuPowerState_UNDEFINED_POWERSTATE;
}
Offset<FallbackSettings> ConvertFallbackSettings(
const proto::FallbackSettings& settings, FlatBufferBuilder& builder) {
return CreateFallbackSettings(
builder,
settings.allow_automatic_fallback_on_compilation_error(),
settings.allow_automatic_fallback_on_execution_error());
}
Offset<NNAPISettings> ConvertNNAPISettings(const proto::NNAPISettings& settings,
FlatBufferBuilder& builder) {
return CreateNNAPISettings(
builder,
builder.CreateString(settings.accelerator_name()),
builder.CreateString(settings.cache_directory()),
builder.CreateString(settings.model_token()),
ConvertNNAPIExecutionPreference(settings.execution_preference()),
settings.no_of_nnapi_instances_to_cache(),
ConvertFallbackSettings(settings.fallback_settings(), builder),
settings.allow_nnapi_cpu_on_android_10_plus(),
ConvertNNAPIExecutionPriority(settings.execution_priority()),
settings.allow_dynamic_dimensions(),
settings.allow_fp16_precision_for_fp32(),
settings.use_burst_computation(),
settings.support_library_handle());
}
Offset<GPUSettings> ConvertGPUSettings(const proto::GPUSettings& settings,
FlatBufferBuilder& builder) {
return CreateGPUSettings(
builder,
settings.is_precision_loss_allowed(),
settings.enable_quantized_inference(),
ConvertGPUBackend(settings.force_backend()),
ConvertGPUInferencePriority(settings.inference_priority1()),
ConvertGPUInferencePriority(settings.inference_priority2()),
ConvertGPUInferencePriority(settings.inference_priority3()),
ConvertGPUInferenceUsage(settings.inference_preference()),
builder.CreateString(settings.cache_directory()),
builder.CreateString(settings.model_token()));
}
Offset<HexagonSettings> ConvertHexagonSettings(
const proto::HexagonSettings& settings, FlatBufferBuilder& builder) {
return CreateHexagonSettings(
builder,
settings.debug_level(),
settings.powersave_level(),
settings.print_graph_profile(),
settings.print_graph_debug());
}
Offset<XNNPackSettings> ConvertXNNPackSettings(
const proto::XNNPackSettings& settings, FlatBufferBuilder& builder) {
return CreateXNNPackSettings(
builder,
settings.num_threads(),
tflite::XNNPackFlags(settings.flags()));
}
Offset<CoreMLSettings> ConvertCoreMLSettings(
const proto::CoreMLSettings& settings, FlatBufferBuilder& builder) {
tflite::CoreMLSettings_::EnabledDevices enabled_devices =
tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL;
switch (settings.enabled_devices()) {
case proto::CoreMLSettings::DEVICES_ALL:
enabled_devices = tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL;
break;
case proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE:
enabled_devices =
tflite::CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
break;
default:
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid devices enum: %d",
settings.enabled_devices());
}
return CreateCoreMLSettings(
builder, enabled_devices, settings.coreml_version(),
settings.max_delegated_partitions(), settings.min_nodes_per_partition());
}
Offset<StableDelegateLoaderSettings> ConvertStableDelegateLoaderSettings(
const proto::StableDelegateLoaderSettings& settings,
FlatBufferBuilder& builder) {
return CreateStableDelegateLoaderSettings(
builder, builder.CreateString(settings.delegate_path()),
builder.CreateString(settings.delegate_name()));
}
Offset<CPUSettings> ConvertCPUSettings(const proto::CPUSettings& settings,
FlatBufferBuilder& builder) {
return CreateCPUSettings(builder,
settings.num_threads());
}
Offset<tflite::EdgeTpuDeviceSpec> ConvertEdgeTpuDeviceSpec(
FlatBufferBuilder& builder, const proto::EdgeTpuDeviceSpec& device_spec) {
Offset<Vector<Offset<String>>> device_paths_fb = 0;
if (device_spec.device_paths_size() > 0) {
std::vector<Offset<String>> device_paths;
for (const auto& device_path : device_spec.device_paths()) {
auto device_path_fb = builder.CreateString(device_path);
device_paths.push_back(device_path_fb);
}
device_paths_fb = builder.CreateVector(device_paths);
}
return tflite::CreateEdgeTpuDeviceSpec(
builder,
static_cast<tflite::EdgeTpuDeviceSpec_::PlatformType>(
device_spec.platform_type()),
device_spec.num_chips(), device_paths_fb, device_spec.chip_family());
}
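// In the FlatBuffers API a zero Offset (as used for device_paths_fb above)
// means "field absent": passing 0 to a generated Create*() function leaves
// the optional field unset in the resulting table instead of writing an
// empty value.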
Offset<GoogleEdgeTpuSettings> ConvertGoogleEdgeTpuSettings(
const proto::GoogleEdgeTpuSettings& settings, FlatBufferBuilder& builder) {
Offset<String> model_identifier = 0;
if (settings.has_model_identifier()) {
model_identifier = builder.CreateString(settings.model_identifier());
}
Offset<Vector<uint8_t>> extension_data = 0;
if (settings.has_extension_data()) {
extension_data = builder.CreateVector(
reinterpret_cast<const uint8_t*>(settings.extension_data().data()),
settings.extension_data().size());
}
GoogleEdgeTpuSettingsBuilder builder_(builder);
builder_.add_log_verbosity(settings.log_verbosity());
builder_.add_enable_tracing(settings.enable_tracing());
builder_.add_priority(static_cast<tflite::GoogleEdgeTpuSettings_::Priority>(
settings.priority()));
builder_.add_model_identifier(model_identifier);
builder_.add_use_async_api(settings.use_async_api());
builder_.add_delegate_should_manage_cache_for_inputs(
settings.delegate_should_manage_cache_for_inputs());
builder_.add_delegate_should_manage_cache_for_outputs(
settings.delegate_should_manage_cache_for_outputs());
builder_.add_prefer_cache_coherency_for_inputs(
static_cast<tflite::GoogleEdgeTpuSettings_::TriState>(
settings.prefer_cache_coherency_for_inputs()));
builder_.add_prefer_cache_coherency_for_outputs(
static_cast<tflite::GoogleEdgeTpuSettings_::TriState>(
settings.prefer_cache_coherency_for_outputs()));
builder_.add_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
builder_.add_extension_data(extension_data);
return builder_.Finish();
}
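// Unlike the one-shot Create*() helpers used elsewhere in this file, the
// function above drives the generated GoogleEdgeTpuSettingsBuilder directly.
// The string and byte-vector offsets must be created *before* the builder is
// constructed, because FlatBuffers forbids starting a nested allocation while
// a table is being written.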
Offset<EdgeTpuSettings> ConvertEdgeTpuSettings(
const proto::EdgeTpuSettings& settings, FlatBufferBuilder& builder) {
Offset<Vector<Offset<tflite::EdgeTpuInactivePowerConfig>>>
inactive_power_configs = 0;
std::vector<Offset<tflite::EdgeTpuInactivePowerConfig>>
inactive_power_configs_std;
if (settings.inactive_power_configs_size() > 0) {
for (const auto& config : settings.inactive_power_configs()) {
inactive_power_configs_std.push_back(
tflite::CreateEdgeTpuInactivePowerConfig(
builder,
static_cast<tflite::EdgeTpuPowerState>(
config.inactive_power_state()),
config.inactive_timeout_us()));
}
inactive_power_configs =
builder.CreateVector<Offset<tflite::EdgeTpuInactivePowerConfig>>(
inactive_power_configs_std);
}
Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0;
if (settings.has_edgetpu_device_spec()) {
edgetpu_device_spec =
ConvertEdgeTpuDeviceSpec(builder, settings.edgetpu_device_spec());
}
Offset<String> model_token = 0;
if (settings.has_model_token()) {
model_token = builder.CreateString(settings.model_token());
}
std::vector<int32_t> hardware_cluster_ids_std{
settings.hardware_cluster_ids().begin(),
settings.hardware_cluster_ids().end()};
auto hardware_cluster_ids_fb =
builder.CreateVector<int32_t>(hardware_cluster_ids_std);
Offset<String> public_model_id = 0;
if (settings.has_public_model_id()) {
public_model_id = builder.CreateString(settings.public_model_id());
}
return CreateEdgeTpuSettings(
builder, ConvertEdgeTpuPowerState(settings.inference_power_state()),
inactive_power_configs, settings.inference_priority(),
edgetpu_device_spec, model_token,
static_cast<tflite::EdgeTpuSettings_::FloatTruncationType>(
settings.float_truncation_type()),
static_cast<tflite::EdgeTpuSettings_::QosClass>(settings.qos_class()),
hardware_cluster_ids_fb, public_model_id,
static_cast<tflite::EdgeTpuSettings_::UseLayerIrTgcBackend>(
settings.use_layer_ir_tgc_backend()));
}
Offset<CompilationCachingSettings> ConvertCompilationCachingSettings(
const proto::CompilationCachingSettings& settings,
FlatBufferBuilder& builder) {
return CreateCompilationCachingSettings(
builder, builder.CreateString(settings.cache_dir()),
builder.CreateString(settings.model_token()));
}
Offset<ArmNNSettings> ConvertArmNNSettings(const proto::ArmNNSettings& settings,
FlatBufferBuilder& builder) {
return CreateArmNNSettings(
builder, builder.CreateString(settings.backends()), settings.fastmath(),
builder.CreateString(settings.additional_parameters()));
}
Offset<MtkNeuronSettings> ConvertMtkNeuronSettings(
const proto::MtkNeuronSettings& settings, FlatBufferBuilder& builder) {
return CreateMtkNeuronSettings(
builder,
static_cast<MtkNeuronSettings_::ExecutionPreference>(
settings.execution_preference()),
static_cast<MtkNeuronSettings_::ExecutionPriority>(
settings.execution_priority()),
builder.CreateVector(settings.optimization_hints().data(),
settings.optimization_hints().size()),
static_cast<MtkNeuronSettings_::OperationCheckMode>(
settings.operation_check_mode()),
settings.allow_fp16_precision_for_fp32(), settings.use_ahwb(),
settings.use_cacheable_buffer(),
builder.CreateVectorOfStrings(settings.compile_options().begin(),
settings.compile_options().end()),
builder.CreateVectorOfStrings(settings.accelerator_names().begin(),
settings.accelerator_names().end()),
builder.CreateString(settings.neuron_config_path()));
}
Offset<CoralSettings> ConvertCoralSettings(const proto::CoralSettings& settings,
FlatBufferBuilder& builder) {
return CreateCoralSettings(
builder, builder.CreateString(settings.device()),
static_cast<tflite::CoralSettings_::Performance>(settings.performance()),
settings.usb_always_dfu(), settings.usb_max_bulk_in_queue_length());
}
Offset<TFLiteSettings> ConvertTfliteSettings(
const proto::TFLiteSettings& settings, FlatBufferBuilder& builder) {
return CreateTFLiteSettings(
builder, ConvertDelegate(settings.delegate()),
ConvertNNAPISettings(settings.nnapi_settings(), builder),
ConvertGPUSettings(settings.gpu_settings(), builder),
ConvertHexagonSettings(settings.hexagon_settings(), builder),
ConvertXNNPackSettings(settings.xnnpack_settings(), builder),
ConvertCoreMLSettings(settings.coreml_settings(), builder),
ConvertCPUSettings(settings.cpu_settings(), builder),
settings.max_delegated_partitions(),
ConvertEdgeTpuSettings(settings.edgetpu_settings(), builder),
ConvertCoralSettings(settings.coral_settings(), builder),
ConvertFallbackSettings(settings.fallback_settings(), builder),
settings.disable_default_delegates(),
ConvertStableDelegateLoaderSettings(
settings.stable_delegate_loader_settings(), builder),
ConvertGoogleEdgeTpuSettings(settings.google_edgetpu_settings(), builder),
ConvertCompilationCachingSettings(settings.compilation_caching_settings(),
builder),
ConvertArmNNSettings(settings.armnn_settings(), builder),
ConvertMtkNeuronSettings(settings.mtk_neuron_settings(), builder));
}
Offset<ModelFile> ConvertModelFile(const proto::ModelFile& model_file,
FlatBufferBuilder& builder) {
return CreateModelFile(builder, builder.CreateString(model_file.filename()),
model_file.fd(), model_file.offset(),
model_file.length());
}
Offset<BenchmarkStoragePaths> ConvertBenchmarkStoragePaths(
const proto::BenchmarkStoragePaths& storage_paths,
FlatBufferBuilder& builder) {
return CreateBenchmarkStoragePaths(
builder, builder.CreateString(storage_paths.storage_file_path()),
builder.CreateString(storage_paths.data_directory_path()));
}
Offset<MinibenchmarkSettings> ConvertMinibenchmarkSettings(
const proto::MinibenchmarkSettings& settings, FlatBufferBuilder& builder) {
Offset<Vector<Offset<TFLiteSettings>>> settings_to_test = 0;
std::vector<Offset<TFLiteSettings>> settings_to_test_vec;
if (settings.settings_to_test_size() > 0) {
for (const auto& one : settings.settings_to_test()) {
settings_to_test_vec.push_back(ConvertTfliteSettings(one, builder));
}
settings_to_test =
builder.CreateVector<Offset<TFLiteSettings>>(settings_to_test_vec);
}
return CreateMinibenchmarkSettings(
builder, settings_to_test,
ConvertModelFile(settings.model_file(), builder),
ConvertBenchmarkStoragePaths(settings.storage_paths(), builder));
}
const TFLiteSettings* ConvertFromProto(
const proto::TFLiteSettings& proto_settings, FlatBufferBuilder* builder) {
Offset<TFLiteSettings> settings =
ConvertTfliteSettings(proto_settings, *builder);
return flatbuffers::GetTemporaryPointer(*builder, settings);
}
const ComputeSettings* ConvertFromProto(
const proto::ComputeSettings& proto_settings, FlatBufferBuilder* builder) {
auto settings = CreateComputeSettings(
*builder, ConvertExecutionPreference(proto_settings.preference()),
ConvertTfliteSettings(proto_settings.tflite_settings(), *builder),
builder->CreateString(proto_settings.model_namespace_for_statistics()),
builder->CreateString(proto_settings.model_identifier_for_statistics()),
ConvertMinibenchmarkSettings(proto_settings.settings_to_test_locally(),
*builder));
return flatbuffers::GetTemporaryPointer(*builder, settings);
}
const MinibenchmarkSettings* ConvertFromProto(
const proto::MinibenchmarkSettings& proto_settings,
flatbuffers::FlatBufferBuilder* builder) {
auto settings = ConvertMinibenchmarkSettings(proto_settings, *builder);
return flatbuffers::GetTemporaryPointer(*builder, settings);
}
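// Illustrative usage sketch (not part of the original file): the pointers
// returned by the ConvertFromProto() overloads above alias the builder's
// in-progress buffer via GetTemporaryPointer(), so the builder must outlive
// the returned settings and must not be written to while they are read.
//
//   flatbuffers::FlatBufferBuilder builder;
//   proto::TFLiteSettings proto_settings;
//   proto_settings.set_delegate(proto::Delegate::XNNPACK);
//   const TFLiteSettings* settings = ConvertFromProto(proto_settings, &builder);
//   // `settings` is valid only while `builder` is alive and untouched.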
} | #include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(ConversionTest, EdgeTpuSettings) {
const std::vector<int32_t> kHardwareClusterIds{1};
const std::string kPublicModelId = "public_model_id";
const tflite::proto::EdgeTpuSettings_UseLayerIrTgcBackend
kUseLayerIrTgcBackend =
tflite::proto::EdgeTpuSettings::USE_LAYER_IR_TGC_BACKEND_YES;
proto::ComputeSettings input_settings;
auto* edgetpu_settings =
input_settings.mutable_tflite_settings()->mutable_edgetpu_settings();
edgetpu_settings->set_public_model_id(kPublicModelId);
edgetpu_settings->set_use_layer_ir_tgc_backend(kUseLayerIrTgcBackend);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
*edgetpu_settings->mutable_hardware_cluster_ids() = {
kHardwareClusterIds.begin(), kHardwareClusterIds.end()};
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder)
->tflite_settings()
->edgetpu_settings();
EXPECT_EQ(output_settings->hardware_cluster_ids()->size(), 1);
EXPECT_EQ(output_settings->hardware_cluster_ids()->Get(0),
kHardwareClusterIds[0]);
EXPECT_EQ(output_settings->public_model_id()->str(), kPublicModelId);
EXPECT_EQ(output_settings->use_layer_ir_tgc_backend(),
tflite::EdgeTpuSettings_::
UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_YES);
}
TEST(ConversionTest, TFLiteSettings) {
const std::vector<int32_t> kHardwareClusterIds{1};
const std::string kPublicModelId = "public_model_id";
const tflite::proto::EdgeTpuSettings_UseLayerIrTgcBackend
kUseLayerIrTgcBackend =
tflite::proto::EdgeTpuSettings::USE_LAYER_IR_TGC_BACKEND_YES;
proto::TFLiteSettings input_settings;
input_settings.set_delegate(::tflite::proto::EDGETPU);
auto* edgetpu_settings = input_settings.mutable_edgetpu_settings();
edgetpu_settings->set_public_model_id(kPublicModelId);
edgetpu_settings->set_use_layer_ir_tgc_backend(kUseLayerIrTgcBackend);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
*edgetpu_settings->mutable_hardware_cluster_ids() = {
kHardwareClusterIds.begin(), kHardwareClusterIds.end()};
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
EXPECT_EQ(output_settings->delegate(), ::tflite::Delegate_EDGETPU);
const auto* output_edgetpu_settings = output_settings->edgetpu_settings();
EXPECT_EQ(output_edgetpu_settings->hardware_cluster_ids()->size(), 1);
EXPECT_EQ(output_edgetpu_settings->hardware_cluster_ids()->Get(0),
kHardwareClusterIds[0]);
EXPECT_EQ(output_edgetpu_settings->public_model_id()->str(), kPublicModelId);
EXPECT_EQ(output_edgetpu_settings->use_layer_ir_tgc_backend(),
tflite::EdgeTpuSettings_::
UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_YES);
}
TEST(ConversionTest, StableDelegateLoaderSettings) {
const std::string kDelegatePath = "TEST_DELEGATE_PATH";
const std::string kDelegateName = "TEST_DELEGATE_NAME";
proto::TFLiteSettings input_settings;
auto* stable_delegate_loader_settings =
input_settings.mutable_stable_delegate_loader_settings();
stable_delegate_loader_settings->set_delegate_path(kDelegatePath);
stable_delegate_loader_settings->set_delegate_name(kDelegateName);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_stable_delegate_loader_settings =
output_settings->stable_delegate_loader_settings();
ASSERT_NE(output_stable_delegate_loader_settings, nullptr);
EXPECT_EQ(output_stable_delegate_loader_settings->delegate_path()->str(),
kDelegatePath);
EXPECT_EQ(output_stable_delegate_loader_settings->delegate_name()->str(),
kDelegateName);
}
TEST(ConversionTest, CompilationCachingSettings) {
const std::string kCacheDir = "TEST_CACHE_DIR";
const std::string kModelToken = "TEST_MODEL_TOKEN";
proto::TFLiteSettings input_settings;
auto* compilation_caching_settings =
input_settings.mutable_compilation_caching_settings();
compilation_caching_settings->set_cache_dir(kCacheDir);
compilation_caching_settings->set_model_token(kModelToken);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_compilation_caching_settings =
output_settings->compilation_caching_settings();
ASSERT_NE(output_compilation_caching_settings, nullptr);
EXPECT_EQ(output_compilation_caching_settings->cache_dir()->str(), kCacheDir);
EXPECT_EQ(output_compilation_caching_settings->model_token()->str(),
kModelToken);
}
TEST(ConversionTest, ArmNNSettings) {
const std::string kBackends = "TEST_BACKENDS";
const bool kFastmath = true;
const std::string kAdditionalParameters = "TEST_ADDITIONAL_PARAMETERS";
proto::TFLiteSettings input_settings;
auto* armnn_settings = input_settings.mutable_armnn_settings();
armnn_settings->set_backends(kBackends);
armnn_settings->set_fastmath(kFastmath);
armnn_settings->set_additional_parameters(kAdditionalParameters);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_armnn_settings = output_settings->armnn_settings();
ASSERT_NE(output_armnn_settings, nullptr);
EXPECT_EQ(output_armnn_settings->backends()->str(), kBackends);
EXPECT_EQ(output_armnn_settings->fastmath(), kFastmath);
EXPECT_EQ(output_armnn_settings->additional_parameters()->str(),
kAdditionalParameters);
}
TEST(ConversionTest, MtkNeuronSettings) {
const proto::MtkNeuronSettings_ExecutionPreference kExecutionPreference =
proto::MtkNeuronSettings::PREFERENCE_FAST_SINGLE_ANSWER;
const proto::MtkNeuronSettings_ExecutionPriority kExecutionPriority =
proto::MtkNeuronSettings::PRIORITY_MEDIUM;
const proto::MtkNeuronSettings_OptimizationHint kOptimizationHint =
proto::MtkNeuronSettings::OPTIMIZATION_LOW_LATENCY;
const proto::MtkNeuronSettings_OperationCheckMode kOperationCheckMode =
proto::MtkNeuronSettings::PER_NODE_OPERATION_CHECK;
const bool kAllowFp16 = true;
const bool kUseAhwb = false;
const bool kUseCacheableBuffer = true;
const std::string kCompileOptions = "TEST_COMPILE_OPTIONS";
const std::string kAcceleratorName = "TEST_ACCELERATOR_NAME";
const std::string kNeuronConfigPath = "TEST_NEURON_CONFIG_PATH";
proto::TFLiteSettings input_settings;
auto* mtk_neuron_settings = input_settings.mutable_mtk_neuron_settings();
mtk_neuron_settings->set_execution_preference(kExecutionPreference);
mtk_neuron_settings->set_execution_priority(kExecutionPriority);
mtk_neuron_settings->add_optimization_hints(kOptimizationHint);
mtk_neuron_settings->set_operation_check_mode(kOperationCheckMode);
mtk_neuron_settings->set_allow_fp16_precision_for_fp32(kAllowFp16);
mtk_neuron_settings->set_use_ahwb(kUseAhwb);
mtk_neuron_settings->set_use_cacheable_buffer(kUseCacheableBuffer);
mtk_neuron_settings->add_compile_options(kCompileOptions);
mtk_neuron_settings->add_accelerator_names(kAcceleratorName);
mtk_neuron_settings->set_neuron_config_path(kNeuronConfigPath);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_mtk_neuron_settings =
output_settings->mtk_neuron_settings();
ASSERT_NE(output_mtk_neuron_settings, nullptr);
EXPECT_EQ(
output_mtk_neuron_settings->execution_preference(),
MtkNeuronSettings_::ExecutionPreference_PREFERENCE_FAST_SINGLE_ANSWER);
EXPECT_EQ(output_mtk_neuron_settings->execution_priority(),
MtkNeuronSettings_::ExecutionPriority_PRIORITY_MEDIUM);
EXPECT_EQ(output_mtk_neuron_settings->optimization_hints()->size(), 1);
EXPECT_EQ(output_mtk_neuron_settings->optimization_hints()->Get(0),
kOptimizationHint);
EXPECT_EQ(output_mtk_neuron_settings->operation_check_mode(),
MtkNeuronSettings_::OperationCheckMode_PER_NODE_OPERATION_CHECK);
EXPECT_EQ(output_mtk_neuron_settings->allow_fp16_precision_for_fp32(),
kAllowFp16);
EXPECT_EQ(output_mtk_neuron_settings->use_ahwb(), kUseAhwb);
EXPECT_EQ(output_mtk_neuron_settings->use_cacheable_buffer(),
kUseCacheableBuffer);
EXPECT_EQ(output_mtk_neuron_settings->compile_options()->size(), 1);
EXPECT_EQ(output_mtk_neuron_settings->compile_options()->Get(0)->str(),
kCompileOptions);
EXPECT_EQ(output_mtk_neuron_settings->accelerator_names()->size(), 1);
EXPECT_EQ(output_mtk_neuron_settings->accelerator_names()->Get(0)->str(),
kAcceleratorName);
EXPECT_EQ(output_mtk_neuron_settings->neuron_config_path()->str(),
kNeuronConfigPath);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/proto_to_flatbuffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7fb12dec-028f-4ff1-bef1-48445bd7e24c | cpp | tensorflow/tensorflow | concatenation | tensorflow/lite/kernels/concatenation.cc | tensorflow/lite/delegates/xnnpack/concatenation_test.cc | #include "tensorflow/lite/kernels/internal/reference/concatenation.h"
#include <stdint.h>
#include <cstddef>
#include <cstring>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace concatenation {
enum KernelType {
kReference,
kGenericOptimized,
};
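// Shared evaluation body. The TF_LITE_CONCATENATION* macros gather every
// input tensor, fill in a ConcatenationParams, and dispatch to the reference
// or optimized kernel according to the KernelType template argument.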
template <KernelType kernel_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, int axis,
TfLiteTensor* output) {
#define TF_LITE_CONCATENATION(scalar) \
{ \
VectorOfTensors<scalar> all_inputs(*context, *node->inputs); \
tflite::ConcatenationParams op_params; \
op_params.axis = axis; \
op_params.inputs_count = node->inputs->size; \
if (kernel_type == kReference) { \
reference_ops::Concatenation(op_params, all_inputs.shapes(), \
all_inputs.data(), GetTensorShape(output), \
GetTensorData<scalar>(output)); \
} else { \
optimized_ops::Concatenation(op_params, all_inputs.shapes(), \
all_inputs.data(), GetTensorShape(output), \
GetTensorData<scalar>(output)); \
} \
}
#define TF_LITE_CONCATENATION_QUANTIZED() \
{ \
VectorOfQuantizedTensors all_inputs(*context, *node->inputs); \
tflite::ConcatenationParams op_params; \
op_params.axis = axis; \
op_params.input_zeropoint = all_inputs.zero_point(); \
op_params.input_scale = all_inputs.scale(); \
op_params.inputs_count = node->inputs->size; \
op_params.output_zeropoint = output->params.zero_point; \
op_params.output_scale = output->params.scale; \
if (kernel_type == kReference) { \
reference_ops::ConcatenationWithScaling( \
op_params, all_inputs.shapes(), all_inputs.data(), \
GetTensorShape(output), GetTensorData<uint8>(output)); \
} else { \
optimized_ops::ConcatenationWithScaling( \
op_params, all_inputs.shapes(), all_inputs.data(), \
GetTensorShape(output), GetTensorData<uint8>(output)); \
} \
}
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_CONCATENATION(float);
break;
case kTfLiteInt32:
TF_LITE_CONCATENATION(int32_t);
break;
case kTfLiteUInt32:
TF_LITE_CONCATENATION(uint32_t);
break;
case kTfLiteUInt8:
TF_LITE_CONCATENATION_QUANTIZED();
break;
case kTfLiteInt8:
TF_LITE_CONCATENATION(int8_t);
break;
case kTfLiteInt64:
TF_LITE_CONCATENATION(int64_t);
break;
case kTfLiteInt16:
TF_LITE_CONCATENATION(int16_t);
break;
case kTfLiteBool:
TF_LITE_CONCATENATION(bool);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported currently.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
#undef TF_LITE_CONCATENATION_QUANTIZED
#undef TF_LITE_CONCATENATION
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data);
int axis = params->axis;
int num_inputs = node->inputs->size;
const TfLiteTensor* t0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &t0));
TfLiteType input_type = t0->type;
if (axis < 0) axis += t0->dims->size;
TF_LITE_ENSURE(context, axis >= 0);
TF_LITE_ENSURE(context,
axis < t0->dims->size || (t0->dims->size == 0 && axis == 0));
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone);
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt8 || input_type == kTfLiteInt16 ||
input_type == kTfLiteInt32 || input_type == kTfLiteInt64 ||
input_type == kTfLiteBool || input_type == kTfLiteUInt32);
bool all_inputs_at_prepare = true;
for (int i = 0; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
if (!IsConstantOrPersistentTensor(t)) {
all_inputs_at_prepare = false;
break;
}
}
int sum_axis = t0->dims->size > 0 ? t0->dims->data[axis] : 1;
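// Special case: concatenating constant 0-D scalars along axis 0. The 1-D
// output is materialized here at Prepare time by copying one element per
// input into a persistent output tensor.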
if (all_inputs_at_prepare && t0->dims->size == 0 && axis == 0) {
for (int i = 1; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size);
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
output_size->data[0] = num_inputs;
SetTensorToPersistentRo(output);
context->ResizeTensor(context, output, output_size);
size_t input_type_size;
TF_LITE_ENSURE_STATUS(GetSizeOfType(context, t0->type, &input_type_size));
void* o_data = output->data.data;
for (int i = 0; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
const void* i_data = t->data.data;
memcpy(o_data, i_data, input_type_size);
o_data = static_cast<void*>(static_cast<char*>(o_data) + input_type_size);
}
return kTfLiteOk;
} else {
for (int i = 1; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size);
TF_LITE_ENSURE_EQ(context, t->type, input_type);
for (int d = 0; d < t0->dims->size; ++d) {
if (d == axis) {
TF_LITE_ENSURE(context, t->dims->data[axis] >= 0);
TF_LITE_ENSURE(context,
t->dims->data[axis] <=
std::numeric_limits<int>::max() - sum_axis);
sum_axis += t->dims->data[axis];
} else {
TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]);
}
}
}
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(t0->dims->size);
for (int d = 0; d < t0->dims->size; ++d) {
output_size->data[d] = (d == axis) ? sum_axis : t0->dims->data[d];
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type);
if (input_type == kTfLiteInt8) {
VectorOfTensors<int8_t> all_inputs(*context, *node->inputs);
for (int i = 0; i < node->inputs->size; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
TF_LITE_ENSURE_EQ(context, t->params.scale, output->params.scale);
TF_LITE_ENSURE_EQ(context, t->params.zero_point,
output->params.zero_point);
}
}
if (input_type == kTfLiteInt16) {
for (int i = 0; i < node->inputs->size; ++i) {
const TfLiteTensor* t = GetInput(context, node, i);
TF_LITE_ENSURE_EQ(context, t->params.zero_point, 0);
}
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
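// When every input is constant or persistent, fold the concatenation at
// Prepare time: mark the output persistent, resize it, and evaluate once with
// the reference kernel so that Eval can return early.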
if (all_inputs_at_prepare) {
SetTensorToPersistentRo(output);
context->ResizeTensor(context, output, output_size);
return EvalImpl<kReference>(context, node, axis, output);
}
return context->ResizeTensor(context, output, output_size);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data);
int axis = params->axis;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
if (IsConstantOrPersistentTensor(output)) {
return kTfLiteOk;
}
if (axis < 0) axis += output->dims->size;
return EvalImpl<kernel_type>(context, node, axis, output);
}
#undef TF_LITE_MACRO_DISPATCH
}
TfLiteRegistration* Register_CONCATENATION_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, concatenation::Prepare,
concatenation::Eval<concatenation::kReference>};
return &r;
}
TfLiteRegistration* Register_CONCATENATION_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, concatenation::Prepare,
concatenation::Eval<concatenation::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONCATENATION() {
return Register_CONCATENATION_GENERIC_OPT();
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/concatenation_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
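// Each test instantiates the XNNPACK delegate, draws random input shapes that
// agree on every dimension except the concatenation axis, and exercises all
// valid axes (negative indices included) through ConcatenationTester.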
TEST(Concatenation, 1D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 1D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
const std::vector<int32_t> shape3({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 1D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
const std::vector<int32_t> shape3({shape_rng()});
const std::vector<int32_t> shape4({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 1D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
const std::vector<int32_t> shape3({shape_rng()});
const std::vector<int32_t> shape4({shape_rng()});
const std::vector<int32_t> shape5({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape5 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape5 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape5 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/concatenation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/concatenation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0a4c7c6e-ff3f-427a-af60-064374bf1590 | cpp | tensorflow/tensorflow | roll | tensorflow/lite/kernels/roll.cc | tensorflow/lite/kernels/roll_test.cc | #include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <cstring>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace roll {
namespace {
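// Reads a 0-D or 1-D int32/int64 tensor into an int32 vector, narrowing
// int64 values element by element.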
std::vector<int32_t> ExtractIntegerVector(const TfLiteTensor* t) {
TFLITE_DCHECK(t->type == kTfLiteInt32 || t->type == kTfLiteInt64);
const RuntimeShape& shape = GetTensorShape(t);
std::vector<int32_t> result(shape.FlatSize());
if (t->type == kTfLiteInt32) {
memcpy(result.data(), t->data.raw_const, t->bytes);
} else {
const int64_t* data = GetTensorData<int64_t>(t);
for (int i = 0; i < result.size(); ++i) {
result[i] = static_cast<int32_t>(data[i]);
}
}
return result;
}
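// Rolls the input along every axis with a non-zero shift. A roll along one
// axis is two contiguous block copies: first the wrapped-around tail, then
// the head. After the first rolled axis the partial result must be re-read,
// so `cache` receives a copy of the output and becomes the source for the
// remaining axes.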
template <typename T>
inline void Pool(const std::vector<int32_t>& shift_map,
const RuntimeShape& shape, const TfLiteTensor* input,
TfLiteTensor* cache, TfLiteTensor* output) {
int stride = 1, outer_size, next_stride;
bool in_place_rolling = false;
for (int i = shift_map.size() - 1; i >= 0; --i, stride = next_stride) {
next_stride = stride * shape.Dims(i);
if (shift_map[i] == 0) continue;
TFLITE_DCHECK_EQ(shape.FlatSize() % next_stride, 0);
outer_size = shape.FlatSize() / next_stride;
const TfLiteTensor* source = input;
if (in_place_rolling) {
SequentialTensorWriter<T> writer(output, cache);
writer.WriteN(0, shape.FlatSize());
source = cache;
}
SequentialTensorWriter<T> writer(source, output);
for (int j = 0; j < outer_size; ++j) {
const int begin_1 =
j * next_stride + (shape.Dims(i) - shift_map[i]) * stride;
const int size_1 = shift_map[i] * stride;
writer.WriteN(begin_1, size_1);
const int begin_2 = j * next_stride;
const int size_2 = (shape.Dims(i) - shift_map[i]) * stride;
writer.WriteN(begin_2, size_2);
}
in_place_rolling = true;
}
if (!in_place_rolling) {
SequentialTensorWriter<T> writer(input, output);
writer.WriteN(0, shape.FlatSize());
return;
}
}
}
constexpr int kInputTensor = 0;
constexpr int kShiftTensor = 1;
constexpr int kAxisTensor = 2;
constexpr int kOutputTensor = 0;
constexpr int kTensorNotAllocated = -1;
struct OpData {
int cache_tensor_id = kTensorNotAllocated;
int32_t cache_index = kTensorNotAllocated;
bool need_cache = false;
};
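// A scratch tensor mirroring the input is needed only when more than one axis
// is rolled (NumElements(shift) > 1), because later axes must re-read the
// partially rolled output.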
TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context,
TfLiteNode* node,
OpData* opdata,
const TfLiteTensor* input,
const TfLiteTensor* shift) {
int temporaries_count = 0;
opdata->need_cache = (NumElements(shift) > 1);
if (opdata->need_cache) {
if (opdata->cache_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &opdata->cache_tensor_id));
}
opdata->cache_index = temporaries_count++;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(temporaries_count);
if (opdata->need_cache) {
node->temporaries->data[opdata->cache_index] = opdata->cache_tensor_id;
TfLiteTensor* cache;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, opdata->cache_index, &cache));
cache->type = input->type;
cache->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* cache_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, cache, cache_shape));
}
return kTfLiteOk;
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* opdata = new OpData;
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* shift;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShiftTensor, &shift));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxisTensor, &axis));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(
context, (shift->type == kTfLiteInt32) || (shift->type == kTfLiteInt64));
TF_LITE_ENSURE(context,
(axis->type == kTfLiteInt32) || (axis->type == kTfLiteInt64));
TF_LITE_ENSURE(context,
(NumDimensions(shift) == 0) || (NumDimensions(shift) == 1));
TF_LITE_ENSURE(context,
(NumDimensions(axis) == 0) || (NumDimensions(axis) == 1));
TF_LITE_ENSURE_EQ(context, NumElements(shift), NumElements(axis));
TF_LITE_ENSURE_OK(context, AllocateTemporaryTensorsIfRequired(
context, node, opdata, input, shift));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* shift;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShiftTensor, &shift));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxisTensor, &axis));
TfLiteTensor* cache = GetTemporary(context, node, opdata->cache_index);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
std::vector<int32_t> shift_data = ExtractIntegerVector(shift);
std::vector<int32_t> axis_data = ExtractIntegerVector(axis);
const int input_rank = NumDimensions(input);
std::vector<int32_t> shift_map(input_rank, 0);
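// Accumulate the requested shifts per axis (axes may repeat and may be
// negative), then normalize each total into [0, dim). Illustrative example
// (hypothetical values): for an input of shape {4, 5} with shift = {2, 3}
// and axis = {1, -1}, shift_map becomes {0, 5}, which normalizes to {0, 0},
// i.e. a full wrap and therefore a no-op on that axis.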
for (int i = 0; i < axis_data.size(); ++i) {
int32_t axis_i = axis_data[i];
if (axis_i < 0) axis_i += input_rank;
shift_map[axis_i] += shift_data[i];
}
for (int i = 0; i < input_rank; ++i) {
const int32_t input_dims_i = SizeOfDimension(input, i);
int32_t shift_i = shift_map[i] % input_dims_i;
if (shift_i < 0) shift_i += input_dims_i;
shift_map[i] = shift_i;
}
#define TF_LITE_ROLL(type) \
Pool<type>(shift_map, GetTensorShape(input), input, cache, output);
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ROLL(float);
break;
case kTfLiteInt32:
TF_LITE_ROLL(int32_t);
break;
case kTfLiteInt64:
TF_LITE_ROLL(int64_t);
break;
case kTfLiteInt8:
TF_LITE_ROLL(int8_t);
break;
case kTfLiteInt16:
TF_LITE_ROLL(int16_t);
break;
case kTfLiteUInt8:
TF_LITE_ROLL(uint8_t);
break;
case kTfLiteBool:
TF_LITE_ROLL(bool);
break;
case kTfLiteString:
TF_LITE_ROLL(string);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Type %d is currently not supported by Roll.", input->type);
return kTfLiteError;
}
#undef TF_LITE_ROLL
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ROLL() {
static TfLiteRegistration r = {roll::Init, roll::Free, roll::Prepare,
roll::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
using ::testing::ElementsAreArray;
class BaseRollOpModel : public SingleOpModel {
public:
BaseRollOpModel(TensorData input, const std::vector<int32_t>& shift,
const std::vector<int64_t>& axis, TensorData output) {
if (input.type == TensorType_FLOAT32 || input.type == TensorType_INT64) {
input.min = input.max = 0.f;
output.min = output.max = 0.f;
}
input_ = AddInput(input);
shift_ = AddInput(
TensorData(TensorType_INT32, {static_cast<int>(shift.size())}));
axis_ =
AddInput(TensorData(TensorType_INT64, {static_cast<int>(axis.size())}));
output_ = AddOutput(output);
SetCustomOp("Roll", {}, ops::custom::Register_ROLL);
BuildInterpreter({GetShape(input_), GetShape(shift_), GetShape(axis_)});
PopulateTensor(shift_, shift);
PopulateTensor(axis_, axis);
}
template <typename T>
inline typename std::enable_if<is_small_integer<T>::value, void>::type
SetInput(const std::initializer_list<float>& data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
inline typename std::enable_if<!is_small_integer<T>::value, void>::type
SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
template <typename T>
inline typename std::enable_if<is_small_integer<T>::value,
std::vector<float>>::type
GetOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
template <typename T>
inline
typename std::enable_if<!is_small_integer<T>::value, std::vector<T>>::type
GetOutput() {
return ExtractVector<T>(output_);
}
void SetStringInput(std::initializer_list<std::string> data) {
PopulateStringTensor(input_, data);
}
protected:
int input_;
int shift_;
int axis_;
int output_;
};
#if GTEST_HAS_DEATH_TEST
TEST(RollOpTest, MismatchSize) {
EXPECT_DEATH(BaseRollOpModel m({TensorType_FLOAT32, {1, 2, 4, 2}},
{2, 3}, {2},
{TensorType_FLOAT32, {}}),
"NumElements.shift. != NumElements.axis.");
}
#endif
template <typename T>
class RollOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, int8_t, int16_t, int64_t>;
TYPED_TEST_SUITE(RollOpTest, DataTypes);
TYPED_TEST(RollOpTest, Roll1D) {
BaseRollOpModel m(
{GetTensorType<TypeParam>(), {10}, 0, 31.875},
{3}, {0},
{GetTensorType<TypeParam>(), {}, 0, 31.875});
m.SetInput<TypeParam>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<TypeParam>(),
ElementsAreArray({7, 8, 9, 0, 1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(RollOpTest, Roll3D) {
BaseRollOpModel m(
{GetTensorType<TypeParam>(), {2, 4, 4}, 0, 31.875},
{2, 6}, {1, 2},
{GetTensorType<TypeParam>(), {}, 0, 31.875});
m.SetInput<TypeParam>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<TypeParam>(),
ElementsAreArray({10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0,
1, 6, 7, 4, 5, 26, 27, 24, 25, 30, 31,
28, 29, 18, 19, 16, 17, 22, 23, 20, 21}));
}
TYPED_TEST(RollOpTest, Roll3DNegativeShift) {
BaseRollOpModel m(
{GetTensorType<TypeParam>(), {2, 4, 4}, 0, 31.875},
{2, -5}, {1, -1},
{GetTensorType<TypeParam>(), {}, 0, 31.875});
m.SetInput<TypeParam>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<TypeParam>(),
ElementsAreArray({9, 10, 11, 8, 13, 14, 15, 12, 1, 2, 3,
0, 5, 6, 7, 4, 25, 26, 27, 24, 29, 30,
31, 28, 17, 18, 19, 16, 21, 22, 23, 20}));
}
TYPED_TEST(RollOpTest, DuplicatedAxis) {
BaseRollOpModel m(
{GetTensorType<TypeParam>(), {2, 4, 4}, 0, 31.875},
{2, 3}, {1, 1},
{GetTensorType<TypeParam>(), {}, 0, 31.875});
m.SetInput<TypeParam>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<TypeParam>(),
ElementsAreArray({12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 28, 29, 30, 31, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27}));
}
TEST(RollOpTest, Roll3DTring) {
BaseRollOpModel m({TensorType_STRING, {2, 4, 4}},
{2, 5}, {1, 2},
{TensorType_STRING, {}});
m.SetStringInput({"0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "10", "11", "12", "13", "14", "15",
"16", "17", "18", "19", "20", "21", "22", "23",
"24", "25", "26", "27", "28", "29", "30", "31"});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<std::string>(),
ElementsAreArray({"11", "8", "9", "10", "15", "12", "13", "14",
"3", "0", "1", "2", "7", "4", "5", "6",
"27", "24", "25", "26", "31", "28", "29", "30",
"19", "16", "17", "18", "23", "20", "21", "22"}));
}
TEST(RollOpTest, BoolRoll3D) {
BaseRollOpModel m({TensorType_BOOL, {2, 4, 4}},
{2, 3}, {1, 2},
{TensorType_BOOL, {}});
m.SetInput<bool>({true, false, false, true, true, false, false, true,
false, false, false, true, false, false, true, true,
false, false, true, false, false, false, true, false,
false, true, true, false, false, true, false, false});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<bool>(),
ElementsAreArray({false, false, true, false, false, true, true,
false, false, false, true, true, false, false,
true, true, true, true, false, false, true,
false, false, false, false, true, false, false,
false, true, false, false}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/roll.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/roll_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0900bb66-bb7c-4499-b5f1-23262dab9ad5 | cpp | tensorflow/tensorflow | lstm | tensorflow/lite/delegates/gpu/gl/kernels/lstm.cc | tensorflow/lite/delegates/gpu/cl/kernels/lstm_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/lstm.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class LstmNodeShader : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
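// Fused LSTM cell update. input_data_0 packs the four pre-activation gates
// along z in the order [input, new-input, forget, output]; input_data_1 holds
// the previous cell state. value_0 receives the new cell state and value_1
// the hidden-state activation.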
std::string code = R"(
vec4 prev_state = $input_data_1[gid.x, gid.y, gid.z]$;
int c0 = 0 * $workload_z$;
int c1 = 1 * $workload_z$;
int c2 = 2 * $workload_z$;
int c3 = 3 * $workload_z$;
vec4 gate_0 = $input_data_0[gid.x, gid.y, gid.z + c0]$;
vec4 gate_1 = $input_data_0[gid.x, gid.y, gid.z + c1]$;
vec4 gate_2 = $input_data_0[gid.x, gid.y, gid.z + c2]$;
vec4 gate_3 = $input_data_0[gid.x, gid.y, gid.z + c3]$;
vec4 input_gate = 1.0f / (1.0f + exp(-1.0 * gate_0));
vec4 new_input = tanh(gate_1);
vec4 forget_gate = 1.0f / (1.0f + exp(-1.0 * gate_2));
vec4 output_gate = 1.0f / (1.0f + exp(-1.0 * gate_3));
vec4 new_state = input_gate * new_input + forget_gate * prev_state;
vec4 activation = output_gate * tanh(new_state);
value_0 = new_state;
value_1 = activation;
)";
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewLstmNodeShader() {
return std::make_unique<LstmNodeShader>();
}
}
}
} | #include <cmath>
#include <cstdlib>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/lstm_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, LSTM) {
auto status = LstmTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/lstm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/lstm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5710bd06-fb26-4a74-960c-a31248cb0fb0 | cpp | tensorflow/tensorflow | squeeze | tensorflow/lite/kernels/squeeze.cc | tensorflow/lite/kernels/squeeze_test.cc | #include <stdint.h>
#include <string.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace squeeze {
struct SqueezeContext {
SqueezeContext(TfLiteContext* context, TfLiteNode* node)
: params(reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data)),
input(GetInput(context, node, 0)),
output(GetOutput(context, node, 0)) {}
TfLiteSqueezeParams* params;
const TfLiteTensor* const input;
TfLiteTensor* output;
};
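// Computes the output shape by dropping size-1 dimensions: all of them when
// no squeeze axes are given, otherwise only the requested axes (each of which
// must have size 1). The element order never changes, so Eval is just a copy.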
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
SqueezeContext op_context(context, node);
int input_num_dims = NumDimensions(op_context.input);
int num_squeeze_dims = op_context.params->num_squeeze_dims;
const TfLiteIntArray* input_dims = op_context.input->dims;
const int32_t* squeeze_dims = op_context.params->squeeze_dims;
TF_LITE_ENSURE(context, input_num_dims <= 8);
bool should_squeeze[8] = {false};
int num_squeezed_dims = 0;
if (num_squeeze_dims == 0) {
for (int idx = 0; idx < input_num_dims; ++idx) {
if (input_dims->data[idx] == 1) {
should_squeeze[idx] = true;
++num_squeezed_dims;
}
}
} else {
for (int idx = 0; idx < num_squeeze_dims; ++idx) {
int32_t current = squeeze_dims[idx] < 0
? squeeze_dims[idx] + input_num_dims
: squeeze_dims[idx];
TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims &&
input_dims->data[current] == 1);
if (!should_squeeze[current]) ++num_squeezed_dims;
should_squeeze[current] = true;
}
}
TfLiteIntArray* output_dims =
TfLiteIntArrayCreate(input_num_dims - num_squeezed_dims);
for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx) {
if (!should_squeeze[in_idx]) {
output_dims->data[out_idx++] = input_dims->data[in_idx];
}
}
return context->ResizeTensor(context, op_context.output, output_dims);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
SqueezeContext op_context(context, node);
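// Strings are stored with an internal offset table rather than as a flat
// byte block, so they are re-serialized element by element instead of being
// memcpy'd like the other types.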
if (op_context.input->type == kTfLiteString) {
const int input_flat_size = GetTensorShape(op_context.input).FlatSize();
const int output_flat_size = GetTensorShape(op_context.output).FlatSize();
TF_LITE_ENSURE_EQ(context, input_flat_size, output_flat_size);
SequentialTensorWriter<string> writer(op_context.input, op_context.output);
for (int i = 0; i < input_flat_size; i++) {
writer.Write(i);
}
return kTfLiteOk;
}
TF_LITE_ENSURE_EQ(context, op_context.input->bytes, op_context.output->bytes);
if (op_context.output->data.data != op_context.input->data.data) {
memcpy(op_context.output->data.data, op_context.input->data.data,
op_context.input->bytes);
}
return kTfLiteOk;
}
}
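// Squeeze is a pure reshape: the registration flags below advertise that the
// output may share the input's buffer and that the data passes through
// unmodified, which lets the runtime elide the copy when tensors can alias.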
TfLiteRegistration* Register_SQUEEZE() {
static TfLiteRegistration r = {
nullptr,
nullptr,
squeeze::Prepare,
squeeze::Eval,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
class BaseSqueezeOpModel : public SingleOpModel {
public:
BaseSqueezeOpModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_SQUEEZE, BuiltinOptions_SqueezeOptions,
CreateSqueezeOptions(builder_, builder_.CreateVector<int>(axis))
.Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
protected:
int input_;
int output_;
};
template <typename T>
class SqueezeOpModel : public BaseSqueezeOpModel {
public:
using BaseSqueezeOpModel::BaseSqueezeOpModel;
void SetInput(std::initializer_list<T> data) { PopulateTensor(input_, data); }
void SetStringInput(std::initializer_list<string> data) {
PopulateStringTensor(input_, data);
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<string> GetStringOutput() {
return ExtractVector<string>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
};
template <typename T>
class SqueezeOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(SqueezeOpTest, DataTypes);
TYPED_TEST(SqueezeOpTest, SqueezeAllInplace) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {});
m.SetInput(data);
const int kInplaceInputTensorIdx = 0;
const int kInplaceOutputTensorIdx = 0;
const TfLiteTensor* input_tensor = m.GetInputTensor(kInplaceInputTensorIdx);
TfLiteTensor* output_tensor = m.GetOutputTensor(kInplaceOutputTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TYPED_TEST(SqueezeOpTest, SqueezeAll) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
}
TYPED_TEST(SqueezeOpTest, SqueezeSelectedAxis) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {2});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
}
TYPED_TEST(SqueezeOpTest, SqueezeNegativeAxis) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {-1, 0});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
}
TYPED_TEST(SqueezeOpTest, SqueezeAllDims) {
std::initializer_list<TypeParam> data = {3};
SqueezeOpModel<TypeParam> m(
{GetTensorType<TypeParam>(), {1, 1, 1, 1, 1, 1, 1}},
{GetTensorType<TypeParam>(), {1}}, {});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3}));
}
TEST(SqueezeOpTest, SqueezeAllString) {
std::initializer_list<std::string> data = {"a", "b"};
SqueezeOpModel<std::string> m({GetTensorType<std::string>(), {1, 2, 1}},
{GetTensorType<std::string>(), {2}}, {});
m.SetStringInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({"a", "b"}));
}
TEST(SqueezeOpTest, SqueezeNegativeAxisString) {
std::initializer_list<std::string> data = {"a", "b"};
SqueezeOpModel<std::string> m({GetTensorType<std::string>(), {1, 2, 1}},
{GetTensorType<std::string>(), {24}}, {-1});
m.SetStringInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({"a", "b"}));
}
TEST(SqueezeOpTest, SqueezeAllDimsString) {
std::initializer_list<std::string> data = {"a"};
SqueezeOpModel<std::string> m(
{GetTensorType<std::string>(), {1, 1, 1, 1, 1, 1, 1}},
{GetTensorType<std::string>(), {1}}, {});
m.SetStringInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({"a"}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/squeeze.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/squeeze_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce07eab7-820d-4314-b0ea-6ab402ef04f1 | cpp | tensorflow/tensorflow | test_delegate_providers | tensorflow/lite/kernels/test_delegate_providers.cc | tensorflow/lite/kernels/test_delegate_providers_test.cc | #include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <string>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
constexpr char KernelTestDelegateProviders::kAccelerationTestConfigPath[];
constexpr char KernelTestDelegateProviders::kUseSimpleAllocator[];
constexpr char KernelTestDelegateProviders::kAllowFp16PrecisionForFp32[];
KernelTestDelegateProviders* KernelTestDelegateProviders::Get() {
static KernelTestDelegateProviders* const providers =
new KernelTestDelegateProviders();
return providers;
}
KernelTestDelegateProviders::KernelTestDelegateProviders()
: delegate_list_util_(¶ms_) {
delegate_list_util_.AddAllDelegateParams();
params_.AddParam(kAccelerationTestConfigPath,
tools::ToolParam::Create<std::string>(""));
params_.AddParam(kUseSimpleAllocator, tools::ToolParam::Create<bool>(false));
params_.AddParam(kAllowFp16PrecisionForFp32,
tools::ToolParam::Create<bool>(false));
}
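// Parses the recognized flags out of (argc, argv) in place: matched arguments
// are consumed, unrecognized ones are left for the caller, and --help or a
// parse failure logs the usage string and returns false.
//
// Hedged usage sketch (names from this file, otherwise unverified):
//   if (!KernelTestDelegateProviders::Get()->InitFromCmdlineArgs(&argc, argv))
//     return EXIT_FAILURE;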
bool KernelTestDelegateProviders::InitFromCmdlineArgs(int* argc,
const char** argv) {
std::vector<tflite::Flag> flags = {
Flag(
kAccelerationTestConfigPath,
[this](const std::string& val, int argv_position) {
this->params_.Set<std::string>(kAccelerationTestConfigPath, val,
argv_position);
},
"", "Acceleration test config file for SingleOpModel",
Flag::kOptional),
Flag(
kUseSimpleAllocator,
[this](const bool& val, int argv_position) {
this->params_.Set<bool>(kUseSimpleAllocator, val, argv_position);
},
false, "Use Simple Memory Allocator for SingleOpModel",
Flag::kOptional),
Flag(
kAllowFp16PrecisionForFp32,
[this](const bool& val, int argv_position) {
this->params_.Set<bool>(kAllowFp16PrecisionForFp32, val,
argv_position);
},
false, "Compare result in fp16 precision for fp32 operations",
Flag::kOptional)};
delegate_list_util_.AppendCmdlineFlags(flags);
bool parse_result = tflite::Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
} | #include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(KernelTestDelegateProvidersTest, DelegateProvidersParams) {
KernelTestDelegateProviders providers;
const auto& params = providers.ConstParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
EXPECT_TRUE(params.HasParam("use_nnapi"));
EXPECT_TRUE(params.HasParam("allow_fp16_precision_for_fp32"));
int argc = 4;
const char* argv[] = {"program_name", "--use_nnapi=true",
"--allow_fp16_precision_for_fp32=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_nnapi"));
EXPECT_TRUE(params.Get<bool>("allow_fp16_precision_for_fp32"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(KernelTestDelegateProvidersTest, CreateTfLiteDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
KernelTestDelegateProviders providers;
providers.MutableParams()->Set<bool>("use_xnnpack", true);
EXPECT_GE(providers.CreateAllDelegates().size(), 1);
tools::ToolParams local_params;
local_params.Merge(providers.ConstParams());
local_params.Set<bool>("use_xnnpack", false);
EXPECT_TRUE(providers.CreateAllDelegates(local_params).empty());
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/test_delegate_providers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/test_delegate_providers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94631c1a-afbc-4275-be7c-6ef151723962 | cpp | tensorflow/tensorflow | sign | tensorflow/lite/experimental/shlo/ops/sign.cc | tensorflow/lite/experimental/shlo/ops/sign_test.cc | #include "tensorflow/lite/experimental/shlo/ops/sign.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sign {
template <class T>
T operator()(T v) const {
constexpr T one = static_cast<T>(1);
constexpr T minus_one = static_cast<T>(-1);
constexpr T zero = static_cast<T>(0);
return v < zero ? minus_one : (v > zero ? one : v);
}
};
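// The half-precision specializations widen to float, run the generic
// comparison-based sign logic, and narrow the result back (presumably to
// avoid mixed-type arithmetic on F16/BF16).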
template <>
F16 Sign::operator()(F16 v) const {
return static_cast<F16>(operator()(static_cast<float>(v)));
}
template <>
BF16 Sign::operator()(BF16 v) const {
return static_cast<BF16>(operator()(static_cast<float>(v)));
}
SignOp Create(SignOp::Attributes) { return {}; }
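// Validates shapes and types: sign accepts signed integer, floating point,
// and per-tensor quantized tensors, and the input/output baseline element
// types must match.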
absl::Status Prepare(SignOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("sign"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sign"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(SignOp& op, const Tensor& input, Tensor& output) {
Sign sign;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sign, input,
output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), sign, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.sign: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/sign.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SignOp> {
static std::string Get() { return "Sign"; }
};
namespace {
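// Reference functor mirroring the kernel's Sign; used to compute the expected
// outputs in the tests below.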
struct Sign {
template <class T>
T operator()(T v) const {
constexpr T one = static_cast<T>(1);
constexpr T minus_one = static_cast<T>(-1);
constexpr T zero = static_cast<T>(0);
return v < zero ? minus_one : (v > zero ? one : v);
}
} sign_ref;
template <>
F16 Sign::operator()(F16 v) const {
return static_cast<F16>(operator()(static_cast<float>(v)));
}
template <>
BF16 Sign::operator()(BF16 v) const {
return static_cast<BF16>(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Sign, UnaryElementwiseOpShapePropagationTest,
SignOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Sign, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<SignOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<SignOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Sign, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct SignTest : ::testing::Test {};
TYPED_TEST_SUITE(SignTest, ArithmeticTestTypes, TestParamNames);
TYPED_TEST(SignTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), sign_ref);
auto op = Create(SignOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedSignTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSignTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSignTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = sign_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SignOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sign.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sign_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
30f4234e-b7ff-44c7-aef5-61c50f768538 | cpp | tensorflow/tensorflow | rng_bit_generator | tensorflow/lite/kernels/rng_bit_generator.cc | tensorflow/lite/kernels/rng_bit_generator_test.cc | #include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/rng_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_rng_bit_generator {
namespace {
constexpr int kInitialState = 0;
constexpr int kOutputKey = 0;
constexpr int kOutput = 1;
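// Runs the counter-based generator `fn` block by block until
// `output_num_elements` 32-bit words have been written. After each block the
// low counter word is incremented, carrying into the next word on wraparound;
// the key and the final counter are then stored as the output state.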
template <typename T, size_t K>
void FillOutputBuffer(uint32_t* output_buffer, uint32_t* output_state_buffer,
int64_t output_num_elements, T fn,
std::array<uint32_t, K>& ctr, uint32_t key_0,
uint32_t key_1) {
int64_t i = 0;
while (i < output_num_elements) {
auto val = fn(key_0, key_1, ctr);
int64_t copy_size = (output_num_elements - i >= val.size())
? val.size()
: output_num_elements - i;
memcpy(output_buffer + i, &val, copy_size * sizeof(uint32_t));
i += copy_size;
if (!++ctr[0]) {
++ctr[1];
}
}
output_state_buffer[0] = key_0;
output_state_buffer[1] = key_1;
output_state_buffer[2] = ctr[0];
output_state_buffer[3] = ctr[1];
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* initial_state;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInitialState, &initial_state));
TF_LITE_ENSURE_EQ(context, initial_state->type, kTfLiteUInt64);
TF_LITE_ENSURE_EQ(context, NumDimensions(initial_state), 1);
TfLiteTensor* output_key;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputKey, &output_key));
TF_LITE_ENSURE_EQ(context, output_key->type, kTfLiteUInt64);
TF_LITE_ENSURE(context, HaveSameShapes(output_key, initial_state));
TfLiteIntArray* output_key_size_array = TfLiteIntArrayCopy(output_key->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_key,
output_key_size_array));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutput, &output));
TF_LITE_ENSURE(context, output->type == kTfLiteInt32 ||
output->type == kTfLiteInt64 ||
output->type == kTfLiteUInt32 ||
output->type == kTfLiteUInt64);
TfLiteIntArray* output_shape_array = TfLiteIntArrayCopy(output->dims);
return context->ResizeTensor(context, output, output_shape_array);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteStablehloRngBitGeneratorParams*>(
node->builtin_data);
TfLiteRngAlgorithm algorithm = params->algorithm;
const TfLiteTensor* initial_state = GetInput(context, node, 0);
TfLiteTensor* output_key = GetOutput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 1);
TF_LITE_ENSURE(context, !IsDynamicTensor(output));
int64_t output_num_elements = NumElements(output);
switch (output->type) {
case kTfLiteUInt64:
case kTfLiteInt64:
output_num_elements *= sizeof(uint64_t) / sizeof(uint32_t);
break;
case kTfLiteUInt32:
case kTfLiteInt32:
break;
default:
TF_LITE_KERNEL_LOG(context, "Unsupported output data type: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
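  // The uint64 state tensor is reinterpreted as 32-bit words: words 0-1 form
  // the key and words 2-3 the low counter; a three-element Philox state
  // supplies words 4-5 as the high counter (a two-element state reuses words
  // 0-1).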
switch (algorithm) {
case TfLiteRngAlgorithm::kTfLiteRngAlgorithmThreefry: {
TF_LITE_ENSURE_EQ(context, SizeOfDimension(initial_state, 0), 2);
const uint32_t* state_vals = GetTensorData<uint32_t>(initial_state);
std::array<uint32_t, 2> ctr{state_vals[2], state_vals[3]};
FillOutputBuffer<decltype(tflite::rng::Threefry2x32), 2>(
static_cast<uint32_t*>(output->data.data),
static_cast<uint32_t*>(output_key->data.data), output_num_elements,
tflite::rng::Threefry2x32, ctr,
state_vals[0], state_vals[1]);
break;
}
case TfLiteRngAlgorithm::kTfLiteRngAlgorithmPhilox:
case TfLiteRngAlgorithm::kTfLiteRngAlgorithmDefault: {
int state_dim_0_size = SizeOfDimension(initial_state, 0);
TF_LITE_ENSURE(context, state_dim_0_size == 2 || state_dim_0_size == 3);
const uint32_t* state_vals = GetTensorData<uint32_t>(initial_state);
std::array<uint32_t, 4> ctr{state_vals[2], state_vals[3],
state_vals[state_dim_0_size == 3 ? 4 : 0],
state_vals[state_dim_0_size == 3 ? 5 : 1]};
memcpy(output_key->data.data, state_vals,
state_dim_0_size * sizeof(uint64_t));
FillOutputBuffer<decltype(tflite::rng::Philox4x32), 4>(
static_cast<uint32_t*>(output->data.data),
static_cast<uint32_t*>(output_key->data.data), output_num_elements,
tflite::rng::Philox4x32, ctr, state_vals[0],
state_vals[1]);
break;
}
default:
TF_LITE_KERNEL_LOG(context, "Unknown RNG algorithm: %d", algorithm);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_STABLEHLO_RNG_BIT_GENERATOR() {
static TfLiteRegistration r = {nullptr,
nullptr,
stablehlo_rng_bit_generator::Prepare,
stablehlo_rng_bit_generator::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
class RngBitGeneratorOpModel : public SingleOpModel {
public:
RngBitGeneratorOpModel(const tflite::RngAlgorithm algorithm,
const TensorData& initial_state,
const TensorData& output_state,
const TensorData& output) {
initial_state_ = AddInput(initial_state);
output_state_ = AddOutput(output_state);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR,
BuiltinOptions2_StablehloRngBitGeneratorOptions,
CreateStablehloRngBitGeneratorOptions(builder_, algorithm).Union());
BuildInterpreter({GetShape(initial_state_)});
}
template <typename T>
void SetInitialState(std::initializer_list<T> data) {
PopulateTensor<T>(initial_state_, data);
}
template <typename T>
std::vector<T> GetOutputState() {
return ExtractVector<T>(output_state_);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int initial_state_;
int shape_;
int output_state_;
int output_;
};
template <typename output_integer_type>
void ValidateRngOutputAndOutputState(
const tflite::RngAlgorithm algorithm,
std::initializer_list<uint64_t> initial_state_val,
std::vector<int> initial_state_shape,
std::initializer_list<int32_t> output_shape,
std::vector<uint64_t> expected_output_state,
std::vector<output_integer_type> expected_output_val) {
RngBitGeneratorOpModel m(
algorithm,
{TensorType_UINT64, initial_state_shape},
{TensorType_UINT64, initial_state_shape},
{GetTensorType<output_integer_type>(), output_shape});
m.SetInitialState<uint64_t>(initial_state_val);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<uint64_t> output_state = m.GetOutputState<uint64_t>();
std::vector<output_integer_type> output = m.GetOutput<output_integer_type>();
ASSERT_EQ(output_state, expected_output_state);
ASSERT_EQ(output, expected_output_val);
}
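// In the expected output states below, the key words pass through unchanged
// and the low counter word advances by one per generated block (Threefry2x32
// yields 2 words per call, Philox4x32 yields 4).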
TEST(RngBitGeneratorOpTest, PhiloxOutputInt32) {
ValidateRngOutputAndOutputState<int32_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3}, {2, 3},
{1, 4, 3},
{-263854262, 1366700262, 495645701, -1243243882, 89414891, 1917262711});
}
TEST(RngBitGeneratorOpTest, PhiloxOutputUInt32) {
ValidateRngOutputAndOutputState<uint32_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3}, {2, 3},
{1, 4, 3},
{4031113034, 1366700262, 495645701, 3051723414, 89414891, 1917262711});
}
TEST(RngBitGeneratorOpTest, PhiloxOutputInt64) {
ValidateRngOutputAndOutputState<int64_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3}, {2, 3},
{1, 5, 3},
{5869932932755744586, -5339691813646437371, 8234580641674714347,
2641225993340350124, 1962472297844690804, -3580856229565614135});
}
TEST(RngBitGeneratorOpTest, PhiloxOutputUInt64) {
ValidateRngOutputAndOutputState<uint64_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3}, {2, 3},
{1, 5, 3},
{5869932932755744586u, 13107052260063114245u, 8234580641674714347u,
2641225993340350124u, 1962472297844690804u, 14865887844143937481u});
}
TEST(RngBitGeneratorOpTest, ThreefryOutputInt32) {
ValidateRngOutputAndOutputState<int32_t>(
tflite::RngAlgorithm_THREEFRY, {1, 2},
{2}, {2, 3},
{1, 5},
{43444564, -2144348869, -315321645, -549236733, 1672743891, -54463903});
}
TEST(RngBitGeneratorOpTest, ThreefryOutputUInt32) {
ValidateRngOutputAndOutputState<uint32_t>(
tflite::RngAlgorithm_THREEFRY, {1, 2},
{2}, {2, 3},
{1, 5},
{43444564, 2150618427, 3979645651, 3745730563, 1672743891, 4240503393});
}
TEST(RngBitGeneratorOpTest, ThreefryOutputInt64) {
ValidateRngOutputAndOutputState<int64_t>(
tflite::RngAlgorithm_THREEFRY, {1, 2},
{2}, {2, 3},
{1, 8},
{-9209908263526143660, -2358953802017238317, -233920680524772397,
2658481902456610144, -2022031683723149139, -2324041912354448873});
}
TEST(RngBitGeneratorOpTest, ThreefryOutputUInt64) {
ValidateRngOutputAndOutputState<uint64_t>(
tflite::RngAlgorithm_THREEFRY, {1, 2},
{2}, {2, 3},
{1, 8},
{9236835810183407956u, 16087790271692313299u, 18212823393184779219u,
2658481902456610144u, 16424712389986402477u, 16122702161355102743u});
}
TEST(RngBitGeneratorOpTest, DefaultOutputInt32) {
ValidateRngOutputAndOutputState<int32_t>(
tflite::RngAlgorithm_DEFAULT, {1, 2, 3},
{3}, {2, 3},
{1, 4, 3},
{-263854262, 1366700262, 495645701, -1243243882, 89414891, 1917262711});
}
TEST(RngBitGeneratorOpTest, DefaultOutputUInt32) {
ValidateRngOutputAndOutputState<uint32_t>(
tflite::RngAlgorithm_DEFAULT, {1, 2, 3},
{3}, {2, 3},
{1, 4, 3},
{4031113034, 1366700262, 495645701, 3051723414, 89414891, 1917262711});
}
TEST(RngBitGeneratorOpTest, DefaultOutputInt64) {
ValidateRngOutputAndOutputState<int64_t>(
tflite::RngAlgorithm_DEFAULT, {1, 2, 3},
{3}, {2, 3},
{1, 5, 3},
{5869932932755744586, -5339691813646437371, 8234580641674714347,
2641225993340350124, 1962472297844690804, -3580856229565614135});
}
TEST(RngBitGeneratorOpTest, DefaultOutputUInt64) {
ValidateRngOutputAndOutputState<uint64_t>(
tflite::RngAlgorithm_DEFAULT, {1, 2, 3},
{3}, {2, 3},
{1, 5, 3},
{5869932932755744586u, 13107052260063114245u, 8234580641674714347u,
2641225993340350124u, 1962472297844690804u, 14865887844143937481u});
}
template <typename output_integer_type>
void OutputIsDeterministicWithSameInitState(
const tflite::RngAlgorithm algorithm,
std::initializer_list<uint64_t> initial_state_val,
std::vector<int> initial_state_shape) {
RngBitGeneratorOpModel m1(
algorithm,
{TensorType_UINT64, initial_state_shape},
{TensorType_UINT64, initial_state_shape},
{GetTensorType<output_integer_type>(), {3, 3}});
RngBitGeneratorOpModel m2(
algorithm,
{TensorType_UINT64, initial_state_shape},
{TensorType_UINT64, initial_state_shape},
{GetTensorType<output_integer_type>(), {3, 3}});
m1.SetInitialState<uint64_t>(initial_state_val);
m2.SetInitialState<uint64_t>(initial_state_val);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<uint64_t> output_state_1a = m1.GetOutputState<uint64_t>();
std::vector<output_integer_type> output_1a =
m1.GetOutput<output_integer_type>();
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<uint64_t> output_state_1b = m1.GetOutputState<uint64_t>();
std::vector<output_integer_type> output_1b =
m1.GetOutput<output_integer_type>();
ASSERT_EQ(output_state_1a, output_state_1b);
ASSERT_EQ(output_1a, output_1b);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<uint64_t> output_state_2 = m2.GetOutputState<uint64_t>();
std::vector<output_integer_type> output_2 =
m2.GetOutput<output_integer_type>();
ASSERT_EQ(output_state_1a, output_state_2);
ASSERT_EQ(output_1a, output_2);
}
TEST(RngBitGeneratorOpTest, PhiloxDeterministicOutputInt32) {
OutputIsDeterministicWithSameInitState<int32_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3});
OutputIsDeterministicWithSameInitState<int32_t>(tflite::RngAlgorithm_DEFAULT,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, ThreefryDeterministicOutputInt32) {
OutputIsDeterministicWithSameInitState<int32_t>(tflite::RngAlgorithm_THREEFRY,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, PhiloxDeterministicOutputUInt32) {
OutputIsDeterministicWithSameInitState<uint32_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3});
OutputIsDeterministicWithSameInitState<uint32_t>(tflite::RngAlgorithm_DEFAULT,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, ThreefryDeterministicOutputUInt32) {
OutputIsDeterministicWithSameInitState<uint32_t>(
tflite::RngAlgorithm_THREEFRY,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, PhiloxDeterministicOutputInt64) {
OutputIsDeterministicWithSameInitState<int64_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3});
OutputIsDeterministicWithSameInitState<int64_t>(tflite::RngAlgorithm_DEFAULT,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, ThreefryDeterministicOutputInt64) {
OutputIsDeterministicWithSameInitState<int64_t>(tflite::RngAlgorithm_THREEFRY,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, PhiloxDeterministicOutputUInt64) {
OutputIsDeterministicWithSameInitState<uint64_t>(
tflite::RngAlgorithm_PHILOX, {1, 2, 3},
{3});
OutputIsDeterministicWithSameInitState<uint64_t>(tflite::RngAlgorithm_DEFAULT,
{1, 2},
{2});
}
TEST(RngBitGeneratorOpTest, ThreefryDeterministicOutputUInt64) {
OutputIsDeterministicWithSameInitState<uint64_t>(
tflite::RngAlgorithm_THREEFRY,
{1, 2},
{2});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rng_bit_generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rng_bit_generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
138b136e-4e08-44d1-9be5-5b3d37a1beda | cpp | tensorflow/tensorflow | batch_to_space_nd | tensorflow/lite/kernels/batch_to_space_nd.cc | tensorflow/lite/kernels/internal/batch_to_space_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace batch_to_space_nd {
enum KernelType {
kReference,
kGenericOptimized,
};
struct BatchToSpaceNDContext {
BatchToSpaceNDContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
block_shape = GetInput(context, node, 1);
crops = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* block_shape;
const TfLiteTensor* crops;
TfLiteTensor* output;
};
const int kInputMinDimensionNum = 3;
const int kInputMaxDimensionNum = 4;
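// Computes the output shape:
//   output_batch = input_batch / prod(block_shape)
//   output_spatial[d] = input_spatial[d] * block_shape[d]
//                       - crops[d][0] - crops[d][1]
// The depth (last) dimension is copied through unchanged.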
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
BatchToSpaceNDContext* op_context) {
TfLiteIntArray* input_size = op_context->input->dims;
const int* block_shape = GetTensorData<int32>(op_context->block_shape);
const int* crops = GetTensorData<int32>(op_context->crops);
int spatial_dims_num = input_size->size - 2;
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape), 1);
TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->crops), 2);
TF_LITE_ENSURE_EQ(context, op_context->crops->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, op_context->crops->dims->data[1], 2);
for (int i = 0; i < spatial_dims_num * 2; ++i) {
TF_LITE_ENSURE(context, crops[i] >= 0);
}
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
int output_batch_size = input_size->data[0];
for (int dim = 0; dim < spatial_dims_num; ++dim) {
TF_LITE_ENSURE(context, block_shape[dim] != 0);
TF_LITE_ENSURE_EQ(context, output_batch_size % block_shape[dim], 0);
output_batch_size = output_batch_size / block_shape[dim];
output_size->data[dim + 1] = input_size->data[dim + 1] * block_shape[dim] -
crops[dim * 2] - crops[dim * 2 + 1];
}
output_size->data[0] = output_batch_size;
output_size->data[input_size->size - 1] =
input_size->data[input_size->size - 1];
return context->ResizeTensor(context, op_context->output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BatchToSpaceNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
if (op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
}
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, 0);
}
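  // If block_shape or crops are not constant, the output shape can only be
  // determined at eval time, so mark the output dynamic for now.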
if (!IsConstantOrPersistentTensor(op_context.block_shape) ||
!IsConstantOrPersistentTensor(op_context.crops)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BatchToSpaceNDContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
#define TF_LITE_BATCH_TO_SPACE_ND(type, scalar) \
type::BatchToSpaceND(GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
GetTensorShape(op_context.block_shape), \
GetTensorData<int32_t>(op_context.block_shape), \
GetTensorShape(op_context.crops), \
GetTensorData<int32_t>(op_context.crops), \
GetTensorShape(op_context.output), \
GetTensorData<scalar>(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, float);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, float);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, uint8_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, uint8_t);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int8_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int8_t);
}
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int16_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int16_t);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int32_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int32_t);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int64_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int64_t);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by BatchToSpace.",
op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_BATCH_TO_SPACE_ND
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, batch_to_space_nd::Prepare,
batch_to_space_nd::Eval<batch_to_space_nd::kReference>};
return &r;
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, batch_to_space_nd::Prepare,
batch_to_space_nd::Eval<batch_to_space_nd::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND() {
return Register_BATCH_TO_SPACE_ND_GENERIC_OPT();
}
}
}
} | #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include <gtest/gtest.h>
namespace tflite {
namespace {
std::pair<int, int> GetIndexRange(int spatial_index_dim, int block_shape_dim,
int input_dim, int output_dim) {
int index_start = 0;
int index_end = 0;
optimized_ops::GetIndexRange(spatial_index_dim, block_shape_dim, input_dim,
output_dim, &index_start, &index_end);
return {index_start, index_end};
}
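// GetIndexRange(offset, block, input_dim, output_dim) returns the half-open
// range of indices i in [0, input_dim) satisfying 0 <= i * block + offset <
// output_dim; the cases below cover in-range, clamped, and empty results.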
TEST(BatchToSpaceNDTest, TestIndexRange) {
EXPECT_EQ(GetIndexRange(3, 6,
1, 6),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(2, 6,
5, 30),
std::make_pair(0, 5));
EXPECT_EQ(GetIndexRange(0, 2,
3, 4),
std::make_pair(0, 2));
EXPECT_EQ(GetIndexRange(-2, 2,
3, 4),
std::make_pair(1, 3));
EXPECT_EQ(GetIndexRange(-30, 5,
7, 5),
std::make_pair(6, 7));
EXPECT_EQ(GetIndexRange(-26, 5,
7, 5),
std::make_pair(6, 7));
EXPECT_EQ(GetIndexRange(0, 5,
7, 5),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(4, 5,
7, 5),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(3, 5,
7, 5),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(0, 5,
7, 1),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(-30, 5,
7, 1),
std::make_pair(6, 7));
EXPECT_EQ(GetIndexRange(1, 5,
7, 1),
std::make_pair(0, 0));
EXPECT_EQ(GetIndexRange(-29, 5,
7, 1),
std::make_pair(6, 6));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/batch_to_space_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/batch_to_space_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db986006-b216-4ed1-9076-c2e7bc628ce6 | cpp | tensorflow/tensorflow | bitwise_xor | tensorflow/lite/kernels/bitwise_xor.cc | tensorflow/lite/kernels/bitwise_xor_test.cc | #include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bitwise_xor {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input1->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
template <typename T>
T BitwiseXor(T x, T y) {
return x ^ y;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteType type = output->type;
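  // Signed and unsigned cases of the same width share a branch: XOR operates
  // on the raw bit pattern, so evaluating uint8/uint16/uint32 data through
  // the signed pointer type produces identical bits.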
switch (type) {
case kTfLiteUInt8:
case kTfLiteInt8: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), BitwiseXor);
} else {
reference_ops::BinaryFunction<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), BitwiseXor);
}
break;
}
case kTfLiteUInt16:
case kTfLiteInt16: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), BitwiseXor);
} else {
reference_ops::BinaryFunction<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), BitwiseXor);
}
break;
}
case kTfLiteUInt32:
case kTfLiteInt32: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), BitwiseXor);
} else {
reference_ops::BinaryFunction<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), BitwiseXor);
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"BitwiseXor currently only supports "
"8-bit/16-bit/32-bit integer/unsigned integer, got %s",
TfLiteTypeGetName(type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BITWISE_XOR() {
static TfLiteRegistration r = {bitwise_xor::Init, bitwise_xor::Free,
bitwise_xor::Prepare, bitwise_xor::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class BitwiseXorOpModel : public SingleOpModel {
public:
BitwiseXorOpModel(std::initializer_list<int> input1_shape,
std::initializer_list<int> input2_shape,
TensorType tensor_type) {
input1_ = AddInput(tensor_type);
input2_ = AddInput(tensor_type);
output_ = AddOutput(tensor_type);
SetBuiltinOp(BuiltinOperator_BITWISE_XOR, BuiltinOptions_BitwiseXorOptions,
CreateBitwiseXorOptions(builder_).Union());
BuildInterpreter({input1_shape, input2_shape});
}
int input1() const { return input1_; }
int input2() const { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(BitwiseXorOpTest, SimpleTestInt8) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT8);
model.PopulateTensor<int8_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<int8_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestInt16) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16);
model.PopulateTensor<int16_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<int16_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestInt32) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<int32_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestUInt8) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT8);
model.PopulateTensor<uint8_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint8_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint8_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestUInt16) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT16);
model.PopulateTensor<uint16_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint16_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint16_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestUInt32) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint32_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastLhs) {
BitwiseXorOpModel model({1, 1, 1, 1}, {1, 1, 1, 4}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {5});
model.PopulateTensor<int32_t>(model.input2(), {0, -5, -3, 14});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({5, -2, -8, 11}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastRhs) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint32_t>(model.input2(), {5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({5, 0, 6, 11}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bitwise_xor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bitwise_xor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7b70efc6-08de-4278-b0b9-a4889873b81c | cpp | tensorflow/tensorflow | reverse_sequence | tensorflow/lite/kernels/reverse_sequence.cc | tensorflow/lite/kernels/reverse_sequence_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace reverse_sequence {
namespace {
constexpr int kInputTensor = 0;
constexpr int kSeqLengthsTensor = 1;
constexpr int kOutputTensor = 0;
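// For each slice b along batch_dim, the first seq_lengths[b] elements along
// seq_dim are reversed and the remaining elements are copied unchanged.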
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* seq_lengths;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kSeqLengthsTensor, &seq_lengths));
TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1);
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 &&
input->type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
if (seq_lengths->type != kTfLiteInt32 && seq_lengths->type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(
context, "Seq_lengths type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(seq_lengths->type));
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
return context->ResizeTensor(context, output, output_shape);
}
template <typename T, typename TS>
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* seq_lengths_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSeqLengthsTensor,
&seq_lengths_tensor));
const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor);
auto* params =
reinterpret_cast<TfLiteReverseSequenceParams*>(node->builtin_data);
int seq_dim = params->seq_dim;
int batch_dim = params->batch_dim;
TF_LITE_ENSURE(context, seq_dim >= 0);
TF_LITE_ENSURE(context, batch_dim >= 0);
TF_LITE_ENSURE(context, seq_dim != batch_dim);
TF_LITE_ENSURE(context, seq_dim < NumDimensions(input));
TF_LITE_ENSURE(context, batch_dim < NumDimensions(input));
TF_LITE_ENSURE_EQ(context, SizeOfDimension(seq_lengths_tensor, 0),
SizeOfDimension(input, batch_dim));
  for (int i = 0; i < SizeOfDimension(seq_lengths_tensor, 0); ++i) {
TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim));
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
reference_ops::ReverseSequence<T, TS>(
seq_lengths, seq_dim, batch_dim, GetTensorShape(input),
GetTensorData<T>(input), GetTensorShape(output),
GetTensorData<T>(output));
return kTfLiteOk;
}
template <typename T>
TfLiteStatus ReverseSequenceHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* seq_lengths_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSeqLengthsTensor,
&seq_lengths_tensor));
switch (seq_lengths_tensor->type) {
case kTfLiteInt32: {
return ReverseSequenceImpl<T, int32_t>(context, node);
}
case kTfLiteInt64: {
return ReverseSequenceImpl<T, int64_t>(context, node);
}
default: {
TF_LITE_KERNEL_LOG(
context,
"Seq_lengths type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(seq_lengths_tensor->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (output->type) {
case kTfLiteFloat32: {
return ReverseSequenceHelper<float>(context, node);
}
case kTfLiteUInt8: {
return ReverseSequenceHelper<uint8_t>(context, node);
}
case kTfLiteInt16: {
return ReverseSequenceHelper<int16_t>(context, node);
}
case kTfLiteInt32: {
return ReverseSequenceHelper<int32_t>(context, node);
}
case kTfLiteInt64: {
return ReverseSequenceHelper<int64_t>(context, node);
}
default: {
TF_LITE_KERNEL_LOG(context,
"Type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_REVERSE_SEQUENCE() {
static TfLiteRegistration r = {nullptr, nullptr, reverse_sequence::Prepare,
reverse_sequence::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class ReverseSequenceOpModel : public SingleOpModel {
public:
ReverseSequenceOpModel(const TensorData& input, const TensorData& seq_lengths,
int seq_dim, int batch_dim) {
input_ = AddInput(input);
seq_lengths_ = AddInput(seq_lengths);
output_ = AddOutput({input.type, {}});
SetBuiltinOp(
BuiltinOperator_REVERSE_SEQUENCE, BuiltinOptions_ReverseSequenceOptions,
CreateReverseSequenceOptions(builder_, seq_dim, batch_dim).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
int seq_lengths() { return seq_lengths_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int seq_lengths_;
int output_;
};
TEST(ReverseSequenceOpTest, FloatSeqDimIsGreater) {
ReverseSequenceOpModel<float> model({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_INT32, {4}}, 1, 0);
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 9, 10, 7, 8, 11, 12,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseSequenceOpTest, FloatBatchDimIsGreater) {
ReverseSequenceOpModel<float> model({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_INT32, {2}}, 0, 2);
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({13, 20, 15, 22, 17, 24, 7, 14, 9, 16, 11, 18, 1,
8, 3, 10, 5, 12, 19, 2, 21, 4, 23, 6}));
}
TEST(ReverseSequenceOpTest, Int32SeqDimIsGreater) {
ReverseSequenceOpModel<int32_t> model({TensorType_INT32, {4, 3, 2}},
{TensorType_INT32, {4}}, 1, 0);
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 9, 10, 7, 8, 11, 12,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseSequenceOpTest, Int32BatchDimIsGreater) {
ReverseSequenceOpModel<int32_t> model({TensorType_INT32, {4, 3, 2}},
{TensorType_INT32, {2}}, 0, 2);
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({13, 20, 15, 22, 17, 24, 7, 14, 9, 16, 11, 18, 1,
8, 3, 10, 5, 12, 19, 2, 21, 4, 23, 6}));
}
TEST(ReverseSequenceOpTest, Int64SeqDimIsGreater) {
ReverseSequenceOpModel<int64_t> model({TensorType_INT64, {4, 3, 2}},
{TensorType_INT32, {4}}, 1, 0);
model.PopulateTensor<int64_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 9, 10, 7, 8, 11, 12,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseSequenceOpTest, Int64BatchDimIsGreater) {
ReverseSequenceOpModel<int64_t> model({TensorType_INT64, {4, 3, 2}},
{TensorType_INT32, {2}}, 0, 2);
model.PopulateTensor<int64_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({13, 20, 15, 22, 17, 24, 7, 14, 9, 16, 11, 18, 1,
8, 3, 10, 5, 12, 19, 2, 21, 4, 23, 6}));
}
TEST(ReverseSequenceOpTest, Uint8SeqDimIsGreater) {
ReverseSequenceOpModel<uint8_t> model({TensorType_UINT8, {4, 3, 2}},
{TensorType_INT32, {4}}, 1, 0);
model.PopulateTensor<uint8_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 9, 10, 7, 8, 11, 12,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseSequenceOpTest, Uint8BatchDimIsGreater) {
ReverseSequenceOpModel<uint8_t> model({TensorType_UINT8, {4, 3, 2}},
{TensorType_INT32, {2}}, 0, 2);
model.PopulateTensor<uint8_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({13, 20, 15, 22, 17, 24, 7, 14, 9, 16, 11, 18, 1,
8, 3, 10, 5, 12, 19, 2, 21, 4, 23, 6}));
}
TEST(ReverseSequenceOpTest, Int16SeqDimIsGreater) {
ReverseSequenceOpModel<int16_t> model({TensorType_INT16, {4, 3, 2}},
{TensorType_INT32, {4}}, 1, 0);
model.PopulateTensor<int16_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 9, 10, 7, 8, 11, 12,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseSequenceOpTest, Int16BatchDimIsGreater) {
ReverseSequenceOpModel<int16_t> model({TensorType_INT16, {4, 3, 2}},
{TensorType_INT32, {2}}, 0, 2);
model.PopulateTensor<int16_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.seq_lengths(), {3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({13, 20, 15, 22, 17, 24, 7, 14, 9, 16, 11, 18, 1,
8, 3, 10, 5, 12, 19, 2, 21, 4, 23, 6}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/reverse_sequence.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/reverse_sequence_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f12c6805-40c7-43da-a825-1cf3263758dd | cpp | tensorflow/tensorflow | select | tensorflow/lite/experimental/shlo/legacy/src/select.cc | tensorflow/lite/experimental/shlo/legacy/test/select_test.cc | #include <cstddef>
#include <type_traits>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
template <typename Value>
absl::Status CheckParameters(const Tensor& pred, const Value& on_true,
const Value& on_false, Value& result) {
if (!(pred.rank() == 0 or pred.shape() == on_true.shape())) {
return absl::InvalidArgumentError(
"Constraint violation: rank(pred) = 0 or shape(pred) = "
"shape(on_true)");
} else if (!(on_true.baseline_type() == on_false.baseline_type() and
on_true.baseline_type() == result.baseline_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(on_true) = "
"baseline_type(on_false) = baseline_type(result)");
} else if (pred.element_type() != ElementType::kI1) {
return absl::InvalidArgumentError("Expected boolean tensor as predicate");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (!(on_true.is_per_tensor_quantized() and
on_false.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per-tensor quantization");
}
}
if (pred.layout().has_strides() || on_true.layout().has_strides() ||
on_false.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
template <ElementType storage_type, ElementType expressed_type, typename Value>
absl::Status Select(const Tensor& pred, const Value& on_true,
const Value& on_false, Value& result) {
if (auto check = CheckParameters(pred, on_true, on_false, result);
!check.ok()) {
return check;
}
using P = Storage<ElementType::kI1>;
using S = Storage<storage_type>;
const bool pred_is_tensor = (pred.rank() > 0);
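  // A rank-0 predicate is read once (at i == 0) and selects between the whole
  // on_true/on_false tensors; a full-shape predicate is read per element.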
const size_t n = result.num_elements();
auto pred_buffer = pred.buffer();
auto on_true_buffer = on_true.buffer();
auto on_false_buffer = on_false.buffer();
auto result_buffer = result.buffer();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != result.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
    bool selection_value = false;
for (size_t i = 0; i < n; ++i) {
if (pred_is_tensor || (i == 0)) {
selection_value = P::Get(pred_buffer, i);
}
auto input_buffer = selection_value ? on_true_buffer : on_false_buffer;
auto result_value = S::Get(input_buffer, i);
S::Set(result_buffer, i, result_value);
}
} else {
static_assert(std::is_same_v<Value, QuantizedTensor>);
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
using ET = typename Storage<expressed_type>::Type;
const QuantizedParameter& on_true_quant_param =
on_true.type().element_type().parameters(0);
const QuantizedParameter& on_false_quant_param =
on_false.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
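    // Quantized path: each selected element is dequantized with its source
    // tensor's parameters and requantized with the result's parameters.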
    bool selection_value = false;
for (size_t i = 0; i < n; ++i) {
if (pred_is_tensor || (i == 0)) {
selection_value = P::Get(pred_buffer, i);
}
const void* input_buffer;
const QuantizedParameter* input_quant_param;
if (selection_value) {
input_buffer = on_true_buffer;
input_quant_param = &on_true_quant_param;
} else {
input_buffer = on_false_buffer;
input_quant_param = &on_false_quant_param;
}
auto input_storage = S::Get(input_buffer, i);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
input_storage, *input_quant_param, result_scale_inv,
result_quant_param.zero_point, [](auto x) { return x; });
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
}
absl::Status Select(const Tensor& pred, const Tensor& on_true,
const Tensor& on_false, Tensor& result) {
DISPATCH_BOOL_INT_FLOAT(Select, result.element_type(), pred, on_true,
on_false, result);
}
absl::Status Select(const Tensor& pred, const QuantizedTensor& on_true,
const QuantizedTensor& on_false, QuantizedTensor& result) {
DISPATCH_QUANTIZED(Select, result.storage_type(), result.expressed_type(),
pred, on_true, on_false, result);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<ElementType::kI1>::Type>&& pred_values,
std::vector<typename Storage<element_type>::Type>&& on_true_values,
std::vector<typename Storage<element_type>::Type>&& on_false_values,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
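  // A single predicate value is wrapped in a rank-0 tensor to exercise the
  // scalar-selection path; multiple values use the full shape.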
Shape pred_shape = (pred_values.size() > 1) ? Shape(shape) : Shape();
Tensor pred(TensorType(std::move(pred_shape), ElementType::kI1),
pred_values.data());
Tensor on_true(TensorType(Shape(shape), element_type), on_true_values.data());
Tensor on_false(TensorType(Shape(shape), element_type),
on_false_values.data());
Tensor expected(TensorType(Shape(shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(Select(pred, on_true, on_false, result));
EXPECT_EQ(result, expected) << "pred: " << pred << "\non_true: " << on_true
<< "\nnon_false: " << on_false;
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<ElementType::kI1>::Type>&& pred_values,
std::vector<typename Storage<expressed_type>::Type>&& on_true_values,
std::vector<typename Storage<expressed_type>::Type>&& on_false_values,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
Shape pred_shape = (pred_values.size() > 1) ? Shape(shape) : Shape();
Tensor pred(TensorType(std::move(pred_shape), ElementType::kI1),
pred_values.data());
auto on_true_quant_values = QuantizeVector<storage_type, expressed_type>(
on_true_values, quantized_parameter);
auto on_false_quant_values = QuantizeVector<storage_type, expressed_type>(
on_false_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor on_true(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
on_true_quant_values.data());
QuantizedTensor on_false(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
on_false_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(Select(pred, on_true, on_false, result));
EXPECT_EQ(result, expected) << "pred: " << pred << "\non_true: " << on_true
<< "\nnon_false: " << on_false;
}
TEST(Select, Unquantized) {
test<ElementType::kI1>({2}, {true}, {true, false}, {false, true},
{true, false});
test<ElementType::kSI8>({2}, {false}, {1, 2}, {-1, -2}, {-1, -2});
test<ElementType::kSI16>({2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI32>({2}, {false}, {1, 2}, {-1, -2}, {-1, -2});
test<ElementType::kBF16>({2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kF16>({2}, {false}, {1, 2}, {-1, -2}, {-1, -2});
test<ElementType::kF32>({2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kI1>({2}, {true, false}, {true, true}, {false, false},
{true, false});
test<ElementType::kSI8>({2}, {true, false}, {1, 2}, {-1, -2}, {1, -2});
test<ElementType::kSI16>({2}, {true, false}, {1, 2}, {-1, -2}, {1, -2});
test<ElementType::kSI32>({2}, {true, false}, {1, 2}, {-1, -2}, {1, -2});
test<ElementType::kBF16>({2}, {true, false}, {1, 2}, {-1, -2}, {1, -2});
test<ElementType::kF16>({2}, {true, false}, {1, 2}, {-1, -2}, {1, -2});
test<ElementType::kF32>({2}, {true, false}, {1, 2}, {-1, -2}, {1, -2});
}
TEST(Select, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI8, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{2}, {false}, {1, 2}, {-1, -2},
{-1, -2});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI8, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI8, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI8, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI16, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{2}, {false}, {1, 2}, {-1, -2},
{-1, -2});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI16, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI16, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI16, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI32, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{2}, {false}, {1, 2}, {-1, -2},
{-1, -2});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {2}, {true}, {1, 2}, {-1, -2}, {1, 2});
test<ElementType::kSI32, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI32, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
test<ElementType::kSI32, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{2}, {true, false}, {1, 2},
{-1, -2}, {1, -2});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/select.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/select_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
08070099-b020-4907-8983-05a3e1f40a7b | cpp | tensorflow/tensorflow | stablehlo_reduce_window | tensorflow/lite/kernels/stablehlo_reduce_window.cc | tensorflow/lite/kernels/stablehlo_reduce_window_test.cc | #include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <type_traits>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
constexpr int32_t kMaxReduceWindowRank = 6;
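// Recursively copies a rank-dimensional block of element_size-byte elements.
// All strides here are byte strides, so the same routine serves both the
// dilation and the pad/crop passes below; the innermost dimension reduces to
// one memcpy per element.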
void StridedCopy(const int rank, const char* input, const int64_t* input_shape,
const int64_t* input_strides, char* output,
const int64_t* output_strides, const int64_t element_size,
const int depth) {
if (depth + 1 == rank) {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
std::memcpy(output, input, element_size);
input += input_strides[depth];
output += output_strides[depth];
}
} else {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
StridedCopy(rank, input, input_shape, input_strides, output,
output_strides, element_size, depth + 1);
input += input_strides[depth];
output += output_strides[depth];
}
}
}
}
namespace dilate {
namespace {
const int64_t kTFLiteDefaultBaseDilation[kMaxReduceWindowRank] = {1, 1, 1,
1, 1, 1};
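// Precomputed state for the base-dilation pass. Dilating a dimension of size
// d by factor k interleaves its elements with (k - 1) copies of the init
// value, so the output dimension is (d - 1) * k + 1; e.g. a {3, 3} input with
// base dilations {2, 2} dilates to {5, 5}.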
struct DilateData {
DilateData() = default;
DilateData(const int rank, const int64_t* input_shape,
const int64_t* dilation, const int64_t element_size)
: rank(rank), init_element_size(element_size) {
std::copy_n(input_shape, rank, shape);
std::copy_n(dilation, rank, base_dilations);
ComputeOutputShapeAndSize(element_size);
skip = std::all_of(dilation, dilation + rank,
[](int64_t d) { return d == 1; });
if (skip) {
return;
}
MergeTrailingDilations(element_size);
ComputeInputStrides();
ComputeOutputStridesAndSizes();
}
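  // Trailing dimensions with a dilation of 1 stay contiguous in both input
  // and output, so they can be folded into the element size and moved with a
  // single memcpy. Illustrative example: shape {2, 3}, dilations {2, 1} with
  // 4-byte elements collapses to rank 1 with a 12-byte element.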
void MergeTrailingDilations(int64_t element_size) {
for (int i = rank - 2; i >= 0; --i) {
if (base_dilations[i + 1] == 1) {
element_size *= shape[i + 1];
--rank;
} else {
break;
}
}
if (rank == 1 && base_dilations[0] == 1) {
element_size *= shape[0];
shape[0] = 1;
}
input_strides[rank - 1] = element_size;
}
void ComputeInputStrides() {
assert(input_strides[rank - 1] != 0);
for (int i = rank - 2; i >= 0; --i) {
input_strides[i] = shape[i + 1] * input_strides[i + 1];
}
}
void ComputeOutputStridesAndSizes() {
output_dimension_sizes[rank - 1] = input_strides[rank - 1];
output_strides[rank - 1] =
base_dilations[rank - 1] * output_dimension_sizes[rank - 1];
for (int i = rank - 2; i >= 0; --i) {
output_dimension_sizes[i] = ((shape[i + 1] - 1) * output_strides[i + 1] +
output_dimension_sizes[i + 1]);
output_strides[i] = base_dilations[i] * output_dimension_sizes[i];
}
}
void ComputeOutputShapeAndSize(const int64_t element_size) {
output_size = element_size;
for (int i = 0; i < rank; ++i) {
output_shape[i] = (shape[i] - 1) * base_dilations[i] + 1;
output_size *= output_shape[i];
}
}
int64_t ElementSize() const { return input_strides[rank - 1]; }
bool skip = true;
int rank = 0;
int64_t init_element_size = 0;
int64_t shape[kMaxReduceWindowRank] = {};
int64_t base_dilations[kMaxReduceWindowRank] = {};
int64_t output_strides[kMaxReduceWindowRank] = {};
int64_t output_dimension_sizes[kMaxReduceWindowRank] = {};
int64_t input_strides[kMaxReduceWindowRank] = {};
int64_t output_shape[kMaxReduceWindowRank] = {};
int64_t output_size = 1;
};
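// Fills the output with the init value, then strided-copies the input over
// it. The fill writes one element and repeatedly doubles the initialized
// region with memcpy, needing only O(log n) memcpy calls.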
void Dilate(const DilateData& ctx, const char* input, const char* init_value,
char* output) {
assert(!ctx.skip);
{
std::memcpy(output, init_value, ctx.init_element_size);
int64_t remaining_bytes = ctx.output_size - ctx.init_element_size;
int64_t copied_bytes = ctx.init_element_size;
while (remaining_bytes) {
int64_t bytes = std::min(remaining_bytes, copied_bytes);
std::memcpy(output + copied_bytes, output, bytes);
remaining_bytes -= bytes;
copied_bytes += bytes;
}
}
StridedCopy(ctx.rank, input, ctx.shape, ctx.input_strides, output,
ctx.output_strides, ctx.ElementSize(), 0);
}
}
}
namespace pad {
namespace {
const int64_t kTFLiteDefaultPadding[kMaxReduceWindowRank] = {0, 0, 0, 0, 0, 0};
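// Precomputed state for the padding pass. Padding comes as (low, high) pairs
// per dimension; positive entries pad with the init value and negative
// entries crop, so one pass handles both. input_offset skips cropped input
// elements, output_offset skips padded output elements, and
// cropped_input_shape is the input shape shrunk by any negative padding.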
struct PadCropData {
PadCropData() = default;
PadCropData(int rank, const int64_t* dims, const int64_t* padding,
const int64_t element_size)
: rank(rank), element_size(element_size) {
assert(rank > 0);
    assert(rank <= kMaxReduceWindowRank);
output_size = element_size;
for (int i = 0; i < rank; ++i) {
output_shape[i] = dims[i] + padding[2 * i] + padding[2 * i + 1];
output_size *= output_shape[i];
}
skip = std::all_of(padding, padding + 2 * rank,
[](int64_t v) { return v == 0; });
if (skip) {
return;
}
output_strides[rank - 1] = element_size;
input_strides[rank - 1] = element_size;
for (int i = rank - 2; i >= 0; --i) {
output_strides[i] = output_shape[i + 1] * output_strides[i + 1];
input_strides[i] = dims[i + 1] * input_strides[i + 1];
}
for (int i = 0; i < rank; ++i) {
input_offset += std::max<int64_t>(-padding[2 * i], 0) * input_strides[i];
output_offset += std::max<int64_t>(padding[2 * i], 0) * output_strides[i];
cropped_input_shape[i] = dims[i] + std::min<int64_t>(padding[2 * i], 0) +
std::min<int64_t>(padding[2 * i + 1], 0);
}
}
bool skip = true;
int rank = 0;
int64_t element_size = 0;
int64_t cropped_input_shape[kMaxReduceWindowRank];
int64_t input_strides[kMaxReduceWindowRank];
int64_t output_shape[kMaxReduceWindowRank];
int64_t output_strides[kMaxReduceWindowRank];
int64_t input_offset = 0;
int64_t output_offset = 0;
int64_t output_size = 0;
};
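// Same doubling-fill-then-strided-copy scheme as Dilate, restricted to the
// cropped input region and shifted by the precomputed offsets.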
void PadCrop(const PadCropData& ctx, const char* input, const char* init_value,
char* output) {
assert(!ctx.skip);
{
std::memcpy(output, init_value, ctx.element_size);
int64_t remaining_bytes = ctx.output_size - ctx.element_size;
int64_t copied_bytes = ctx.element_size;
while (remaining_bytes) {
int64_t bytes = std::min(remaining_bytes, copied_bytes);
std::memcpy(output + copied_bytes, output, bytes);
remaining_bytes -= bytes;
copied_bytes += bytes;
}
}
StridedCopy(ctx.rank, input + ctx.input_offset, ctx.cropped_input_shape,
ctx.input_strides, output + ctx.output_offset, ctx.output_strides,
ctx.element_size, 0);
}
}
}
namespace reduce_window {
namespace {
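// Accumulates every element of one window into accu. The strides are element
// counts (not bytes) that already include the window dilation, so walking
// them visits exactly the dilated window positions.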
template <class Op, class Type>
void StridedReduce(const Type* input, const int64_t* const shape,
const int64_t* const strides, Type& accu, const int rank,
const int depth) {
const int64_t stride = strides[depth];
const int64_t size = shape[depth];
if (depth + 1 == rank) {
const Op op;
for (int64_t i = 0; i < size; ++i) {
accu = op(accu, *input);
input += stride;
}
} else {
for (int64_t i = 0; i < size; ++i) {
StridedReduce<Op, Type>(input, shape, strides, accu, rank, depth + 1);
input += stride;
}
}
}
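// Outer recursion over output positions: the input pointer advances by the
// window stride per output element, and StridedReduce above folds the window
// anchored at each position.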
template <class Op, class Type>
void ReduceWindowImpl(const Type* input, Type* output,
const int64_t* const output_shape,
const int64_t* const output_strides,
const int64_t* const window_offset_strides,
const int64_t* const window_shape,
const int64_t* const window_reduce_strides,
const Type init, const int rank, const int depth) {
if (depth + 1 == rank) {
for (int32_t dim = 0; dim < output_shape[depth]; ++dim) {
*output = init;
StridedReduce<Op, Type>(input, window_shape, window_reduce_strides,
*output, rank, 0);
input += window_offset_strides[depth];
output += output_strides[depth];
}
} else {
for (int32_t dim = 0; dim < output_shape[depth]; ++dim) {
ReduceWindowImpl<Op, Type>(input, output, output_shape, output_strides,
window_offset_strides, window_shape,
window_reduce_strides, init, rank, depth + 1);
input += window_offset_strides[depth];
output += output_strides[depth];
}
}
}
struct ReduceWindowData {
ReduceWindowData() = default;
ReduceWindowData(const int rank, const int64_t* input_shape,
const int64_t* window_shape, const int64_t* window_strides,
const int64_t* window_dilations)
: rank(rank),
input_shape(input_shape),
window_shape(window_shape),
window_dilations(window_dilations),
window_strides(window_strides) {
ComputeStrides(input_strides, input_shape);
Multiply(window_reduce_strides, input_strides, window_dilations);
Multiply(window_offset_strides, input_strides, window_strides);
ComputeOutputShape();
ComputeStrides(output_strides, output_shape);
}
void ComputeStrides(int64_t* strides, const int64_t* const shape) {
strides[rank - 1] = 1;
for (int64_t i = rank - 2; i >= 0; --i) {
strides[i] = shape[i + 1] * strides[i + 1];
}
}
void Multiply(int64_t* dst, const int64_t* const vec1,
const int64_t* const vec2) {
for (int64_t i = 0; i < rank; ++i) {
dst[i] = vec2[i] * vec1[i];
}
}
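  // Standard sliding-window shape rule: with a dilated window size
  // w' = (w - 1) * window_dilation + 1, each output dimension is
  // (input - w') / stride + 1 rounded down, or 0 when the window does not
  // fit. E.g. input 114, window 3, stride 2: (114 - 3) / 2 + 1 = 56.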
void ComputeOutputShape() {
int64_t dilated_window_shape[kMaxReduceWindowRank];
for (int64_t i = 0; i < rank; ++i) {
dilated_window_shape[i] = (window_shape[i] - 1) * window_dilations[i] + 1;
}
for (int64_t i = 0; i < rank; ++i) {
if (input_shape[i] < dilated_window_shape[i]) {
output_shape[i] = 0;
} else {
output_shape[i] =
(input_shape[i] - dilated_window_shape[i]) / window_strides[i] + 1;
}
}
}
int rank = 0;
const int64_t* input_shape;
const int64_t* window_shape;
const int64_t* window_dilations;
const int64_t* window_strides;
int64_t input_strides[kMaxReduceWindowRank] = {};
int64_t window_offset_strides[kMaxReduceWindowRank] = {};
int64_t window_reduce_strides[kMaxReduceWindowRank] = {};
int64_t output_shape[kMaxReduceWindowRank] = {};
int64_t output_strides[kMaxReduceWindowRank] = {};
};
template <class Op, class Type>
void ReduceWindow(const ReduceWindowData& ctx, const Type* const input,
const Type init, Type* output) {
ReduceWindowImpl<Op, Type>(input, output, ctx.output_shape,
ctx.output_strides, ctx.window_offset_strides,
ctx.window_shape, ctx.window_reduce_strides, init,
ctx.rank, 0);
}
}
}
namespace reduce_window_op {
namespace {
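// Per-node state persisted across Prepare/Eval. The dilate, pad/crop and
// reduce-window contexts are precomputed during Prepare so that Eval only
// runs up to three data passes.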
struct NodeData {
enum { kDilateOutput, kPadOutput, kTempTensorCount };
int temporary_tensor_offset = -1;
pad::PadCropData pad_ctx;
dilate::DilateData dilate_ctx;
reduce_window::ReduceWindowData reduce_window_ctx;
TfLiteReduceWindowFunction body;
};
struct OpData {
OpData(TfLiteContext* context, TfLiteNode* node)
: context(context), node(node) {}
TfLiteContext* context;
TfLiteNode* node;
TfLiteType type;
int rank;
int64_t element_size;
int64_t input_dims[kMaxReduceWindowRank];
const char* input;
const char* init_value;
const int64_t* window_dimensions;
const int64_t* window_strides;
const int64_t* base_dilations;
const int64_t* window_dilations;
const int64_t* padding;
char* dilate_output = nullptr;
char* pad_output = nullptr;
char* output;
TfLiteStatus ResizeTensor(TfLiteTensor* const tensor,
const int64_t* const shape) {
auto dims = BuildTfLiteArray<int32_t>(rank, shape);
return context->ResizeTensor(context, tensor, dims.release());
}
TfLiteStatus SetElementType(TfLiteType t) {
type = t;
size_t unsigned_element_size;
TF_LITE_ENSURE_OK(context,
GetSizeOfType(context, type, &unsigned_element_size));
TF_LITE_ENSURE_MSG(
context,
sizeof(unsigned_element_size) < sizeof(int64_t) ||
unsigned_element_size <= std::numeric_limits<int64_t>::max(),
"The element size cannot be contained in an int64_t value.");
element_size = unsigned_element_size;
return kTfLiteOk;
}
template <class Semantic>
TfLiteStatus InitializeBase() {
init_value = reinterpret_cast<const char*>(
GetInput(context, node, Semantic::kInitValue)->data.data);
const TfLiteTensor* const input_tensor =
GetInput(context, node, Semantic::kInput);
SetElementType(input_tensor->type);
rank = input_tensor->dims->size;
std::copy_n(input_tensor->dims->data, rank, input_dims);
input = reinterpret_cast<const char*>(input_tensor->data.data);
TfLiteTensor* const output_tensor =
GetOutput(context, node, Semantic::kOutput);
output = reinterpret_cast<char*>(output_tensor->data.data);
return kTfLiteOk;
}
};
struct StablehloData : public OpData {
enum InputTensorId { kInput, kInitValue, kNumInputTensors };
enum OutputTensorId { kOutput, kNumOutputTensors };
using OpData::OpData;
TfLiteTensor* GetTemporary(int id) {
return tflite::GetTemporary(context, node, id);
}
TfLiteStatus Check() const {
TF_LITE_ENSURE_EQ(context, NumInputs(node), kNumInputTensors);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), kNumOutputTensors);
const TfLiteTensor* const input_tensor = GetInput(context, node, kInput);
const TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
const TfLiteTensor* const init_value_tensor =
GetInput(context, node, kInitValue);
TF_LITE_ENSURE_EQ(context, input_tensor->type, output_tensor->type);
TF_LITE_ENSURE_EQ(context, input_tensor->type, init_value_tensor->type);
TF_LITE_ENSURE(context, input_tensor->dims != nullptr);
TF_LITE_ENSURE(context, input_tensor->dims->size > 0);
TF_LITE_ENSURE(context, input_tensor->dims->size <= kMaxReduceWindowRank);
return kTfLiteOk;
}
TfLiteStatus Initialize() {
TF_LITE_ENSURE_OK(context, InitializeBase<StablehloData>());
const auto& params = *reinterpret_cast<TfLiteStablehloReduceWindowParams*>(
node->builtin_data);
window_dimensions = params.window_dimensions;
window_strides = params.window_strides;
base_dilations = params.base_dilations;
window_dilations = params.window_dilations;
padding = params.padding;
auto AllGtThanZero = [&](const int64_t* const attr) {
return std::all_of(attr, attr + rank, [](int64_t d) { return d > 0; });
};
TF_LITE_ENSURE(context, AllGtThanZero(base_dilations));
TF_LITE_ENSURE(context, AllGtThanZero(window_dimensions));
TF_LITE_ENSURE(context, AllGtThanZero(window_strides));
TF_LITE_ENSURE(context, AllGtThanZero(window_dilations));
if (node->temporaries &&
node->temporaries->size >= NodeData::kTempTensorCount) {
TfLiteTensor* const dilated_tensor =
GetTemporary(NodeData::kDilateOutput);
TfLiteTensor* const padded_tensor = GetTemporary(NodeData::kPadOutput);
TF_LITE_ENSURE(context, dilated_tensor != nullptr);
TF_LITE_ENSURE(context, padded_tensor != nullptr);
dilate_output = dilated_tensor->data.raw;
pad_output = padded_tensor->data.raw;
}
return kTfLiteOk;
}
TfLiteStatus Setup() {
NodeData& node_data = *reinterpret_cast<NodeData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(NodeData::kTempTensorCount);
for (int i = 0; i < NodeData::kTempTensorCount; ++i) {
node->temporaries->data[i] = node_data.temporary_tensor_offset + i;
}
node_data.body = GetBodyFunction();
node_data.dilate_ctx =
dilate::DilateData(rank, input_dims, base_dilations, element_size);
node_data.pad_ctx = pad::PadCropData(
rank, node_data.dilate_ctx.output_shape, padding, element_size);
node_data.reduce_window_ctx = reduce_window::ReduceWindowData(
rank, node_data.pad_ctx.output_shape, window_dimensions, window_strides,
window_dilations);
TfLiteTensor* const dilated_tensor = GetTemporary(NodeData::kDilateOutput);
TfLiteTensor* const padded_tensor = GetTemporary(NodeData::kPadOutput);
TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
dilated_tensor->type = type;
dilated_tensor->allocation_type = kTfLiteArenaRw;
padded_tensor->type = type;
padded_tensor->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, ResizeTensor(dilated_tensor,
node_data.dilate_ctx.output_shape));
TF_LITE_ENSURE_OK(
context, ResizeTensor(padded_tensor, node_data.pad_ctx.output_shape));
TF_LITE_ENSURE_OK(
context,
ResizeTensor(output_tensor, node_data.reduce_window_ctx.output_shape));
return kTfLiteOk;
}
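  // The StableHLO reduction body arrives as a separate subgraph. Rather than
  // invoking that subgraph per window element, the kernel pattern-matches it:
  // the body must contain exactly one supported binary op, which is mapped to
  // an equivalent C++ functor.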
TfLiteReduceWindowFunction GetBodyFunction() {
const TfLiteStablehloReduceWindowParams& params =
*reinterpret_cast<TfLiteStablehloReduceWindowParams*>(
node->builtin_data);
const int body_subgraph_index = params.body_subgraph_index;
const Subgraph& parent_subgraph =
*reinterpret_cast<Subgraph*>(context->impl_);
const std::vector<std::unique_ptr<Subgraph>>& subgraphs =
*parent_subgraph.GetSubgraphs();
if (body_subgraph_index >= subgraphs.size()) {
TF_LITE_KERNEL_LOG(
context, "Body subgraph not found for stablehlo.reduce_window: %d.",
body_subgraph_index);
return TfLiteReduceWindowFunctionUnsupported;
}
const Subgraph& body_subgraph = *subgraphs[body_subgraph_index];
const std::vector<int>& execution_plan =
body_subgraph.pre_delegation_execution_plan().empty()
? body_subgraph.execution_plan()
: body_subgraph.pre_delegation_execution_plan();
if (execution_plan.size() != 1) {
TF_LITE_KERNEL_LOG(context,
"Only one kernel is allowed within "
"stablehlo.reduce_window body. (%zu) kernels found.\n",
execution_plan.size());
return TfLiteReduceWindowFunctionUnsupported;
}
const int body_kernel_index = execution_plan[0];
const TfLiteRegistration& body_kernel_registration =
body_subgraph.node_and_registration(body_kernel_index)->second;
switch (body_kernel_registration.builtin_code) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinStablehloAdd:
return TfLiteReduceWindowFunctionAdd;
case kTfLiteBuiltinMul:
case kTfLiteBuiltinStablehloMultiply:
return TfLiteReduceWindowFunctionMul;
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinStablehloMaximum:
return TfLiteReduceWindowFunctionMax;
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinStablehloMinimum:
return TfLiteReduceWindowFunctionMin;
case kTfLiteBuiltinLogicalAnd:
case kTfLiteBuiltinStablehloAnd:
return TfLiteReduceWindowFunctionAll;
case kTfLiteBuiltinLogicalOr:
case kTfLiteBuiltinStablehloOr:
return TfLiteReduceWindowFunctionAny;
default:
TF_LITE_KERNEL_LOG(
context, "%s:%d unsupported reduction body builtin code: %d.\n",
__FILE__, __LINE__, body_kernel_registration.builtin_code);
return TfLiteReduceWindowFunctionUnsupported;
}
}
};
struct TFLiteData : public OpData {
enum InputTensorId {
kInput,
kInitValue,
kWindowShape,
kWindowStrides,
kWindowDilations,
kNumInputTensors
};
enum OutputTensorId { kOutput, kNumOutputTensors };
using OpData::OpData;
TfLiteStatus Check() const {
TF_LITE_ENSURE_EQ(context, NumInputs(node), kNumInputTensors);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), kNumOutputTensors);
const TfLiteTensor* const input_tensor = GetInput(context, node, kInput);
const TfLiteTensor* const init_value_tensor =
GetInput(context, node, kInitValue);
const TfLiteTensor* const window_dimensions_tensor =
GetInput(context, node, kWindowShape);
const TfLiteTensor* const window_strides_tensor =
GetInput(context, node, kWindowStrides);
const TfLiteTensor* const window_dilations_tensor =
GetInput(context, node, kWindowDilations);
const TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
TF_LITE_ENSURE(context, IsConstantTensor(window_dimensions_tensor));
TF_LITE_ENSURE(context, IsConstantTensor(window_strides_tensor));
TF_LITE_ENSURE(context, IsConstantTensor(window_dilations_tensor));
TF_LITE_ENSURE_EQ(context, input_tensor->type, output_tensor->type);
TF_LITE_ENSURE_EQ(context, input_tensor->type, init_value_tensor->type);
TF_LITE_ENSURE_EQ(context, window_dimensions_tensor->type, kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, window_strides_tensor->type, kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, window_dilations_tensor->type, kTfLiteInt64);
TF_LITE_ENSURE(context, input_tensor->dims != nullptr);
TF_LITE_ENSURE(context, input_tensor->dims->size > 0);
TF_LITE_ENSURE(context, input_tensor->dims->size <= kMaxReduceWindowRank);
return kTfLiteOk;
}
TfLiteStatus Initialize() {
TF_LITE_ENSURE_OK(context, InitializeBase<TFLiteData>());
window_dimensions = reinterpret_cast<const int64_t*>(
GetInput(context, node, kWindowShape)->data.data);
window_strides = reinterpret_cast<const int64_t*>(
GetInput(context, node, kWindowStrides)->data.data);
base_dilations = dilate::kTFLiteDefaultBaseDilation;
window_dilations = reinterpret_cast<const int64_t*>(
GetInput(context, node, kWindowDilations)->data.data);
padding = pad::kTFLiteDefaultPadding;
return kTfLiteOk;
}
TfLiteStatus Setup() {
NodeData& node_data = *reinterpret_cast<NodeData*>(node->user_data);
const auto& params =
*reinterpret_cast<TfLiteReduceWindowParams*>(node->builtin_data);
node_data.body = params.reduce_function;
node_data.dilate_ctx.skip = true;
node_data.pad_ctx.skip = true;
node_data.reduce_window_ctx = reduce_window::ReduceWindowData(
rank, input_dims, window_dimensions, window_strides, window_dilations);
TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
return context->ResizeTensor(
context, output_tensor,
BuildTfLiteArray<int32_t>(rank,
node_data.reduce_window_ctx.output_shape)
.release());
}
};
template <class Op, class Type>
void PadCropReduceWindow(const OpData& op_ctx) {
NodeData& node_data = *reinterpret_cast<NodeData*>(op_ctx.node->user_data);
const char* input = op_ctx.input;
const int64_t* input_shape = op_ctx.input_dims;
if (!node_data.dilate_ctx.skip) {
dilate::Dilate(node_data.dilate_ctx, input, op_ctx.init_value,
op_ctx.dilate_output);
input = op_ctx.dilate_output;
input_shape = node_data.dilate_ctx.output_shape;
}
if (!node_data.pad_ctx.skip) {
pad::PadCrop(node_data.pad_ctx, input, op_ctx.init_value,
op_ctx.pad_output);
input = op_ctx.pad_output;
input_shape = node_data.pad_ctx.output_shape;
}
reduce_window::ReduceWindow<Op, Type>(
node_data.reduce_window_ctx, reinterpret_cast<const Type*>(input),
*reinterpret_cast<const Type*>(op_ctx.init_value),
reinterpret_cast<Type*>(op_ctx.output));
}
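// Maps the runtime tensor type to a concrete C++ type and instantiates the
// reduction. Bool tensors are one byte in TFLite, so they dispatch as int8_t.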
template <class Op>
TfLiteStatus DispatchReduceWindowType(OpData& ctx) {
#define REDUCE_WINDOW_TYPE_CASE(CPP_TYPE, TENSOR_TYPE) \
case TENSOR_TYPE: \
PadCropReduceWindow<Op, CPP_TYPE>(ctx); \
break;
switch (ctx.type) {
REDUCE_WINDOW_TYPE_CASE(int8_t, kTfLiteBool);
REDUCE_WINDOW_TYPE_CASE(int8_t, kTfLiteInt8);
REDUCE_WINDOW_TYPE_CASE(int16_t, kTfLiteInt16);
REDUCE_WINDOW_TYPE_CASE(int32_t, kTfLiteInt32);
REDUCE_WINDOW_TYPE_CASE(int64_t, kTfLiteInt64);
REDUCE_WINDOW_TYPE_CASE(uint8_t, kTfLiteUInt8);
REDUCE_WINDOW_TYPE_CASE(float, kTfLiteFloat32);
REDUCE_WINDOW_TYPE_CASE(double, kTfLiteFloat64);
default:
TF_LITE_KERNEL_LOG(
ctx.context,
"%s:%d unsupported kernel data type (TfliteType: %d a.k.a %s).",
__FILE__, __LINE__, ctx.type, TfLiteTypeGetName(ctx.type));
return kTfLiteError;
}
#undef REDUCE_WINDOW_TYPE_CASE
return kTfLiteOk;
}
struct Max {
template <class T>
constexpr T operator()(const T& a, const T& b) const {
return a >= b ? a : b;
}
};
struct Min {
template <class T>
constexpr T operator()(const T& a, const T& b) const {
return a <= b ? a : b;
}
};
TfLiteStatus DispatchReduceWindowBody(OpData& ctx) {
const NodeData& node_data = *static_cast<NodeData*>(ctx.node->user_data);
switch (node_data.body) {
case TfLiteReduceWindowFunctionUnsupported:
TF_LITE_KERNEL_LOG(ctx.context, "%s:%d unsupported reduction body.\n",
__FILE__, __LINE__);
return kTfLiteError;
case TfLiteReduceWindowFunctionAdd:
return DispatchReduceWindowType<std::plus<>>(ctx);
case TfLiteReduceWindowFunctionMul:
return DispatchReduceWindowType<std::multiplies<>>(ctx);
case TfLiteReduceWindowFunctionAll:
return DispatchReduceWindowType<std::logical_and<>>(ctx);
case TfLiteReduceWindowFunctionAny:
return DispatchReduceWindowType<std::logical_or<>>(ctx);
case TfLiteReduceWindowFunctionMin:
return DispatchReduceWindowType<Min>(ctx);
case TfLiteReduceWindowFunctionMax:
return DispatchReduceWindowType<Max>(ctx);
}
TF_LITE_KERNEL_LOG(ctx.context, "%s:%d unhandled reduction body case.\n",
__FILE__, __LINE__);
return kTfLiteError;
}
void* StablehloInit(TfLiteContext* context, const char* options,
size_t options_len) {
NodeData* node_data = new NodeData();
context->AddTensors(context, NodeData::kTempTensorCount,
&node_data->temporary_tensor_offset);
return node_data;
}
void* TFLiteInit(TfLiteContext* context, const char* options,
size_t options_len) {
return new NodeData();
}
void Free(TfLiteContext* context, void* node_data) {
delete static_cast<NodeData*>(node_data);
}
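// Prepare and Eval are shared by both front ends; the Semantic template
// parameter (StablehloData or TFLiteData) supplies the per-flavor tensor
// checks and attribute plumbing.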
template <class Semantic>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
Semantic ctx(context, node);
TF_LITE_ENSURE_OK(context, ctx.Check());
TF_LITE_ENSURE_OK(context, ctx.Initialize());
return ctx.Setup();
}
template <class Semantic>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Semantic ctx(context, node);
TF_LITE_ENSURE_OK(context, ctx.Initialize());
NodeData& node_data = *reinterpret_cast<NodeData*>(node->user_data);
TF_LITE_ENSURE_MSG(
context, node_data.pad_ctx.skip || node_data.pad_ctx.output_size > 0,
"The padding specification of stablehlo.reduce_window gives an empty "
"tensor.");
return DispatchReduceWindowBody(ctx);
}
}
}
TfLiteRegistration* Register_STABLEHLO_REDUCE_WINDOW() {
static TfLiteRegistration r = {
reduce_window_op::StablehloInit,
reduce_window_op::Free,
reduce_window_op::Prepare<reduce_window_op::StablehloData>,
reduce_window_op::Eval<reduce_window_op::StablehloData>};
return &r;
}
TfLiteRegistration* Register_REDUCE_WINDOW() {
static TfLiteRegistration r = {
reduce_window_op::TFLiteInit,
reduce_window_op::Free,
reduce_window_op::Prepare<reduce_window_op::TFLiteData>,
reduce_window_op::Eval<reduce_window_op::TFLiteData>};
return &r;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <limits>
#include <ostream>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/absl_log.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace reduce_window {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
#define REDUCE_WINDOW_ENSURE_OK(expr) \
do { \
if (TfLiteStatus status = (expr); status != kTfLiteOk) { \
ABSL_LOG(ERROR) << #expr " failed.\n"; \
return status; \
} \
} while (false)
#define REDUCE_WINDOW_ENSURE_IMPL(expr, msg) \
do { \
if (!(expr)) { \
ABSL_LOG(ERROR) << #msg " failed.\n"; \
return kTfLiteError; \
} \
} while (false)
#define REDUCE_WINDOW_ENSURE(expr) REDUCE_WINDOW_ENSURE_IMPL((expr), #expr)
#define REDUCE_WINDOW_ENSURE_EQ(a, b) \
REDUCE_WINDOW_ENSURE_IMPL((a) == (b), #a " == " #b)
#define REDUCE_WINDOW_ENSURE_NE(a, b) \
REDUCE_WINDOW_ENSURE_IMPL((a) != (b), #a " != " #b)
#define REDUCE_WINDOW_ENSURE_GE(a, b) \
REDUCE_WINDOW_ENSURE_IMPL((a) >= (b), #a " >= " #b)
#define REDUCE_WINDOW_ENSURE_LE(a, b) \
REDUCE_WINDOW_ENSURE_IMPL((a) <= (b), #a " <= " #b)
#define REDUCE_WINDOW_ENSURE_GT(a, b) \
REDUCE_WINDOW_ENSURE_IMPL((a) > (b), #a " > " #b)
#define REDUCE_WINDOW_ENSURE_LT(a, b) \
REDUCE_WINDOW_ENSURE_IMPL((a) < (b), #a " < " #b)
#define REDUCE_WINDOW_ENSURE_UNREACHABLE(msg) \
REDUCE_WINDOW_ENSURE_IMPL(false, msg)
template <class T>
struct TensorTypeFor;
#define TENSOR_TYPE_ASSOC(CPP_TYPE, TENSORTYPE_VALUE) \
template <> \
struct TensorTypeFor<CPP_TYPE> { \
static constexpr TensorType value = TENSORTYPE_VALUE; \
};
TENSOR_TYPE_ASSOC(int8_t, TensorType_INT8);
TENSOR_TYPE_ASSOC(int16_t, TensorType_INT16);
TENSOR_TYPE_ASSOC(int32_t, TensorType_INT32);
TENSOR_TYPE_ASSOC(int64_t, TensorType_INT64);
TENSOR_TYPE_ASSOC(uint8_t, TensorType_UINT8);
TENSOR_TYPE_ASSOC(uint16_t, TensorType_UINT16);
TENSOR_TYPE_ASSOC(uint32_t, TensorType_UINT32);
TENSOR_TYPE_ASSOC(uint64_t, TensorType_UINT64);
TENSOR_TYPE_ASSOC(float, TensorType_FLOAT32);
static_assert(sizeof(float) == 4, "float type is expected to be 32 bit long");
TENSOR_TYPE_ASSOC(double, TensorType_FLOAT64);
static_assert(sizeof(double) == 8, "double type is expected to be 64 bit long");
enum class BodyFunction {
kUnset,
kUnsupported,
kAdd,
kMul,
kMax,
kMin,
kAll,
kAny
};
std::ostream& operator<<(std::ostream& os, const BodyFunction& f) {
switch (f) {
case BodyFunction::kUnset:
return os << "unset";
case BodyFunction::kUnsupported:
return os << "unsupported";
case BodyFunction::kAdd:
return os << "add";
case BodyFunction::kMul:
return os << "mul";
case BodyFunction::kMax:
return os << "max";
case BodyFunction::kMin:
return os << "min";
case BodyFunction::kAll:
return os << "all";
case BodyFunction::kAny:
return os << "any";
}
return os;
}
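// Test harness for the StableHLO op. Build() encodes the reduction body as
// subgraph 1 (the options hard-code kBodySubGraphIndex, and the index
// returned by AddSubgraphs is checked against it) before allocating and
// delegating.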
template <class T>
class ReduceWindowOpModel : public SingleOpModel {
static constexpr TensorType kTensorType = TensorTypeFor<T>::value;
public:
void SetInput(absl::Span<const int64_t> shape) {
input_shape_.assign(shape.begin(), shape.end());
input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_iota(input_data_, 1);
}
void SetInput(absl::Span<const int64_t> shape, absl::Span<const T> data) {
input_shape_.assign(shape.begin(), shape.end());
input_data_.assign(data.begin(), data.end());
}
void SetInput(absl::Span<const int64_t> shape, absl::BitGenRef bitgen, T min,
T max) {
input_shape_.assign(shape.begin(), shape.end());
input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_generate(input_data_, [&] {
return absl::Uniform(absl::IntervalClosed, bitgen, min, max);
});
}
void SetWindowDimensions(absl::Span<const int64_t> dimensions) {
window_dimensions_.assign(dimensions.begin(), dimensions.end());
}
void SetWindowStrides(absl::Span<const int64_t> strides) {
window_strides_.assign(strides.begin(), strides.end());
}
void SetBaseDilations(absl::Span<const int64_t> dilations) {
base_dilations_.assign(dilations.begin(), dilations.end());
}
void SetWindowDilations(absl::Span<const int64_t> dilations) {
window_dilations_.assign(dilations.begin(), dilations.end());
}
void SetPadding(absl::Span<const int64_t> padding) {
padding_.assign(padding.begin(), padding.end());
}
void SetInitValue(const T& val) { init_value_ = val; }
void SetBody(const BodyFunction func) { body_function_ = func; }
TfLiteStatus Build() {
constexpr int kBodySubGraphIndex = 1;
REDUCE_WINDOW_ENSURE(!input_shape_.empty());
REDUCE_WINDOW_ENSURE_EQ(window_dimensions_.size(), input_shape_.size());
REDUCE_WINDOW_ENSURE_EQ(window_strides_.size(), input_shape_.size());
REDUCE_WINDOW_ENSURE_EQ(base_dilations_.size(), input_shape_.size());
REDUCE_WINDOW_ENSURE_EQ(window_dilations_.size(), input_shape_.size());
REDUCE_WINDOW_ENSURE_EQ(padding_.size(), 2 * input_shape_.size());
REDUCE_WINDOW_ENSURE_NE(body_function_, BodyFunction::kUnset);
REDUCE_WINDOW_ENSURE_NE(body_function_, BodyFunction::kUnsupported);
input_tensor_id_ =
AddInput({kTensorType,
std::vector<int>(input_shape_.begin(), input_shape_.end())});
init_value_tensor_id_ = AddConstInput(kTensorType, {init_value_}, {1});
output_tensor_id_ = AddOutput(kTensorType);
SetBuiltinOp(BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_, builder_.CreateVector(window_dimensions_),
builder_.CreateVector(window_strides_),
builder_.CreateVector(base_dilations_),
builder_.CreateVector(window_dilations_),
builder_.CreateVector(padding_), kBodySubGraphIndex)
.Union());
BuildInterpreter(
{std::vector<int>(input_shape_.begin(),
input_shape_.end())},
-1, false,
true, false,
false);
int body_subgraph_index;
AddSubgraphs(1, &body_subgraph_index);
REDUCE_WINDOW_ENSURE_EQ(body_subgraph_index, kBodySubGraphIndex);
switch (body_function_) {
case BodyFunction::kAdd:
subgraph_builder_.BuildAddSubgraph(
interpreter_->subgraph(body_subgraph_index));
break;
case BodyFunction::kMul:
subgraph_builder_.BuildMulSubgraph(
interpreter_->subgraph(body_subgraph_index));
break;
case BodyFunction::kMax:
subgraph_builder_.BuildMaximumSubgraph(
interpreter_->subgraph(body_subgraph_index));
break;
case BodyFunction::kMin:
subgraph_builder_.BuildMinimumSubgraph(
interpreter_->subgraph(body_subgraph_index));
break;
case BodyFunction::kAll:
subgraph_builder_.BuildLogicalAndSubgraph(
interpreter_->subgraph(body_subgraph_index));
break;
case BodyFunction::kAny:
subgraph_builder_.BuildLogicalOrSubgraph(
interpreter_->subgraph(body_subgraph_index));
break;
default:
REDUCE_WINDOW_ENSURE_UNREACHABLE("Unhandled body function enum value.");
}
AllocateAndDelegate(true);
PopulateTensor(input_tensor_id_, input_data_);
return kTfLiteOk;
}
TfLiteStatus BuildAndInvoke() {
REDUCE_WINDOW_ENSURE_OK(Build());
return Invoke();
}
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_tensor_id_),
GetTensorSize(output_tensor_id_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape =
*(interpreter_->tensor(output_tensor_id_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
const std::vector<T>& GetInput() const { return input_data_; }
const std::vector<int64_t>& GetInputShape() const { return input_shape_; }
const std::vector<int64_t>& GetWindowDimensions() const {
return window_dimensions_;
}
const std::vector<int64_t>& GetWindowStrides() const {
return window_strides_;
}
const std::vector<int64_t>& GetBaseDilations() const {
return base_dilations_;
}
const std::vector<int64_t>& GetWindowDilations() const {
return window_dilations_;
}
const std::vector<int64_t>& GetPadding() const { return padding_; }
const T& GetInitValue() const { return init_value_; }
const BodyFunction& GetBodyFunction() const { return body_function_; }
friend std::ostream& operator<<(std::ostream& os,
const ReduceWindowOpModel& model) {
using Adapt = ReduceWindowOpModel::VectorOutputAdapter;
os << "input dimensions: {" << Adapt{model.GetInputShape()} << "}\n";
os << " base dilations: {" << Adapt{model.GetBaseDilations()} << "}\n";
os << " padding: {" << Adapt{model.GetPadding()} << "}\n";
os << " window dimensions: {" << Adapt{model.GetWindowDimensions()}
<< "}\n";
os << " window dilations: {" << Adapt{model.GetWindowDilations()} << "}\n";
os << " window strides: {" << Adapt{model.GetWindowStrides()} << "}\n";
os << " init value: " << +model.GetInitValue() << "\n";
os << " body function: " << model.GetBodyFunction() << "\n";
return os;
}
protected:
struct VectorOutputAdapter {
const std::vector<int64_t>& data;
friend std::ostream& operator<<(std::ostream& os,
const VectorOutputAdapter& vec) {
if (!vec.data.empty()) {
os << +vec.data[0];
for (size_t i = 1; i < vec.data.size(); ++i) {
os << ", " << +vec.data[i];
}
}
return os;
}
};
int input_tensor_id_ = -1;
int init_value_tensor_id_ = -1;
int output_tensor_id_ = -1;
std::vector<T> input_data_;
T init_value_;
std::vector<int64_t> input_shape_;
std::vector<int64_t> window_dimensions_;
std::vector<int64_t> window_strides_;
std::vector<int64_t> base_dilations_;
std::vector<int64_t> window_dilations_;
std::vector<int64_t> padding_;
BodyFunction body_function_{};
subgraph_test_util::SubgraphBuilder subgraph_builder_;
};
template <class StorageType>
class StablehloReduceWindowTest : public testing::Test {};
using TestList =
testing::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, float, double>;
TYPED_TEST_SUITE(StablehloReduceWindowTest, TestList);
TYPED_TEST(StablehloReduceWindowTest, Identity) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3));
EXPECT_THAT(model.GetOutputData(), ElementsAre(1, 2, 3, 4, 5, 6, 7, 8, 9));
}
TYPED_TEST(StablehloReduceWindowTest, Dilate) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({2, 2});
model.SetPadding({0, 0, 0, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(5, 5));
EXPECT_THAT(model.GetOutputData(),
ElementsAreArray({1, 0, 2, 0, 3, 0, 0, 0, 0, 0, 4, 0, 5,
0, 6, 0, 0, 0, 0, 0, 7, 0, 8, 0, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadTop) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({1, 0, 0, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3));
EXPECT_THAT(model.GetOutputData(),
ElementsAreArray({0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadBottom) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 1, 0, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3));
EXPECT_THAT(model.GetOutputData(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadLeft) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 1, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 4));
EXPECT_THAT(model.GetOutputData(),
ElementsAreArray({0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadRight) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 1});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 4));
EXPECT_THAT(model.GetOutputData(),
ElementsAreArray({1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadAll) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({1, 1, 1, 1});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(5, 5));
EXPECT_THAT(model.GetOutputData(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5,
6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropTop) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({-1, 0, 0, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray({4, 5, 6, 7, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropBottom) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, -1, 0, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropLeft) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, -1, 0});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray({2, 3, 5, 6, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropRight) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, -1});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray({1, 2, 4, 5, 7, 8}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropAll) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({-1, -1, -1, -1});
model.SetWindowDimensions({1, 1});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(model.GetOutputData(), ElementsAre(5));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowFullWindow) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetWindowDimensions({3, 3});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
model.SetBody(BodyFunction::kAdd);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(model.GetOutputData(), ElementsAre(45));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowNoDilation) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetBody(BodyFunction::kAdd);
model.SetWindowDimensions({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(model.GetOutputData(), ElementsAre(12, 16, 24, 28));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowFullWindowWithDilation) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({3, 3});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetBody(BodyFunction::kAdd);
model.SetWindowDimensions({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({2, 2});
model.SetInitValue(0);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(model.GetOutputData(), ElementsAre(20));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowWithDilation) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({4, 4});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetBody(BodyFunction::kAdd);
model.SetWindowDimensions({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({2, 2});
model.SetInitValue(0);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(model.GetOutputData(), ElementsAre(24, 28, 40, 44));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowWithStrides) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({4, 4});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetBody(BodyFunction::kAdd);
model.SetWindowDimensions({2, 2});
model.SetWindowStrides({2, 2});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(model.GetOutputData(), ElementsAre(14, 22, 46, 54));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowWithDilationAndStrides) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({5, 5});
model.SetBaseDilations({1, 1});
model.SetPadding({0, 0, 0, 0});
model.SetBody(BodyFunction::kAdd);
model.SetWindowDimensions({2, 2});
model.SetWindowStrides({2, 2});
model.SetWindowDilations({2, 2});
model.SetInitValue(2);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(model.GetOutputData(), ElementsAre(30, 38, 70, 78));
}
TYPED_TEST(StablehloReduceWindowTest,
ReduceWindowOutputShapeRoundingIsCorrect) {
ReduceWindowOpModel<TypeParam> model;
model.SetInput({1, 64, 114, 114});
model.SetBaseDilations({1, 1, 1, 1});
model.SetPadding({0, 0, 0, 0, 0, 0, 0, 0});
model.SetBody(BodyFunction::kAdd);
model.SetWindowDimensions({1, 1, 3, 3});
model.SetWindowStrides({1, 1, 2, 2});
model.SetWindowDilations({1, 1, 1, 1});
model.SetInitValue(2);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 64, 56, 56));
}
template <class T>
std::vector<T> RandomVector(absl::BitGen& bitgen, size_t size, T min, T max) {
std::vector<T> vec(size);
for (T& v : vec) {
v = absl::Uniform(absl::IntervalClosed, bitgen, min, max);
}
return vec;
}
struct Body {
static Body GetRandomSupported(absl::BitGen& bitgen, bool allow_mul) {
Body b;
b = Body{static_cast<BodyFunction>(absl::Uniform<int>(
absl::IntervalClosed, bitgen, static_cast<int>(BodyFunction::kAdd),
static_cast<int>(BodyFunction::kAny)))};
if (!allow_mul && b.func == BodyFunction::kMul) {
b.func = BodyFunction::kAdd;
}
return b;
}
template <class T>
T operator()(const T& a, const T& b) const noexcept {
switch (func) {
case BodyFunction::kUnset:
case BodyFunction::kUnsupported:
return -1;
case BodyFunction::kAdd:
return a + b;
case BodyFunction::kMul:
return a * b;
case BodyFunction::kMin:
return a <= b ? a : b;
case BodyFunction::kMax:
return a >= b ? a : b;
case BodyFunction::kAll:
return a && b;
case BodyFunction::kAny:
return a || b;
}
}
template <class T>
T init_value() const noexcept {
switch (func) {
case BodyFunction::kUnset:
case BodyFunction::kUnsupported:
return -1;
case BodyFunction::kAdd:
return 0;
case BodyFunction::kMul:
return 1;
case BodyFunction::kMin:
return std::numeric_limits<T>::max();
case BodyFunction::kMax:
return std::numeric_limits<T>::lowest();
case BodyFunction::kAll:
return true;
case BodyFunction::kAny:
return false;
}
}
BodyFunction func;
};
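// Randomized differential test: random shapes and attributes run through the
// kernel and are checked against the reference implementation from
// stablehlo_reduce_window_test_util.h. Draws whose dilated-and-padded shape
// would be empty are rejected and the iteration is retried.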
TYPED_TEST(StablehloReduceWindowTest, FuzzyTest) {
absl::BitGen bitgen;
for (size_t iteration = 0; iteration < 1000; ++iteration) {
const int rank = absl::Uniform(absl::IntervalClosed, bitgen, 1, 3);
ReduceWindowOpModel<TypeParam> model;
Body body = Body::GetRandomSupported(
bitgen, std::is_floating_point<TypeParam>::value);
model.SetInput(
RandomVector<int64_t>(bitgen, rank, 1, 10),
bitgen, -5, 5);
model.SetBaseDilations(
RandomVector<int64_t>(bitgen, rank, 1, 3));
model.SetPadding(
RandomVector<int64_t>(bitgen, 2 * rank, -5, 5));
model.SetWindowDimensions(
RandomVector<int64_t>(bitgen, rank, 1, 3));
model.SetWindowStrides(
RandomVector<int64_t>(bitgen, rank, 1, 3));
model.SetWindowDilations(
RandomVector<int64_t>(bitgen, rank, 1, 3));
model.SetInitValue(body.init_value<TypeParam>());
model.SetBody(body.func);
const std::vector<int64_t> padded_shape = reference::PadCropShape(
reference::DilateShape(model.GetInputShape(), model.GetBaseDilations()),
model.GetPadding());
if (absl::c_any_of(padded_shape, [](int64_t d) { return d <= 0; })) {
iteration = iteration > 1 ? iteration - 1 : 0;
continue;
}
const reference::Tensor<TypeParam> expected = reference::ReduceWindow(
reference::Tensor<TypeParam>{model.GetInputShape(),
model.GetInput()},
model.GetBaseDilations(), model.GetPadding(), model.GetInitValue(),
model.GetWindowDimensions(), model.GetWindowDilations(),
model.GetWindowStrides(), body);
ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape))
<< model;
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data))
<< model;
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_reduce_window.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_reduce_window_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5aa9fdf1-bc85-46d2-9996-2eebe164027e | cpp | tensorflow/tensorflow | broadcast_to | tensorflow/lite/kernels/broadcast_to.cc | tensorflow/lite/kernels/broadcast_to_test.cc | #include "tensorflow/lite/kernels/internal/reference/broadcast_to.h"
#include <string.h>
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace broadcastto {
constexpr int kInputTensor = 0;
constexpr int kShapeTensor = 1;
constexpr int kOutputTensor = 0;
constexpr int kMaxDims = 8;
struct BroadcastToContext {
BroadcastToContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, kInputTensor);
shape = GetInput(context, node, kShapeTensor);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* input;
const TfLiteTensor* shape;
TfLiteTensor* output;
};
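// Validates the requested shape against numpy-style broadcasting: dimensions
// are right-aligned and each input dimension must be 1 or equal the matching
// output dimension. E.g. {1, 3, 1, 2} broadcasts to {3, 3, 2, 2}, while
// {2, 4, 1, 2} cannot broadcast to {2, 4, 1, 3}.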
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
BroadcastToContext* op_context) {
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->shape), 1);
int input_num_dims = NumDimensions(op_context->input);
int output_num_dims = SizeOfDimension(op_context->shape, 0);
TF_LITE_ENSURE_MSG(context, input_num_dims <= output_num_dims,
"Output shape must be broadcastable from input shape.");
TF_LITE_ENSURE_MSG(context, output_num_dims <= kMaxDims,
"BroadcastTo only supports 1-8D tensor.");
auto get_shape_data = [op_context](int i) -> int32_t {
if (op_context->shape->type == kTfLiteInt32) {
return GetTensorData<int32_t>(op_context->shape)[i];
} else {
return GetTensorData<int64_t>(op_context->shape)[i];
}
};
int extending_dims = output_num_dims - input_num_dims;
for (int idx = 0; idx < input_num_dims; ++idx) {
TF_LITE_ENSURE_MSG(context,
(SizeOfDimension(op_context->input, idx) == 1 ||
SizeOfDimension(op_context->input, idx) ==
get_shape_data(extending_dims + idx)),
"Output shape must be broadcastable from input shape.");
}
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_num_dims);
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
scoped_output_shape(output_shape, TfLiteIntArrayFree);
for (int idx = 0; idx < output_num_dims; ++idx) {
output_shape->data[idx] = get_shape_data(idx);
}
return context->ResizeTensor(context, op_context->output,
scoped_output_shape.release());
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_MSG(context,
(NumDimensions(GetInput(context, node, 0)) <= kMaxDims),
"BroadcastTo only supports 1-8D tensor.");
BroadcastToContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.shape->type == kTfLiteInt32 ||
op_context.shape->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
TF_LITE_ENSURE(context, op_context.input->type != kTfLiteString);
if (IsConstantOrPersistentTensor(op_context.shape)) {
return ResizeOutputTensor(context, &op_context);
}
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BroadcastToContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
reference_ops::BroadcastTo<kMaxDims>(
GetTensorShape(op_context.input), op_context.input->data.raw,
GetTensorShape(op_context.output), op_context.output->data.raw,
op_context.input->type);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_TO() {
static TfLiteRegistration r = {nullptr, nullptr, broadcastto::Prepare,
broadcastto::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
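// Two constructors mirror the kernel's two Prepare paths: a runtime shape
// tensor leaves the output dynamic and resized during Eval, while a constant
// shape input lets Prepare resize the output statically.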
template <class InputType, class ShapeType = int32_t>
class BroadcastToOpModel : public SingleOpModel {
public:
BroadcastToOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> shape_shape) {
input_ = AddInput({GetTensorType<InputType>(), input_shape});
shape_ = AddInput({GetTensorType<ShapeType>(), shape_shape});
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_TO,
BuiltinOptions_BroadcastToOptions,
CreateBroadcastToOptions(builder_).Union());
BuildInterpreter({input_shape, shape_shape});
}
BroadcastToOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> shape_shape,
std::initializer_list<ShapeType> shape_values) {
input_ = AddInput({GetTensorType<InputType>(), input_shape});
shape_ =
AddConstInput(GetTensorType<ShapeType>(), shape_values, shape_shape);
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_TO,
BuiltinOptions_BroadcastToOptions,
CreateBroadcastToOptions(builder_).Union());
BuildInterpreter({input_shape, shape_shape});
}
void SetInput(std::initializer_list<InputType> data) {
PopulateTensor(input_, data);
}
void SetShape(std::initializer_list<ShapeType> data) {
PopulateTensor(shape_, data);
}
std::vector<InputType> GetOutput() {
return ExtractVector<InputType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int shape_;
int output_;
};
template <typename T>
class BroadcastToOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, uint8_t, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(BroadcastToOpTest, DataTypes);
#if GTEST_HAS_DEATH_TEST
TYPED_TEST(BroadcastToOpTest, ShapeMustBe1D) {
EXPECT_DEATH(
BroadcastToOpModel<TypeParam>({2, 3, 4, 4}, {2, 2}, {2, 3, 4, 4}), "");
BroadcastToOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2});
m.SetShape({2, 3, 4, 4});
EXPECT_THAT(m.Invoke(), kTfLiteError);
}
TYPED_TEST(BroadcastToOpTest, TooManyDimensions) {
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9},
{2, 2, 3, 4, 5, 6, 7, 8, 9}),
"BroadcastTo only supports 1-8D tensor.");
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9}),
"BroadcastTo only supports 1-8D tensor.");
}
TYPED_TEST(BroadcastToOpTest, MismatchDimension) {
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({2, 4, 1, 2}, {4}, {2, 4, 1, 3}),
"Output shape must be broadcastable from input shape.");
EXPECT_DEATH(
BroadcastToOpModel<TypeParam>({2, 4, 1, 2, 3}, {4}, {2, 4, 1, 2}),
"Output shape must be broadcastable from input shape.");
BroadcastToOpModel<TypeParam> m1({2, 4, 1, 2}, {4});
m1.SetShape({2, 3, 4, 4});
EXPECT_THAT(m1.Invoke(), kTfLiteError);
BroadcastToOpModel<TypeParam> m2({2, 4, 1, 2}, {5});
m2.SetShape({1, 2, 3, 4, 4});
EXPECT_THAT(m2.Invoke(), kTfLiteError);
}
#endif
TYPED_TEST(BroadcastToOpTest, BroadcastTo1DConstTest) {
BroadcastToOpModel<TypeParam> m({1}, {1}, {4});
m.SetInput({3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 3, 3}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo4DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 2}, {4}, {1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 4, 3, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo8DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 1, 1, 1, 2, 1}, {8},
{1, 1, 1, 1, 1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo1DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1}, {1});
m.SetInput({3});
m.SetShape({4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 3, 3}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo4DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 2}, {4});
m.SetInput({3, 4});
m.SetShape({1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 4, 3, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo8DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 1, 1, 1, 2, 1}, {8});
m.SetInput({3, 4});
m.SetShape({1, 1, 1, 1, 1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast4DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2}, {4}, {3, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast4DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2}, {4});
m.SetInput({1, 2, 3, 4, 5, 6});
m.SetShape({3, 3, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast6DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 2, 1, 3, 1, 2}, {6}, {2, 2, 1, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1, 3, 2, 2}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12,
1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast6DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 2, 1, 3, 1, 2}, {6});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
m.SetShape({2, 2, 1, 3, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1, 3, 2, 2}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12,
1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast8DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2, 1, 4, 1, 1}, {8},
{2, 3, 1, 2, 2, 4, 1, 1});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 1, 2, 2, 4, 1, 1}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6,
7, 8, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16,
13, 14, 15, 16, 17, 18, 19, 20, 17, 18, 19, 20, 21, 22,
23, 24, 21, 22, 23, 24, 1, 2, 3, 4, 1, 2, 3, 4,
5, 6, 7, 8, 5, 6, 7, 8, 9, 10, 11, 12, 9, 10,
11, 12, 13, 14, 15, 16, 13, 14, 15, 16, 17, 18, 19, 20,
17, 18, 19, 20, 21, 22, 23, 24, 21, 22, 23, 24}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast8DDynamicTest) {
BroadcastToOpModel<TypeParam> m({2, 1, 1, 2, 1, 4, 1, 1}, {8});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetShape({2, 3, 2, 2, 2, 4, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 2, 2, 2, 4, 1, 1}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16}));
}
TYPED_TEST(BroadcastToOpTest, ExtendingShape4DConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {4}, {3, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, NoBroadcastingConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {3}, {3, 1, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, NoBroadcasting8DConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 1, 1, 1, 1, 1, 2}, {8},
{3, 1, 1, 1, 1, 1, 1, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 1, 1, 1, 1, 1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, Int64ShapeConstTest) {
BroadcastToOpModel<TypeParam, int64_t> m({1, 1, 1, 1, 1, 1, 2, 1}, {8},
{1, 1, 1, 1, 1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, Int64ShapeDynamicTest) {
BroadcastToOpModel<TypeParam, int64_t> m({1, 1, 1, 1, 1, 1, 2, 1}, {8});
m.SetInput({3, 4});
m.SetShape({1, 1, 1, 1, 1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastToEmptyShapeTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {3}, {3, 0, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 0, 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_to.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_to_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6907e617-e772-409b-9aa1-787ccb9693f8 | cpp | tensorflow/tensorflow | cumsum | tensorflow/lite/delegates/gpu/common/tasks/cumsum.cc | tensorflow/lite/delegates/gpu/cl/kernels/cumsum_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/cumsum.h"
#include <map>
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
namespace tflite {
namespace gpu {
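// Emits the GPU kernel source. One work item is launched per element of every
// dimension except the accumulation axis; that axis is walked in a loop that
// keeps a running sum. For Axis::CHANNELS the sum is additionally carried
// across the four lanes of each 4-channel slice (res.w feeds the next read).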
void Cumsum::GetCumsumCode(const OperationDef& op_def) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::map<Axis, std::string> task_sizes = {
{Axis::WIDTH, "args.src_tensor.Width()"},
{Axis::HEIGHT, "args.src_tensor.Height()"},
{Axis::DEPTH, "args.src_tensor.Depth()"},
{Axis::CHANNELS, "args.src_tensor.Slices()"},
{Axis::BATCH, "args.src_tensor.Batch()"},
};
std::string limit = task_sizes[axis_];
task_sizes[axis_] = "1";
std::map<Axis, std::string> index_name = {
{Axis::WIDTH, "X"}, {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "Z"},
{Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
};
std::string indexes = "X, Y";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.dst_tensors[0].HasAxis(Axis::DEPTH)) {
indexes += ", Z";
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % " + task_sizes[Axis::HEIGHT] + ";\n";
c += " int D = linear_id / " + task_sizes[Axis::HEIGHT] + ";\n";
c += " if (D >= " + task_sizes[Axis::DEPTH] + ") return;\n";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= " + task_sizes[Axis::HEIGHT] + ") return;\n";
}
indexes += ", S";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
indexes += ", B";
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / " + task_sizes[Axis::BATCH] + ";\n";
c += " int B = linear_id % " + task_sizes[Axis::BATCH] + ";\n";
c += " if (X >= " + task_sizes[Axis::WIDTH] + ") return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= " + task_sizes[Axis::WIDTH] + ") return;\n";
}
c += " int S = GLOBAL_ID_2;\n";
c += " if (S >= " + task_sizes[Axis::CHANNELS] + ") return;\n";
c += " args.src_tensor::type res = args.src_tensor::zero_value;\n";
c += " for (; " + index_name[axis_] + " < " + limit + "; " +
index_name[axis_] + "++) {\n";
c += " args.src_tensor::type curr = args.src_tensor.Read(" + indexes +
");\n";
if (axis_ == Axis::CHANNELS) {
c += " res.x = res.w + curr.x;\n";
c += " res.y = res.x + curr.y;\n";
c += " res.z = res.y + curr.z;\n";
c += " res.w = res.z + curr.w;\n";
} else {
c += " res += curr;\n";
}
c += " args.dst_tensor.Write(res, " + indexes + ");\n";
c += " }\n";
c += "}\n";
code_ = c;
}
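// The accumulation axis is collapsed to 1 in the grid because a single work
// item iterates over it inside the kernel.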
int3 Cumsum::GetGridSize() const {
const int width = axis_ == Axis::WIDTH ? 1 : src_[0]->Width();
const int height = axis_ == Axis::HEIGHT ? 1 : src_[0]->Height();
const int depth = axis_ == Axis::DEPTH ? 1 : src_[0]->Depth();
const int batch = axis_ == Axis::BATCH ? 1 : src_[0]->Batch();
const int slices = axis_ == Axis::CHANNELS ? 1 : src_[0]->Slices();
const int grid_x = width * batch;
const int grid_y = height * depth;
const int grid_z = slices;
return int3(grid_x, grid_y, grid_z);
}
Cumsum::Cumsum(Cumsum&& operation)
: GPUOperation(std::move(operation)), axis_(operation.axis_) {}
Cumsum& Cumsum::operator=(Cumsum&& operation) {
if (this != &operation) {
axis_ = operation.axis_;
GPUOperation::operator=(std::move(operation));
}
return *this;
}
Cumsum CreateCumsum(const OperationDef& definition,
const CumsumAttributes& attr) {
Cumsum op(definition, attr.axis);
op.GetCumsumCode(definition);
return op;
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/cumsum_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, CumsumHWCTest) {
absl::Status status = CumsumHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CumsumBHWCTest) {
absl::Status status = CumsumBHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/cumsum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/cumsum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5e55870b-179b-4c43-afcc-e62f77d5156c | cpp | tensorflow/tensorflow | embedding_lookup_sparse | tensorflow/lite/kernels/embedding_lookup_sparse.cc | tensorflow/lite/kernels/embedding_lookup_sparse_test.cc | #include <stdint.h>
#include <algorithm>
#include <cmath>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 5);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &ids));
TF_LITE_ENSURE_EQ(context, NumDimensions(ids), 1);
TF_LITE_ENSURE_EQ(context, ids->type, kTfLiteInt32);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &indices));
TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 2);
TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32);
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &shape));
TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1);
TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32);
const TfLiteTensor* weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights));
TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1);
TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
SizeOfDimension(ids, 0));
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
SizeOfDimension(weights, 0));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
output->allocation_type = kTfLiteDynamic;
return kTfLiteOk;
}
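// Applies the combiner to one finished output bucket: MEAN divides the
// accumulated sum by the total weight, SQRTN by sqrt(sum of squared weights);
// SUM (and an empty bucket) leaves the values untouched.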
void FinalizeAggregation(TfLiteCombinerType combiner, int num_elements,
float current_total_weight,
float current_squares_weight, int embedding_size,
float* output) {
if (combiner != kTfLiteCombinerTypeSum && num_elements > 0) {
float multiplier = 1.0;
switch (combiner) {
case kTfLiteCombinerTypeMean:
multiplier = current_total_weight;
break;
case kTfLiteCombinerTypeSqrtn:
multiplier = std::sqrt(current_squares_weight);
break;
default:
break;
}
for (int k = 0; k < embedding_size; k++) {
output[k] /= multiplier;
}
}
}
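// Gathers weighted embedding rows into dense output buckets. The bucket for
// each lookup is computed from its sparse index and the dense shape; a bucket
// is finalized whenever the output offset changes, and once more after the
// loop for the last bucket.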
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteEmbeddingLookupSparseParams*>(node->builtin_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &ids));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &indices));
const TfLiteTensor* dense_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &dense_shape));
const TfLiteTensor* weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value));
const size_t values_size = NumElements(value);
const int lookup_rank = SizeOfDimension(indices, 1);
const int embedding_rank = NumDimensions(value);
const int num_lookups = SizeOfDimension(ids, 0);
const int num_rows = SizeOfDimension(value, 0);
const int output_rank = (lookup_rank - 1) + (embedding_rank - 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(dense_shape, 0), lookup_rank);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
TF_LITE_ENSURE(context, output_shape != nullptr);
int k = 0;
size_t embedding_size = 1;
size_t lookup_size = 1;
for (int i = 0; i < lookup_rank - 1; i++, k++) {
const size_t dim = dense_shape->data.i32[i];
TF_LITE_ENSURE_MSG(
context,
MultiplyAndCheckOverflow(lookup_size, dim, &lookup_size) == kTfLiteOk,
"Lookup size overflowed.");
output_shape->data[k] = dim;
}
for (int i = 1; i < embedding_rank; i++, k++) {
const size_t dim = SizeOfDimension(value, i);
TF_LITE_ENSURE_MSG(context,
MultiplyAndCheckOverflow(embedding_size, dim,
&embedding_size) == kTfLiteOk,
"Embedding size overflowed.");
output_shape->data[k] = dim;
}
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
const size_t output_size = lookup_size * embedding_size;
  TF_LITE_ENSURE_OK(
      context, TfLiteTensorRealloc(output_size * sizeof(float), output));
float* output_ptr = GetTensorData<float>(output);
const float* weights_ptr = GetTensorData<float>(weights);
const float* value_ptr = GetTensorData<float>(value);
TF_LITE_ENSURE(context, output_ptr != nullptr);
std::fill_n(output_ptr, output_size, 0.0f);
int current_output_offset = 0;
float current_total_weight = 0.0;
float current_squares_weight = 0.0;
int num_elements = 0;
for (int i = 0; i < num_lookups; i++) {
int idx = ids->data.i32[i];
if (idx >= num_rows || idx < 0) {
TF_LITE_KERNEL_LOG(context,
"Embedding Lookup Sparse: index out of bounds. "
"Got %d, and bounds are [0, %d]",
idx, num_rows - 1);
return kTfLiteError;
}
const int example_indices_offset = i * lookup_rank;
int output_bucket = 0;
int stride = 1;
for (int k = (lookup_rank - 1) - 1; k >= 0; k--) {
output_bucket += indices->data.i32[example_indices_offset + k] * stride;
stride *= dense_shape->data.i32[k];
}
const int output_offset = output_bucket * embedding_size;
if (output_offset != current_output_offset) {
FinalizeAggregation(params->combiner, num_elements, current_total_weight,
current_squares_weight, embedding_size,
&output_ptr[current_output_offset]);
num_elements = 0;
current_total_weight = 0.0;
current_squares_weight = 0.0;
current_output_offset = output_offset;
}
++num_elements;
const int example_embedding_offset = idx * embedding_size;
const float w = weights_ptr[i];
current_squares_weight += w * w;
current_total_weight += w;
for (int k = 0; k < embedding_size; k++) {
if (current_output_offset + k < 0) continue;
if (current_output_offset + k >= output_size) continue;
if (example_embedding_offset + k < 0) continue;
if (example_embedding_offset + k >= values_size) continue;
output_ptr[current_output_offset + k] +=
value_ptr[example_embedding_offset + k] * w;
}
}
FinalizeAggregation(params->combiner, num_elements, current_total_weight,
current_squares_weight, embedding_size,
&GetTensorData<float>(output)[current_output_offset]);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EMBEDDING_LOOKUP_SPARSE() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <cmath>
#include <functional>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class EmbeddingLookupSparseOpModel : public SingleOpModel {
public:
EmbeddingLookupSparseOpModel(CombinerType type,
std::initializer_list<int> lookup_shape,
std::initializer_list<int> indices_shape,
std::initializer_list<int> dense_shape_shape,
std::initializer_list<int> value_shape) {
lookup_ = AddInput(TensorType_INT32);
indices_ = AddInput(TensorType_INT32);
dense_shape_ = AddInput(TensorType_INT32);
weights_ = AddInput(TensorType_FLOAT32);
value_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
BuiltinOptions_EmbeddingLookupSparseOptions,
CreateEmbeddingLookupSparseOptions(builder_, type).Union());
BuildInterpreter({lookup_shape, indices_shape, dense_shape_shape,
lookup_shape, value_shape});
}
void SetInput(std::initializer_list<int> lookup_data,
std::initializer_list<int> indices_data,
std::initializer_list<int> dense_shape_data,
std::initializer_list<float> weights_data) {
PopulateTensor(lookup_, lookup_data);
PopulateTensor(indices_, indices_data);
PopulateTensor(dense_shape_, dense_shape_data);
PopulateTensor(weights_, weights_data);
}
void Set3DWeightMatrix(const std::function<float(int, int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
int columns = tensor->dims->data[1];
int features = tensor->dims->data[2];
float* tensor_ptr = GetTensorData<float>(tensor);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < columns; j++) {
for (int k = 0; k < features; k++) {
tensor_ptr[(i * columns + j) * features + k] = function(i, j, k);
}
}
}
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int lookup_;
int weights_;
int indices_;
int dense_shape_;
int value_;
int output_;
};
TEST(EmbeddingLookupSparseOpTest, SimpleTest) {
EmbeddingLookupSparseOpModel m(CombinerType_SUM, {3}, {3, 2}, {2}, {4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 2, 0, 2, 1}, {3, 2}, {1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
6.00, 6.06, 6.60, 6.66, 7.20, 7.26,
})));
}
TEST(EmbeddingLookupSparseOpTest, SimpleTestMean) {
EmbeddingLookupSparseOpModel m(CombinerType_MEAN, {3}, {3, 2}, {2},
{4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 2, 0, 2, 1}, {3, 2}, {1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
})));
}
TEST(EmbeddingLookupSparseOpTest, SimpleTestSqrtn) {
EmbeddingLookupSparseOpModel m(CombinerType_SQRTN, {3}, {3, 2}, {2},
{4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 2, 0, 2, 1}, {3, 2}, {1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
6.00f / std::sqrt(20.0f), 6.06f / std::sqrt(20.0f),
6.60f / std::sqrt(20.0f), 6.66f / std::sqrt(20.0f),
7.20f / std::sqrt(20.0f),
7.26f / std::sqrt(20.0f),
})));
}
TEST(EmbeddingLookupSparseOpTest, Indices3DTest) {
EmbeddingLookupSparseOpModel m(CombinerType_SUM, {3}, {3, 3}, {3}, {4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 0, 2, 0, 0, 2, 0, 1}, {3, 2, 2},
{1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 6.00, 6.06, 6.60,
6.66, 7.20, 7.26, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup_sparse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup_sparse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28040aa5-8181-4e52-8669-91ca1a3372ac | cpp | tensorflow/tensorflow | skip_gram | tensorflow/lite/kernels/skip_gram.cc | tensorflow/lite/kernels/skip_gram_test.cc | #include <ctype.h>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, input_tensor->type, kTfLiteString);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, output_tensor->type, kTfLiteString);
return kTfLiteOk;
}
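// An n-gram of `size` words is emitted when it matches ngram_size exactly,
// or any non-empty size up to ngram_size when include_all_ngrams is set.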
bool ShouldIncludeCurrentNgram(const TfLiteSkipGramParams* params, int size) {
if (size <= 0) {
return false;
}
if (params->include_all_ngrams) {
return size <= params->ngram_size;
} else {
return size == params->ngram_size;
}
}
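// The enumeration may extend the current n-gram when slots remain, more words
// are available, and the gap to the previously chosen word stays within
// max_skip_size.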
bool ShouldStepInRecursion(const TfLiteSkipGramParams* params,
const std::vector<int>& stack, int stack_idx,
int num_words) {
if (stack_idx < params->ngram_size && stack[stack_idx] + 1 < num_words) {
if (stack_idx == 0) {
return true;
}
if (stack[stack_idx] - stack[stack_idx - 1] <= params->max_skip_size) {
return true;
}
}
return false;
}
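// Splits the input string on whitespace, then enumerates skip-grams with an
// explicit index stack (depth-first, without recursion), joining the chosen
// words with single spaces into the output string tensor.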
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSkipGramParams*>(node->builtin_data);
std::vector<StringRef> words;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
tflite::StringRef strref = tflite::GetString(input, 0);
  size_t prev_idx = 0;
for (size_t i = 1; i < strref.len; i++) {
if (isspace(*(strref.str + i))) {
if (i > prev_idx && !isspace(*(strref.str + prev_idx))) {
words.push_back({strref.str + prev_idx, i - prev_idx});
}
prev_idx = i + 1;
}
}
if (strref.len > prev_idx) {
words.push_back({strref.str + prev_idx, strref.len - prev_idx});
}
tflite::DynamicBuffer buf;
if (words.size() < params->ngram_size) {
buf.WriteToTensorAsVector(GetOutput(context, node, 0));
return kTfLiteOk;
}
std::vector<int> stack(params->ngram_size, 0);
int stack_idx = 1;
int num_words = words.size();
while (stack_idx >= 0) {
if (ShouldStepInRecursion(params, stack, stack_idx, num_words)) {
stack[stack_idx]++;
stack_idx++;
if (stack_idx < params->ngram_size) {
stack[stack_idx] = stack[stack_idx - 1];
}
} else {
if (ShouldIncludeCurrentNgram(params, stack_idx)) {
std::vector<StringRef> gram(stack_idx);
for (int i = 0; i < stack_idx; i++) {
gram[i] = words[stack[i]];
}
buf.AddJoinedString(gram, ' ');
}
stack_idx--;
}
}
buf.WriteToTensorAsVector(GetOutput(context, node, 0));
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SKIP_GRAM() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
static const char kSentence[] = "The quick\t brown fox\n jumps over\n the lazy dog!";
class SkipGramOp : public SingleOpModel {
public:
SkipGramOp(int ngram_size, int max_skip_size, bool include_all_ngrams) {
input_ = AddInput(TensorType_STRING);
output_ = AddOutput(TensorType_STRING);
SetBuiltinOp(BuiltinOperator_SKIP_GRAM, BuiltinOptions_SkipGramOptions,
CreateSkipGramOptions(builder_, ngram_size, max_skip_size,
include_all_ngrams)
.Union());
BuildInterpreter({{1}});
}
void SetInput(const string& content) {
PopulateStringTensor(input_, {content});
}
std::vector<string> GetOutput() {
std::vector<string> ans;
TfLiteTensor* tensor = interpreter_->tensor(output_);
int num = GetStringCount(tensor);
for (int i = 0; i < num; i++) {
StringRef strref = GetString(tensor, i);
ans.push_back(string(strref.str, strref.len));
}
return ans;
}
private:
int input_;
int output_;
};
TEST(SkipGramTest, TestUnigram) {
SkipGramOp m(1, 0, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), testing::UnorderedElementsAreArray(
{"The", "quick", "brown", "fox", "jumps",
"over", "the", "lazy", "dog!"}));
}
TEST(SkipGramTest, TestBigram) {
SkipGramOp m(2, 0, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick", "quick brown", "brown fox", "fox jumps",
"jumps over", "over the", "the lazy", "lazy dog!"}));
}
TEST(SkipGramTest, TestAllBigram) {
SkipGramOp m(2, 0, true);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{
"The", "quick", "brown", "fox", "jumps", "over", "the",
"lazy", "dog!",
"The quick", "quick brown", "brown fox", "fox jumps",
"jumps over", "over the", "the lazy", "lazy dog!"}));
}
TEST(SkipGramTest, TestAllTrigram) {
SkipGramOp m(3, 0, true);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{
"The", "quick", "brown", "fox", "jumps", "over", "the",
"lazy", "dog!",
"The quick", "quick brown", "brown fox", "fox jumps",
"jumps over", "over the", "the lazy", "lazy dog!",
"The quick brown", "quick brown fox", "brown fox jumps",
"fox jumps over", "jumps over the", "over the lazy",
"the lazy dog!"}));
}
TEST(SkipGramTest, TestSkip1Bigram) {
SkipGramOp m(2, 1, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick", "The brown", "quick brown", "quick fox", "brown fox",
"brown jumps", "fox jumps", "fox over", "jumps over", "jumps the",
"over the", "over lazy", "the lazy", "the dog!", "lazy dog!"}));
}
TEST(SkipGramTest, TestSkip2Bigram) {
SkipGramOp m(2, 2, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick", "The brown", "The fox", "quick brown",
"quick fox", "quick jumps", "brown fox", "brown jumps",
"brown over", "fox jumps", "fox over", "fox the",
"jumps over", "jumps the", "jumps lazy", "over the",
"over lazy", "over dog!", "the lazy", "the dog!",
"lazy dog!"}));
}
TEST(SkipGramTest, TestSkip1Trigram) {
SkipGramOp m(3, 1, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick brown", "The quick fox", "The brown fox",
"The brown jumps", "quick brown fox", "quick brown jumps",
"quick fox jumps", "quick fox over", "brown fox jumps",
"brown fox over", "brown jumps over", "brown jumps the",
"fox jumps over", "fox jumps the", "fox over the",
"fox over lazy", "jumps over the", "jumps over lazy",
"jumps the lazy", "jumps the dog!", "over the lazy",
"over the dog!", "over lazy dog!", "the lazy dog!"}));
}
TEST(SkipGramTest, TestSkip2Trigram) {
SkipGramOp m(3, 2, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick brown", "The quick fox", "The quick jumps",
"The brown fox", "The brown jumps", "The brown over",
"The fox jumps", "The fox over", "The fox the",
"quick brown fox", "quick brown jumps", "quick brown over",
"quick fox jumps", "quick fox over", "quick fox the",
"quick jumps over", "quick jumps the", "quick jumps lazy",
"brown fox jumps", "brown fox over", "brown fox the",
"brown jumps over", "brown jumps the", "brown jumps lazy",
"brown over the", "brown over lazy", "brown over dog!",
"fox jumps over", "fox jumps the", "fox jumps lazy",
"fox over the", "fox over lazy", "fox over dog!",
"fox the lazy", "fox the dog!", "jumps over the",
"jumps over lazy", "jumps over dog!", "jumps the lazy",
"jumps the dog!", "jumps lazy dog!", "over the lazy",
"over the dog!", "over lazy dog!", "the lazy dog!"}));
}
TEST(SkipGramTest, TestAllSkip2Trigram) {
SkipGramOp m(3, 2, true);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
testing::UnorderedElementsAreArray(
{
"The", "quick", "brown", "fox", "jumps", "over", "the", "lazy",
"dog!",
"The quick", "The brown", "The fox", "quick brown", "quick fox",
"quick jumps", "brown fox", "brown jumps", "brown over", "fox jumps",
"fox over", "fox the", "jumps over", "jumps the", "jumps lazy",
"over the", "over lazy", "over dog!", "the lazy", "the dog!",
"lazy dog!",
"The quick brown", "The quick fox", "The quick jumps",
"The brown fox", "The brown jumps", "The brown over",
"The fox jumps", "The fox over", "The fox the", "quick brown fox",
"quick brown jumps", "quick brown over", "quick fox jumps",
"quick fox over", "quick fox the", "quick jumps over",
"quick jumps the", "quick jumps lazy", "brown fox jumps",
"brown fox over", "brown fox the", "brown jumps over",
"brown jumps the", "brown jumps lazy", "brown over the",
"brown over lazy", "brown over dog!", "fox jumps over",
"fox jumps the", "fox jumps lazy", "fox over the", "fox over lazy",
"fox over dog!", "fox the lazy", "fox the dog!", "jumps over the",
"jumps over lazy", "jumps over dog!", "jumps the lazy",
"jumps the dog!", "jumps lazy dog!", "over the lazy",
"over the dog!", "over lazy dog!", "the lazy dog!"}));
}
TEST(SkipGramTest, TestSingleWord) {
SkipGramOp m(1, 1, false);
m.SetInput("Hi");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre("Hi"));
}
TEST(SkipGramTest, TestWordsLessThanGram) {
SkipGramOp m(3, 1, false);
m.SetInput("Hi hi");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), std::vector<string>());
}
TEST(SkipGramTest, TestEmptyInput) {
SkipGramOp m(1, 1, false);
m.SetInput("");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre());
}
TEST(SkipGramTest, TestWhitespaceInput) {
SkipGramOp m(1, 1, false);
m.SetInput(" ");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre());
}
TEST(SkipGramTest, TestInputWithExtraSpace) {
SkipGramOp m(1, 1, false);
m.SetInput(" Hello world ! ");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre("Hello", "world", "!"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/skip_gram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/skip_gram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be17faa3-3c5c-43a2-8fd7-e2e54af789ce | cpp | tensorflow/tensorflow | conv | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv.cc | tensorflow/lite/delegates/hexagon/builders/tests/conv_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv_util.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
using ::llvm::ArrayRef;
bool IsShapeFullyStatic(ArrayRef<int64_t> shape) {
return llvm::all_of(shape, [](int64_t d) { return d >= 0; });
}
bool NonBatchDimsFullyStatic(ArrayRef<int64_t> shape) {
return IsShapeFullyStatic(shape.drop_front());
}
bool AreShapesFullyStatic(const ConvView& data) {
return IsShapeFullyStatic(data.InputShape()) &&
IsShapeFullyStatic(data.KernelShape()) &&
IsShapeFullyStatic(data.OutputShape());
}
bool InputOutputNonBatchDimsFullyStatic(const ConvView& data) {
return NonBatchDimsFullyStatic(data.InputShape()) &&
IsShapeFullyStatic(data.KernelShape()) &&
NonBatchDimsFullyStatic(data.OutputShape());
}
bool IsPaddingSupported(const ConvView& data) {
return llvm::all_of(data.Padding(), [](const DimPadding& p) {
return p.Hi() == 0 && p.Lo() == 0;
});
}
bool IsInputDilationSupported(const ConvView& data) {
return llvm::all_of(data.InputDilations(), [](int64_t v) { return v == 1; });
}
bool IsBatchGroupSupported(const ConvView& data) {
return data.BatchGroupCount() == 1;
}
bool IsWindowReversalSupported(const ConvView& data) {
return llvm::all_of(data.WindowReversal(), [](bool b) { return !b; });
}
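// Returns true when the convolution must stay in MHLO, i.e. none of the TFL
// lowerings below can handle it; used as the dynamic legality predicate.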
bool IsConvLegal(mhlo::ConvolutionOp op) {
const ConvView data(op);
const bool supported_conv_type = IsStandardConv(data) ||
IsDepthwiseConv(data) ||
IsSupportedNonTrivialConv(data);
const bool is_non_supported_trivial_conv =
(!IsSupportedNonTrivialConv(data) &&
(!IsPaddingSupported(data) || !IsInputDilationSupported(data)));
const bool are_shapes_supported =
((IsStandardConv(data) || IsDepthwiseConv(data)) &&
InputOutputNonBatchDimsFullyStatic(data)) ||
AreShapesFullyStatic(data);
return !supported_conv_type || !IsBatchGroupSupported(data) ||
!are_shapes_supported || !IsTFLNativeLayout(data) ||
is_non_supported_trivial_conv || !IsWindowReversalSupported(data);
}
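// TFL convolution ops require an explicit bias operand, so synthesize an
// all-zero constant sized to the output-channel dimension.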
arith::ConstantOp BuildEmptyBias(OpBuilder& b, Location loc,
const ConvView& data) {
auto bias_type = RankedTensorType::get(
{data.OutputLayout().SpecialDim2(data.OutputShape())},
data.ElementType());
auto bias_const_data = b.getZeroAttr(bias_type);
return b.create<arith::ConstantOp>(loc, bias_const_data);
}
class LegalizeConv2D : public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeConv2D::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (IsConvLegal(op) || !IsStandardConv(data) ||
data.InputLayout().Rank() != 4) {
return failure();
}
const auto& kernel_dilations = data.KernelDilations();
auto tfl_h_dilation = rewriter.getI32IntegerAttr(kernel_dilations[0]);
auto tfl_w_dilation = rewriter.getI32IntegerAttr(kernel_dilations[1]);
const auto& window_strides = data.Strides();
auto tfl_h_stride = rewriter.getI32IntegerAttr(window_strides[0]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(window_strides[1]);
auto tfl_padding = rewriter.getStringAttr("VALID");
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
rewriter.replaceOpWithNewOp<TFL::Conv2DOp>(
op, op.getResult().getType(), op.getLhs(), op.getRhs(), bias,
tfl_h_dilation, tfl_w_dilation, tfl_faf_none, tfl_padding, tfl_h_stride,
tfl_w_stride);
return success();
}
class LegalizeConvDepthwise : public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeConvDepthwise::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (IsConvLegal(op) || !IsDepthwiseConv(data)) {
return failure();
}
const auto& kernel_dilations = data.KernelDilations();
auto tfl_h_dilation = rewriter.getI32IntegerAttr(kernel_dilations[0]);
auto tfl_w_dilation = rewriter.getI32IntegerAttr(kernel_dilations[1]);
const auto& window_strides = data.Strides();
auto tfl_h_stride = rewriter.getI32IntegerAttr(window_strides[0]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(window_strides[1]);
auto tfl_padding = rewriter.getStringAttr("VALID");
const int64_t out_channels =
data.OutputLayout().SpecialDim2(data.OutputShape());
const int64_t in_channels = data.InputLayout().SpecialDim2(data.InputShape());
const int32_t depth_multiplier = out_channels / in_channels;
  auto depth_multiplier_attr = rewriter.getI32IntegerAttr(depth_multiplier);
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
rewriter.replaceOpWithNewOp<TFL::DepthwiseConv2DOp>(
op, op.getResult().getType(), op.getLhs(), op.getRhs(), bias,
tfl_h_dilation, tfl_w_dilation, tfl_faf_none, tfl_padding, tfl_h_stride,
      tfl_w_stride, depth_multiplier_attr);
return success();
}
class LegalizeConv3D : public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeConv3D::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (IsConvLegal(op) || !IsStandardConv(data) ||
data.InputLayout().Rank() != 5) {
return failure();
}
const auto& kernel_dilations = data.KernelDilations();
auto tfl_d_dilation = rewriter.getI32IntegerAttr(kernel_dilations[0]);
auto tfl_h_dilation = rewriter.getI32IntegerAttr(kernel_dilations[1]);
auto tfl_w_dilation = rewriter.getI32IntegerAttr(kernel_dilations[2]);
const auto& window_strides = data.Strides();
auto tfl_d_stride = rewriter.getI32IntegerAttr(window_strides[0]);
auto tfl_h_stride = rewriter.getI32IntegerAttr(window_strides[1]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(window_strides[2]);
auto tfl_padding = rewriter.getStringAttr("VALID");
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
rewriter.replaceOpWithNewOp<TFL::Conv3DOp>(
op, op.getResult().getType(), op.getLhs(), op.getRhs(), bias,
tfl_d_dilation, tfl_h_dilation, tfl_w_dilation, tfl_faf_none, tfl_padding,
tfl_d_stride, tfl_h_stride, tfl_w_stride);
return success();
}
class ConvertNonTrivialConvToResizeBilinearOp
: public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult ConvertNonTrivialConvToResizeBilinearOp::matchAndRewrite(
mhlo::ConvolutionOp conv_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(conv_op);
bool align_corners;
if (!MatchWithResizeBilinearOp(data, align_corners)) {
return rewriter.notifyMatchFailure(
conv_op, "op does not match with resize_bilinear op");
}
SmallVector<int32_t, 4> output_shape_i32;
for (int64_t spatial_dim : data.InputLayout().Spatials()) {
output_shape_i32.push_back(
static_cast<int32_t>(data.OutputShape()[spatial_dim]));
}
Value output_sizes_attr = rewriter.create<mlir::arith::ConstantOp>(
conv_op.getLoc(), rewriter.getI32TensorAttr(output_shape_i32));
rewriter.replaceOpWithNewOp<TFL::ResizeBilinearOp>(
conv_op, conv_op.getType(), conv_op.getLhs(), output_sizes_attr,
rewriter.getBoolAttr(align_corners),
rewriter.getBoolAttr(false));
return success();
}
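// Lowers an input-dilated (transposed) convolution to TFL::TransposeConvOp.
// The kernel's spatial dims are reversed with ReverseV2 because TFL expects
// the filter in flipped orientation relative to mhlo.convolution.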
class ConvertNonTrivialConvToTransposeConvOp
: public OpConversionPattern<mhlo::ConvolutionOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult ConvertNonTrivialConvToTransposeConvOp::matchAndRewrite(
mhlo::ConvolutionOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const ConvView data(op);
if (!IsSupportedNonTrivialConv(data)) {
return rewriter.notifyMatchFailure(op, "Not a non-trivial convolution.");
}
if (op.getFeatureGroupCount() != 1) {
return rewriter.notifyMatchFailure(
op, "group or depthwise convolution is not supported");
}
auto strides = data.InputDilations();
auto tfl_h_stride = rewriter.getI32IntegerAttr(strides[0]);
auto tfl_w_stride = rewriter.getI32IntegerAttr(strides[1]);
std::string padding;
SmallVector<int64_t, 4> padding_array;
  // Renamed from `padding` to avoid shadowing the string declared above.
  for (const auto& dim_padding : data.Padding()) {
    padding_array.push_back(dim_padding.Lo());
    padding_array.push_back(dim_padding.Hi());
  }
if (IsTransposeConvPaddingValid(op, 2, strides,
padding_array)) {
padding = "VALID";
} else if (IsTransposeConvPaddingSame(op, 2, strides,
padding_array)) {
padding = "SAME";
} else {
return rewriter.notifyMatchFailure(op,
"requires padding to be SAME or VALID");
}
auto bias = BuildEmptyBias(rewriter, op->getLoc(), data);
auto tfl_faf_none = rewriter.getStringAttr("NONE");
SmallVector<int32_t> kernel_spatial_dims_i32(
data.KernelLayout().Spatials().begin(),
data.KernelLayout().Spatials().end());
Value axis = rewriter.create<arith::ConstantOp>(
op.getLoc(), rewriter.getI32TensorAttr(kernel_spatial_dims_i32));
auto filter = rewriter.create<TFL::ReverseV2Op>(
op.getLoc(), op.getRhs().getType(), op.getRhs(), axis);
SmallVector<int32_t, 4> output_shape_i32(data.OutputShape().begin(),
data.OutputShape().end());
auto output_sizes = rewriter.create<arith::ConstantOp>(
op.getLoc(), rewriter.getI32TensorAttr(output_shape_i32));
rewriter.replaceOpWithNewOp<TFL::TransposeConvOp>(
op, op.getResult().getType(), output_sizes,
filter, op.getLhs(), bias,
rewriter.getStringAttr(padding),
tfl_h_stride, tfl_w_stride,
tfl_faf_none);
return success();
}
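// Rewrites a depthwise transposed convolution (feature_group_count > 1) as
// feature_group_count sliced convolutions with group count 1, concatenated
// along the output feature dimension, so the pattern above can fire on each.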
class SliceDepthwiseTransposedConvolution
: public OpRewritePattern<mhlo::ConvolutionOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::ConvolutionOp op,
PatternRewriter& rewriter) const final;
};
LogicalResult SliceDepthwiseTransposedConvolution::matchAndRewrite(
mhlo::ConvolutionOp conv_op, PatternRewriter& rewriter) const {
const ConvView data(conv_op);
if (!IsSupportedNonTrivialConv(data)) {
return rewriter.notifyMatchFailure(conv_op,
"Not a non-trivial convolution.");
}
mhlo::ConvDimensionNumbersAttr dnums = conv_op.getDimensionNumbers();
const int64_t input_feature_dimension = dnums.getInputFeatureDimension();
const int64_t input_channels =
mlir::cast<ShapedType>(conv_op.getLhs().getType())
.getDimSize(input_feature_dimension);
const int64_t feature_group_count = conv_op.getFeatureGroupCount();
const int64_t kernel_input_feature_dimension =
dnums.getKernelInputFeatureDimension();
const int64_t kernel_input_channels =
mlir::cast<ShapedType>(conv_op.getRhs().getType())
.getDimSize(kernel_input_feature_dimension);
const int64_t kernel_output_feature_dimension =
dnums.getKernelOutputFeatureDimension();
const int64_t kernel_output_channels =
mlir::cast<ShapedType>(conv_op.getRhs().getType())
.getDimSize(kernel_output_feature_dimension);
if (feature_group_count == 1) {
return rewriter.notifyMatchFailure(conv_op, "Not a depthwise convolution");
}
if (input_channels != feature_group_count) {
return rewriter.notifyMatchFailure(
conv_op, "Not a detphwise transposed convolution");
}
if (MatchWithResizeBilinearOp(data)) {
return rewriter.notifyMatchFailure(
conv_op, "Op will be legalized to ResizeBilinearOp");
}
if ((kernel_output_channels % feature_group_count != 0) ||
(kernel_input_channels != 1)) {
return rewriter.notifyMatchFailure(
conv_op, "Not a supported detphwise transposed convolution");
}
if ((kernel_output_channels / feature_group_count) != 1) {
return rewriter.notifyMatchFailure(
conv_op,
"Unsupported detphwise transpose convolution with non-1 channel "
"multiplier");
}
auto create_slice = [&](mlir::Value tensor, int64_t depth_idx,
int64_t channel_idx,
bool is_kernel = false) -> mlir::Value {
auto tensor_shape =
mlir::cast<ShapedType>(tensor.getType()).getShape().vec();
llvm::SmallVector<int64_t> start_indices(tensor_shape.size(), 0);
auto limit_indices = tensor_shape;
const llvm::SmallVector<int64_t> strides(tensor_shape.size(), 1);
start_indices[channel_idx] = depth_idx;
if (is_kernel) {
limit_indices[channel_idx] =
depth_idx + (kernel_output_channels / feature_group_count);
} else {
limit_indices[channel_idx] = depth_idx + 1;
}
return rewriter.create<mhlo::SliceOp>(
conv_op.getLoc(), tensor, rewriter.getI64TensorAttr(start_indices),
rewriter.getI64TensorAttr(limit_indices),
rewriter.getI64TensorAttr(strides));
};
llvm::SmallVector<mlir::Value> conv_results;
for (int i = 0; i < feature_group_count; ++i) {
auto sliced_input =
create_slice(conv_op.getLhs(), i, input_feature_dimension);
auto sliced_kernel = create_slice(conv_op.getRhs(), i,
kernel_output_feature_dimension, true);
auto output_type = mlir::cast<ShapedType>(conv_op->getResult(0).getType());
auto new_output_shape = output_type.getShape().vec();
new_output_shape[dnums.getOutputFeatureDimension()] /= feature_group_count;
auto new_output_type =
RankedTensorType::get(new_output_shape, output_type.getElementType());
auto conv_result = rewriter.create<mhlo::ConvolutionOp>(
conv_op.getLoc(), new_output_type, sliced_input, sliced_kernel,
conv_op.getWindowStridesAttr(), conv_op.getPaddingAttr(),
conv_op.getLhsDilationAttr(), conv_op.getRhsDilationAttr(),
conv_op.getWindowReversalAttr(), conv_op.getDimensionNumbers(),
1, 1,
conv_op.getPrecisionConfigAttr());
conv_results.push_back(conv_result);
}
auto final_output = rewriter.create<mhlo::ConcatenateOp>(
conv_op.getLoc(), conv_results,
rewriter.getI64IntegerAttr(dnums.getOutputFeatureDimension()));
rewriter.replaceOp(conv_op, final_output.getResult());
return success();
}
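// Promotes a 1-D convolution to 2-D by inserting a trivial spatial dimension
// into the operands and result, so the standard 2-D legalizations apply.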
class Conv1DToConv2D : public OpRewritePattern<mhlo::ConvolutionOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::ConvolutionOp op,
PatternRewriter& rewriter) const final;
};
arith::ConstantOp ShapeToConst(PatternRewriter& rewriter,
ArrayRef<int64_t> shape, Location loc) {
auto attr_type = RankedTensorType::get({static_cast<int64_t>(shape.size())},
rewriter.getIntegerType(32));
auto casted_shape = llvm::map_range(shape, [](auto i64) -> int32_t {
return (i64 < 0) ? -1 : static_cast<int32_t>(i64);
});
auto attr =
DenseIntElementsAttr::get(attr_type, llvm::to_vector(casted_shape));
return rewriter.create<arith::ConstantOp>(loc, attr_type, attr);
}
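// Returns the shape and layout obtained by appending a size-1 spatial
// dimension immediately after the last existing spatial dimension, shifting
// the batch/feature dims that follow it.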
std::tuple<llvm::SmallVector<int64_t>, Layout> InsertTrivialSpatialDim(
const Layout& layout, ArrayRef<int64_t> shape) {
const int64_t last_spatial = layout.Spatials()[layout.Rank() - 3];
const int64_t new_dim1 = (layout.SpecialDim1() > last_spatial)
? layout.SpecialDim1() + 1
: layout.SpecialDim1();
const int64_t new_dim2 = (layout.SpecialDim2() > last_spatial)
? layout.SpecialDim2() + 1
: layout.SpecialDim2();
llvm::SmallVector<int64_t> new_spatials(layout.Spatials());
const int64_t new_last_spatial = new_spatials.back() + 1;
new_spatials.push_back(new_last_spatial);
llvm::SmallVector<int64_t, 4> new_shape(shape.size() + 1, 1);
new_shape[new_dim1] = layout.SpecialDim1(shape);
new_shape[new_dim2] = layout.SpecialDim2(shape);
for (auto new_spatial : new_spatials) {
if (new_spatial == new_last_spatial) {
continue;
}
new_shape[new_spatial] = shape[new_spatial];
}
return std::tuple(new_shape, Layout(new_dim1, new_dim2, new_spatials));
}
LogicalResult Conv1DToConv2D::matchAndRewrite(mhlo::ConvolutionOp op,
PatternRewriter& rewriter) const {
const ConvView view(op);
if (view.InputLayout().Rank() != 3) {
return rewriter.notifyMatchFailure(op, "Not 1D conv.");
}
if (!IsInputDilationSupported(view)) {
return rewriter.notifyMatchFailure(op, "Expects trivial lhs dims.");
}
if (!InputOutputNonBatchDimsFullyStatic(view)) {
return rewriter.notifyMatchFailure(op, "Expects static dims.");
}
if (!IsWindowReversalSupported(view)) {
return rewriter.notifyMatchFailure(op, "Expects window reversal trivial.");
}
if (!view.InputLayout().AreSpatialsIota() ||
!view.KernelLayout().AreSpatialsIota() ||
!view.OutputLayout().AreSpatialsIota()) {
return rewriter.notifyMatchFailure(op,
"Expects well formed spatials dims.");
}
auto [lhs_new_shape, lhs_new_layout] =
InsertTrivialSpatialDim(view.InputLayout(), view.InputShape());
auto lhs_new_type = op.getLhs().getType().clone(lhs_new_shape);
auto new_lhs = rewriter.create<TFL::ReshapeOp>(
op.getLoc(), lhs_new_type, op.getLhs(),
ShapeToConst(rewriter, lhs_new_shape, op.getLoc()));
auto [rhs_new_shape, rhs_new_layout] =
InsertTrivialSpatialDim(view.KernelLayout(), view.KernelShape());
auto rhs_new_type = op.getRhs().getType().clone(rhs_new_shape);
auto new_rhs =
rewriter.create<mhlo::ReshapeOp>(op.getLoc(), rhs_new_type, op.getRhs());
auto [out_new_shape, out_new_layout] =
InsertTrivialSpatialDim(view.OutputLayout(), view.OutputShape());
auto out_new_type = op.getResult().getType().clone(out_new_shape);
llvm::SmallVector<int64_t, 2> strides_2d;
strides_2d.push_back(view.Strides()[0]);
strides_2d.push_back(1);
auto strides_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getI64Type()), strides_2d);
SmallVector<int64_t, 4> padding_2d;
const auto& dim_pad = view.Padding()[0];
padding_2d.push_back(dim_pad.Lo());
padding_2d.push_back(dim_pad.Hi());
padding_2d.push_back(0);
padding_2d.push_back(0);
auto padding_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2, 2}, rewriter.getI64Type()), padding_2d);
SmallVector<int64_t, 2> lhs_dilation_2d(2, 1);
auto lhs_dilation_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getI64Type()), lhs_dilation_2d);
SmallVector<int64_t, 2> rhs_dilation_2d;
rhs_dilation_2d.push_back(view.KernelDilations()[0]);
rhs_dilation_2d.push_back(1);
auto rhs_dilation_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getI64Type()), rhs_dilation_2d);
auto window_reversal_2d_attr = DenseIntElementsAttr::get(
RankedTensorType::get({2}, rewriter.getIntegerType(1)),
SmallVector<bool>({false, false}));
auto dnums_2d = mhlo::ConvDimensionNumbersAttr::get(
rewriter.getContext(), lhs_new_layout.SpecialDim1(),
lhs_new_layout.SpecialDim2(), lhs_new_layout.Spatials(),
rhs_new_layout.SpecialDim1(), rhs_new_layout.SpecialDim2(),
rhs_new_layout.Spatials(), out_new_layout.SpecialDim1(),
out_new_layout.SpecialDim2(), out_new_layout.Spatials());
auto conv2d_op = rewriter.create<mhlo::ConvolutionOp>(
op.getLoc(), out_new_type, new_lhs, new_rhs, strides_2d_attr,
padding_2d_attr, lhs_dilation_2d_attr, rhs_dilation_2d_attr,
window_reversal_2d_attr, dnums_2d, op.getFeatureGroupCount(),
op.getBatchGroupCount(), op.getPrecisionConfigAttr());
auto new_out_type = op.getResult().getType();
rewriter.replaceOpWithNewOp<TFL::ReshapeOp>(
op, new_out_type, conv2d_op.getResult(),
ShapeToConst(rewriter, new_out_type.getShape(), op.getLoc()));
return success();
}
}
void PopulateLegalizeConvPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeConv2D, LegalizeConv3D, LegalizeConvDepthwise,
ConvertNonTrivialConvToResizeBilinearOp,
ConvertNonTrivialConvToTransposeConvOp>(ctx);
target.addDynamicallyLegalOp<mhlo::ConvolutionOp>(IsConvLegal);
}
void PopulatePrepareConvPatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<Conv1DToConv2D, SliceDepthwiseTransposedConvolution>(ctx);
}
} | #include <initializer_list>
#include <numeric>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
int NumElements(const std::vector<int>& dims) {
return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
}
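// Test harness wrapping a single quantized CONV_2D or DEPTHWISE_CONV_2D op
// for the Hexagon delegate. The bias tensor is synthesized here: per-channel
// models get one bias scale per output channel (input_scale * filter_scale[i]),
// per-tensor models a single input_scale * filter_scale. The filter is marked
// kTfLiteMmapRo so the delegate treats it as a constant.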
class QuantizedConvolutionOpModel : public SingleOpModelWithHexagon {
public:
QuantizedConvolutionOpModel(BuiltinOperator type, const TensorData& input,
const TensorData& filter,
const TensorData& output, Padding padding_type,
int dilation_factor = 1, int stride_length = 1,
ActivationFunctionType fused_activation_function =
ActivationFunctionType_NONE) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[0];
if (type == BuiltinOperator_DEPTHWISE_CONV_2D) {
bias_size = GetShape(filter_)[3];
}
if (filter.per_channel_quantization) {
std::vector<float> bias_scale(
filter.per_channel_quantization_scales.size());
std::vector<int64_t> bias_zero_points(
filter.per_channel_quantization_scales.size());
for (size_t i = 0; i < filter.per_channel_quantization_scales.size();
++i) {
bias_scale[i] = input.scale * filter.per_channel_quantization_scales[i];
bias_zero_points[i] = 0;
}
TensorData bias{TensorType_INT32,
{bias_size},
0,
0,
0,
0,
true,
bias_scale,
bias_zero_points,
0};
bias_ = AddInput(bias);
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
if (type == BuiltinOperator_DEPTHWISE_CONV_2D) {
int input_depth = GetShape(input_)[3];
int output_depth = GetShape(filter_)[3];
int depth_mul = output_depth / input_depth;
SetBuiltinOp(
BuiltinOperator_DEPTHWISE_CONV_2D,
BuiltinOptions_DepthwiseConv2DOptions,
CreateDepthwiseConv2DOptions(
builder_, padding_type, stride_length, stride_length, depth_mul,
fused_activation_function, dilation_factor, dilation_factor)
.Union());
} else {
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(builder_, padding_type, stride_length,
stride_length, fused_activation_function,
dilation_factor, dilation_factor)
.Union());
}
BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
auto* filter_tensor = interpreter_->tensor(filter_);
filter_tensor->allocation_type = kTfLiteMmapRo;
}
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
void SetFilter(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(filter_, data);
}
void SetBias(std::initializer_list<float> data) {
QuantizeAndPopulate<int>(bias_, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
void SetInt8Input(std::initializer_list<float> data) {
QuantizeAndPopulate<int8_t>(input_, data);
}
void SetInt8Input(const std::vector<float>& data) {
QuantizeAndPopulate<int8_t>(input_, data);
}
void SetPerChannelQuantizedFilter(std::initializer_list<float> data) {
PerChannelSymmetricQuantizeAndPopulate(filter_, data);
}
void SetPerChannelQuantizedFilter(const std::vector<float>& data) {
PerChannelSymmetricQuantizeAndPopulate(filter_, data);
}
void SetPerChannelQuantizedBias(std::initializer_list<float> data) {
PerChannelQuantizeBias(bias_, data);
}
void SetPerChannelQuantizedBias(const std::vector<float>& data) {
PerChannelQuantizeBias(bias_, data);
}
protected:
int input_;
int filter_;
int bias_;
int output_;
};
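// The tests below either check hand-computed expectations or, for the larger
// kernels, first run the reference CPU implementation (m.Invoke()) and then
// compare the delegate's output against that baseline.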
TEST(QuantizedConvolutionOpModel, SimpleConvTestNoActivation) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D, {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128}, Padding_VALID, 1,
2);
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
},
1e-5)));
}
TEST(QuantizedConvolutionOpModel, SimpleConvTestReLU6Activation) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D, {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128}, Padding_VALID, 1,
2, ActivationFunctionType_RELU6);
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
6, 2, 5,
6, 2, 5,
6, 4, 3,
6, 4, 3,
},
1e-5)));
}
TEST(QuantizedConvolutionOpModel,
SimpleConvTestReLU6Activation_NoRequantizeRequired) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D, {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64}, {TensorType_UINT8, {}, 0, 6},
Padding_VALID, 1,
2, ActivationFunctionType_RELU6);
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
6, 2, 5,
6, 2, 5,
6, 4, 3,
6, 4, 3,
},
2e-2)));
}
TEST(QuantizedConvolutionOpModel, SimplePerTensor_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D,
{TensorType_INT8, {1, 2, 3, 2}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{2, 2, 2, 2},
0,
0,
0,
0,
true,
{1},
{0},
0},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
m.SetPerChannelQuantizedFilter(
{
1, 2,
3, 4,
3, 4,
5, 6,
7, 8,
5, 6,
3, 4,
1, 2,
});
m.SetPerChannelQuantizedBias({3, -2});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({31, 56, -57, -44}, 1e-5)));
}
TEST(QuantizedConvolutionOpModel, SimplePerChannel_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_CONV_2D,
{TensorType_INT8, {1, 2, 3, 2}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{2, 2, 2, 2},
0,
0,
0,
0,
true,
{1, 2},
{0, 0},
0},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
m.SetPerChannelQuantizedFilter(
{
1, 2,
3, 4,
3, 4,
5, 6,
7, 8,
5, 6,
3, 4,
1, 2,
});
m.SetPerChannelQuantizedBias({3, -2});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({31, 64, -57, -46}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel, SimpleDilatedDepthwiseConvTestPaddingValid) {
const int depth = 1;
const int image_width = 9;
const int image_height = 9;
const int image_batch_count = 1;
const int filter_size = 3;
const int filter_count = 1;
const int dilation_factor = 3;
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8,
{image_batch_count, image_height, image_width, depth},
0,
255},
{TensorType_UINT8,
{depth, filter_size, filter_size, filter_count},
0,
255},
{TensorType_UINT8, {}, 0, 255}, Padding_VALID, dilation_factor);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
m.SetBias({0});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConv5x5) {
QuantizedConvolutionOpModel m(BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8, {1, 6, 6, 2}, -63.5, 64},
{TensorType_UINT8, {1, 5, 5, 2}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128},
Padding_VALID);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2,
3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4,
5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-5)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvWithMultiplier_InputDepth1) {
QuantizedConvolutionOpModel m(BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8, {1, 6, 6, 1}, -63.5, 64},
{TensorType_UINT8, {1, 5, 5, 3}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128},
Padding_VALID);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-5)));
}
TEST(QuantizedConvolutionOpModel,
DepthwiseConvWithMultiplier_InputDepth1_RELU) {
QuantizedConvolutionOpModel m(BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_UINT8, {1, 6, 6, 1}, -63.5, 64},
{TensorType_UINT8, {1, 5, 5, 3}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128},
Padding_VALID, 1,
2, ActivationFunctionType_RELU6);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
6, 7, 8, 9, 10,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5,
1, 2, 3, 4, 5});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-5)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvSimplePerTensor_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 2, 3, 1}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{1},
{0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3,
1,
-2,
4,
2,
-3,
});
m.SetPerChannelQuantizedFilter({
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetPerChannelQuantizedBias({3, -2, 4, 6});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({43, 48, 40, 52, 3, -4, 4, 4}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvSimplePerTensor_Int8_RELU1) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 2, 3, 1}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{0.1, 2, 3, 0.4},
{0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID,
1,
1, ActivationFunctionType_RELU_N1_TO_1);
m.SetInt8Input({
3,
1,
-2,
4,
2,
-4,
});
m.SetPerChannelQuantizedFilter({
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetPerChannelQuantizedBias({3, -2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<int8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, 1e-2)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvSimplePerAxis_Int8) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 2, 3, 1}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{0.1, 2, 3, 0.4},
{0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
3,
1,
-2,
4,
2,
-4,
});
m.SetPerChannelQuantizedFilter({
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetPerChannelQuantizedBias({3, -2, 4, 6});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({43, 48, 42, 52, 0, -8, 6, 2}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel, DepthwiseConvPerChannel_3x3Filter) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 3, 3, 8}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 3, 3, 8},
0,
0,
0,
0,
true,
{0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1},
{0, 0, 0, 0, 0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_VALID);
m.SetInt8Input({
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
m.SetPerChannelQuantizedFilter(
{
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
m.SetPerChannelQuantizedBias({0, 0, 0, 0, 0, 0, 0, 0});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({9, 18, 0, 0, 47, 54, 0, 0}, 0.6f)));
}
TEST(QuantizedConvolutionOpModel,
DepthwiseConvPerChannel_3x3FilterPaddingSame) {
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 3, 3, 8}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{1, 3, 3, 8},
0,
0,
0,
0,
true,
{0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1},
{0, 0, 0, 0, 0, 0, 0, 0},
3},
{TensorType_INT8, {}, -63.5, 64, 0.5, -1}, Padding_SAME);
m.SetInt8Input({
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
m.SetPerChannelQuantizedFilter(
{
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
m.SetPerChannelQuantizedBias({0, 0, 0, 0, 0, 0, 0, 0});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(
{
4, 8, 0, 0, 21, 24, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
4, 8, 0, 0, 21, 24, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
9, 18, 0, 0, 47, 54, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
4, 8, 0, 0, 21, 24, 0, 0, 6, 12, 0, 0, 31.5, 36, 0, 0,
4, 8, 0, 0, 21, 24, 0, 0,
},
0.6f)));
}
TEST(QuantizedConvolutionOpModel,
DepthwiseConvPerChannel_5x5Filt2x2Stride64Chan) {
std::vector<float> per_channel_quantization_scales = {
0.00053629, 0.00052256, 0.00051463, 0.00050993, 0.00050885, 0.00052403,
0.00053925, 0.00053854, 0.00053962, 0.00048332, 0.00053551, 0.00052817,
0.00052771, 0.00051854, 0.00053823, 0.000531, 0.000521, 0.00053908,
0.00053849, 0.0005063, 0.00052631, 0.00050862, 0.00050484, 0.00053353,
0.0005352, 0.00051084, 0.00052429, 0.00052653, 0.00051875, 0.0005391,
0.00050941, 0.00053934, 0.00049698, 0.00050956, 0.00053204, 0.00051116,
0.00052303, 0.00053624, 0.00053452, 0.00050418, 0.00048261, 0.00053418,
0.00053058, 0.0005359, 0.0005324, 0.00053648, 0.00053957, 0.00052388,
0.00053638, 0.00052164, 0.00052303, 0.00053624, 0.00053452, 0.00050418,
0.00048261, 0.00053418, 0.00053058, 0.0005359, 0.0005324, 0.00053648,
0.00053957, 0.00052388, 0.00053638, 0.00052164};
std::vector<int64_t> per_channel_quantization_offsets(64, 0);
QuantizedConvolutionOpModel m(
BuiltinOperator_DEPTHWISE_CONV_2D,
{TensorType_INT8, {1, 5, 5, 64}, 0, 0, 1.8942945003509521, -6},
{TensorType_INT8,
{1, 5, 5, 64},
0,
0,
0,
0,
true,
per_channel_quantization_scales,
per_channel_quantization_offsets,
3},
{TensorType_INT8, {}, 0, 0, 0.2960677146911621, 7}, Padding_VALID,
1,
2);
std::vector<float> inputs;
std::vector<float> filter;
for (auto i = 0; i < 5 * 5 * 64; i++) {
inputs.push_back(UniformRandomFloat(-248, 234));
filter.push_back(UniformRandomFloat(-0.06, 0.06));
}
m.SetInt8Input(inputs);
m.SetPerChannelQuantizedFilter(filter);
std::vector<float> bias(64);
m.SetPerChannelQuantizedBias(bias);
m.Invoke();
auto interpreter_result = m.GetDequantizedOutput<int8_t>();
m.ApplyDelegateAndInvoke();
auto delegate_result = m.GetDequantizedOutput<int8_t>();
EXPECT_THAT(delegate_result,
ElementsAreArray(ArrayFloatNear(interpreter_result, 0.6f)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05bd909d-de74-41fe-9f9a-bc1cf9746d43 | cpp | tensorflow/tensorflow | depthwise_conv | tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc | tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
#include "tensorflow/lite/delegates/gpu/gl/workgroups/ideal_workgroup_picker.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
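// Emits a GLSL compute shader for depthwise convolution: each invocation
// accumulates one output vec4 over the kernel window, and channel multipliers
// greater than one are handled by replicating input channels (input_shifted).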
class DepthwiseConvolution : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"DepthWise Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const DepthwiseConvolution2DAttributes&>(ctx.op_attr);
auto weights = attr.weights.shape;
const int offsets_count = weights.h * weights.w;
const bool offsets_count_too_large = offsets_count > kMaxConstArraySize;
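    // Two codegen paths: small kernels precompute the dilated, padded tap
    // offsets into a constant array, while kernels with more than
    // kMaxConstArraySize taps compute the coordinates inside the shader loop.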
std::vector<Variable> parameters;
if (offsets_count_too_large) {
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"padding_w", attr.padding.prepended.w},
{"padding_h", attr.padding.prepended.h},
{"dilation_w", attr.dilations.w},
{"dilation_h", attr.dilations.h},
{"kernel_w", weights.w},
{"kernel_h", weights.h},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"channel_multiplier", weights.o},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
} else {
std::vector<int2> offsets;
for (int h = 0; h < weights.h; ++h) {
for (int w = 0; w < weights.w; ++w) {
offsets.emplace_back(w * attr.dilations.w - attr.padding.prepended.w,
h * attr.dilations.h - attr.padding.prepended.h);
}
}
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"offsets_count", offsets_count},
{"offsets", offsets},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"channel_multiplier", weights.o},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
}
bool non_empty_padding =
attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0;
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(ConvertToPIOHW4(attr.weights))}};
std::string source;
if (offsets_count_too_large) {
source = R"(
int offsets_count = $kernel_w$ * $kernel_h$;
int src_layer_offset = (gid.z % $channel_multiplier$) * 4;
int i = 0;
for (int ky = 0; ky < $kernel_h$; ky++) {
for (int kx = 0; kx < $kernel_w$; kx++, i++) {
ivec2 coord = gid.xy * $stride$ + ivec2(kx * $dilation_w$ - $padding_w$, ky * $dilation_h$ - $padding_h$);)";
} else {
source = R"(
int offsets_count = $offsets_count$;
int src_layer_offset = (gid.z % $channel_multiplier$) * 4;
for (int i = 0; i < offsets_count; ++i) {
ivec2 coord = gid.xy * $stride$ + $offsets[i]$;)";
}
if (non_empty_padding) {
source += R"(
if (coord.x < 0 || coord.y < 0 ||
coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
})";
}
source += R"(
int src_layer = gid.z / $channel_multiplier$;
vec4 input_ = $input_data_0[coord.x, coord.y, src_layer]$;
vec4 input_shifted = vec4(
input_[(src_layer_offset + 0) / $channel_multiplier$],
input_[(src_layer_offset + 1) / $channel_multiplier$],
input_[(src_layer_offset + 2) / $channel_multiplier$],
input_[(src_layer_offset + 3) / $channel_multiplier$]
);
value_0 += input_shifted * $weights[gid.z * offsets_count + i]$;
}
)";
if (offsets_count_too_large) {
source += R"(
}
)";
}
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::DEPTHWISE_CONVOLUTION,
HW(attr.weights.shape.h, attr.weights.shape.w), attr.strides,
OHWI(attr.weights.shape.o, ctx.input_shapes[0][1],
ctx.input_shapes[0][2], ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewDepthwiseConvolutionNodeShader() {
return std::make_unique<DepthwiseConvolution>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, DepthwiseConvSimpleWeights) {
auto status = DepthwiseConvSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, DepthwiseConvNoMultiplier) {
auto status = DepthwiseConvNoMultiplierTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, DepthwiseConvMultiplier2) {
auto status = DepthwiseConvMultiplier2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7e81246b-0365-4530-a3d8-790f88c0fd3b | cpp | tensorflow/tensorflow | unsorted_segment | tensorflow/lite/kernels/unsorted_segment.cc | tensorflow/lite/kernels/unsorted_segment_test.cc | #include <stdint.h>
#include <algorithm>
#include <functional>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unsorted_segment {
enum SegmentType {
kSegmentMax,
kSegmentMin,
kSegmentProd,
kSegmentSum,
};
static const int kInputDataTensor = 0;
static const int kInputSegmentIdsTensor = 1;
static const int kInputNumSegmentsTensor = 2;
static const int kOutputTensor = 0;
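// Output shape rule: segment_ids must be a shape prefix of data; the result
// has rank data_rank - segment_ids_rank + 1, with dim 0 = num_segments and
// the remaining dims copied from data's suffix. E.g. data [2, 1, 2, 1, 2]
// with segment_ids [2, 1] and num_segments 10 yields [10, 2, 1, 2] (see
// Data5dHasShapeNumSegDataShapeSuffix in the tests).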
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* data,
const TfLiteTensor* segment_ids,
const TfLiteTensor* num_segments,
TfLiteTensor* output) {
const int segment_ids_rank = NumDimensions(segment_ids);
const int data_rank = NumDimensions(data);
TF_LITE_ENSURE(context, segment_ids_rank <= data_rank);
for (int i = 0; i < segment_ids_rank; ++i) {
TF_LITE_ENSURE_EQ(context, segment_ids->dims->data[i], data->dims->data[i]);
}
TF_LITE_ENSURE(context, (num_segments->dims->size == 1 &&
num_segments->dims->data[0] == 1) ||
num_segments->dims->size == 0);
int32_t num_segments_ = GetTensorData<int32_t>(num_segments)[0];
const int num_segment_ids = NumElements(segment_ids);
int max_index = -1;
for (int i = 0; i < num_segment_ids; i++) {
max_index = std::max(GetTensorData<int32_t>(segment_ids)[i], max_index);
}
TF_LITE_ENSURE(context, max_index < num_segments_);
const int output_rank = data_rank - segment_ids_rank + 1;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
output_shape->data[0] = num_segments_;
for (int i = segment_ids_rank; i < data_rank; ++i) {
output_shape->data[i - segment_ids_rank + 1] = data->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
const TfLiteTensor* num_segments;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kInputNumSegmentsTensor, &num_segments));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
data->type == kTfLiteInt32 || data->type == kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, num_segments->type, kTfLiteInt32);
if (IsDynamicTensor(data) || !IsConstantOrPersistentTensor(segment_ids) ||
!IsConstantOrPersistentTensor(num_segments)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, data, segment_ids, num_segments, output);
}
template <typename T>
struct SegmenMax {
inline T operator()(const T& a, const T& b) const { return std::max(a, b); }
static constexpr T kInitialValue = std::numeric_limits<T>::lowest();
};
template <typename T>
struct SegmenMin {
inline T operator()(const T& a, const T& b) const { return std::min(a, b); }
static constexpr T kInitialValue = std::numeric_limits<T>::max();
};
template <typename T>
struct SegmenProd {
inline T operator()(const T& a, const T& b) const { return a * b; }
static constexpr T kInitialValue = T(1);
};
template <typename T>
struct SegmenSum {
inline T operator()(const T& a, const T& b) const { return a + b; }
static constexpr T kInitialValue = T(0);
};
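// Each reduction is a functor plus its identity element (lowest() for max,
// max() for min, 1 for prod, 0 for sum), letting the reference kernel seed
// the output with kInitialValue before folding each data row into its segment.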
template <typename T>
TfLiteStatus EvalType(TfLiteContext* context, const RuntimeShape& input_shape,
const T* input_data,
const RuntimeShape& segment_ids_shape,
const int32_t* segment_ids_data,
const RuntimeShape& output_shape, T* output_data,
SegmentType segment_type) {
switch (segment_type) {
case kSegmentProd:
reference_ops::UnsortedSegmentRef<T, SegmenProd>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
case kSegmentMax:
reference_ops::UnsortedSegmentRef<T, SegmenMax>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
case kSegmentSum:
reference_ops::UnsortedSegmentRef<T, SegmenSum>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
case kSegmentMin:
reference_ops::UnsortedSegmentRef<T, SegmenMin>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
default:
TF_LITE_KERNEL_LOG(context, "Not recognized segment type: %d",
segment_type);
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus EvalGeneric(TfLiteContext* context, TfLiteNode* node,
SegmentType segment_type) {
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
const TfLiteTensor* num_segments;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kInputNumSegmentsTensor, &num_segments));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, data, segment_ids,
num_segments, output));
}
TF_LITE_ENSURE_EQ(context, GetTensorShape(data).Dims(0),
GetTensorShape(segment_ids).Dims(0));
#define TF_LITE_UNSORTED_SEGMENT(dtype) \
EvalType<dtype>(context, GetTensorShape(data), GetTensorData<dtype>(data), \
GetTensorShape(segment_ids), \
GetTensorData<int32_t>(segment_ids), GetTensorShape(output), \
GetTensorData<dtype>(output), segment_type);
switch (data->type) {
case kTfLiteInt32:
TF_LITE_UNSORTED_SEGMENT(int32_t);
break;
case kTfLiteFloat32:
TF_LITE_UNSORTED_SEGMENT(float);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Currently UnsortedSegment doesn't support data type: %s",
TfLiteTypeGetName(data->type));
return kTfLiteError;
}
#undef TF_LITE_UNSORTED_SEGMENT
return kTfLiteOk;
}
TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentProd);
}
TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentMax);
}
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentSum);
}
TfLiteStatus EvalMin(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentMin);
}
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_PROD() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalProd};
return &r;
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_MAX() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalMax};
return &r;
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_SUM() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalSum};
return &r;
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_MIN() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalMin};
return &r;
}
}
}
} | #include "tensorflow/lite/kernels/unsorted_segment_test.h"
#include <limits.h>
#include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
TEST_P(UnsortedSegmentTest, SegmentIdsSizeNotEqualToDataFirstDimensionFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {3, 2}}, {TensorType_INT32, {2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1});
model.PopulateTensor<int32_t>(model.num_segments(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest,
LargestSegmentIdPlusOneGreaterThanNumSegmentsFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2}}, {TensorType_INT32, {2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1});
model.PopulateTensor<int32_t>(model.num_segments(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest, NumSegmentsNotScalarShapeFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {3, 2}}, {TensorType_INT32, {3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1, 0});
model.PopulateTensor<int32_t>(model.num_segments(), {2, 1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest, Rank2SegIdsNotPrefixFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2, 1}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {1, 1});
model.PopulateTensor<int32_t>(model.num_segments(), {3});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest, Rank2SegIdsHasShapeNumSegDataShapeSuffix) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {1, 2, 0, 8});
model.PopulateTensor<int32_t>(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10, 2}));
}
TEST_P(UnsortedSegmentTest, Rank2SegIdsHasShapeNumSegDataShapeSuffixConst) {
UnsortedSegmentModel<int32_t> model = getConstModel(
{TensorType_INT32, {2, 2, 2}}, {1, 2, -1, -1}, {2, 2}, {3}, {1});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({3, 2}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData2d) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2}}, {TensorType_INT32, {2, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1, 5, 2, 4});
model.PopulateTensor<int32_t>(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData2dConst) {
UnsortedSegmentModel<int32_t> model =
getConstModel({TensorType_INT32, {2, 2}}, {1, 1, 1, 1}, {2, 2}, {3}, {1});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({3}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData3d) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2, 2, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6, 7, 8});
model.PopulateTensor<int32_t>(model.segment_ids(), {1, 2, 3, 4, 5, 6, 7, 8});
model.PopulateTensor<int32_t>(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData3dConst) {
UnsortedSegmentModel<int32_t> model =
getConstModel({TensorType_INT32, {2, 2, 2}}, {0, 1, 2, -1, 3, -1, 4, -1},
{2, 2, 2}, {8}, {1});
model.PopulateTensor<int32_t>(model.data(), {1, 1, 1, 1, 1, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({8}));
}
TEST_P(UnsortedSegmentTest, Data5dHasShapeNumSegDataShapeSuffix) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 1, 2, 1, 2}}, {TensorType_INT32, {2, 1}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6, 7, 8});
model.PopulateTensor(model.segment_ids(), {0, 1});
model.PopulateTensor(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10, 2, 1, 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unsorted_segment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unsorted_segment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a4c9281-b303-45c5-9292-6c8a9210ff7f | cpp | tensorflow/tensorflow | right_shift | tensorflow/lite/kernels/right_shift.cc | tensorflow/lite/kernels/right_shift_test.cc | #include <climits>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace right_shift {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input1->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
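// Clamps the shift amount into [0, bit_width - 1] before shifting, so negative
// or oversized shifts stay well defined. Rough sketch for the int8 test below:
// RightShift<int8_t>(-14, 11) clamps 11 to 7, and -14 >> 7 yields -1 on the
// usual arithmetic-shift targets.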
template <typename T>
T RightShift(T x, T y) {
T y_clamped = y;
if (y_clamped < 0) {
y_clamped = 0;
} else if (y_clamped > sizeof(T) * CHAR_BIT - 1) {
y_clamped = sizeof(T) * CHAR_BIT - 1;
}
return x >> y_clamped;
}
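// Eval dispatches on the shared element type and uses the broadcasting
// variant only when Prepare recorded a shape mismatch between the inputs.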
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteType type = output->type;
switch (type) {
case kTfLiteUInt8: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<uint8_t, uint8_t, uint8_t>(
GetTensorShape(input1), GetTensorData<uint8_t>(input1),
GetTensorShape(input2), GetTensorData<uint8_t>(input2),
GetTensorShape(output), GetTensorData<uint8_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<uint8_t, uint8_t, uint8_t>(
GetTensorShape(input1), GetTensorData<uint8_t>(input1),
GetTensorShape(input2), GetTensorData<uint8_t>(input2),
GetTensorShape(output), GetTensorData<uint8_t>(output), RightShift);
}
break;
}
case kTfLiteInt8: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), RightShift);
}
break;
}
case kTfLiteUInt16: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<uint16_t, uint16_t,
uint16_t>(
GetTensorShape(input1), GetTensorData<uint16_t>(input1),
GetTensorShape(input2), GetTensorData<uint16_t>(input2),
GetTensorShape(output), GetTensorData<uint16_t>(output),
RightShift);
} else {
reference_ops::BinaryFunction<uint16_t, uint16_t, uint16_t>(
GetTensorShape(input1), GetTensorData<uint16_t>(input1),
GetTensorShape(input2), GetTensorData<uint16_t>(input2),
GetTensorShape(output), GetTensorData<uint16_t>(output),
RightShift);
}
break;
}
case kTfLiteInt16: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), RightShift);
}
break;
}
case kTfLiteUInt32: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<uint32_t, uint32_t,
uint32_t>(
GetTensorShape(input1), GetTensorData<uint32_t>(input1),
GetTensorShape(input2), GetTensorData<uint32_t>(input2),
GetTensorShape(output), GetTensorData<uint32_t>(output),
RightShift);
} else {
reference_ops::BinaryFunction<uint32_t, uint32_t, uint32_t>(
GetTensorShape(input1), GetTensorData<uint32_t>(input1),
GetTensorShape(input2), GetTensorData<uint32_t>(input2),
GetTensorShape(output), GetTensorData<uint32_t>(output),
RightShift);
}
break;
}
case kTfLiteInt32: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), RightShift);
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"RightShift currently only supports "
"8-bit/16-bit/32-bit integer/unsigned integer, got %s",
TfLiteTypeGetName(type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RIGHT_SHIFT() {
static TfLiteRegistration r = {right_shift::Init, right_shift::Free,
right_shift::Prepare, right_shift::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class RightShiftOpModel : public SingleOpModel {
public:
RightShiftOpModel(std::initializer_list<int> input1_shape,
std::initializer_list<int> input2_shape,
TensorType tensor_type) {
input1_ = AddInput(tensor_type);
input2_ = AddInput(tensor_type);
output_ = AddOutput(tensor_type);
SetBuiltinOp(BuiltinOperator_RIGHT_SHIFT, BuiltinOptions_RightShiftOptions,
CreateRightShiftOptions(builder_).Union());
BuildInterpreter({input1_shape, input2_shape});
}
int input1() const { return input1_; }
int input2() const { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(RightShiftOpTest, SimpleTestInt8) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT8);
model.PopulateTensor<int8_t>(model.input1(), {-1, -5, -3, -14});
model.PopulateTensor<int8_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAreArray({-1, -5, -1, -1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(RightShiftOpTest, SimpleTestInt16) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16);
model.PopulateTensor<int16_t>(model.input1(), {-1, -5, -3, -14});
model.PopulateTensor<int16_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({-1, -5, -1, -1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(RightShiftOpTest, SimpleTestInt32) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {-1, -5, -3, -14});
model.PopulateTensor<int32_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({-1, -5, -1, -1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(RightShiftOpTest, SimpleTestUInt8) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT8);
model.PopulateTensor<uint8_t>(model.input1(), {1, 5, 3, 14});
model.PopulateTensor<uint8_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint8_t>(), ElementsAreArray({0, 5, 0, 0}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(RightShiftOpTest, SimpleTestUInt16) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT16);
model.PopulateTensor<uint16_t>(model.input1(), {1, 5, 3, 14});
model.PopulateTensor<uint16_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint16_t>(), ElementsAreArray({0, 5, 0, 0}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(RightShiftOpTest, SimpleTestUInt32) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {1, 5, 3, 14});
model.PopulateTensor<uint32_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({0, 5, 0, 0}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastRhsInt) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {-1, -5, -3, -14});
model.PopulateTensor<int32_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({-1, -2, -1, -4}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastLhsInt) {
RightShiftOpModel model({1, 1, 1, 1}, {1, 1, 1, 4}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {4});
model.PopulateTensor<int32_t>(model.input2(), {1, -2, 3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({2, 4, 0, 4}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastRhsUInt) {
RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {5, 0, 7, 11});
model.PopulateTensor<uint32_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({1, 0, 1, 2}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastLhsUInt) {
RightShiftOpModel model({1, 1, 1, 1}, {1, 1, 1, 4}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {4});
model.PopulateTensor<uint32_t>(model.input2(), {1, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({2, 1, 0, 0}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/right_shift.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/right_shift_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86a41926-4ca1-4b47-996d-dc64e389821d | cpp | tensorflow/tensorflow | lsh_projection | tensorflow/lite/kernels/lsh_projection.cc | tensorflow/lite/kernels/lsh_projection_test.cc | #include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <memory>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include <farmhash.h>
namespace tflite {
namespace ops {
namespace builtin {
namespace lsh_projection {
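// Output sizing: SPARSE emits one packed signature per hash function
// (num_hash values), while DENSE emits every sign bit separately
// (num_hash * num_bits values).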
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2);
TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
TF_LITE_ENSURE(context, SizeOfDimension(input, 0) >= 1);
if (NumInputs(node) == 3) {
const TfLiteTensor* weight;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight));
TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0),
SizeOfDimension(input, 0));
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
switch (params->type) {
case kTfLiteLshProjectionSparse:
outputSize->data[0] = SizeOfDimension(hash, 0);
break;
case kTfLiteLshProjectionDense:
outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1);
break;
default:
return kTfLiteError;
}
return context->ResizeTensor(context, output, outputSize);
}
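// Computes one LSH sign bit: every input row is hashed together with the
// 4-byte seed via farmhash Fingerprint64, the (optionally weighted) hash
// values are summed as doubles, and the bit is 1 iff the sum is positive.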
int RunningSignBit(const TfLiteTensor* input, const TfLiteTensor* weight,
float seed) {
double score = 0.0;
int input_item_bytes = input->bytes / SizeOfDimension(input, 0);
char* input_ptr = input->data.raw;
const size_t seed_size = sizeof(float);
const size_t key_bytes = sizeof(float) + input_item_bytes;
std::unique_ptr<char[]> key(new char[key_bytes]);
const float* weight_ptr = GetTensorData<float>(weight);
for (int i = 0; i < SizeOfDimension(input, 0); ++i) {
memcpy(key.get(), &seed, seed_size);
memcpy(key.get() + seed_size, input_ptr, input_item_bytes);
int64_t hash_signature = ::util::Fingerprint64(key.get(), key_bytes);
double running_value = static_cast<double>(hash_signature);
input_ptr += input_item_bytes;
if (weight_ptr == nullptr) {
score += running_value;
} else {
score += weight_ptr[i] * running_value;
}
}
return (score > 0) ? 1 : 0;
}
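// SPARSE packs the num_bits sign bits of hash function i into one integer and
// offsets it by i * 2^num_bits, so buckets from different hash functions land
// in disjoint index ranges; DENSE below writes each bit out individually.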
void SparseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input,
const TfLiteTensor* weight, int32_t* out_buf) {
int num_hash = SizeOfDimension(hash, 0);
int num_bits = SizeOfDimension(hash, 1);
for (int i = 0; i < num_hash; i++) {
int32_t hash_signature = 0;
for (int j = 0; j < num_bits; j++) {
float seed = GetTensorData<float>(hash)[i * num_bits + j];
int bit = RunningSignBit(input, weight, seed);
hash_signature = (hash_signature << 1) | bit;
}
*out_buf++ = hash_signature + i * (1 << num_bits);
}
}
void DenseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input,
const TfLiteTensor* weight, int32_t* out_buf) {
int num_hash = SizeOfDimension(hash, 0);
int num_bits = SizeOfDimension(hash, 1);
for (int i = 0; i < num_hash; i++) {
for (int j = 0; j < num_bits; j++) {
float seed = GetTensorData<float>(hash)[i * num_bits + j];
int bit = RunningSignBit(input, weight, seed);
*out_buf++ = bit;
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
TfLiteTensor* out_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
int32_t* out_buf = out_tensor->data.i32;
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
const TfLiteTensor* weight =
NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2);
switch (params->type) {
case kTfLiteLshProjectionDense:
DenseLshProjection(hash, input, weight, out_buf);
break;
case kTfLiteLshProjectionSparse:
SparseLshProjection(hash, input, weight, out_buf);
break;
default:
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LSH_PROJECTION() {
static TfLiteRegistration r = {nullptr, nullptr, lsh_projection::Resize,
lsh_projection::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
class LSHProjectionOpModel : public SingleOpModel {
public:
LSHProjectionOpModel(LSHProjectionType type,
std::initializer_list<int> hash_shape,
std::initializer_list<int> input_shape,
std::initializer_list<int> weight_shape) {
hash_ = AddInput(TensorType_FLOAT32);
input_ = AddInput(TensorType_INT32);
if (weight_shape.size() > 0) {
weight_ = AddInput(TensorType_FLOAT32);
}
output_ = AddOutput(TensorType_INT32);
SetBuiltinOp(BuiltinOperator_LSH_PROJECTION,
BuiltinOptions_LSHProjectionOptions,
CreateLSHProjectionOptions(builder_, type).Union());
if (weight_shape.size() > 0) {
BuildInterpreter({hash_shape, input_shape, weight_shape});
} else {
BuildInterpreter({hash_shape, input_shape});
}
output_size_ = 1;
for (int i : hash_shape) {
output_size_ *= i;
if (type == LSHProjectionType_SPARSE) {
break;
}
}
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
void SetHash(std::initializer_list<float> data) {
PopulateTensor(hash_, data);
}
void SetWeight(std::initializer_list<float> f) { PopulateTensor(weight_, f); }
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
private:
int input_;
int hash_;
int weight_;
int output_;
int output_size_;
};
TEST(LSHProjectionOpTest2, Dense1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_DENSE, {3, 2}, {5}, {5});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({1.0, 1.0, 1.0, 1.0, 1.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 1, 1, 1, 0));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 0, 1, 0, 0));
#endif
}
TEST(LSHProjectionOpTest2, Sparse1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5}, {});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 1, 8 + 0));
#endif
}
TEST(LSHProjectionOpTest2, Sparse3DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5, 2, 2}, {5});
m.SetInput({1234, 2345, 3456, 1234, 4567, 5678, 6789, 4567, 7891, 8912,
9123, 7890, -987, -876, -765, -987, -543, -432, -321, -543});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({0.12, 0.34, 0.56, 0.67, 0.78});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 2, 4 + 1, 8 + 1));
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lsh_projection.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lsh_projection_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b05ee199-442e-4b1e-aba6-4f082d0a2e56 | cpp | tensorflow/tensorflow | scatter_nd | tensorflow/lite/kernels/scatter_nd.cc | tensorflow/lite/kernels/scatter_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace scatter_nd {
constexpr int kIndices = 0;
constexpr int kUpdates = 1;
constexpr int kShape = 2;
constexpr int kOutputTensor = 0;
template <typename IndicesT>
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* shape,
TfLiteTensor* output) {
const int shape_rank = SizeOfDimension(shape, 0);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape_rank);
const auto* shape_data = GetTensorData<IndicesT>(shape);
for (int i = 0; i < shape_rank; i++) {
output_shape->data[i] = shape_data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
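// Checks that `indices`, `updates`, and the requested output shape are
// consistent: the outer dimensions of `indices` and `updates` must match,
// and the inner dimensions of `updates` must match the trailing dimensions
// of the output shape.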
template <typename IndicesT>
TfLiteStatus CheckShapes(TfLiteContext* context, const RuntimeShape& indices,
const RuntimeShape& updates,
const RuntimeShape& shape_shape,
const IndicesT* shape_data) {
TF_LITE_ENSURE(context, (indices.DimensionsCount() >= 1) &&
(updates.DimensionsCount() >= 1) &&
(shape_shape.DimensionsCount() == 1));
const int outer_dims = indices.DimensionsCount() - 1;
for (int i = 0; i < outer_dims; ++i) {
TF_LITE_ENSURE_EQ(context, indices.Dims(i), updates.Dims(i));
}
const int ix = indices.Dims(outer_dims);
TF_LITE_ENSURE_EQ(context, updates.DimensionsCount() - outer_dims,
shape_shape.Dims(0) - ix);
for (int i = 0; i + outer_dims < updates.DimensionsCount(); ++i) {
TF_LITE_ENSURE_EQ(context, updates.Dims(i + outer_dims),
shape_data[ix + i]);
}
return kTfLiteOk;
}
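// The output shape is only known at Prepare time when the `shape` input is a
// constant (or persistent) tensor; otherwise the output is marked dynamic and
// resized during Eval.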
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
switch (updates->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteBool:
case kTfLiteInt8:
case kTfLiteInt64:
case kTfLiteInt32:
break;
default:
TF_LITE_KERNEL_LOG(
context, "Updates of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(updates->type));
return kTfLiteError;
}
if (indices->type != shape->type) {
TF_LITE_KERNEL_LOG(context, "Indices and shape must have the same type.");
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = updates->type;
if (IsConstantOrPersistentTensor(shape)) {
switch (indices->type) {
case kTfLiteInt32:
TF_LITE_ENSURE_OK(
context,
CheckShapes<int32_t>(context, GetTensorShape(indices),
GetTensorShape(updates), GetTensorShape(shape),
GetTensorData<int32_t>(shape)));
return ResizeOutputTensor<int32_t>(context, shape, output);
default:
TF_LITE_KERNEL_LOG(
context, "Indices of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
} else {
SetTensorToDynamic(output);
return kTfLiteOk;
}
}
template <typename IndicesT, typename UpdatesT>
TfLiteStatus ScatterNd(const TfLiteTensor* indices, const TfLiteTensor* updates,
TfLiteTensor* output) {
return reference_ops::ScatterNd(
GetTensorShape(indices), GetTensorData<IndicesT>(indices),
GetTensorShape(updates), GetTensorData<UpdatesT>(updates),
GetTensorShape(output), GetTensorData<UpdatesT>(output));
}
template <typename IndicesT>
TfLiteStatus EvalScatterNd(TfLiteContext* context, const TfLiteTensor* indices,
const TfLiteTensor* updates,
const TfLiteTensor* shape, TfLiteTensor* output) {
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(
context, CheckShapes<IndicesT>(
context, GetTensorShape(indices), GetTensorShape(updates),
GetTensorShape(shape), GetTensorData<IndicesT>(shape)));
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor<IndicesT>(context, shape, output));
}
TfLiteStatus status = kTfLiteError;
switch (updates->type) {
case kTfLiteFloat32:
status = ScatterNd<IndicesT, float>(indices, updates, output);
break;
case kTfLiteUInt8:
status = ScatterNd<IndicesT, uint8_t>(indices, updates, output);
break;
case kTfLiteBool:
status = ScatterNd<IndicesT, bool>(indices, updates, output);
break;
case kTfLiteInt8:
status = ScatterNd<IndicesT, int8_t>(indices, updates, output);
break;
case kTfLiteInt32:
status = ScatterNd<IndicesT, int32_t>(indices, updates, output);
break;
case kTfLiteInt64:
status = ScatterNd<IndicesT, int64_t>(indices, updates, output);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Updates of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(updates->type));
return kTfLiteError;
}
if (status != kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "scatter_nd index out of bounds");
}
return status;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (indices->type) {
case kTfLiteInt32:
return EvalScatterNd<int32_t>(context, indices, updates, shape, output);
default:
TF_LITE_KERNEL_LOG(
context, "Indices of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_SCATTER_ND() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 scatter_nd::Prepare, scatter_nd::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class ScatterNdOpModel : public SingleOpModel {
public:
ScatterNdOpModel(const TensorData& indices, const TensorData& updates,
const TensorData& shape) {
indices_ = AddInput(indices);
updates_ = AddInput(updates);
shape_ = AddInput(shape);
output_ = AddOutput(updates.type);
SetBuiltinOp(BuiltinOperator_SCATTER_ND, BuiltinOptions_ScatterNdOptions,
CreateScatterNdOptions(builder_).Union());
BuildInterpreter(
{GetShape(indices_), GetShape(updates_), GetShape(shape_)});
}
template <typename T>
void SetIndices(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
void SetUpdates(std::initializer_list<T> data) {
PopulateTensor<T>(updates_, data);
}
template <typename T>
void SetShape(std::initializer_list<T> data) {
PopulateTensor<T>(shape_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int indices_;
int updates_;
int shape_;
int output_;
};
TEST(ScatterNdOpTest, ScatterElementIntoVector) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<float>({9, 10, 11, 12});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({0, 11, 0, 10, 9, 0, 0, 12}));
}
TEST(ScatterNdOpTest, ScatterMatrixIntoRank3Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 1}},
{TensorType_FLOAT32, {2, 4, 4}}, {TensorType_INT32, {3}});
m.SetIndices<int32_t>({0, 2});
m.SetUpdates<float>({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8});
m.SetShape<int32_t>({4, 4, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 4, 4}));
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoMatrix) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4, 4}},
{TensorType_INT32, {2}});
m.SetIndices<int32_t>({ 9, 8, 0, 1});
m.SetUpdates<float>({ 1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16});
m.SetShape<int32_t>({10, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({10, 4}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 9, 10, 11, 12,
13, 14, 15, 16,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
5, 6, 7, 8,
1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterMatricesIntoRank4Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2}},
{TensorType_INT32, {4}});
m.SetIndices<int32_t>(
{ 1, 1, 0, 1, 0, 0, 1, 0});
m.SetUpdates<float>({ 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16});
m.SetShape<int32_t>({2, 2, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2, 2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray({ 9, 10, 11, 12,
5, 6, 7, 8,
13, 14, 15, 16,
1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank4Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 2, 3}},
{TensorType_FLOAT32, {2, 2, 5}}, {TensorType_INT32, {4}});
m.SetIndices<int32_t>(
{ 2, 2, 2, 1, 0, 1, 0, 2, 0, 2, 2, 0});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({3, 3, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20,
0, 0, 0, 0, 0,
1, 2, 3, 4, 5,
}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank3Tensor) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_FLOAT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
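// When several indices address the same output location, scatter_nd sums the
// corresponding updates.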
TEST(ScatterNdOpTest, OverlappedIndicesSummed) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_FLOAT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 1, 0, 0, 2, 0, 2, 1, 0});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
17, 19, 21, 23, 25,
17, 19, 21, 23, 25,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, Int32IndicesUint8Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_UINT8, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<uint8_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<uint8_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt8Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT8, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int8_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt32Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int32_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int32_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt64Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT64, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int64_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesBoolUpdates) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_BOOL, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<bool>({true, false, true, false});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(
m.GetOutput<bool>(),
ElementsAreArray({false, true, false, false, true, false, false, false}));
}
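// The shape input is populated at runtime here, so the output is dynamic;
// re-invoking with a different shape exercises the Eval-time resize path.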
TEST(ScatterNdOpTest, DynamicShape) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT64, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int64_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
m.SetIndices<int32_t>({ 2, 3, 1, 0, 2, 0, 1, 2});
m.SetShape<int32_t>({3, 4, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 4, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
1, 2, 3, 4, 5}));
}
TEST(ScatterNdOpTest, ReadAndWriteArrayLimits) {
ScatterNdOpModel m({TensorType_INT32, {5, 1}}, {TensorType_INT32, {5}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 0, 2});
m.SetUpdates<int32_t>({1, 2, 3, 7, 9});
m.SetShape<int32_t>({5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({5}));
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({7, 3, 9, 2, 1}));
}
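// Out-of-bounds indices must make Invoke fail rather than read or write
// outside the output buffer.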
TEST(ScatterNdOpTest, OOBRead) {
ScatterNdOpModel m({TensorType_INT32, {1, 1}}, {TensorType_INT32, {1}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4});
m.SetUpdates<int32_t>({1});
m.SetShape<int32_t>({1});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ScatterNdOpTest, OOBWrites) {
ScatterNdOpModel m({TensorType_INT32, {5, 1}}, {TensorType_INT32, {5}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, -0x38, 0x38});
m.SetUpdates<int32_t>({1, 2, 3, 0x44444444, 0x55555555});
m.SetShape<int32_t>({1});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/scatter_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/scatter_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3e76bcbc-e1c4-4513-974e-a2ea6cb94dd3 | cpp | tensorflow/tensorflow | floor_mod | tensorflow/lite/kernels/floor_mod.cc | tensorflow/lite/kernels/floor_mod_test.cc | #include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_mod {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
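// Cached at Prepare time: whether the two inputs require shape broadcasting.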
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt8 && type != kTfLiteInt16 && type != kTfLiteInt32 &&
type != kTfLiteFloat32 && type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
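// For integer inputs, a zero denominator is rejected before dispatching to
// the reference kernel; floating-point inputs skip this check.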
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
const T* denominator_data = GetTensorData<T>(input2);
if (input2->type == kTfLiteInt8 || input2->type == kTfLiteInt16 ||
input2->type == kTfLiteInt32 || input2->type == kTfLiteInt64) {
const int num_elements = NumElements(input2);
for (int i = 0; i < num_elements; ++i) {
if (denominator_data[i] == 0) {
TF_LITE_KERNEL_LOG(context, "Division by 0");
return kTfLiteError;
}
}
}
if (requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), denominator_data, GetTensorShape(output),
GetTensorData<T>(output), reference_ops::FloorMod<T>);
} else {
reference_ops::BinaryFunction<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output),
reference_ops::FloorMod<T>);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input1->type) {
case kTfLiteInt8: {
return EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2,
output);
}
case kTfLiteInt16: {
return EvalImpl<int16_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_FLOOR_MOD() {
static TfLiteRegistration r = {floor_mod::Init, floor_mod::Free,
floor_mod::Prepare, floor_mod::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/floor_mod_test_common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
TEST(FloorModModel, Simple) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
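// floor_mod follows floored-division semantics: the result takes the sign of
// the divisor.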
TEST(FloorModModel, NegativeValue) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, BroadcastFloorMod) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
TEST(FloorModModel, Int64WithBroadcast) {
FloorModModel<int64_t> model({TensorType_INT64, {1, 2, 2, 1}},
{TensorType_INT64, {1}}, {TensorType_INT64, {}});
model.PopulateTensor<int64_t>(model.input1(), {10, -9, -11, (1LL << 34) + 9});
model.PopulateTensor<int64_t>(model.input2(), {-(1LL << 33)});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(),
ElementsAre(-8589934582, -9, -11, -8589934583));
}
TEST(FloorModModel, FloatSimple) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<float>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, FloatNegativeValue) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<float>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, FloatBroadcastFloorMod) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<float>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
TEST(FloorModModel, SimpleInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, NegativeValueInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, BroadcastFloorModInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1}}, {TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_mod.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_mod_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a26331c-d8b1-4846-aa6d-ed53d0ece25a | cpp | tensorflow/tensorflow | matrix_diag | tensorflow/lite/kernels/matrix_diag.cc | tensorflow/lite/kernels/matrix_diag_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace matrix_diag {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
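// The output appends a copy of the input's last dimension, so an input of
// shape [..., N] produces an output of shape [..., N, N].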
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1);
for (int i = 0; i < input_dims_size; i++) {
output_shape->data[i] = input_dims->data[i];
}
output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1];
output->type = input->type;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
return kTfLiteOk;
}
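// Writes each input vector onto the main diagonal of a square output matrix,
// zero-filling the off-diagonal elements, one matrix per batch.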
template <typename T>
void FillDiagImpl(const T* in, T* out, const int batch_size, const int row_size,
const int col_size) {
int idx = 0;
for (int b = 0; b < batch_size; b++) {
for (int i = 0; i < row_size; i++) {
for (int j = 0; j < col_size; ++j) {
if (i == j) {
out[i * col_size + j] = in[idx];
idx++;
} else {
out[i * col_size + j] = 0;
}
}
}
out += row_size * col_size;
}
}
template <typename T>
void FillDiag(const TfLiteTensor* input, TfLiteTensor* output,
const int batch_size, const int row_size, const int col_size) {
FillDiagImpl<T>(GetTensorData<T>(input), GetTensorData<T>(output), batch_size,
row_size, col_size);
}
void FillDiagHelper(const TfLiteTensor* input, TfLiteTensor* output) {
const int num_output_dims = output->dims->size;
int batch_size = 1;
for (int i = 0; i < num_output_dims - 2; ++i) {
batch_size *= output->dims->data[i];
}
const int row_size = output->dims->data[num_output_dims - 2];
const int col_size = output->dims->data[num_output_dims - 1];
switch (output->type) {
case kTfLiteInt64: {
return FillDiag<int64_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteInt32: {
return FillDiag<int32_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteInt16: {
return FillDiag<int16_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteInt8: {
return FillDiag<int8_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteUInt8: {
return FillDiag<uint8_t>(input, output, batch_size, row_size, col_size);
}
default:
return FillDiag<float>(input, output, batch_size, row_size, col_size);
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
FillDiagHelper(input, output);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MATRIX_DIAG() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 matrix_diag::Prepare, matrix_diag::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class MatrixDiagOpModel : public SingleOpModel {
public:
explicit MatrixDiagOpModel(const TensorData& input) {
input_ = AddInput(input);
output_ = AddOutput({input.type, {}});
SetBuiltinOp(BuiltinOperator_MATRIX_DIAG, BuiltinOptions_MatrixDiagOptions,
CreateMatrixDiagOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
TfLiteType GetOutputType() {
TfLiteTensor* t = interpreter_->tensor(output_);
return t->type;
}
private:
int input_;
int output_;
};
template <typename T>
class MatrixDiagOpTest : public ::testing::Test {};
using TypesUnderTest =
::testing::Types<TypeUnion<int32_t>, TypeUnion<float>, TypeUnion<int16_t>,
TypeUnion<int8_t>, TypeUnion<uint8_t>>;
TYPED_TEST_SUITE(MatrixDiagOpTest, TypesUnderTest);
TYPED_TEST(MatrixDiagOpTest, ThreeByThreeDiag) {
MatrixDiagOpModel<typename TypeParam::ScalarType> model(
{TypeParam::tensor_type, {3}});
model.template PopulateTensor<typename TypeParam::ScalarType>(model.input(),
{1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0,
0, 2, 0,
0, 0, 3}));
EXPECT_THAT(model.GetOutputType(), TypeParam::tflite_type);
}
TEST(MatrixDiagTest, Int32TestTwoDimDiag) {
MatrixDiagOpModel<int32_t> model({TensorType_INT32, {2, 4}});
model.PopulateTensor<int32_t>(model.input(), {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 4, 4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0,
0, 2, 0, 0,
0, 0, 3, 0,
0, 0, 0, 4,
5, 0, 0, 0,
0, 6, 0, 0,
0, 0, 7, 0,
0, 0, 0, 8}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteInt32);
}
TEST(MatrixDiagTest, DegenerateCase) {
MatrixDiagOpModel<uint8_t> model({TensorType_UINT8, {1}});
model.PopulateTensor<uint8_t>(model.input(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteUInt8);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_diag.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_diag_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd8402fb-b272-4b3d-8727-5a2fb5c803c3 | cpp | tensorflow/tensorflow | stablehlo_and | tensorflow/lite/kernels/stablehlo_and.cc | tensorflow/lite/kernels/stablehlo_and_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
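// STABLEHLO_AND reuses the shared element-wise kernel machinery; only the
// computation type passed to ElementwiseEval distinguishes it from the other
// stablehlo element-wise ops.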
TfLiteRegistration* Register_STABLEHLO_AND() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 ElementwisePrepare,
                                 ElementwiseEval<ComputationType::kAnd>};
return &r;
}
} | #include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
class AndOpModel : public SingleOpModel {
public:
AndOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_STABLEHLO_AND, BuiltinOptions_NONE, 0);
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input1_;
int input2_;
int output_;
};
TEST(StablehloElementwise, AndInt32) {
AndOpModel model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {2, 3, 7, 8});
model.PopulateTensor<int32_t>(model.input2(), {4, 5, 7, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAre(0, 1, 7, 0));
}
TEST(StablehloElementwise, AndInt8) {
AndOpModel model({TensorType_INT8, {1, 3, 1}}, {TensorType_INT8, {1, 3, 1}},
{TensorType_INT8, {}});
model.PopulateTensor<int8_t>(model.input1(), {7, -8, -8});
model.PopulateTensor<int8_t>(model.input2(), {0, 7, -8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAre(0, 0, -8));
}
TEST(StablehloElementwise, AndInt16) {
AndOpModel model({TensorType_INT16, {1, 1, 3}}, {TensorType_INT16, {1, 1, 3}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {32767, -32768, -32768});
model.PopulateTensor<int16_t>(model.input2(), {32767, -32768, -32768});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAre(32767, -32768, -32768));
}
TEST(StablehloElementwise, AndBool) {
AndOpModel model({TensorType_BOOL, {2, 1, 2, 1}},
{TensorType_BOOL, {2, 1, 2, 1}}, {TensorType_BOOL, {}});
model.PopulateTensor<bool>(model.input1(), {false, false, true, true});
model.PopulateTensor<bool>(model.input2(), {false, true, false, true});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<bool>(), ElementsAre(false, false, false, true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_and.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_and_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8be0f844-9f6f-4412-9f03-1e5bca387c25 | cpp | tensorflow/tensorflow | if | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/if.cc | tensorflow/lite/kernels/if_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/if.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
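// Lowers mhlo.if to tfl.if: the true/false regions are moved into the TFL
// op's then/else regions, and their terminators are rewritten into tfl.yield.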
class LegalizeIfOp : public OpConversionPattern<mhlo::IfOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::IfOp if_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
auto new_op = rewriter.create<TFL::IfOp>(
if_op.getLoc(), if_op.getResultTypes(), if_op.getPred());
new_op.getThenRegion().takeBody(if_op.getTrueBranch());
new_op.getElseRegion().takeBody(if_op.getFalseBranch());
ReplaceTerminatorWithYield(new_op.getThenRegion(), rewriter);
ReplaceTerminatorWithYield(new_op.getElseRegion(), rewriter);
rewriter.replaceOp(if_op, new_op.getResults());
return success();
}
};
}
void PopulateIfPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeIfOp>(ctx);
target.addIllegalOp<mhlo::IfOp>();
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::CheckScalarStringTensor;
using subgraph_test_util::CheckStringTensor;
using subgraph_test_util::ControlFlowOpTest;
using subgraph_test_util::FillIntTensor;
using subgraph_test_util::FillScalarStringTensor;
namespace {
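// The primary graph's IF op dispatches to an add subgraph (then branch) or a
// mul subgraph (else branch).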
class SimpleIfTest : public ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
}
};
TEST_F(SimpleIfTest, TestIfTrue) {
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {6, 9});
}
TEST_F(SimpleIfTest, TestIfFalse) {
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {5, 14});
}
TEST_F(SimpleIfTest, TestIfTrueWithLargeInputsTwice) {
const size_t kNumLargeTensors = 100000;
interpreter_->ResizeInputTensor(interpreter_->inputs()[1],
{kNumLargeTensors});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
const std::vector<int> input_vector(kNumLargeTensors, 1);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), input_vector);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {9});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected(kNumLargeTensors, 10);
CheckIntTensor(output, {kNumLargeTensors}, expected);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {19});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected2(kNumLargeTensors, 20);
CheckIntTensor(output, {kNumLargeTensors}, expected2);
}
TEST_F(SimpleIfTest, TestIfFalseWithLargeInputsTwice) {
const size_t kNumLargeTensors = 100000;
interpreter_->ResizeInputTensor(interpreter_->inputs()[1],
{kNumLargeTensors});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
const std::vector<int> input_vector(kNumLargeTensors, 1);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), input_vector);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected(kNumLargeTensors, 0);
CheckIntTensor(output, {kNumLargeTensors}, expected);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {7});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected2(kNumLargeTensors, 7);
CheckIntTensor(output, {kNumLargeTensors}, expected2);
}
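// The pad branch produces a dynamically sized output, so the IF op's output
// must be dynamic as well.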
class DynamicSubgraphIfTest : public ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildPadSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
}
};
TEST_F(DynamicSubgraphIfTest, TestIfTrue) {
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
EXPECT_TRUE(IsDynamicTensor(output));
CheckIntTensor(output, {1, 2}, {6, 9});
}
TEST_F(DynamicSubgraphIfTest, TestIfFalse) {
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
EXPECT_TRUE(IsDynamicTensor(output));
CheckIntTensor(output, {5}, {0, 5, 7, 0, 0});
}
class IfTest : public ControlFlowOpTest {};
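// Checks that IF still works when its branch subgraphs are delegated to
// XNNPACK while the primary subgraph and the first branch are marked as
// delegation-skippable.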
TEST_F(IfTest, TestWithXNNPACK) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(1));
builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(2));
builder_->BuildFloatIfSubgraph(&interpreter_->primary_subgraph(), 3);
const auto opt = TfLiteXNNPackDelegateOptionsDefault();
TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&opt);
interpreter_->primary_subgraph().MarkAsDelegationSkippable();
interpreter_->subgraph(1)->MarkAsDelegationSkippable();
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(xnnpack_delegate), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
float* input0 =
GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[1]));
input0[0] = 1;
float* input1 =
GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[2]));
input1[0] = 1;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
float* output0_data = GetTensorData<float>(output0);
ASSERT_EQ(output0_data[0], 4);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
float* output1_data = GetTensorData<float>(output1);
ASSERT_EQ(output1_data[0], 4);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteXNNPackDelegateDelete(xnnpack_delegate);
}
TEST_F(IfTest, TestInputIsOutput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(1));
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 4);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {2});
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
CheckIntTensor(output0, {1}, {2});
CheckIntTensor(output1, {1}, {2});
}
TEST_F(IfTest, TestInputIsOutputButDifferent) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildInputIsDifferentOutputSubgraph(interpreter_->subgraph(1));
builder_->BuildInputIsDifferentOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestFlexOutput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildFlexOutputSubgraph(interpreter_->subgraph(1));
builder_->BuildFlexOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {3, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestCounterOnly) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildCounterOnlySubgraph(interpreter_->subgraph(1));
builder_->BuildCounterOnlySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 2);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestAllCases) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildAllInplaceScenariosSubgraph(interpreter_->subgraph(1));
builder_->BuildAllInplaceScenariosSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 6);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[5], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[5]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {3});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {3});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {2}, {2, 2});
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
CheckIntTensor(output3, {2}, {3, 3});
TfLiteTensor* output4 = interpreter_->tensor(interpreter_->outputs()[4]);
CheckIntTensor(output4, {1}, {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestStaticUnconsumedOutputs) {
for (bool dynamic_tensors : {true, false}) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(1));
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraphWithUnconsumedOutput(
&interpreter_->primary_subgraph(), 4);
InterpreterOptions options;
if (dynamic_tensors) {
options.OptimizeMemoryForLargeTensors(1);
interpreter_->ApplyOptions(&options);
}
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {4});
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {2}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
CheckIntTensor(output1, {2}, {4, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(IfTest, TestDynamicOpTriggersAllocationOfUnusedInput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
interpreter_->subgraph(1));
builder_->BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 4);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {3});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {4, 4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {2}, {2, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestStaticInPlace) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildDeepBodySubgraph(interpreter_->subgraph(1));
builder_->BuildDeepBodySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {0});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestStaticInPlaceLarge) {
int size = 10000;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeBodySubgraph(interpreter_->subgraph(1));
builder_->BuildLargeBodySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {size}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]),
std::vector<int>(size, 1));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {}, {10000});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {size}, std::vector<int>(size, 6));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestTriangularNumberSequence) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(1));
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
auto body_subgraph = interpreter_->subgraph(2);
TfLiteTensor* subgraph_input2 =
body_subgraph->tensor(body_subgraph->inputs()[1]);
EXPECT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
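// Verifies that, with OptimizeMemoryForLargeTensors enabled, the interpreter
// hands the large input buffer to the body subgraph without a deep copy; the
// kTfLiteCustom allocation type on the body input confirms the sharing.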
TEST_F(IfTest, TestTriangularNumberSequenceWithShallowCopy) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(1));
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1000000});
InterpreterOptions options;
options.OptimizeMemoryForLargeTensors(1000000);
ASSERT_EQ(interpreter_->ApplyOptions(&options), kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
const std::vector<int> input_vector(1000000, 1);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), input_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
auto body_subgraph = interpreter_->subgraph(2);
TfLiteTensor* subgraph_input2 =
body_subgraph->tensor(body_subgraph->inputs()[1]);
ASSERT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
const std::vector<int> expected2(1000000, 3);
CheckIntTensor(output2, {1000000}, expected2);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestPadLoop) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(1), {1, 2});
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {5, 7});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {5}, {0, 5, 7, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestDynamicBodyWithSharingEarlyExit) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(1));
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 5);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {10000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {3}, {2, 3, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestDynamicBodyWithSharing) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(1));
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 5);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1000000});
interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1000000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {2});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {3}, {2, 3, 4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
EXPECT_EQ(output2->dims->data[0], 1000000);
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
EXPECT_EQ(output3->dims->data[0], 1000000);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestDynamicBodyWithSharingAndAliases) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildDynamicBodySubgraphWithAliases(interpreter_->subgraph(1));
builder_->BuildDynamicBodySubgraphWithAliases(interpreter_->subgraph(2));
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 6);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {0});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {3});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[5]), {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {1});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {11});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {1}, {12});
  TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
CheckIntTensor(output3, {1}, {13});
TfLiteTensor* output4 = interpreter_->tensor(interpreter_->outputs()[4]);
CheckIntTensor(output4, {1}, {13});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestOutputNotConsumed) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildOutputNotConsumedSubgraph(*interpreter_->subgraph(1));
builder_->BuildOutputNotConsumedSubgraph(*interpreter_->subgraph(2));
builder_->BuildOutputNotConsumedIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestPadLoopWithSharing) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargePadSubgraph(interpreter_->subgraph(1), {1, 2});
builder_->BuildLargePadSubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 4);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {3, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {3});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {5, 6});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {5}, {0, 5, 6, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestPadLoopWithShallowCopy) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(1), {1, 2});
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1000000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
std::vector<int> input_vector(1000000, 0);
input_vector[0] = 5;
input_vector[1] = 7;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), input_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
std::vector<int> output_vector(1000003, 0);
output_vector[1] = 5;
output_vector[2] = 7;
CheckIntTensor(output2, {1000003}, output_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(IfTest, TestIfLoopWithDynamicTensor) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildBodySubgraphWithDynamicTensor(interpreter_->subgraph(1));
builder_->BuildBodySubgraphWithDynamicTensor(interpreter_->subgraph(2));
builder_->BuildIfSubgraphWithDynamicTensor(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[1]), "A");
FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[2]), "A");
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* string_output1 =
interpreter_->tensor(interpreter_->outputs()[0]);
CheckScalarStringTensor(string_output1, "A");
TfLiteTensor* string_output2 =
interpreter_->tensor(interpreter_->outputs()[1]);
CheckStringTensor(string_output2, {2}, {"A", "A"});
TfLiteTensor* integer_output =
interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(integer_output, {1}, {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/if.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/if_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
101f50fd-1f21-43e6-97f3-00b657bd3792 | cpp | tensorflow/tensorflow | exp | tensorflow/lite/kernels/exp.cc | tensorflow/lite/kernels/exp_test.cc | #include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace exp {
enum KernelType {
kReference,
};
struct ExpContext {
ExpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
union {
int8_t lut_int8[LUTSize<int8_t>()];
int16_t lut_int16[LUTSize<int16_t>()];
};
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
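// Prepare: validates arity, mirrors the input shape/type onto the output and,
// for quantized int8/int16 inputs, precomputes an exp() lookup table from the
// input/output quantization parameters so Eval only needs table lookups.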
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
ExpContext op_context(context, node);
const TfLiteTensor* input = op_context.input;
TfLiteTensor* output = op_context.output;
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims);
output->type = input->type;
if (input->type == kTfLiteInt8) {
LUTPopulate<int8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point, [](float value) { return std::exp(value); },
data->lut_int8);
} else if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
LUTPopulate<int16_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point, [](float value) { return std::exp(value); },
data->lut_int16);
}
return context->ResizeTensor(context, op_context.output, output_dims);
}
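// Eval: float inputs go through the reference element-wise Exp loop; int8 and
// int16 inputs are evaluated with the lookup table built in Prepare. All
// other types are rejected with an error.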
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
ExpContext op_context(context, node);
if (kernel_type == kReference) {
switch (op_context.input->type) {
case kTfLiteFloat32:
reference_ops::Exp(GetTensorData<float>(op_context.input),
NumElements(op_context.input),
GetTensorData<float>(op_context.output));
break;
case kTfLiteInt8:
reference_integer_ops::LookupTable(
GetTensorData<int8_t>(op_context.input),
NumElements(op_context.input), data->lut_int8,
GetTensorData<int8_t>(op_context.output));
break;
case kTfLiteInt16:
reference_integer_ops::LookupTable(
GetTensorData<int16_t>(op_context.input),
NumElements(op_context.input), data->lut_int16,
GetTensorData<int16_t>(op_context.output));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by Exp.",
op_context.input->type);
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXP_REF() {
static TfLiteRegistration r = {exp::Init, exp::Free, exp::Prepare,
exp::Eval<exp::kReference>};
return &r;
}
TfLiteRegistration* Register_EXP() { return Register_EXP_REF(); }
}
}
} | #include <math.h>
#include <initializer_list>
#include <limits>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class BaseExpOpModel : public SingleOpModel {
public:
BaseExpOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_EXP, BuiltinOptions_ExpOptions,
CreateExpOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int output_;
};
class FloatExpOpModel : public BaseExpOpModel {
public:
using BaseExpOpModel::BaseExpOpModel;
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
class QuantizedExpOpModel : public BaseExpOpModel {
public:
using BaseExpOpModel::BaseExpOpModel;
template <class T>
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename integer_dtype>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
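// Tolerance for comparing dequantized outputs: one quantization step over the
// [min, max] range, with extra slack added for the int8 and int16 paths.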
template <typename T>
inline float GetTolerance(float min, float max) {
float kQuantizedTolerance = (max - min) / (std::numeric_limits<T>::max() -
std::numeric_limits<T>::min());
if (std::is_same<T, int8_t>::value) {
kQuantizedTolerance += (max - min) / 256.0f;
} else if (std::is_same<T, int16_t>::value) {
kQuantizedTolerance += (max - min) / 512.0f;
}
return kQuantizedTolerance;
}
TEST(ExpOpTest, ExpFloat) {
std::initializer_list<float> data = {0.0f, 1.0f, -1.0f, 100.0f,
-100.0f, 0.01f, -0.01f};
FloatExpOpModel m({TensorType_FLOAT32, {1, 1, 7}}, {TensorType_FLOAT32, {}});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 7}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{std::exp(0.0f), std::exp(1.0f), std::exp(-1.0f), std::exp(100.0f),
std::exp(-100.0f), std::exp(0.01f), std::exp(-0.01f)})));
}
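// kMax is max/(max+1) so that the [kMin, kMax]-derived ranges below quantize
// with a zero point of exactly 0, exercising the symmetric LUT path.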
template <TensorType tensor_type, typename integer_dtype>
void QuantizedExpSymmetricTest() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
const float kQuantizedTolerance = GetTolerance<integer_dtype>(-3.1, 3.1);
QuantizedExpOpModel m({tensor_type, {1, 2, 2, 2}, 1.3f * kMin, 1.3f * kMax},
{tensor_type, {}, 3.01f * kMin, 3.01f * kMax});
m.SetInput<integer_dtype>({-1.3, -1.0, -0.3, 0, 0.1, 0.5, 1.0, 1.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{0.2725, 0.3679, 0.7408, 1.0, 1.1052, 1.6487, 2.7183, 3.0042},
kQuantizedTolerance)));
}
TEST(ExpOpTest, ExpSymmetricInt8) {
QuantizedExpSymmetricTest<TensorType_INT8, int8_t>();
}
TEST(ExpOpTest, ExpSymmetricInt16) {
QuantizedExpSymmetricTest<TensorType_INT16, int16_t>();
}
template <TensorType tensor_type, typename integer_dtype>
void QuantizedExpAsymmetricTest() {
const float kQuantizedTolerance = GetTolerance<integer_dtype>(-1.3, 3.01);
QuantizedExpOpModel m({tensor_type, {1, 2, 2, 2}, -1.3, 1.1},
{tensor_type, {}, 0.0, 3.01});
m.SetInput<integer_dtype>({-1.3, -1.0, -0.3, 0, 0.1, 0.5, 1.0, 1.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{0.2725, 0.3679, 0.7408, 1.0, 1.1052, 1.6487, 2.7183, 3.0042},
kQuantizedTolerance)));
}
TEST(ExpOpTest, ExpAsymmetricInt8) {
QuantizedExpAsymmetricTest<TensorType_INT8, int8_t>();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/exp.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/exp_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
60891ecd-aa49-424b-b793-09e6139c0d9f | cpp | tensorflow/tensorflow | hashtable_lookup | tensorflow/lite/kernels/hashtable_lookup.cc | tensorflow/lite/kernels/hashtable_lookup_test.cc | #include <stdint.h>
#include <cstdlib>
#include <cstring>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
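// bsearch() comparator for int32 keys: the usual ascending three-way compare
// (the name notwithstanding). Note the raw subtraction could overflow for
// keys near the int32 limits.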
int greater(const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
}
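// Prepare: lookup and key must be 1-D int32 tensors, and the value table must
// have one row per key. The primary output takes the value shape with the row
// count replaced by the number of lookups; the "hits" output is a uint8
// vector of the same length. String outputs are sized later, during Eval.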
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));
TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1);
TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32);
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0),
SizeOfDimension(value, 0));
if (value->type == kTfLiteString) {
TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1);
}
TfLiteTensor* hits;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));
TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8);
TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1);
hitSize->data[0] = SizeOfDimension(lookup, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_EQ(context, value->type, output->type);
TfLiteStatus status = kTfLiteOk;
if (output->type != kTfLiteString) {
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value));
outputSize->data[0] = SizeOfDimension(lookup, 0);
for (int i = 1; i < NumDimensions(value); i++) {
outputSize->data[i] = SizeOfDimension(value, i);
}
status = context->ResizeTensor(context, output, outputSize);
}
if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) {
status = kTfLiteError;
}
return status;
}
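// Eval: binary-searches each lookup id in the (assumed sorted) key tensor.
// On a hit the matching value row is copied out (or appended, for strings)
// and hits[i] is set to 1; on a miss the row is zero-filled (or left as an
// empty string) and hits[i] is 0.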
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteTensor* hits;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));
const int num_rows = SizeOfDimension(value, 0);
TF_LITE_ENSURE(context, num_rows != 0);
const int row_bytes = value->bytes / num_rows;
void* pointer = nullptr;
DynamicBuffer buf;
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int idx = -1;
pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows,
sizeof(int32_t), greater);
if (pointer != nullptr) {
idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) /
sizeof(int32_t);
}
if (idx >= num_rows || idx < 0) {
if (output->type == kTfLiteString) {
buf.AddString(nullptr, 0);
} else {
memset(output->data.raw + i * row_bytes, 0, row_bytes);
}
hits->data.uint8[i] = 0;
} else {
if (output->type == kTfLiteString) {
buf.AddString(GetString(value, idx));
} else {
memcpy(output->data.raw + i * row_bytes,
value->data.raw + idx * row_bytes, row_bytes);
}
hits->data.uint8[i] = 1;
}
}
if (output->type == kTfLiteString) {
buf.WriteToTensorAsVector(output);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_HASHTABLE_LOOKUP() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <functional>
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class HashtableLookupOpModel : public SingleOpModel {
public:
HashtableLookupOpModel(std::initializer_list<int> lookup_shape,
std::initializer_list<int> key_shape,
std::initializer_list<int> value_shape,
TensorType type) {
lookup_ = AddInput(TensorType_INT32);
key_ = AddInput(TensorType_INT32);
value_ = AddInput(type);
output_ = AddOutput(type);
hit_ = AddOutput(TensorType_UINT8);
SetBuiltinOp(BuiltinOperator_HASHTABLE_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreter({lookup_shape, key_shape, value_shape});
}
void SetLookup(std::initializer_list<int> data) {
PopulateTensor<int>(lookup_, data);
}
void SetHashtableKey(std::initializer_list<int> data) {
PopulateTensor<int>(key_, data);
}
void SetHashtableValue(const std::vector<string>& content) {
PopulateStringTensor(value_, content);
}
void SetHashtableValue(const std::function<float(int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
for (int i = 0; i < rows; i++) {
GetTensorData<float>(tensor)[i] = function(i);
}
}
void SetHashtableValue(const std::function<float(int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
int features = tensor->dims->data[1];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < features; j++) {
GetTensorData<float>(tensor)[i * features + j] = function(i, j);
}
}
}
std::vector<string> GetStringOutput() {
TfLiteTensor* output = interpreter_->tensor(output_);
int num = GetStringCount(output);
std::vector<string> result(num);
for (int i = 0; i < num; i++) {
auto ref = GetString(output, i);
result[i] = string(ref.str, ref.len);
}
return result;
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<uint8_t> GetHit() { return ExtractVector<uint8_t>(hit_); }
private:
int lookup_;
int key_;
int value_;
int output_;
int hit_;
};
TEST(HashtableLookupOpTest, Test2DInput) {
HashtableLookupOpModel m({4}, {3}, {3, 2}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i, int j) { return i + j / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
2.0, 2.1,
0, 0,
0.0, 0.1,
1.0, 1.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
TEST(HashtableLookupOpTest, Test1DInput) {
HashtableLookupOpModel m({4}, {3}, {3}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i) { return i * i / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
0.4,
0,
0.0,
0.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
TEST(HashtableLookupOpTest, TestString) {
HashtableLookupOpModel m({4}, {3}, {3}, TensorType_STRING);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue({"Hello", "", "Hi"});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({
"Hi",
"",
"Hello",
"",
}));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/hashtable_lookup.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/hashtable_lookup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff3afe7c-f15a-4cb4-a22b-73a9b0ec0931 | cpp | tensorflow/tensorflow | space_to_batch_nd | tensorflow/lite/kernels/space_to_batch_nd.cc | tensorflow/lite/kernels/space_to_batch_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace space_to_batch_nd {
enum KernelType {
kReference,
kGenericOptimized,
};
struct SpaceToBatchNDContext {
SpaceToBatchNDContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
block_shape = GetInput(context, node, 1);
paddings = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* block_shape;
const TfLiteTensor* paddings;
TfLiteTensor* output;
};
const int kInputMinDimensionNum = 3;
const int kInputMaxDimensionNum = 4;
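// Output shape: each spatial dimension becomes
// (input_dim + pad_before + pad_after) / block_shape[dim], which must divide
// evenly; the batch dimension is multiplied by the product of block_shape.
// The trailing (depth) dimension is carried over unchanged.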
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
SpaceToBatchNDContext* op_context) {
TfLiteIntArray* input_size = op_context->input->dims;
const int32* block_shape = GetTensorData<int32>(op_context->block_shape);
const int32* paddings_data = GetTensorData<int32>(op_context->paddings);
int spatial_dims_num = input_size->size - 2;
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape), 1);
TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->paddings), 2);
TF_LITE_ENSURE_EQ(context, op_context->paddings->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, op_context->paddings->dims->data[1], 2);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
int output_batch_size = input_size->data[0];
for (int dim = 0; dim < spatial_dims_num; ++dim) {
int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] +
paddings_data[dim * 2 + 1]);
TF_LITE_ENSURE(context, block_shape[dim] != 0);
TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0);
output_size->data[dim + 1] = final_dim_size / block_shape[dim];
output_batch_size *= block_shape[dim];
}
output_size->data[0] = output_batch_size;
output_size->data[input_size->size - 1] =
input_size->data[input_size->size - 1];
return context->ResizeTensor(context, op_context->output, output_size);
}
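// Prepare: only 3-D and 4-D inputs are supported, and quantized inputs must
// share scale/zero point with the output. When block_shape or paddings are
// not constant tensors the output is marked dynamic and resized in Eval.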
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
SpaceToBatchNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_TYPES_EQ(context, op_context.input->type,
op_context.output->type);
if (op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
}
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(op_context.block_shape) ||
!IsConstantOrPersistentTensor(op_context.paddings)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
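// Eval: resolves a dynamic output shape if needed, then dispatches the
// reference or optimized SpaceToBatchND on the element type. Quantized types
// pad with the output zero point rather than a literal zero.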
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
SpaceToBatchNDContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
#define TF_LITE_SPACE_TO_BATCH_ND(type, scalar, pad_value) \
tflite::SpaceToBatchParams op_params; \
op_params.output_offset = pad_value; \
type::SpaceToBatchND(op_params, GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
GetTensorShape(op_context.block_shape), \
GetTensorData<int32_t>(op_context.block_shape), \
GetTensorShape(op_context.paddings), \
GetTensorData<int32_t>(op_context.paddings), \
GetTensorShape(op_context.output), \
GetTensorData<scalar>(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, float, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, float, 0);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, uint8_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, uint8_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int8_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int8_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int16_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int16_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int32_t, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int32_t, 0);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int64_t, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int64_t, 0);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by SpaceToBatch.",
op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_BATCH_ND
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, space_to_batch_nd::Prepare,
space_to_batch_nd::Eval<space_to_batch_nd::kReference>};
return &r;
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, space_to_batch_nd::Prepare,
space_to_batch_nd::Eval<space_to_batch_nd::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND() {
return Register_SPACE_TO_BATCH_ND_GENERIC_OPT();
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Matcher;
class SpaceToBatchNDOpModel : public SingleOpModel {
public:
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
template <typename T>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
void SetBlockShape(std::initializer_list<int> data) {
PopulateTensor<int>(block_shape_, data);
}
void SetPaddings(std::initializer_list<int> data) {
PopulateTensor<int>(paddings_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
protected:
int input_;
int block_shape_;
int paddings_;
int output_;
};
class SpaceToBatchNDOpConstModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpConstModel(
const TensorData& input, std::initializer_list<int> block_shape,
std::initializer_list<int> paddings, const TensorData& output,
std::initializer_list<int> paddings_dims = {2, 2}) {
input_ = AddInput(input);
block_shape_ = AddConstInput(TensorType_INT32, block_shape,
{static_cast<int>(block_shape.size())});
paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_dims);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreter({input.shape});
}
};
class SpaceToBatchNDOpDynamicModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpDynamicModel(
const TensorData& input, const TensorData& output,
std::initializer_list<int> block_shape_dims = {2},
std::initializer_list<int> paddings_dims = {2, 2}) {
input_ = AddInput(input);
block_shape_ = AddInput(TensorType_INT32);
paddings_ = AddInput(TensorType_INT32);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreter({input.shape, block_shape_dims, paddings_dims});
}
};
#if GTEST_HAS_DEATH_TEST
TEST(SpaceToBatchNDOpTest, InvalidShapeTest) {
EXPECT_DEATH(
SpaceToBatchNDOpConstModel({TensorType_FLOAT32, {1, 3, 3, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32}),
"Cannot allocate tensors");
}
#endif
TEST(SpaceToBatchNDOpTest, SimpleConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, SimpleDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2, 2});
m.SetPaddings({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, MultipleInputBatchesConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {2, 2, 4, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, MultipleInputBatchesDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2, 2});
m.SetPaddings({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, SimplePaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 5, 2, 1}}, {3, 2},
{1, 0, 2, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(SpaceToBatchNDOpTest, SimplePaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 5, 2, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 0, 2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(SpaceToBatchNDOpTest, ComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 2, 1}}, {3, 2},
{1, 1, 2, 4}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
TEST(SpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 2, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
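// Matcher with a tolerance of one quantization step for the given integer
// type over [min, max].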
template <typename integer_dtype = int8_t>
std::vector<Matcher<float>> DequantizedArrayNear(
const std::vector<float>& values, const float min, const float max) {
const float quantization_tolerance =
(max - min) / (std::numeric_limits<integer_dtype>::max() -
std::numeric_limits<integer_dtype>::min());
return ArrayFloatNear(values, quantization_tolerance);
}
#if GTEST_HAS_DEATH_TEST
TEST(QuantizedSpaceToBatchNDOpTest, ZeroNotInQuantizationRange) {
EXPECT_DEATH(SpaceToBatchNDOpConstModel m(
{TensorType_UINT8, {1, 2, 2, 1}, 1.0, 2.0}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, {TensorType_UINT8, {}, 1.0, 2.0}),
".*Check failed: f_min <= 0.*");
}
#endif
template <typename integer_dtype>
void SimplePaddingConstTestQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
SpaceToBatchNDOpConstModel m(
{GetTensorType<integer_dtype>(), {1, 5, 2, 1}, 1.0f * kMin, 1.0f * kMax},
{3, 2}, {1, 0, 2, 0},
{GetTensorType<integer_dtype>(), {}, 1.0f * kMin, 1.0f * kMax});
m.SetQuantizedInput<integer_dtype>(
{-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8, -0.9, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(DequantizedArrayNear<integer_dtype>(
{0, 0, 0, -0.5, 0, 0, 0, 0.6, 0, -0.1, 0, -0.7,
0, 0.2, 0, 0.8, 0, -0.3, 0, -0.9, 0, 0.4, 0, 0.1},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestUint8) {
SimplePaddingConstTestQuant<uint8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestInt8) {
SimplePaddingConstTestQuant<int8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestInt16) {
SimplePaddingConstTestQuant<int16_t>();
}
template <typename integer_dtype>
void SimplePaddingDynamicTestQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
SpaceToBatchNDOpDynamicModel m(
{GetTensorType<integer_dtype>(), {1, 5, 2, 1}, 1.0f * kMin, 1.0f * kMax},
{GetTensorType<integer_dtype>(), {}, 1.0f * kMin, 1.0f * kMax});
m.SetQuantizedInput<integer_dtype>(
{-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8, -0.9, 0.1});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 0, 2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(DequantizedArrayNear<integer_dtype>(
{0, 0, 0, -0.5, 0, 0, 0, 0.6, 0, -0.1, 0, -0.7,
0, 0.2, 0, 0.8, 0, -0.3, 0, -0.9, 0, 0.4, 0, 0.1},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestUint8) {
SimplePaddingDynamicTestQuant<uint8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestInt8) {
SimplePaddingDynamicTestQuant<int8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestInt16) {
SimplePaddingDynamicTestQuant<int16_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, ComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_UINT8, {1, 4, 2, 1}, -1.0, 1.0},
{3, 2}, {1, 1, 2, 4},
{TensorType_UINT8, {}, -1.0, 1.0});
m.SetQuantizedInput<uint8_t>({-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(DequantizedArrayNear(
{
0, 0, 0, 0, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.6, 0, 0,
0, -0.1, 0, 0, 0, -0.7, 0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0,
0, -0.3, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0,
},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_UINT8, {1, 4, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {}, -1.0, 1.0});
m.SetQuantizedInput<uint8_t>({-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(DequantizedArrayNear(
{
0, 0, 0, 0, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.6, 0, 0,
0, -0.1, 0, 0, 0, -0.7, 0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0,
0, -0.3, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0,
},
-1.0, 1.0)));
}
TEST(SpaceToBatchNDOpTest, Simple3DConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4}}, {2}, {0, 0},
{TensorType_FLOAT32}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 9, 10, 11, 12, 5, 6,
7, 8, 13, 14, 15, 16}));
}
TEST(SpaceToBatchNDOpTest, Simple3DPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4}}, {2}, {2, 2},
{TensorType_FLOAT32}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4, 4}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 1, 2, 3, 4, 9, 10, 11, 12, 0, 0, 0, 0,
0, 0, 0, 0, 5, 6, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0}));
}
TEST(SpaceToBatchNDOpTest, Simple3DDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4}},
{TensorType_FLOAT32}, {1}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2});
m.SetPaddings({0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 9, 10, 11, 12, 5, 6,
7, 8, 13, 14, 15, 16}));
}
TEST(SpaceToBatchNDOpTest, Simple3DPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4}},
{TensorType_FLOAT32}, {1}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2});
m.SetPaddings({2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4, 4}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 1, 2, 3, 4, 9, 10, 11, 12, 0, 0, 0, 0,
0, 0, 0, 0, 5, 6, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/space_to_batch_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/space_to_batch_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f892173-9e18-4bdc-8395-880eeb4ffd46 | cpp | tensorflow/tensorflow | stablehlo_shift_left | tensorflow/lite/kernels/stablehlo_shift_left.cc | tensorflow/lite/kernels/stablehlo_shift_left_test.cc | #include <cstdint>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_shift_left {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
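// Element-wise arithmetic left shift: output[i] = input1[i] << input2[i].
// Operands are assumed to have identical shape and type (no broadcasting).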
template <typename DataType>
TfLiteStatus EvalImpl(const TfLiteTensor* operand1,
const TfLiteTensor* operand2, TfLiteTensor* result) {
const int num_elements = NumElements(result);
const DataType* input1 = GetTensorData<DataType>(operand1);
const DataType* input2 = GetTensorData<DataType>(operand2);
DataType* output = GetTensorData<DataType>(result);
for (int i = 0; i < num_elements; ++i) {
output[i] = input1[i] << input2[i];
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input1->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input1->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteType data_type = input1->type;
if (data_type == kTfLiteInt8) {
return EvalImpl<int8_t>(input1, input2, output);
} else if (data_type == kTfLiteInt16) {
return EvalImpl<int16_t>(input1, input2, output);
} else if (data_type == kTfLiteInt32) {
return EvalImpl<int32_t>(input1, input2, output);
} else {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(data_type));
return kTfLiteError;
}
}
}
}
TfLiteRegistration* Register_STABLEHLO_SHIFT_LEFT() {
static TfLiteRegistration r = {nullptr, nullptr,
stablehlo_shift_left::Prepare,
stablehlo_shift_left::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using testing::ElementsAreArray;
class ShiftLeftOpModel : public SingleOpModel {
public:
ShiftLeftOpModel(const TensorData& input1, const TensorData& input2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(TensorData(input1.type, GetShape(input1_)));
SetBuiltinOp(BuiltinOperator_STABLEHLO_SHIFT_LEFT, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input1_;
int input2_;
int output_;
};
TEST(ShiftLeftOpTest, ShiftLeftInt32) {
ShiftLeftOpModel model({TensorType_INT32, {3}}, {TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.input1(), {-1, 0, 1});
model.PopulateTensor<int32_t>(model.input2(), {1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({-2, 0, 8}));
}
TEST(ShiftLeftOpTest, ShiftLeftInt16) {
ShiftLeftOpModel model({TensorType_INT16, {2, 2}},
{TensorType_INT16, {2, 2}});
model.PopulateTensor<int16_t>(model.input1(), {-5, -5, 0, 6});
model.PopulateTensor<int16_t>(model.input2(), {0, 2, 0, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({-5, -20, 0, 24}));
}
TEST(ShiftLeftOpTest, ShiftLeftInt8) {
ShiftLeftOpModel model({TensorType_INT8, {2, 2}}, {TensorType_INT8, {2, 2}});
model.PopulateTensor<int8_t>(model.input1(), {2, -2, -2, -4});
model.PopulateTensor<int8_t>(model.input2(), {0, 1, 0, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAreArray({2, -4, -2, -128}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_shift_left.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_shift_left_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
74347d5a-f256-4f23-8442-5f6ae96ccda2 | cpp | tensorflow/tensorflow | rank | tensorflow/lite/kernels/rank.cc | tensorflow/lite/kernels/rank_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace rank {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
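// A tensor's rank is fully known at Prepare time, so the scalar int32 output
// is computed here and marked persistent read-only; Eval below is a no-op.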
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteInt32;
SetTensorToPersistentRo(output);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(0);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0);
if (output->type == kTfLiteInt32) {
int32_t* output_data = GetTensorData<int32_t>(output);
*output_data = NumDimensions(input);
} else {
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RANK() {
static TfLiteRegistration r = {nullptr, nullptr, rank::Prepare, rank::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class RankOpModel : public SingleOpModel {
public:
RankOpModel(std::initializer_list<int> input_shape, TensorType input_type) {
TensorType output_type = TensorType_INT32;
input_ = AddInput(input_type);
output_ = AddOutput(output_type);
SetBuiltinOp(BuiltinOperator_RANK, BuiltinOptions_RankOptions,
CreateRankOptions(builder_).Union());
BuildInterpreter({input_shape});
}
TfLiteStatus InvokeWithResult() { return interpreter_->Invoke(); }
int input() { return input_; }
std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
TfLiteAllocationType GetOutputAllocationType() const {
return interpreter_->tensor(interpreter_->outputs()[0])->allocation_type;
}
private:
int input_;
int output_;
};
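// The RANK kernel writes its result during Prepare into a persistent
// read-only tensor, so the output is already valid before Invoke() is
// called; the tests below check it both before and after invocation.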
TEST(RankOpTest, InputTypeFloat) {
RankOpModel model({1, 3, 1, 3, 5}, TensorType_FLOAT32);
ASSERT_EQ(model.GetOutputAllocationType(), kTfLitePersistentRo);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_TRUE(model.GetOutputShape().empty());
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
TEST(RankOpTest, InputTypeInt) {
RankOpModel model({1, 3, 1, 3, 5}, TensorType_INT32);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
TEST(RankOpTest, ScalarTensor) {
RankOpModel model({}, TensorType_FLOAT32);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
TEST(RankOpTest, EmptyTensor) {
RankOpModel model({1, 0}, TensorType_FLOAT32);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({2}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rank.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rank_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f292d6b-3fad-4bd3-9908-3933e5739475 | cpp | tensorflow/tensorflow | eigen_support | tensorflow/lite/kernels/eigen_support.cc | tensorflow/lite/kernels/eigen_support_test.cc | #include "tensorflow/lite/kernels/eigen_support.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/lite/kernels/op_macros.h"
#ifndef EIGEN_DONT_ALIGN
#include "tensorflow/lite/util.h"
#endif
namespace tflite {
namespace eigen_support {
namespace {
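// Thread counts of -1 (and anything below) map to the default of 4 threads;
// values >= 0 are used as-is.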
const int kDefaultNumThreadpoolThreads = 4;
bool IsValidNumThreads(int num_threads) { return num_threads >= -1; }
int GetNumThreads(int num_threads) {
return num_threads > -1 ? num_threads : kDefaultNumThreadpoolThreads;
}
#ifndef EIGEN_DONT_ALIGN
static_assert(
kDefaultTensorAlignment % EIGEN_MAX_ALIGN_BYTES == 0,
"kDefaultTensorAlignment doesn't comply with Eigen alignment requirement.");
#endif
void SetEigenNbThreads(int threads) {
#if defined(EIGEN_HAS_OPENMP)
Eigen::setNbThreads(threads);
#endif
}
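// Adapts Eigen's ThreadPoolInterface so that the single-threaded case runs
// scheduled closures inline instead of allocating a thread pool.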
class EigenThreadPoolWrapper : public Eigen::ThreadPoolInterface {
public:
explicit EigenThreadPoolWrapper(int num_threads) {
if (num_threads > 1) {
pool_ = std::make_unique<Eigen::ThreadPool>(num_threads);
}
}
~EigenThreadPoolWrapper() override {}
void Schedule(std::function<void()> fn) override {
if (pool_) {
pool_->Schedule(std::move(fn));
} else {
fn();
}
}
int NumThreads() const override { return pool_ ? pool_->NumThreads() : 1; }
int CurrentThreadId() const override {
return pool_ ? pool_->CurrentThreadId() : 0;
}
private:
std::unique_ptr<Eigen::ThreadPool> pool_;
};
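// Creates the Eigen ThreadPoolDevice lazily on first use and drops it (to be
// rebuilt) whenever the effective thread count changes.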
class LazyEigenThreadPoolHolder {
public:
explicit LazyEigenThreadPoolHolder(int num_threads) {
SetNumThreads(num_threads);
}
const Eigen::ThreadPoolDevice* GetThreadPoolDevice() {
if (!device_) {
thread_pool_wrapper_ =
std::make_unique<EigenThreadPoolWrapper>(target_num_threads_);
device_ = std::make_unique<Eigen::ThreadPoolDevice>(
thread_pool_wrapper_.get(), target_num_threads_);
}
return device_.get();
}
void SetNumThreads(int num_threads) {
const int target_num_threads = GetNumThreads(num_threads);
if (target_num_threads_ != target_num_threads) {
target_num_threads_ = target_num_threads;
device_.reset();
thread_pool_wrapper_.reset();
}
}
private:
int target_num_threads_ = kDefaultNumThreadpoolThreads;
std::unique_ptr<Eigen::ThreadPoolDevice> device_;
std::unique_ptr<Eigen::ThreadPoolInterface> thread_pool_wrapper_;
};
struct RefCountedEigenContext : public TfLiteExternalContext {
std::unique_ptr<LazyEigenThreadPoolHolder> thread_pool_holder;
int num_references = 0;
};
RefCountedEigenContext* GetEigenContext(TfLiteContext* context) {
return reinterpret_cast<RefCountedEigenContext*>(
context->GetExternalContext(context, kTfLiteEigenContext));
}
TfLiteStatus Refresh(TfLiteContext* context) {
if (IsValidNumThreads(context->recommended_num_threads)) {
SetEigenNbThreads(GetNumThreads(context->recommended_num_threads));
}
auto* ptr = GetEigenContext(context);
if (ptr != nullptr) {
ptr->thread_pool_holder->SetNumThreads(context->recommended_num_threads);
}
return kTfLiteOk;
}
}
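// The shared Eigen context is reference counted: it is created on the first
// IncrementUsageCounter() call and destroyed when DecrementUsageCounter()
// brings the count back to zero.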
void IncrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
if (IsValidNumThreads(context->recommended_num_threads)) {
SetEigenNbThreads(context->recommended_num_threads);
}
ptr = new RefCountedEigenContext;
ptr->type = kTfLiteEigenContext;
ptr->Refresh = Refresh;
ptr->thread_pool_holder = std::make_unique<LazyEigenThreadPoolHolder>(
context->recommended_num_threads);
ptr->num_references = 0;
context->SetExternalContext(context, kTfLiteEigenContext, ptr);
}
ptr->num_references++;
}
void DecrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to DecrementUsageCounter() not preceded by "
"IncrementUsageCounter()");
}
if (--ptr->num_references == 0) {
delete ptr;
context->SetExternalContext(context, kTfLiteEigenContext, nullptr);
}
}
const Eigen::ThreadPoolDevice* GetThreadPoolDevice(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to GetFromContext() not preceded by IncrementUsageCounter()");
}
return ptr->thread_pool_holder->GetThreadPoolDevice();
}
}
} | #include "tensorflow/lite/kernels/eigen_support.h"
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
namespace tflite {
namespace eigen_support {
struct TestTfLiteContext : public TfLiteContext {
TestTfLiteContext() {
recommended_num_threads = -1;
external_context = nullptr;
GetExternalContext = GetExternalContextImpl;
SetExternalContext = SetExternalContextImpl;
}
static void SetExternalContextImpl(TfLiteContext* context,
TfLiteExternalContextType type,
TfLiteExternalContext* external_context) {
static_cast<TestTfLiteContext*>(context)->external_context =
external_context;
}
static TfLiteExternalContext* GetExternalContextImpl(
TfLiteContext* context, TfLiteExternalContextType type) {
return static_cast<TestTfLiteContext*>(context)->external_context;
}
TfLiteExternalContext* external_context;
};
TEST(EigenSupport, Default) {
TestTfLiteContext context;
IncrementUsageCounter(&context);
ASSERT_NE(context.external_context, nullptr);
EXPECT_EQ(context.external_context->type, kTfLiteEigenContext);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, SingleThreaded) {
TestTfLiteContext context;
context.recommended_num_threads = 1;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 1);
EXPECT_EQ(thread_pool_device->numThreadsInPool(), 1);
bool executed = false;
auto notification =
thread_pool_device->enqueue([&executed]() { executed = true; });
ASSERT_NE(notification, nullptr);
notification->Wait();
delete notification;
EXPECT_TRUE(executed);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, MultiThreaded) {
TestTfLiteContext context;
context.recommended_num_threads = 2;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 2);
bool executed = false;
auto notification =
thread_pool_device->enqueue([&executed]() { executed = true; });
ASSERT_NE(notification, nullptr);
notification->Wait();
delete notification;
EXPECT_TRUE(executed);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, NumThreadsChanged) {
TestTfLiteContext context;
context.recommended_num_threads = 1;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 1);
context.recommended_num_threads = 3;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 3);
context.recommended_num_threads = -1;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
context.recommended_num_threads = 0;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 0);
context.recommended_num_threads = 3;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 3);
context.recommended_num_threads = -5;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, RefCounting) {
TestTfLiteContext context;
EXPECT_EQ(context.external_context, nullptr);
IncrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
IncrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
DecrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
DecrementUsageCounter(&context);
EXPECT_EQ(context.external_context, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/eigen_support.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/eigen_support_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98be112c-907d-41a9-8977-35bd21348749 | cpp | tensorflow/tensorflow | local_response_norm | tensorflow/lite/kernels/local_response_norm.cc | tensorflow/lite/kernels/local_response_norm_test.cc | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace local_response_norm {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
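// Local response normalization over the innermost (channel) axis of a 4-D
// float32 tensor: output = input * (bias + alpha * sum of squares over a
// window of +/- radius channels)^(-beta).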
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = input->dims->data[1];
output_size->data[2] = input->dims->data[2];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
}
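// Dispatches to the reference or the generic optimized kernel; both receive
// the radius/bias/alpha/beta attributes unchanged via the macro below.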
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLocalResponseNormParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32) {
#define TF_LITE_LOCAL_RESPONSE_NORM(type) \
tflite::LocalResponseNormalizationParams op_params; \
op_params.range = params->radius; \
op_params.bias = params->bias; \
op_params.alpha = params->alpha; \
op_params.beta = params->beta; \
type::LocalResponseNormalization( \
op_params, GetTensorShape(input), GetTensorData<float>(input), \
GetTensorShape(output), GetTensorData<float>(output))
if (kernel_type == kReference) {
TF_LITE_LOCAL_RESPONSE_NORM(reference_ops);
}
if (kernel_type == kGenericOptimized) {
TF_LITE_LOCAL_RESPONSE_NORM(optimized_ops);
}
#undef TF_LITE_LOCAL_RESPONSE_NORM
} else {
TF_LITE_KERNEL_LOG(context, "Output type is %d, requires float.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LOCAL_RESPONSE_NORM_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, local_response_norm::Prepare,
local_response_norm::Eval<local_response_norm::kReference>};
return &r;
}
TfLiteRegistration* Register_LOCAL_RESPONSE_NORM_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, local_response_norm::Prepare,
local_response_norm::Eval<local_response_norm::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_LOCAL_RESPONSE_NORMALIZATION() {
return Register_LOCAL_RESPONSE_NORM_GENERIC_OPT();
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class LocalResponseNormOpModel : public SingleOpModel {
public:
LocalResponseNormOpModel(std::initializer_list<int> input_shape, int radius,
float bias, float alpha, float beta) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
BuiltinOptions_LocalResponseNormalizationOptions,
CreateLocalResponseNormalizationOptions(builder_, radius, bias,
alpha, beta)
.Union());
BuildInterpreter({input_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(LocalResponseNormOpTest, SameAsL2Norm) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/0.0,
                             /*alpha=*/1.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05})));
}
TEST(LocalResponseNormOpTest, WithAlpha) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/0.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-0.275, 0.15, 0.175, 0.3, -0.175, 0.025})));
}
TEST(LocalResponseNormOpTest, WithBias) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/9.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02})));
}
TEST(LocalResponseNormOpTest, SmallRadius) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/2, /*bias=*/9.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-0.264926, 0.125109, 0.140112, 0.267261, -0.161788, 0.0244266})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/local_response_norm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/local_response_norm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
87f3adea-01c3-4b4c-b1e3-985f1764853d | cpp | tensorflow/tensorflow | conv3d | tensorflow/lite/kernels/conv3d.cc | tensorflow/lite/kernels/conv3d_test.cc | #include "tensorflow/lite/kernels/internal/reference/conv3d.h"
#include <cstddef>
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace conv3d {
enum KernelType {
kReference,
kGenericOptimized,
};
const int kTensorNotAllocated = -1;
static constexpr size_t kMaxIm2colBufferSizeMobile = 1024 * 1024 * 1024;
struct OpData {
Padding3DValues padding;
int im2col_tensor_id = kTensorNotAllocated;
bool need_im2col = false;
bool im2col_oversized = false;
int32_t im2col_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* opdata = new OpData;
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
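// Decides whether the optimized kernel needs an im2col scratch buffer. On
// mobile platforms the buffer is skipped once it would reach
// kMaxIm2colBufferSizeMobile (1 GiB); Eval then falls back to the reference
// kernel via the im2col_oversized flag.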
TfLiteStatus AllocateTemporaryTensorsIfRequired(
KernelType kernel_type, TfLiteContext* context, TfLiteNode* node,
OpData* opdata, TfLiteConv3DParams* params, const TfLiteTensor* filter,
size_t im2col_bytes) {
int temporaries_count = 0;
const bool need_dilated_im2col = params->dilation_width_factor != 1 ||
params->dilation_height_factor != 1 ||
params->dilation_depth_factor != 1;
const bool need_non_dilated_im2col =
params->stride_depth != 1 || params->stride_width != 1 ||
params->stride_height != 1 || filter->dims->data[2] != 1 ||
filter->dims->data[1] != 1 || filter->dims->data[0] != 1;
opdata->need_im2col = (kernel_type == kGenericOptimized) &&
(need_dilated_im2col || need_non_dilated_im2col);
if (IsMobilePlatform() && opdata->need_im2col &&
im2col_bytes >= kMaxIm2colBufferSizeMobile) {
opdata->need_im2col = false;
opdata->im2col_oversized = true;
}
if (opdata->need_im2col) {
if (opdata->im2col_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &opdata->im2col_tensor_id));
}
opdata->im2col_index = temporaries_count++;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(temporaries_count);
return kTfLiteOk;
}
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params = static_cast<TfLiteConv3DParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->inputs->size == 2 || node->inputs->size == 3);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
TF_LITE_ENSURE_EQ(context, input->dims->size, 5);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 5);
TF_LITE_ENSURE_EQ(context, input->dims->data[4], filter->dims->data[3]);
TfLiteType input_type = input->type;
TF_LITE_ENSURE_TYPES_EQ(context, input_type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type);
const TfLiteTensor* bias = GetInput(context, node, 2);
if (bias) {
TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input_type);
TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 4));
}
int batches = input->dims->data[0];
int channels_out = filter->dims->data[4];
int depth = input->dims->data[1];
int height = input->dims->data[2];
int width = input->dims->data[3];
int filter_depth = filter->dims->data[0];
int filter_height = filter->dims->data[1];
int filter_width = filter->dims->data[2];
int input_channel = filter->dims->data[3];
int out_width, out_height, out_depth;
opdata->padding = ComputePadding3DValues(
params->stride_height, params->stride_width, params->stride_depth,
params->dilation_height_factor, params->dilation_width_factor,
params->dilation_depth_factor, height, width, depth, filter_height,
filter_width, filter_depth, params->padding, &out_height, &out_width,
&out_depth);
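  // The output is laid out as NDHWC: {batch, depth, height, width, channels}.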
TfLiteIntArray* output_size = TfLiteIntArrayCreate(5);
output_size->data[0] = batches;
output_size->data[1] = out_depth;
output_size->data[2] = out_height;
output_size->data[3] = out_width;
output_size->data[4] = channels_out;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
size_t input_type_size;
TF_LITE_ENSURE_STATUS(GetSizeOfType(context, input->type, &input_type_size));
const size_t im2col_bytes = batches * out_depth * out_height * out_width *
input_channel * filter_depth * filter_height *
filter_width * input_type_size;
TF_LITE_ENSURE_OK(context, AllocateTemporaryTensorsIfRequired(
kernel_type, context, node, opdata, params,
filter, im2col_bytes));
if (opdata->need_im2col) {
TfLiteIntArray* im2col_size = TfLiteIntArrayCreate(5);
im2col_size->data[0] = output_size->data[0];
im2col_size->data[1] = output_size->data[1];
im2col_size->data[2] = output_size->data[2];
im2col_size->data[3] = output_size->data[3];
im2col_size->data[4] =
input_channel * filter_depth * filter_height * filter_width;
TfLiteTensor* im2col;
node->temporaries->data[opdata->im2col_index] = opdata->im2col_tensor_id;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node,
opdata->im2col_index, &im2col));
im2col->type = input->type;
im2col->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, im2col, im2col_size));
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return Prepare(kernel_type, context, node);
}
TfLiteStatus EvalFloat(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node, TfLiteConv3DParams* params,
OpData* opdata, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias,
TfLiteTensor* im2col, TfLiteTensor* output) {
float output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
&output_activation_max);
Conv3DParams runtime_params;
runtime_params.padding_values = opdata->padding;
runtime_params.stride_depth = params->stride_depth;
runtime_params.stride_height = params->stride_height;
runtime_params.stride_width = params->stride_width;
runtime_params.dilation_depth = params->dilation_depth_factor;
runtime_params.dilation_height = params->dilation_height_factor;
runtime_params.dilation_width = params->dilation_width_factor;
runtime_params.float_activation_min = output_activation_min;
runtime_params.float_activation_max = output_activation_max;
switch (kernel_type) {
case kReference: {
reference_ops::Conv3D(runtime_params, GetTensorShape(input),
GetTensorData<float>(input), GetTensorShape(filter),
GetTensorData<float>(filter), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
case kGenericOptimized: {
return optimized_ops::Conv3D(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
GetTensorShape(im2col), GetTensorData<float>(im2col),
CpuBackendContext::GetFromContext(context));
}
}
  // Unreachable for the KernelType values handled above; returning here
  // avoids falling off the end of a non-void function.
  return kTfLiteOk;
}
TfLiteStatus Eval(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteConv3DParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* bias = GetInput(context, node, 2);
TfLiteTensor* im2col = opdata->need_im2col
? &context->tensors[opdata->im2col_tensor_id]
: nullptr;
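  // An oversized im2col buffer forces the reference kernel, which needs no
  // scratch memory.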
if (opdata->im2col_oversized) {
kernel_type = kReference;
}
switch (input->type) {
case kTfLiteFloat32:
return EvalFloat(kernel_type, context, node, params, opdata, input,
filter, bias, im2col, output);
default:
TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return Eval(kernel_type, context, node);
}
}
TfLiteRegistration* Register_CONV_3D_REF() {
static TfLiteRegistration r = {conv3d::Init, conv3d::Free,
conv3d::Prepare<conv3d::kReference>,
conv3d::Eval<conv3d::kReference>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_GENERIC_OPT() {
static TfLiteRegistration r = {conv3d::Init, conv3d::Free,
conv3d::Prepare<conv3d::kGenericOptimized>,
conv3d::Eval<conv3d::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONV_3D() {
return Register_CONV_3D_GENERIC_OPT();
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class Conv3dOpModel : public SingleOpModel {
public:
Conv3dOpModel(const TensorData& input, const TensorData& filter,
const TensorData& bias, const TensorData& output,
Padding padding = Padding_VALID, int32_t stride_depth = 1,
int32_t stride_width = 1, int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
input_ = AddInput(input);
filter_ = AddInput(filter);
bias_ = AddInput(bias);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
Conv3dOpModel(const TensorData& input, const TensorData& filter,
const TensorData& output, Padding padding = Padding_VALID,
int32_t stride_depth = 1, int32_t stride_width = 1,
int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
input_ = AddInput(input);
filter_ = AddInput(filter);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(input_), GetShape(filter_)});
}
void SetFilter(std::vector<float> f) { PopulateTensor(filter_, f); }
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetInput(std::vector<float> data) { PopulateTensor(input_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int filter_;
int bias_;
int output_;
};
template <typename T>
std::vector<T> CreateRangeVector(int N) {
std::vector<T> result;
for (int i = 0; i < N; ++i) result.push_back(i);
return result;
}
TEST(Conv3dOpModel, InvalidInputDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}}),
"input->dims->size != 5");
}
TEST(Conv3dOpModel, InvalidFilterDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}}),
"filter->dims->size != 5");
}
TEST(Conv3dOpModel, MismatchChannelSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 2}},
{TensorType_FLOAT32, {}}),
"input->dims->data.4. != filter->dims->data.3.");
}
TEST(Conv3dOpModel, MismatchBiasSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 2}},
{TensorType_FLOAT32, {1, 3, 2, 2, 1}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}}),
"NumElements.bias. != SizeOfDimension.filter, 4.");
}
TEST(Conv3dOpModel, SimpleFloat32Test) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {}});
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter({-1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 1, 1, 3, 2));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({30, 6, 26, 10, 22, 14}));
}
TEST(Conv3dOpModel, PaddingValidTest) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 3, 4, 5, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {}});
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 2, 3, 4, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({-214, 266, -234, 270, -254, 274, -274, 278, -314, 286,
-334, 290, -354, 294, -374, 298, -414, 306, -434, 310,
-454, 314, -474, 318, -614, 346, -634, 350, -654, 354,
-674, 358, -714, 366, -734, 370, -754, 374, -774, 378,
-814, 386, -834, 390, -854, 394, -874, 398}));
}
TEST(Conv3dOpModel, PaddingSameTest) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 3, 4, 5, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {}}, Padding_SAME);
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 4, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-172, 290, -176, 298, -180, 306, -184, 314, 36, 198, -192,
330, -196, 338, -200, 346, -204, 354, 56, 218, -212, 370,
-216, 378, -220, 386, -224, 394, 76, 238, -226, 82, -230,
82, -234, 82, -238, 82, -80, 80, -252, 450, -256, 458,
-260, 466, -264, 474, 116, 278, -272, 490, -276, 498, -280,
506, -284, 514, 136, 298, -292, 530, -296, 538, -300, 546,
-304, 554, 156, 318, -306, 82, -310, 82, -314, 82, -318,
82, -80, 80, 158, -158, 162, -162, 166, -166, 170, -170,
176, -176, 178, -178, 182, -182, 186, -186, 190, -190, 196,
-196, 198, -198, 202, -202, 206, -206, 210, -210, 216, -216,
220, -220, 224, -224, 228, -228, 232, -232, 237, -237}));
}
TEST(Conv3dOpModel, StrideTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
                  {TensorType_FLOAT32, {}}, Padding_VALID, /*stride_depth=*/2,
                  /*stride_width=*/2, /*stride_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 1, 2, 2));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({52, 8, 68, 8, 244, 8, 260, 8}));
}
TEST(Conv3dOpModel, StrideAndPaddingSameTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
                  {TensorType_FLOAT32, {}}, Padding_SAME, /*stride_depth=*/2,
                  /*stride_width=*/2, /*stride_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter({-1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1,
1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 2, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-70, -28, -86, -12, -82, -16, -90, -8, -262,
164, -278, 180, -178, 80, -186, 88}));
}
TEST(Conv3dOpModel, DilationTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
                  {TensorType_FLOAT32, {}}, Padding_VALID, /*stride_depth=*/1,
                  /*stride_width=*/1, /*stride_height=*/1,
                  ActivationFunctionType_NONE,
                  /*dilation_depth=*/1, /*dilation_width=*/1,
                  /*dilation_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter(CreateRangeVector<float>(32));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 1, 3, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({7248, 7592, 7728, 8104, 8208, 8616, 18768,
19880, 19248, 20392, 19728, 20904}));
}
TEST(Conv3dOpModel, BiasTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}},
                  Padding_VALID, /*stride_depth=*/2, /*stride_width=*/2,
                  /*stride_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 1, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({53, 10, 69, 10, 245, 10, 261, 10}));
}
TEST(Conv3dOpModel, NoIm2ColTensorTest) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 2, 4}},
{TensorType_FLOAT32, {1, 1, 1, 4, 4}},
{TensorType_FLOAT32, {}}, Padding_VALID);
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter(CreateRangeVector<float>(16));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 2, 2, 2, 4));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({56, 62, 68, 74, 152, 174, 196, 218, 248, 286, 324,
362, 344, 398, 452, 506, 440, 510, 580, 650, 536, 622,
708, 794, 632, 734, 836, 938, 728, 846, 964, 1082}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/conv3d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/conv3d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d0f6cf8-8b40-47ad-a7a3-46a15620d8fc | cpp | tensorflow/tensorflow | round | tensorflow/lite/kernels/round.cc | tensorflow/lite/delegates/xnnpack/round_test.cc | #include "tensorflow/lite/kernels/internal/reference/round.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace round {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
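// ROUND supports float32 only: Prepare pins the input and output types and
// Eval applies elementwise rounding with the optimized kernel.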
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
output->type = input->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ROUND() {
  static TfLiteRegistration r = {/*init=*/nullptr,
                                 /*free=*/nullptr, round::Prepare, round::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Round, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
TEST(Round, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
TEST(Round, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
TEST(Round, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ROUND,
xnnpack_delegate.get());
}
TEST(Round, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/round.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/round_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
55722f3a-0c73-43b4-9bf2-18020e210eee | cpp | tensorflow/tensorflow | strided_slice | tensorflow/lite/delegates/gpu/common/tasks/strided_slice.cc | tensorflow/lite/delegates/xnnpack/strided_slice_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/strided_slice.h"
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
bool Is4Aligned(const SliceAttributes& attr) {
return attr.strides.c == 1 && attr.starts.c % 4 == 0;
}
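// Resolves the per-axis start offset: a positive stride starts from `starts`,
// a negative stride starts from `ends` (negative `ends` are taken relative to
// the dimension size). For 4-aligned channel access the channel offset is
// expressed in slices of four.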
int4 GetOffset(const SliceAttributes& attr, int src_width, int src_height,
int src_channels, int src_batch) {
int4 offset;
if (attr.strides.w > 0) {
offset.x = attr.starts.w;
} else {
if (attr.ends.w > 0) {
offset.x = attr.ends.w;
} else {
offset.x = src_width + attr.ends.w;
}
}
if (attr.strides.h > 0) {
offset.y = attr.starts.h;
} else {
if (attr.ends.h > 0) {
offset.y = attr.ends.h;
} else {
offset.y = src_height + attr.ends.h;
}
}
if (attr.strides.c > 0) {
offset.z = attr.starts.c;
} else {
if (attr.ends.c > 0) {
offset.z = attr.ends.c;
} else {
offset.z = src_channels + attr.ends.c;
}
}
if (Is4Aligned(attr)) {
offset.z /= 4;
}
if (attr.strides.b > 0) {
offset.w = attr.starts.b;
} else {
if (attr.ends.b > 0) {
offset.w = attr.ends.b;
} else {
offset.w = src_batch + attr.ends.b;
}
}
return offset;
}
}
StridedSlice::StridedSlice(const OperationDef& definition,
const SliceAttributes& attr)
: GPUOperation(definition), attributes_(attr) {
work_group_size_ = int3(8, 4, 1);
code_ = GetStridedSliceCode(definition_, Is4Aligned(attributes_));
}
StridedSlice::StridedSlice(StridedSlice&& operation)
: GPUOperation(std::move(operation)), attributes_(operation.attributes_) {}
StridedSlice& StridedSlice::operator=(StridedSlice&& operation) {
if (this != &operation) {
attributes_ = operation.attributes_;
GPUOperation::operator=(std::move(operation));
}
return *this;
}
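// Emits the kernel source. With 4-aligned channel slices a whole 4-channel
// vector is read at once; otherwise each of the four lanes is gathered with
// ReadPerChannel, clamping the dilated channel index to the valid range.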
std::string StridedSlice::GetStridedSliceCode(const OperationDef& op_def,
bool alignedx4) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddInt("offset_x");
args_.AddInt("offset_y");
args_.AddInt("offset_z");
args_.AddInt("offset_b");
args_.AddInt("stride_x");
args_.AddInt("stride_y");
args_.AddInt("stride_z");
args_.AddInt("stride_b");
const std::string batch_id =
op_def.dst_tensors[0].HasAxis(Axis::BATCH) ? "B" : "0";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int S = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " int s_x = X * args.stride_x + args.offset_x;\n";
c += " int s_y = Y * args.stride_y + args.offset_y;\n";
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = " + batch_id + " * args.stride_b + args.offset_b;\n";
c += " args.src_tensor.SetBatchRef(s_b);\n";
}
if (alignedx4) {
c += " int s_z = S + args.offset_z;\n";
c += " args.src_tensor::type result = args.src_tensor.Read(s_x, s_y, "
"s_z);\n";
} else {
c += " args.src_tensor::type result;\n";
const std::string postfixes[] = {"x", "y", "z", "w"};
for (int i = 0; i < 4; ++i) {
c += " {\n";
const std::string channel = "(S * 4 + " + std::to_string(i) + ")";
c += " int s_ch = min(" + channel +
" * args.stride_z + args.offset_z, args.src_tensor.Channels() - "
"1);\n";
c += " args.src_tensor.ReadPerChannel(result." + postfixes[i] +
", s_x, s_y, s_ch);\n";
c += " }\n";
}
}
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
absl::Status StridedSlice::BindArguments(ArgumentsBinder* args) {
int4 offset = GetOffset(attributes_, src_[0]->Width(), src_[0]->Height(),
src_[0]->Channels(), src_[0]->Batch());
RETURN_IF_ERROR(args->SetInt("offset_x", offset.x));
RETURN_IF_ERROR(args->SetInt("offset_y", offset.y));
RETURN_IF_ERROR(args->SetInt("offset_z", offset.z));
RETURN_IF_ERROR(args->SetInt("offset_b", offset.w));
RETURN_IF_ERROR(args->SetInt("stride_x", attributes_.strides.w));
RETURN_IF_ERROR(args->SetInt("stride_y", attributes_.strides.h));
RETURN_IF_ERROR(args->SetInt("stride_z", attributes_.strides.c));
RETURN_IF_ERROR(args->SetInt("stride_b", attributes_.strides.b));
return absl::OkStatus();
}
int3 StridedSlice::GetGridSize() const {
const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
const int grid_y = dst_[0]->Height();
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
StridedSlice CreateStridedSlice(const OperationDef& definition,
const SliceAttributes& attr) {
return StridedSlice(definition, attr);
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/strided_slice_tester.h"
namespace tflite {
namespace xnnpack {
TEST_F(StridedSliceTest, 1D) {
const std::vector<int32_t> input_shape = {RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 2D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 3D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape(),
RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 4D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape(),
RandomShape(), RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
TEST_F(StridedSliceTest, 5D) {
const std::vector<int32_t> input_shape = {RandomShape(), RandomShape(),
RandomShape(), RandomShape(),
RandomShape()};
StridedSliceTester()
.InputShape(input_shape)
.RandomBegins(rng_)
.RandomEnds(rng_)
.Test(TensorType_FLOAT32, xnnpack_delegate_.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/strided_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/strided_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a463dc2-4f5c-449a-811a-81e0b7980903 | cpp | tensorflow/tensorflow | dilate | tensorflow/lite/kernels/dilate.cc | tensorflow/lite/kernels/dilate_test.cc | #include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dilate {
namespace {
constexpr size_t kMaxDilateDims = 6;
using Array = std::array<int32_t, kMaxDilateDims>;
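// Recursively interleaves input elements with padding, one dimension per
// recursion level. At the innermost level whole elements are copied and
// separated by padding; outer levels recurse and then pad the gap between
// consecutive dilated sub-blocks. No padding is written after the last
// element of a dimension.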
void DilateImpl(const char* input, char* output,
const char* const padding_values, const int32_t size,
const int32_t* const shape, const int32_t* const input_strides,
const int32_t* const output_strides,
const int32_t* const output_element_sizes, size_t depth = 0) {
const int output_stride = output_strides[depth];
const int input_stride = input_strides[depth];
const int num_elts = shape[depth];
const int padding_size = output_stride - output_element_sizes[depth];
if (depth + 1 >= size) {
for (size_t i = 0; i + 1 < num_elts; ++i) {
std::memcpy(output, input, input_stride);
std::memcpy(output + input_stride, padding_values, padding_size);
input += input_stride;
output += output_stride;
}
std::memcpy(output, input, input_stride);
} else {
for (size_t i = 0; i + 1 < num_elts; ++i) {
DilateImpl(input, output, padding_values, size, shape, input_strides,
output_strides, output_element_sizes, depth + 1);
std::memcpy(output + output_element_sizes[depth], padding_values,
padding_size);
input += input_stride;
output += output_stride;
}
DilateImpl(input, output, padding_values, size, shape, input_strides,
output_strides, output_element_sizes, depth + 1);
}
}
class DilationRunner {
public:
DilationRunner(const TfLiteIntArray& shape, const int32_t* const dilations,
const char* padding_value, const int element_size)
: size_(shape.size), element_size_(element_size) {
static_assert(sizeof(shape.data[0]) == sizeof(Array::value_type),
"Don't use memcpy here if you change the Array type.");
std::memcpy(shape_.data(), shape.data, size_ * sizeof(shape.data[0]));
static_assert(sizeof(dilations[0]) == sizeof(Array::value_type),
"Don't use memcpy here if you change the Array type.");
std::memcpy(dilations_.data(), dilations, size_ * sizeof(dilations[0]));
MergeTrailingDilations();
ComputeInputStrides();
ComputeOutputStridesAndElementSizes();
FillPaddingValueBuffer(padding_value, element_size);
}
int size() const { return size_; }
int element_size() const { return element_size_; }
const char* padding_values() const { return padding_value_buffer_.data(); }
const Array& shape() const { return shape_; }
const Array& dilations() const { return dilations_; }
const Array& input_strides() const { return input_strides_; }
const Array& output_strides() const { return output_strides_; }
const Array& output_element_sizes() const { return output_element_sizes_; }
void Run(const char* const input, char* const output) const {
DilateImpl(input, output, padding_values(), size(), shape().data(),
input_strides().data(), output_strides().data(),
output_element_sizes().data());
}
private:
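  // Folds trailing dimensions with a dilation factor of 1 into the element
  // size, so the copy recursion only visits dimensions that actually dilate.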
void MergeTrailingDilations() {
for (int i = size_ - 2; i >= 0; --i) {
if (dilations_[i + 1] == 1) {
element_size_ *= shape_[i + 1];
--size_;
} else {
break;
}
}
if (size_ == 1 && dilations_[0] == 1) {
element_size_ *= shape_[0];
shape_[0] = 1;
}
}
void ComputeInputStrides() {
input_strides_[size_ - 1] = element_size_;
for (int i = size_ - 2; i >= 0; --i) {
input_strides_[i] = shape_[i + 1] * input_strides_[i + 1];
}
}
void ComputeOutputStridesAndElementSizes() {
const int last = size_ - 1;
output_element_sizes_[last] = element_size_;
output_strides_[last] = dilations_[last] * output_element_sizes_[last];
for (int i = size_ - 2; i >= 0; --i) {
output_element_sizes_[i] = ((shape_[i + 1] - 1) * output_strides_[i + 1] +
output_element_sizes_[i + 1]);
output_strides_[i] = dilations_[i] * output_element_sizes_[i];
}
}
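  // Fills a buffer with enough copies of the padding element to cover the
  // largest gap in a single memcpy, growing the buffer by repeated doubling.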
void FillPaddingValueBuffer(const char* padding_element,
const size_t padding_element_size) {
int first_dilated_idx = 0;
while (dilations_[first_dilated_idx] == 1 &&
first_dilated_idx + 1 < size_) {
++first_dilated_idx;
}
const size_t size = output_strides_[first_dilated_idx] -
output_element_sizes_[first_dilated_idx];
if (!size) {
return;
}
padding_value_buffer_.resize(size);
std::memcpy(padding_value_buffer_.data(), padding_element,
padding_element_size);
size_t sz = padding_element_size;
while (sz < size) {
const size_t bytes_to_copy = std::min(size - sz, sz);
std::memcpy(padding_value_buffer_.data() + sz,
padding_value_buffer_.data(), bytes_to_copy);
sz += bytes_to_copy;
}
}
Array shape_;
Array dilations_;
Array output_strides_;
Array output_element_sizes_;
Array input_strides_;
std::vector<char> padding_value_buffer_;
int size_;
int element_size_;
};
struct DilationContext {
enum InputTensorId { kInput, kDilations, kPaddingValue, kNumInputTensors };
enum OutputTensorId { kOutput, kNumOutputTensors };
DilationContext(TfLiteContext* context, TfLiteNode* node)
: context(context),
node(node),
input_tensor(GetInput(context, node, kInput)),
dilations_tensor(GetInput(context, node, kDilations)),
padding_value_tensor(GetInput(context, node, kPaddingValue)),
output_tensor(GetOutput(context, node, kOutput)) {}
TfLiteContext* context;
TfLiteNode* node;
const TfLiteTensor* input_tensor;
const TfLiteTensor* dilations_tensor;
const TfLiteTensor* padding_value_tensor;
TfLiteTensor* output_tensor;
};
int DilateDim(int dim, int dilate_factor) {
return (dim - 1) * dilate_factor + 1;
}
TfLiteStatus SetupOutputTensor(const DilationContext& ctx) {
const TfLiteIntArray& input_shape = *(ctx.input_tensor->dims);
const int32_t* dilations = ctx.dilations_tensor->data.i32;
IntArrayUniquePtr output_shape = BuildTfLiteArray(input_shape.size);
for (int i = 0; i < output_shape->size; ++i) {
output_shape->data[i] = DilateDim(input_shape.data[i], dilations[i]);
}
return ctx.context->ResizeTensor(ctx.context, ctx.output_tensor,
output_shape.release());
}
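// If the dilation factors are not a constant tensor, the output shape is only
// known at Eval time: the output is marked dynamic here and resized in Eval.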
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node),
DilationContext::kNumInputTensors);
TF_LITE_ENSURE_EQ(context, NumOutputs(node),
DilationContext::kNumOutputTensors);
const DilationContext ctx(context, node);
TF_LITE_ENSURE(context, ctx.input_tensor->dims != nullptr);
TF_LITE_ENSURE(context, ctx.input_tensor->dims->size > 0);
TF_LITE_ENSURE(context, ctx.input_tensor->dims->size <= kMaxDilateDims);
TF_LITE_ENSURE_EQ(context, ctx.input_tensor->type, ctx.output_tensor->type);
TF_LITE_ENSURE_EQ(context, ctx.input_tensor->type,
ctx.padding_value_tensor->type);
if (!IsConstantTensor(ctx.dilations_tensor)) {
SetTensorToDynamic(ctx.output_tensor);
return kTfLiteOk;
}
return SetupOutputTensor(ctx);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const DilationContext ctx(context, node);
TF_LITE_ENSURE_EQ(context, ctx.dilations_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE(context, ctx.dilations_tensor->dims != nullptr);
TF_LITE_ENSURE_EQ(context, ctx.dilations_tensor->dims->size, 1);
TF_LITE_ENSURE_EQ(context, ctx.dilations_tensor->dims->data[0],
ctx.input_tensor->dims->size);
for (int i = 0; i < ctx.dilations_tensor->dims->size; ++i) {
TF_LITE_ENSURE(context, ctx.dilations_tensor->data.i32[i] >= 1);
}
if (!IsConstantTensor(ctx.dilations_tensor)) {
TF_LITE_ENSURE_OK(context, SetupOutputTensor(ctx));
}
size_t element_size;
TF_LITE_ENSURE_OK(
context, GetSizeOfType(context, ctx.input_tensor->type, &element_size));
const DilationRunner runner(
*ctx.input_tensor->dims, ctx.dilations_tensor->data.i32,
ctx.padding_value_tensor->data.raw_const, element_size);
runner.Run(ctx.input_tensor->data.raw_const, ctx.output_tensor->data.raw);
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_DILATE() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 dilate::Prepare, dilate::Eval};
return &r;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
namespace tflite {
namespace {
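// Computes the expected dilation with six nested do-while loops; shapes with
// fewer than six dimensions are zero-padded, and each do-while still executes
// its body once for those dimensions.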
template <class T>
std::vector<T> DilateReference(const std::vector<T>& input,
const std::vector<int32_t>& shape,
const std::vector<int32_t>& dilations,
const T padding_value) {
constexpr int kMaxDilateDims = 6;
std::vector<int> output_shape(kMaxDilateDims, 0);
for (size_t i = 0; i < shape.size(); ++i) {
output_shape[i] = (shape[i] - 1) * dilations[i] + 1;
}
std::vector<int> strides(kMaxDilateDims, 0);
strides[shape.size() - 1] = 1;
for (size_t i = shape.size() - 1; i > 0; --i) {
strides[i - 1] = shape[i] * strides[i];
}
std::vector<int> output_strides(kMaxDilateDims, 0);
output_strides[shape.size() - 1] = 1;
for (size_t i = shape.size() - 1; i > 0; --i) {
output_strides[i - 1] = output_shape[i] * output_strides[i];
}
std::vector<int> safe_dilations(kMaxDilateDims, 0);
absl::c_copy(dilations, safe_dilations.begin());
std::vector<int> safe_input_shape(kMaxDilateDims, 0);
absl::c_copy(shape, safe_input_shape.begin());
std::vector<T> output(
std::accumulate(output_shape.begin(), output_shape.begin() + shape.size(),
1, std::multiplies<>()),
padding_value);
int a = 0;
do {
int b = 0;
do {
int c = 0;
do {
int d = 0;
do {
int e = 0;
do {
int f = 0;
do {
const int i_idx = a * strides[0] + b * strides[1] +
c * strides[2] + d * strides[3] +
e * strides[4] + f * strides[5];
const int o_idx = a * safe_dilations[0] * output_strides[0] +
b * safe_dilations[1] * output_strides[1] +
c * safe_dilations[2] * output_strides[2] +
d * safe_dilations[3] * output_strides[3] +
e * safe_dilations[4] * output_strides[4] +
f * safe_dilations[5] * output_strides[5];
output[o_idx] = input[i_idx];
} while (++f < safe_input_shape[5]);
} while (++e < safe_input_shape[4]);
} while (++d < safe_input_shape[3]);
} while (++c < safe_input_shape[2]);
} while (++b < safe_input_shape[1]);
} while (++a < safe_input_shape[0]);
return output;
}
template <class T>
struct TensorTypeFor;
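// Maps a C++ storage type to its TFLite TensorType enum value.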
#define TENSOR_TYPE_ASSOC(CPP_TYPE, TENSORTYPE_VALUE) \
template <> \
struct TensorTypeFor<CPP_TYPE> { \
static constexpr TensorType value = TENSORTYPE_VALUE; \
};
TENSOR_TYPE_ASSOC(int8_t, TensorType_INT8);
TENSOR_TYPE_ASSOC(int16_t, TensorType_INT16);
TENSOR_TYPE_ASSOC(int32_t, TensorType_INT32);
TENSOR_TYPE_ASSOC(int64_t, TensorType_INT64);
TENSOR_TYPE_ASSOC(uint8_t, TensorType_UINT8);
TENSOR_TYPE_ASSOC(uint16_t, TensorType_UINT16);
TENSOR_TYPE_ASSOC(uint32_t, TensorType_UINT32);
TENSOR_TYPE_ASSOC(uint64_t, TensorType_UINT64);
TENSOR_TYPE_ASSOC(float, TensorType_FLOAT32);
static_assert(sizeof(float) == 4, "float type is expected to be 32 bit long");
TENSOR_TYPE_ASSOC(double, TensorType_FLOAT64);
static_assert(sizeof(double) == 8, "double type is expected to be 64 bit long");
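// Test harness. With IsDilationTensorConst the dilation factors are a
// constant tensor, so Prepare can size the output statically; otherwise they
// are populated at run time and the output tensor is dynamic.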
template <class T, bool IsDilationTensorConst>
class DilateOpModel : public SingleOpModel {
static constexpr TensorType kTensorType = TensorTypeFor<T>::value;
public:
void SetInput(absl::Span<const int32_t> shape,
absl::Span<const T> data = {}) {
input_shape_.assign(shape.begin(), shape.end());
if (data.empty()) {
input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<int>()));
absl::c_iota(input_data_, 1);
} else {
input_data_.assign(data.begin(), data.end());
}
}
void SetDilations(absl::Span<const int32_t> dilations) {
dilations_shape_ = std::vector<int>(1, dilations.size());
dilations_data_.assign(dilations.begin(), dilations.end());
}
void SetPaddingValue(const T& val) { padding_value_data_ = val; }
void Build() {
input_ = AddInput({kTensorType, input_shape_});
if (IsDilationTensorConst) {
dilations_ = AddConstInput(TensorType_INT32, dilations_data_,
{static_cast<int>(dilations_data_.size())});
} else {
dilations_ = AddInput({TensorType_INT32, dilations_shape_});
}
padding_value_ = AddConstInput(kTensorType, &padding_value_data_, {1});
output_ = AddOutput(kTensorType);
SetBuiltinOp(BuiltinOperator_DILATE, BuiltinOptions2_DilateOptions,
CreateDilateOptions(builder_).Union());
BuildInterpreter({input_shape_});
PopulateTensor(input_, input_data_);
if (!IsDilationTensorConst) {
PopulateTensor(dilations_, dilations_data_);
}
}
TfLiteStatus BuildAndInvoke() {
Build();
return Invoke();
}
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_),
GetTensorSize(output_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape = *(interpreter_->tensor(output_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
const std::vector<T>& GetInput() const { return input_data_; }
const std::vector<int>& GetInputShape() const { return input_shape_; }
const std::vector<int>& GetDilations() const { return dilations_data_; }
const T& GetPaddingValue() const { return padding_value_data_; }
protected:
int input_ = -1;
int dilations_ = -1;
int padding_value_ = -1;
int output_ = -1;
std::vector<T> input_data_;
std::vector<int32_t> input_shape_;
std::vector<int32_t> dilations_data_;
std::vector<int32_t> dilations_shape_;
T padding_value_data_ = 0;
};
template <class Configuration>
class DilateTest;
template <class StorageType, class IsDilationTensorConst>
class DilateTest<testing::Types<StorageType, IsDilationTensorConst>>
: public testing::Test {
protected:
DilateOpModel<StorageType, IsDilationTensorConst::value> model_;
};
struct ConstantDilation : std::true_type {};
struct VariableDilation : std::false_type {};
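// Every supported storage type is exercised with both constant and variable
// dilation tensors.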
using TestList = testing::Types<testing::Types<int8_t, ConstantDilation>,
testing::Types<int16_t, ConstantDilation>,
testing::Types<int32_t, ConstantDilation>,
testing::Types<int64_t, ConstantDilation>,
testing::Types<uint8_t, ConstantDilation>,
testing::Types<uint16_t, ConstantDilation>,
testing::Types<uint32_t, ConstantDilation>,
testing::Types<uint64_t, ConstantDilation>,
testing::Types<float, ConstantDilation>,
testing::Types<double, ConstantDilation>,
testing::Types<int8_t, VariableDilation>,
testing::Types<int16_t, VariableDilation>,
testing::Types<int32_t, VariableDilation>,
testing::Types<int64_t, VariableDilation>,
testing::Types<uint8_t, VariableDilation>,
testing::Types<uint16_t, VariableDilation>,
testing::Types<uint32_t, VariableDilation>,
testing::Types<uint64_t, VariableDilation>,
testing::Types<float, VariableDilation>,
testing::Types<double, VariableDilation>>;
TYPED_TEST_SUITE(DilateTest, TestList);
TYPED_TEST(DilateTest, DilationManualTest) {
this->model_.SetInput({2, 2});
this->model_.SetDilations({2, 3});
const std::vector<int> expected{
1, 0, 0, 2,
0, 0, 0, 0,
3, 0, 0, 4
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(3, 4));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, DilationManualTest2) {
this->model_.SetInput({2, 3});
this->model_.SetDilations({2, 3});
const std::vector<int> expected{
1, 0, 0, 2, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 5, 0, 0, 6
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(3, 7));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, DilationManualTest3) {
this->model_.SetInput({4, 2, 3});
this->model_.SetDilations({2, 3, 4});
const std::vector<int> expected{
1, 0, 0, 0, 2, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 5, 0, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
7, 0, 0, 0, 8, 0, 0, 0, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 11, 0, 0, 0, 12,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
13, 0, 0, 0, 14, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
16, 0, 0, 0, 17, 0, 0, 0, 18,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
19, 0, 0, 0, 20, 0, 0, 0, 21,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
22, 0, 0, 0, 23, 0, 0, 0, 24,
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(7, 4, 9));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, TrailingDilationOptimizationWorks) {
this->model_.SetInput({2, 2, 2, 2});
this->model_.SetDilations({2, 1, 1, 1});
const std::vector<int> expected{
1, 2, 3, 4, 5, 6, 7, 8,
0, 0, 0, 0, 0, 0, 0, 0,
9, 10, 11, 12, 13, 14, 15, 16
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(3, 2, 2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, TrailingDilationOptimizationDegenerateCaseWorks) {
this->model_.SetInput({2, 2, 2, 2});
this->model_.SetDilations({1, 1, 1, 1});
const std::vector<int> expected{
1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2, 2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, CheckAgainstReferenceImplementation) {
auto& model = this->model_;
model.SetInput({5, 4, 2});
model.SetDilations({2, 3, 5});
model.SetPaddingValue(-1);
const auto expected =
DilateReference(model.GetInput(), model.GetInputShape(),
model.GetDilations(), model.GetPaddingValue());
EXPECT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dilate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dilate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40874743-137f-4d51-be46-efd8d9dcd11c | cpp | tensorflow/tensorflow | unidirectional_sequence_lstm | tensorflow/lite/kernels/unidirectional_sequence_lstm.cc | tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc | #include <math.h>
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/lstm_eval.h"
#include "tensorflow/lite/kernels/lstm_shared.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unidirectional_sequence_lstm {
namespace {
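// Per-node state. The *_is_diag flags track whether a recurrent weight
// tensor is stored in diagonal (rank-1) form.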
struct OpData {
bool use_layer_norm;
int scratch_tensor_index;
bool compute_row_sums = false;
bool recurrent_to_input_is_diag = false;
bool recurrent_to_forget_is_diag = false;
bool recurrent_to_cell_is_diag = false;
bool recurrent_to_output_is_diag = false;
lstm_eval::IntegerLstmParameter integer_lstm_param;
};
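// Computes the parameters for integer (8x8->16) evaluation: quantized
// cell/projection clip values plus the effective scale of every gate matmul,
// each expressed as a fixed-point multiplier/shift pair.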
TfLiteStatus PopulateQuantizedLstmParams8x8_16(
TfLiteContext* context, TfLiteNode* node,
lstm_eval::IntegerLstmParameter* integer_lstm_param) {
const auto* params =
static_cast<TfLiteUnidirectionalSequenceLSTMParams*>(node->builtin_data);
const float cell_clip = params->cell_clip;
const float proj_clip = params->proj_clip;
const TfLiteTensor* cell_state =
GetVariableInput(context, node, lstm::full::kCellStateTensor);
TF_LITE_ENSURE(context, cell_state != nullptr);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, lstm::full::kOutputTensor, &output_tensor));
TF_LITE_ENSURE(context,
cell_state->quantization.type != kTfLiteNoQuantization);
auto* cell_state_params =
static_cast<TfLiteAffineQuantization*>(cell_state->quantization.params);
TF_LITE_ENSURE(context,
output_tensor->quantization.type != kTfLiteNoQuantization);
auto* proj_params = static_cast<TfLiteAffineQuantization*>(
output_tensor->quantization.params);
if (cell_clip > 0.0) {
integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min(
std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f),
32767.0f));
} else {
integer_lstm_param->quantized_cell_clip = 0;
}
if (proj_clip > 0.0) {
integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min(
std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f));
} else {
integer_lstm_param->quantized_proj_clip = 0;
}
OpData* op_data = static_cast<OpData*>(node->user_data);
const bool use_layer_norm = op_data->use_layer_norm;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor);
const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kForgetLayerNormCoefficientsTensor);
const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kCellLayerNormCoefficientsTensor);
const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kOutputLayerNormCoefficientsTensor);
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
const bool use_cifg = (input_to_input_weights == nullptr);
const bool use_peephole = (cell_to_output_weights != nullptr);
const bool use_projection = (projection_weights != nullptr);
std::vector<float> intermediate_scale;
std::vector<int32> intermediate_zp;
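  // The first four intermediates carry the pre-activation gate values. With
  // layer norm their quantization parameters come from the model; otherwise a
  // fixed 2^-12 scale with zero offset is assumed.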
for (int i = 0; i < 4; ++i) {
if (use_layer_norm) {
TfLiteTensor* intermediate;
TF_LITE_ENSURE_OK(context,
GetIntermediatesSafe(context, node, i, &intermediate));
TF_LITE_ENSURE(context,
intermediate->quantization.type != kTfLiteNoQuantization);
auto* params = static_cast<TfLiteAffineQuantization*>(
intermediate->quantization.params);
intermediate_scale.push_back(params->scale->data[0]);
intermediate_zp.push_back(params->zero_point->data[0]);
} else {
intermediate_scale.push_back(std::pow(2, -12));
intermediate_zp.push_back(0);
}
}
TfLiteTensor* hidden;
TF_LITE_ENSURE_OK(context, GetIntermediatesSafe(context, node, 4, &hidden));
TF_LITE_ENSURE(context, hidden->quantization.type != kTfLiteNoQuantization);
auto* hidden_params =
static_cast<TfLiteAffineQuantization*>(hidden->quantization.params);
intermediate_scale.push_back(hidden_params->scale->data[0]);
intermediate_zp.push_back(hidden_params->zero_point->data[0]);
const float default_scale = 1.0;
float input_scale = default_scale;
float input_to_input_weight_scale = default_scale;
float recurrent_to_input_weight_scale = default_scale;
float cell_to_input_weight_scale = default_scale;
float input_to_forget_weight_scale = default_scale;
float recurrent_to_forget_weight_scale = default_scale;
float cell_to_forget_weight_scale = default_scale;
float input_to_cell_weight_scale = default_scale;
float recurrent_to_cell_weight_scale = default_scale;
float input_to_output_weight_scale = default_scale;
float recurrent_to_output_weight_scale = default_scale;
float cell_to_output_weight_scale = default_scale;
float projection_weight_scale = default_scale;
float layer_norm_input_scale = default_scale;
float layer_norm_forget_scale = default_scale;
float layer_norm_cell_scale = default_scale;
float layer_norm_output_scale = default_scale;
float output_state_scale = default_scale;
int cell_scale = 1;
float effective_input_to_input_scale = default_scale;
float effective_recurrent_to_input_scale = default_scale;
float effective_cell_to_input_scale = default_scale;
float effective_input_to_forget_scale = default_scale;
float effective_recurrent_to_forget_scale = default_scale;
float effective_cell_to_forget_scale = default_scale;
float effective_input_to_cell_scale = default_scale;
float effective_recurrent_to_cell_scale = default_scale;
float effective_input_to_output_scale = default_scale;
float effective_recurrent_to_output_scale = default_scale;
float effective_cell_to_output_scale = default_scale;
float effective_proj_scale = default_scale;
float effective_hidden_scale = default_scale;
if (!use_cifg) {
input_to_input_weight_scale = input_to_input_weights->params.scale;
recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale;
}
if (use_peephole) {
if (!use_cifg) {
cell_to_input_weight_scale = cell_to_input_weights->params.scale;
}
cell_to_forget_weight_scale = cell_to_forget_weights->params.scale;
cell_to_output_weight_scale = cell_to_output_weights->params.scale;
}
if (use_layer_norm) {
if (!use_cifg) {
layer_norm_input_scale = input_layer_norm_coefficients->params.scale;
}
layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale;
layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale;
layer_norm_output_scale = output_layer_norm_coefficients->params.scale;
}
if (use_projection) {
projection_weight_scale = projection_weights->params.scale;
}
output_state_scale = output_state->params.scale;
input_to_forget_weight_scale = input_to_forget_weights->params.scale;
input_to_cell_weight_scale = input_to_cell_weights->params.scale;
input_to_output_weight_scale = input_to_output_weights->params.scale;
recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale;
recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale;
recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale;
TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale));
integer_lstm_param->cell_scale = cell_scale;
input_scale = input->params.scale;
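  // Fold the input/weight/state scales of each matmul into a single
  // effective scale, normalized by the scale of the receiving intermediate.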
if (!use_cifg) {
effective_input_to_input_scale =
input_to_input_weight_scale * input_scale / intermediate_scale[0];
effective_recurrent_to_input_scale = recurrent_to_input_weight_scale *
output_state_scale /
intermediate_scale[0];
}
effective_input_to_forget_scale =
input_to_forget_weight_scale * input_scale / intermediate_scale[1];
effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale *
output_state_scale /
intermediate_scale[1];
effective_input_to_cell_scale =
input_to_cell_weight_scale * input_scale / intermediate_scale[2];
effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale *
output_state_scale /
intermediate_scale[2];
effective_input_to_output_scale =
input_to_output_weight_scale * input_scale / intermediate_scale[3];
effective_recurrent_to_output_scale = recurrent_to_output_weight_scale *
output_state_scale /
intermediate_scale[3];
effective_hidden_scale =
std::pow(2, -15) / intermediate_scale[4] * std::pow(2, -15);
effective_proj_scale =
projection_weight_scale * intermediate_scale[4] / output_state_scale;
if (use_peephole) {
if (!use_cifg) {
effective_cell_to_input_scale = std::pow(2, cell_scale) *
cell_to_input_weight_scale /
intermediate_scale[0];
}
effective_cell_to_forget_scale = std::pow(2, cell_scale) *
cell_to_forget_weight_scale /
intermediate_scale[1];
effective_cell_to_output_scale = std::pow(2, cell_scale) *
cell_to_output_weight_scale /
intermediate_scale[3];
}
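  // Convert every effective floating-point scale into the quantized
  // multiplier/shift pair consumed by the integer kernels.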
QuantizeMultiplier(effective_input_to_input_scale,
&integer_lstm_param->effective_input_to_input_scale_a,
&integer_lstm_param->effective_input_to_input_scale_b);
QuantizeMultiplier(effective_recurrent_to_input_scale,
&integer_lstm_param->effective_recurrent_to_input_scale_a,
&integer_lstm_param->effective_recurrent_to_input_scale_b);
QuantizeMultiplier(effective_cell_to_input_scale,
&integer_lstm_param->effective_cell_to_input_scale_a,
&integer_lstm_param->effective_cell_to_input_scale_b);
QuantizeMultiplier(effective_input_to_forget_scale,
&integer_lstm_param->effective_input_to_forget_scale_a,
&integer_lstm_param->effective_input_to_forget_scale_b);
QuantizeMultiplier(
effective_recurrent_to_forget_scale,
&integer_lstm_param->effective_recurrent_to_forget_scale_a,
&integer_lstm_param->effective_recurrent_to_forget_scale_b);
QuantizeMultiplier(effective_cell_to_forget_scale,
&integer_lstm_param->effective_cell_to_forget_scale_a,
&integer_lstm_param->effective_cell_to_forget_scale_b);
QuantizeMultiplier(effective_input_to_cell_scale,
&integer_lstm_param->effective_input_to_cell_scale_a,
&integer_lstm_param->effective_input_to_cell_scale_b);
QuantizeMultiplier(effective_recurrent_to_cell_scale,
&integer_lstm_param->effective_recurrent_to_cell_scale_a,
&integer_lstm_param->effective_recurrent_to_cell_scale_b);
QuantizeMultiplier(effective_input_to_output_scale,
&integer_lstm_param->effective_input_to_output_scale_a,
&integer_lstm_param->effective_input_to_output_scale_b);
QuantizeMultiplier(
effective_recurrent_to_output_scale,
&integer_lstm_param->effective_recurrent_to_output_scale_a,
&integer_lstm_param->effective_recurrent_to_output_scale_b);
QuantizeMultiplier(effective_cell_to_output_scale,
&integer_lstm_param->effective_cell_to_output_scale_a,
&integer_lstm_param->effective_cell_to_output_scale_b);
QuantizeMultiplier(effective_proj_scale,
&integer_lstm_param->effective_proj_scale_a,
&integer_lstm_param->effective_proj_scale_b);
QuantizeMultiplier(effective_hidden_scale,
&integer_lstm_param->effective_hidden_scale_a,
&integer_lstm_param->effective_hidden_scale_b);
QuantizeMultiplier(layer_norm_input_scale,
&integer_lstm_param->layer_norm_input_scale_a,
&integer_lstm_param->layer_norm_input_scale_b);
QuantizeMultiplier(layer_norm_forget_scale,
&integer_lstm_param->layer_norm_forget_scale_a,
&integer_lstm_param->layer_norm_forget_scale_b);
QuantizeMultiplier(layer_norm_cell_scale,
&integer_lstm_param->layer_norm_cell_scale_a,
&integer_lstm_param->layer_norm_cell_scale_b);
QuantizeMultiplier(layer_norm_output_scale,
&integer_lstm_param->layer_norm_output_scale_a,
&integer_lstm_param->layer_norm_output_scale_b);
integer_lstm_param->hidden_zp = intermediate_zp[4];
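  // Variance guards keep the layer-norm variance away from zero in integer
  // arithmetic; they grow with the layer-norm coefficient scales.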
if (!use_cifg) {
integer_lstm_param->input_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_input_scale));
}
integer_lstm_param->forget_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_forget_scale));
integer_lstm_param->cell_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_cell_scale));
integer_lstm_param->output_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_output_scale));
return kTfLiteOk;
}
}  // namespace
enum TemporaryTensor {
kScratchBuffer = 0,
kInputQuantized = 1,
kOutputStateQuantized = 2,
kCellStateQuantized = 3,
kInputScalingFactors = 4,
kOutputStateScalingFactors = 5,
kProductScalingFactors = 6,
kRecoveredCellWeights = 7,
kAccumScratch = 8,
kInputZeroPoints = 9,
kOutputStateZeroPoints = 10,
kRowSums = 11,
kNumTemporaryTensors = 12,
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, kNumTemporaryTensors,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
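// Validates the shapes and types of the weight, bias, and layer-norm tensors
// against the inferred n_input/n_output/n_cell sizes. Rank-1 recurrent
// weights are accepted as diagonal matrices.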
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell,
bool use_layer_norm, bool is_integer) {
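  // Only the leading fields (cell_clip, proj_clip), which
  // TfLiteUnidirectionalSequenceLSTMParams shares with TfLiteLSTMParams, are
  // read through this cast.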
const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
TF_LITE_ENSURE(context, params->cell_clip >= 0);
TF_LITE_ENSURE(context, params->proj_clip >= 0);
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
if (input_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
}
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[1], n_input);
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
if (recurrent_to_input_weights != nullptr) {
bool recurrent_to_input_is_diag =
recurrent_to_input_weights->dims->size == 1;
if (recurrent_to_input_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_input_weights->type,
input_to_forget_weights->type);
}
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[0],
n_cell);
}
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
bool recurrent_to_forget_is_diag =
recurrent_to_forget_weights->dims->size == 1;
if (recurrent_to_forget_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_forget_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
bool recurrent_to_cell_is_diag = recurrent_to_cell_weights->dims->size == 1;
if (recurrent_to_cell_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_cell_weights->type,
input_to_forget_weights->type);
}
const bool cifg_weights_all_or_none =
((input_to_input_weights != nullptr) &&
(recurrent_to_input_weights != nullptr)) ||
((input_to_input_weights == nullptr) &&
(recurrent_to_input_weights == nullptr));
  TF_LITE_ENSURE(context, cifg_weights_all_or_none);
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
if (cell_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(
context, cell_to_input_weights->type,
is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
}
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
if (cell_to_forget_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(
context, cell_to_forget_weights->type,
is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
}
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
if (cell_to_output_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(
context, cell_to_output_weights->type,
is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
}
const bool use_cifg = (input_to_input_weights == nullptr);
const bool peephole_weights_all_or_none =
((cell_to_input_weights != nullptr || use_cifg) &&
(cell_to_forget_weights != nullptr) &&
(cell_to_output_weights != nullptr)) ||
((cell_to_input_weights == nullptr) &&
(cell_to_forget_weights == nullptr) &&
(cell_to_output_weights == nullptr));
  TF_LITE_ENSURE(context, peephole_weights_all_or_none);
const TfLiteTensor* input_gate_bias =
GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_gate_bias, nullptr);
} else {
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteFloat32);
}
}
const TfLiteTensor* forget_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor,
&forget_gate_bias));
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* cell_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, lstm::full::kCellGateBiasTensor,
&cell_gate_bias));
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* output_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor,
&output_gate_bias));
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
if (projection_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[0], n_output);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[1], n_cell);
}
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
if (projection_bias != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, projection_bias->dims->data[0], n_output);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteFloat32);
}
}
  const bool projection_tensors_consistent =
      ((projection_weights != nullptr) || (projection_bias == nullptr));
  TF_LITE_ENSURE(context, projection_tensors_consistent);
if (use_layer_norm) {
const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients, nullptr);
} else {
TF_LITE_ENSURE(context, input_layer_norm_coefficients != nullptr);
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
kTfLiteFloat32);
}
}
const TfLiteTensor* forget_layer_norm_coefficients;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node,
lstm::full::kForgetLayerNormCoefficientsTensor,
&forget_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
kTfLiteFloat32);
}
const TfLiteTensor* cell_layer_norm_coefficients;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node,
lstm::full::kCellLayerNormCoefficientsTensor,
&cell_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
kTfLiteFloat32);
}
const TfLiteTensor* output_layer_norm_coefficients;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node,
lstm::full::kOutputLayerNormCoefficientsTensor,
&output_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
kTfLiteFloat32);
}
}
return kTfLiteOk;
}
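// Precomputes bias + zero_point * row_sum(W) so the integer kernel can apply
// the zero-point correction as a single per-row vector addition.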
TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
TfLiteContext* context, int32_t zero_point,
const TfLiteTensor* weight_tensor, const TfLiteTensor* bias_tensor,
std::unique_ptr<int32_t[]>* output) {
if (weight_tensor == nullptr) {
return kTfLiteOk;
}
const RuntimeShape& weight_shape = GetTensorShape(weight_tensor);
TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2);
const int row = weight_shape.Dims(0);
const int col = weight_shape.Dims(1);
output->reset(new int32_t[row]);
if (bias_tensor == nullptr) {
memset(output->get(), 0, row * sizeof(int32_t));
} else {
const int32_t* bias = GetTensorData<int32_t>(bias_tensor);
memcpy(output->get(), bias, row * sizeof(int32_t));
}
if (zero_point != 0) {
const int8_t* weight = GetTensorData<int8_t>(weight_tensor);
tensor_utils::MatrixScalarMultiplyAccumulate(weight, zero_point, row, col,
output->get());
}
return kTfLiteOk;
}
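// Builds the effective bias above for every input and recurrent weight
// matrix, using the negated input and output-state zero points, plus the
// projection bias folded with the hidden-state zero point.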
TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context,
OpData* op_data,
TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
const int32_t input_zero_point = -input->params.zero_point;
const int32_t output_state_zero_point = -output_state->params.zero_point;
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
lstm_eval::IntegerLstmParameter* integer_lstm_params =
&op_data->integer_lstm_param;
const TfLiteTensor* intermediate =
&context->tensors[node->intermediates->data[4]];
TF_LITE_ENSURE(context,
intermediate->quantization.type != kTfLiteNoQuantization);
const auto* params =
static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params);
const int32_t hidden_zp = params->zero_point->data[0];
const bool is_layer_norm = op_data->use_layer_norm;
const TfLiteTensor* forget_gate_bias =
is_layer_norm
? nullptr
: GetInput(context, node, lstm::full::kForgetGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_forget_weights, forget_gate_bias,
&(integer_lstm_params->input_to_forget_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_forget_weights,
nullptr, &(integer_lstm_params->recurrent_to_forget_effective_bias)));
const TfLiteTensor* cell_gate_bias =
is_layer_norm ? nullptr
: GetInput(context, node, lstm::full::kCellGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_cell_weights, cell_gate_bias,
&(integer_lstm_params->input_to_cell_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_cell_weights, nullptr,
&(integer_lstm_params->recurrent_to_cell_effective_bias)));
const TfLiteTensor* output_gate_bias =
is_layer_norm
? nullptr
: GetInput(context, node, lstm::full::kOutputGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_output_weights, output_gate_bias,
&(integer_lstm_params->input_to_output_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_output_weights,
nullptr, &(integer_lstm_params->recurrent_to_output_effective_bias)));
const TfLiteTensor* input_gate_bias =
is_layer_norm ? nullptr
: GetInput(context, node, lstm::full::kInputGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_input_weights, input_gate_bias,
&(integer_lstm_params->input_to_input_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_input_weights, nullptr,
&(integer_lstm_params->recurrent_to_input_effective_bias)));
TF_LITE_ENSURE_OK(context,
PrecomputeZeroPointTimesWeightWithBias(
context, hidden_zp, projection_weights, projection_bias,
&(integer_lstm_params->projection_effective_bias)));
return kTfLiteOk;
}
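// Resizes the output, allocates the temporaries needed by the chosen
// execution path (float, hybrid, or fully integer), and precomputes the
// integer parameters when the input is int8.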
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const int scratch_tensor_index = op_data->scratch_tensor_index;
bool use_layer_norm = false;
if (node->inputs->size == 24) {
const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kForgetLayerNormCoefficientsTensor);
if (forget_layer_norm_coefficients == nullptr) {
use_layer_norm = false;
} else {
use_layer_norm = true;
}
} else if (node->inputs->size == 20) {
use_layer_norm = false;
} else {
TF_LITE_KERNEL_LOG(
context, "The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs",
node->inputs->size);
return kTfLiteError;
}
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
op_data->use_layer_norm = use_layer_norm;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const bool is_integer = input->type == kTfLiteInt8;
  // n_input is read from dims->data[2] below, so the input must be at least
  // rank 3.
  TF_LITE_ENSURE(context, input->dims->size > 2);
const auto* params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
node->builtin_data);
const bool time_major = params->time_major;
const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0];
const int n_input = input->dims->data[2];
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const int n_cell = input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
  const bool recurrent_to_output_is_diag =
      recurrent_to_output_weights->dims->size == 1;
if (recurrent_to_output_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->type,
input_to_output_weights->type);
}
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0],
n_cell);
const int n_output = recurrent_to_output_is_diag
? recurrent_to_output_weights->dims->data[0]
: recurrent_to_output_weights->dims->data[1];
TF_LITE_ENSURE_OK(
context, CheckInputTensorDimensions(context, node, n_input, n_output,
n_cell, use_layer_norm, is_integer));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
lstm::full::kOutputTensor, &output));
TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
TfLiteTensor* cell_state =
GetVariableInput(context, node, lstm::full::kCellStateTensor);
TF_LITE_ENSURE(context, cell_state != nullptr);
TF_LITE_ENSURE_EQ(context, NumElements(output_state), n_batch * n_output);
TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
output_size->data[input->dims->size - 1] = n_output;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
if (is_integer) {
const int num_intermediate_tensors = node->intermediates->size;
TF_LITE_ENSURE(context, num_intermediate_tensors == 5);
}
TfLiteIntArrayFree(node->temporaries);
if (IsHybridOp(input, input_to_output_weights)) {
node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors);
} else if (is_integer) {
node->temporaries = TfLiteIntArrayCreate(6);
} else {
node->temporaries = TfLiteIntArrayCreate(1);
}
node->temporaries->data[kScratchBuffer] =
scratch_tensor_index + kScratchBuffer;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScratchBuffer,
&scratch_buffer));
scratch_buffer->type = input->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const bool use_cifg = (input_to_input_weights == nullptr);
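  // The scratch buffer holds one n_cell-wide slice per gate computation for
  // each batch: four slices under CIFG (no separate input gate), five
  // otherwise, plus 16 extra elements.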
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
if (use_cifg) {
scratch_buffer_size->data[1] = n_cell * 4 + 16;
} else {
scratch_buffer_size->data[1] = n_cell * 5 + 16;
}
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
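  // Hybrid execution (float activations, quantized weights) needs quantized
  // copies of the input and both states, per-batch scaling factors and zero
  // points, an int32 accumulator, and persistent row sums of the weights.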
if (IsHybridOp(input, input_to_output_weights)) {
op_data->compute_row_sums = true;
node->temporaries->data[kInputQuantized] =
scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kInputQuantized,
&input_quantized));
input_quantized->type = input_to_output_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[kOutputStateQuantized] =
scratch_tensor_index + kOutputStateQuantized;
TfLiteTensor* output_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kOutputStateQuantized,
&output_state_quantized));
output_state_quantized->type = input_to_output_weights->type;
output_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(output_state_quantized->dims,
output_state->dims)) {
TfLiteIntArray* output_state_quantized_size =
TfLiteIntArrayCopy(output_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output_state_quantized,
output_state_quantized_size));
}
node->temporaries->data[kCellStateQuantized] =
scratch_tensor_index + kCellStateQuantized;
TfLiteTensor* cell_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kCellStateQuantized,
&cell_state_quantized));
cell_state_quantized->type = input_to_output_weights->type;
cell_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) {
TfLiteIntArray* cell_state_quantized_size =
TfLiteIntArrayCopy(cell_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, cell_state_quantized,
cell_state_quantized_size));
}
node->temporaries->data[kInputScalingFactors] =
op_data->scratch_tensor_index + kInputScalingFactors;
TfLiteTensor* input_sf;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kInputScalingFactors, &input_sf));
input_sf->type = kTfLiteFloat32;
input_sf->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {n_batch};
if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1);
input_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_sf, input_sf_size));
}
node->temporaries->data[kOutputStateScalingFactors] =
op_data->scratch_tensor_index + kOutputStateScalingFactors;
TfLiteTensor* output_state_sf;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kOutputStateScalingFactors,
&output_state_sf));
output_state_sf->type = kTfLiteFloat32;
output_state_sf->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1);
output_state_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf,
output_state_sf_size));
}
node->temporaries->data[kProductScalingFactors] =
scratch_tensor_index + kProductScalingFactors;
TfLiteTensor* prod_scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kProductScalingFactors,
&prod_scaling_factors));
prod_scaling_factors->type = kTfLiteFloat32;
prod_scaling_factors->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1,
scaling_dims)) {
TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1);
prod_scaling_factors_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, prod_scaling_factors,
prod_scaling_factors_size));
}
node->temporaries->data[kRecoveredCellWeights] =
scratch_tensor_index + kRecoveredCellWeights;
TfLiteTensor* recovered_cell_weights;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRecoveredCellWeights,
&recovered_cell_weights));
recovered_cell_weights->type = kTfLiteFloat32;
recovered_cell_weights->allocation_type = kTfLiteArenaRw;
int recovered_cell_dims[1] = {n_cell};
if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1,
recovered_cell_dims)) {
TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1);
recovered_cell_weights_size->data[0] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, recovered_cell_weights,
recovered_cell_weights_size));
}
node->temporaries->data[kAccumScratch] =
scratch_tensor_index + kAccumScratch;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch,
&accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {n_cell, n_batch};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = n_cell;
accum_size->data[1] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[kInputZeroPoints] =
op_data->scratch_tensor_index + kInputZeroPoints;
TfLiteTensor* input_zp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kInputZeroPoints, &input_zp));
input_zp->type = kTfLiteFloat32;
input_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1);
input_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_zp, input_zp_size));
}
node->temporaries->data[kOutputStateZeroPoints] =
op_data->scratch_tensor_index + kOutputStateZeroPoints;
TfLiteTensor* output_state_zp;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kOutputStateZeroPoints,
&output_state_zp));
output_state_zp->type = kTfLiteFloat32;
output_state_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1);
output_state_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp,
output_state_zp_size));
}
node->temporaries->data[kRowSums] = scratch_tensor_index + kRowSums;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRowSums, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->name = "Lstm_row_sums";
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_rows = use_cifg ? 6 : 8;
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
if (projection_weights != nullptr) {
row_sums_rows += ceil(static_cast<float>(n_output) / n_cell);
}
int row_sums_dims[2] = {row_sums_rows, n_cell};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2);
row_sums_size->data[0] = row_sums_dims[0];
row_sums_size->data[1] = row_sums_dims[1];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
  if (is_integer) {
    // Propagate failures from quantization-parameter computation.
    TF_LITE_ENSURE_OK(context,
                      PopulateQuantizedLstmParams8x8_16(
                          context, node, &op_data->integer_lstm_param));
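    // Six {n_batch, n_cell} scratch tensors back the 8x8->16 integer path:
    // int16 for scratches 0-3, int8 for scratch 4, int32 for scratch 5.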
for (int scratch_index = 0; scratch_index < 6; ++scratch_index) {
node->temporaries->data[scratch_index] =
op_data->scratch_tensor_index + scratch_index;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, scratch_index,
&scratch_tensor));
scratch_tensor->type = kTfLiteInt16;
if (scratch_index == 4) {
scratch_tensor->type = kTfLiteInt8;
} else if (scratch_index == 5) {
scratch_tensor->type = kTfLiteInt32;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
const int scratch_dimension[2] = {n_batch, n_cell};
if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2,
scratch_dimension)) {
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
scratch_buffer_size->data[1] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, scratch_tensor,
scratch_buffer_size));
}
}
TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias(
context, op_data, node));
}
return kTfLiteOk;
}
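// Dispatches on the weight type: float weights use EvalFloat; int8/uint8
// weights take the hybrid path when activations are float, otherwise the
// fully integer 8x8->16 path.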
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const auto* params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const bool use_layer_norm = op_data->use_layer_norm;
const bool time_major = params->time_major;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
const TfLiteTensor* input_gate_bias =
GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor);
const TfLiteTensor* forget_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor,
&forget_gate_bias));
const TfLiteTensor* cell_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, lstm::full::kCellGateBiasTensor,
&cell_gate_bias));
const TfLiteTensor* output_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor,
&output_gate_bias));
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TFLITE_DCHECK(output_state != nullptr);
TfLiteTensor* cell_state =
GetVariableInput(context, node, lstm::full::kCellStateTensor);
TFLITE_DCHECK(cell_state != nullptr);
const TfLiteTensor* input_layer_norm_coefficients =
use_layer_norm
? GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor)
: nullptr;
const TfLiteTensor* forget_layer_norm_coefficients =
use_layer_norm ? GetInput(context, node,
lstm::full::kForgetLayerNormCoefficientsTensor)
: nullptr;
const TfLiteTensor* cell_layer_norm_coefficients =
use_layer_norm ? GetInput(context, node,
lstm::full::kCellLayerNormCoefficientsTensor)
: nullptr;
const TfLiteTensor* output_layer_norm_coefficients =
use_layer_norm ? GetInput(context, node,
lstm::full::kOutputLayerNormCoefficientsTensor)
: nullptr;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
lstm::full::kOutputTensor, &output));
TfLiteLSTMParams lstm_params;
lstm_params.activation = params->activation;
lstm_params.cell_clip = params->cell_clip;
lstm_params.proj_clip = params->proj_clip;
lstm_params.asymmetric_quantize_inputs = params->asymmetric_quantize_inputs;
switch (input_to_output_weights->type) {
case kTfLiteFloat32: {
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScratchBuffer,
&scratch_buffer));
return lstm_eval::EvalFloat(
input, input_to_input_weights, input_to_forget_weights,
input_to_cell_weights, input_to_output_weights,
recurrent_to_input_weights, recurrent_to_forget_weights,
recurrent_to_cell_weights, recurrent_to_output_weights,
cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights,
input_layer_norm_coefficients, forget_layer_norm_coefficients,
cell_layer_norm_coefficients, output_layer_norm_coefficients,
          /*aux_input=*/nullptr,
          /*aux_input_to_input_weights=*/nullptr,
          /*aux_input_to_forget_weights=*/nullptr,
          /*aux_input_to_cell_weights=*/nullptr,
          /*aux_input_to_output_weights=*/nullptr, input_gate_bias,
          forget_gate_bias, cell_gate_bias, output_gate_bias,
          projection_weights, projection_bias, &lstm_params,
          /*forward_sequence=*/true, time_major,
          /*output_offset=*/0, scratch_buffer, output_state, cell_state, output,
(recurrent_to_input_weights == nullptr ||
recurrent_to_input_weights->dims->size == 1),
(recurrent_to_forget_weights->dims->size == 1),
(recurrent_to_cell_weights->dims->size == 1),
(recurrent_to_output_weights->dims->size == 1),
CpuBackendContext::GetFromContext(context));
}
case kTfLiteUInt8:
case kTfLiteInt8: {
const bool is_hybrid = input->type == kTfLiteFloat32;
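      // "Hybrid" here means float activations over quantized weights: the
      // float input is quantized (per batch) before the matrix products and
      // the results are scaled back to float, trading some accuracy for a
      // smaller model.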
if (is_hybrid) {
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kScratchBuffer, &scratch_buffer));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRowSums, &row_sums));
const int row_sums_size = row_sums->dims->data[0];
return lstm_eval::EvalHybrid(
            input, input_to_input_weights,
            /*input_to_input_weights_ledger=*/nullptr, input_to_forget_weights,
            /*input_to_forget_weights_ledger=*/nullptr, input_to_cell_weights,
            /*input_to_cell_weights_ledger=*/nullptr, input_to_output_weights,
            /*input_to_output_weights_ledger=*/nullptr,
            recurrent_to_input_weights,
            /*recurrent_to_input_weights_ledger=*/nullptr,
            recurrent_to_forget_weights,
            /*recurrent_to_forget_weights_ledger=*/nullptr,
            recurrent_to_cell_weights,
            /*recurrent_to_cell_weights_ledger=*/nullptr,
            recurrent_to_output_weights,
            /*recurrent_to_output_weights_ledger=*/nullptr,
            cell_to_input_weights, cell_to_forget_weights,
            cell_to_output_weights, input_layer_norm_coefficients,
            forget_layer_norm_coefficients, cell_layer_norm_coefficients,
            output_layer_norm_coefficients,
            /*aux_input=*/nullptr,
            /*aux_input_to_input_weights=*/nullptr,
            /*aux_input_to_forget_weights=*/nullptr,
            /*aux_input_to_cell_weights=*/nullptr,
            /*aux_input_to_output_weights=*/nullptr, input_gate_bias,
            forget_gate_bias, cell_gate_bias, output_gate_bias,
            projection_weights, /*projection_weights_ledger=*/nullptr,
            projection_bias, &lstm_params,
            /*forward_sequence=*/true, time_major,
            /*output_offset=*/0, scratch_buffer,
            GetTemporary(context, node, kInputScalingFactors),
            /*aux_input_sf=*/nullptr,
            GetTemporary(context, node, kOutputStateScalingFactors),
            GetTemporary(context, node, kProductScalingFactors),
            GetTemporary(context, node, kRecoveredCellWeights),
            GetTemporary(context, node, kInputQuantized),
            /*aux_input_quantized=*/nullptr,
            GetTemporary(context, node, kOutputStateQuantized),
            GetTemporary(context, node, kCellStateQuantized), output_state,
            cell_state, GetTemporary(context, node, kAccumScratch), output,
            GetTemporary(context, node, kInputZeroPoints),
            /*aux_input_zp=*/nullptr,
            GetTemporary(context, node, kOutputStateZeroPoints), row_sums,
            row_sums_size, &op_data->compute_row_sums,
(recurrent_to_input_weights == nullptr ||
recurrent_to_input_weights->dims->size == 1),
(recurrent_to_forget_weights->dims->size == 1),
(recurrent_to_cell_weights->dims->size == 1),
(recurrent_to_output_weights->dims->size == 1),
CpuBackendContext::GetFromContext(context));
} else {
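        // Fully quantized 8x8_16 path: int8 activations and weights with an
        // int16 cell state, using the six integer scratch buffers requested
        // as temporaries during Prepare.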
TfLiteTensor* scratch0;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch0));
TfLiteTensor* scratch1;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 1, &scratch1));
TfLiteTensor* scratch2;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 2, &scratch2));
TfLiteTensor* scratch3;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 3, &scratch3));
TfLiteTensor* scratch4;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 4, &scratch4));
TfLiteTensor* scratch5;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &scratch5));
return lstm_eval::EvalInteger8x8_16(
input, input_to_input_weights, input_to_forget_weights,
input_to_cell_weights, input_to_output_weights,
recurrent_to_input_weights, recurrent_to_forget_weights,
recurrent_to_cell_weights, recurrent_to_output_weights,
cell_to_input_weights, cell_to_forget_weights,
cell_to_output_weights, input_layer_norm_coefficients,
forget_layer_norm_coefficients, cell_layer_norm_coefficients,
output_layer_norm_coefficients, input_gate_bias, forget_gate_bias,
cell_gate_bias, output_gate_bias, projection_weights,
            projection_bias, &lstm_params, /*forward_sequence=*/true,
            time_major, &op_data->integer_lstm_param, output_state, cell_state,
output, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5,
CpuBackendContext::GetFromContext(context));
}
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s is not currently supported.",
TfLiteTypeGetName(input_to_output_weights->type));
return kTfLiteError;
}
}
}  // namespace unidirectional_sequence_lstm
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM() {
static TfLiteRegistration r = {unidirectional_sequence_lstm::Init,
unidirectional_sequence_lstm::Free,
unidirectional_sequence_lstm::Prepare,
unidirectional_sequence_lstm::Eval};
return &r;
}
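// A minimal usage sketch (not part of this file): clients building a custom
// op resolver would typically hook this kernel up through the standard
// MutableOpResolver API, e.g.
//   tflite::MutableOpResolver resolver;
//   resolver.AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
//                       Register_UNIDIRECTIONAL_SEQUENCE_LSTM());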
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | #include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "benchmark/benchmark.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/unidirectional_sequence_lstm_test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
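// The bool test parameter is forwarded to the hybrid op models as the
// asymmetric_quantize_inputs flag, so each parameterized test can run with
// and without asymmetric quantization of the float inputs.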
class BaseUnidirectionalLstmTest : public ::testing::TestWithParam<bool> {
protected:
std::vector<float> input_to_input_weights_;
std::vector<float> input_to_cell_weights_;
std::vector<float> input_to_forget_weights_;
std::vector<float> input_to_output_weights_;
std::vector<float> input_gate_bias_;
std::vector<float> cell_gate_bias_;
std::vector<float> forget_gate_bias_;
std::vector<float> output_gate_bias_;
std::vector<float> recurrent_to_input_weights_;
std::vector<float> recurrent_to_cell_weights_;
std::vector<float> recurrent_to_forget_weights_;
std::vector<float> recurrent_to_output_weights_;
std::vector<float> cell_to_input_weights_;
std::vector<float> cell_to_forget_weights_;
std::vector<float> cell_to_output_weights_;
std::vector<float> projection_weights_;
std::vector<float> projection_bias_;
std::vector<std::vector<float>> lstm_input_;
std::vector<std::vector<float>> lstm_golden_output_;
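  // Feeds `input` to the model in either time-major ([sequence, batch,
  // input]) or batch-major ([batch, sequence, input]) layout, invokes it,
  // and checks the result against `output` element-wise within `tolerance`.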
void VerifyGoldens(const std::vector<std::vector<float>>& input,
const std::vector<std::vector<float>>& output,
UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5,
bool time_major = true) {
const int num_batches = input.size();
EXPECT_GT(num_batches, 0);
const int num_inputs = lstm->num_inputs();
EXPECT_GT(num_inputs, 0);
const int input_sequence_size = input[0].size() / num_inputs;
EXPECT_GT(input_sequence_size, 0);
if (time_major) {
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data() + i * num_inputs;
const float* batch_end = batch_start + num_inputs;
lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
batch_end);
}
}
} else {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data();
const float* batch_end = batch_start + input_sequence_size * num_inputs;
lstm->SetInput(b * input_sequence_size * num_inputs, batch_start,
batch_end);
}
}
ASSERT_EQ(lstm->Invoke(), kTfLiteOk);
const int num_outputs = lstm->num_outputs();
EXPECT_GT(num_outputs, 0);
std::vector<float> expected;
if (time_major) {
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* golden_start_batch = output[b].data() + i * num_outputs;
const float* golden_end_batch = golden_start_batch + num_outputs;
expected.insert(expected.end(), golden_start_batch, golden_end_batch);
}
}
} else {
for (int b = 0; b < num_batches; ++b) {
const float* golden_batch_start = output[b].data();
const float* golden_batch_end =
golden_batch_start + input_sequence_size * num_outputs;
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
}
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
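  // Worked example of the time-major indexing above: with num_batches = 2
  // and num_inputs = 5, timestep i of batch b is written starting at flat
  // offset ((i * 2) + b) * 5, i.e. the batches interleave within a timestep.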
};
class NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_input_weights_ = {-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524};
input_to_cell_weights_ = {-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113, -0.29909778};
input_to_forget_weights_ = {0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212};
input_to_output_weights_ = {-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578};
input_gate_bias_ = {0., 0., 0., 0.};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_input_weights_ = {
-0.0063535, -0.2042388, 0.31454784, -0.35746509,
0.28902304, 0.08183324, -0.16555229, 0.02286911,
-0.13566875, 0.03034258, 0.48091322, -0.12528998,
0.24077177, -0.51332325, -0.33502164, 0.10629296};
recurrent_to_cell_weights_ = {
-0.3407414, 0.24443203, -0.2078532, 0.26320225,
0.05695659, -0.00123841, -0.4744786, -0.35869038,
-0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064};
recurrent_to_forget_weights_ = {
-0.48684245, -0.06655136, 0.42224967, 0.2112639,
0.27654213, 0.20864892, -0.07646349, 0.45877004,
0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004};
recurrent_to_output_weights_ = {
0.43385774, -0.17194885, 0.2718237, 0.09215671,
0.24107647, -0.39835793, 0.18212086, 0.01301402,
0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792}};
}
};
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {n_cell, n_input},   // input_to_input_weight tensor
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor
          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor
          {n_batch, n_output},  // output_state tensor
          {n_batch, n_cell},    // cell_state tensor
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
LstmBlackBoxTestBatchMajor) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
  // With n_batch == 1, the batch-major layout is identical to time-major,
  // so the same input and golden data can be reused directly.
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/1e-5,
                /*time_major=*/false);
}
TEST_P(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestUint8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_UINT8, GetParam());
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.0157651);
}
TEST_P(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestInt8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_INT8, GetParam());
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.0157651);
}
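// In the CIFG (Coupled Input and Forget Gate) variant below, the input gate
// is derived from the forget gate (roughly 1 - forget activation), so all
// input-gate weights and biases are omitted and their tensors stay empty.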
class CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
0.05100781, 0.04717243, 0.48944736,
-0.38535351, -0.17212132};
input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698,
0.24407166, 0.33826375};
input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_cell_weights_ = {
0.54066205, -0.32668582, -0.43562764, -0.56094903,
0.42957711, 0.01841056, -0.32764608, -0.33027974,
-0.10826075, 0.20675004, 0.19069612, -0.03026325,
-0.54532051, 0.33003211, 0.44901288, 0.21193194};
recurrent_to_forget_weights_ = {
-0.13832897, -0.0515101, -0.2359007, -0.16661474,
-0.14340827, 0.36986142, 0.23414481, 0.55899,
0.10798943, -0.41174671, 0.17751795, -0.34484994,
-0.35874045, -0.11352962, 0.27268326, 0.54058349};
recurrent_to_output_weights_ = {
0.41613156, 0.42610586, -0.16495961, -0.5663873,
0.30579174, -0.05115908, -0.33941799, 0.23364776,
0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987};
cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
0.31544167};
cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
-0.77109635};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.36444446, -0.00352185, 0.12886585, -0.05163646,
-0.42312205, -0.01218222, 0.24201041, -0.08124574,
-0.358325, -0.04621704, 0.21641694, -0.06471302}};
}
};
TEST_F(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {0, 0},             // input_to_input_weight tensor (empty: CIFG)
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor
          {0, 0},              // recurrent_to_input_weight tensor (empty: CIFG)
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},       // cell_to_input_weight tensor (empty: CIFG)
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)
          {0},       // input_gate_bias tensor (empty: CIFG)
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor
          {n_batch, n_output},  // output_state tensor
          {n_batch, n_cell},    // cell_state tensor
});
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
TEST_P(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestUint8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_UINT8, GetParam());
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.03573);
}
TEST_P(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestInt8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_INT8, GetParam());
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.03573);
}
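// The next fixture adds peephole connections (cell state feeding the gates
// directly) and a recurrent projection layer that maps the 20-unit cell
// output down to the 16-unit model output.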
class NoCifgPeepholeProjectionClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_input_weights_ = {
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
input_to_forget_weights_ = {
-0.0018401089, -0.004852237, 0.03698424, 0.014181704,
0.028273236, -0.016726194, -0.05249759, -0.10204261,
0.00861066, -0.040979505, -0.009899187, 0.01923892,
-0.028177269, -0.08535103, -0.14585495, 0.10662567,
-0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395,
0.0814421, -0.12257899, -0.033945758, -0.031303465,
0.045630626, 0.06843887, -0.13492945, -0.012480007,
-0.0811829, -0.07224499, -0.09628791, 0.045100946,
0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997,
0.052625068, 0.12784666, 0.07077897, 0.025725935,
0.04165009, 0.07241905, 0.018668644, -0.037377294,
-0.06277783, -0.08833636, -0.040120605, -0.011405586,
-0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839,
0.13396506, -0.08402166, -0.01901462, -0.044678304,
-0.07720565, 0.014350063, -0.11757958, -0.0652038,
-0.08185733, -0.076754324, -0.092614375, 0.10405491,
0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447,
-0.054523353, 0.02582715, 0.02327355, -0.011857179,
-0.0011980024, -0.034641717, -0.026125094, -0.17582615,
-0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
-8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
input_to_cell_weights_ = {
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
input_to_output_weights_ = {
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
0.053110216, -0.06928846, -0.13942584, -0.11816189,
0.19483899, 0.03652339, -0.10250295, 0.036714908,
-0.18426876, 0.036065217, 0.21810818, 0.02383196,
-0.043370757, 0.08690144, -0.04444982, 0.00030581196};
forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739};
cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027};
output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
0.027195795, 0.35373217, -0.018957434, 0.008907322,
-0.0762701, 0.12018895, 0.04216877, 0.0022856654,
0.040952638, 0.3147856, 0.08225149, -0.057416286,
-0.14995944, -0.008040261, 0.13208859, 0.029760877};
recurrent_to_input_weights_ = {
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447};
recurrent_to_cell_weights_ = {
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404};
recurrent_to_forget_weights_ = {
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027};
recurrent_to_output_weights_ = {
0.025825322, -0.05813119, 0.09495884, -0.045984812,
-0.01255415, -0.0026479573, -0.08196161, -0.054914974,
-0.0046604523, -0.029587349, -0.044576716, -0.07480124,
-0.082868785, 0.023254942, 0.027502948, -0.0039728214,
-0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307,
-0.08829125, -0.005139627, -0.08989442, -0.0555066,
0.13596267, -0.025062224, -0.048351806, -0.03850004,
0.07266485, -0.022414139, 0.05940088, 0.075114764,
0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916,
0.014416728, 0.043229222, 0.034178585, -0.07530371,
0.035837382, -0.085607, -0.007721233, -0.03287832,
-0.043848954, -0.06404588, -0.06632928, -0.073643476,
0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091,
-0.030063879, 0.008801774, -0.023021035, -0.019558564,
0.05158114, -0.010947698, -0.011825728, 0.0075720972,
0.0699727, -0.0039981045, 0.069350146, 0.08799282,
0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699,
-0.00924166, 0.0046702605, -0.036598757, -0.08811812,
0.10522024, -0.032441203, 0.008176899, -0.04454919,
0.07058152, 0.0067963637, 0.039206743, 0.03259838,
0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724,
0.036879618, 0.043357447, 0.028362012, -0.05908629,
0.0059240665, -0.04995891, -0.019187413, 0.0276265,
-0.01628143, 0.0025863599, 0.08800015, 0.035250366,
-0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454,
-0.009660886, 0.019076364, 0.018299393, -0.046004917,
0.08891175, 0.0431396, -0.026327137, -0.051502608,
0.08979574, -0.051670972, 0.04940282, -0.07491107,
-0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988,
-0.035936575, -0.011681591, 0.064818054, 0.0073146066,
-0.021745546, -0.043124277, -0.06471268, -0.07053354,
-0.029321948, -0.05330136, 0.016933719, -0.053782392,
0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434,
-0.07924483, 0.06936997, 0.0034815092, -0.007305279,
-0.037325785, -0.07251102, -0.033633437, -0.08677009,
0.091591336, -0.14165086, 0.021752775, 0.019683983,
0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828,
0.1183656, -0.0010731248, -0.023590032, -0.072285876,
-0.0724771, -0.026382286, -0.0014920527, 0.042667855,
0.0018776858, 0.02986552, 0.009814309, 0.0733756,
0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798,
-0.010036754, 0.02576849, -0.08307328, 0.010112348,
0.042521734, -0.05869831, -0.071689695, 0.03876447,
-0.13275425, -0.0352966, -0.023077697, 0.10285965,
0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767,
-0.08271222, -0.0030240538, -0.016368777, 0.1070414,
0.042672627, 0.013456989, -0.0437609, -0.022309763,
0.11576483, 0.04108048, 0.061026827, -0.0190714,
-0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893,
-0.023771819, -0.01965048, 0.007955471, -0.043740474,
0.03346837, -0.10549954, 0.090567775, 0.042013682,
-0.03176985, 0.12569028, -0.02421228, -0.029526481,
0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367,
-0.06861939, -0.021256343, -0.041093912, -0.06669611,
0.035498552, 0.021757556, -0.09302526, -0.015403468,
-0.06614931, -0.051798206, -0.013874718, 0.03630673,
0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424,
-0.020674974, -0.03944324, -0.008110165, -0.11113267,
0.08484226, 0.043586485, 0.040582247, 0.0968012,
-0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844,
-0.11768019, 0.085926116, -0.08251791, -0.045081906,
0.0948852, 0.068401024, 0.024856757, 0.06978981,
-0.057309967, -0.012775832, -0.0032452994, 0.01977615,
-0.041040014, -0.024264973, 0.063464895, 0.05431621,
};
cell_to_input_weights_ = {
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
cell_to_forget_weights_ = {
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
cell_to_output_weights_ = {
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
projection_weights_ = {
-0.009802181, 0.09401916, 0.0717386, -0.13895074,
0.09641832, 0.060420845, 0.08539281, 0.054285463,
0.061395317, 0.034448683, -0.042991187, 0.019801661,
-0.16840284, -0.015726732, -0.23041931, -0.024478018,
-0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914,
0.16169067, 0.22465782, -0.03993472, -0.004017731,
0.08633481, -0.28869787, 0.08682067, 0.17240396,
0.014975425, 0.056431185, 0.031037588, 0.16702051,
0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434,
0.014777949, -0.20203483, 0.094781205, 0.19100232,
0.13987629, -0.036132768, -0.06426278, -0.05108664,
0.13221376, 0.009441198, -0.16715929, 0.15859416,
-0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123,
0.0046453946, 0.050794356, 0.10770313, -0.20790008,
-0.07149004, -0.11425117, 0.008225835, -0.035802525,
0.14374903, 0.15262283, 0.048710253, 0.1847461,
-0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216,
0.016261552, 0.022461696, 0.12689082, -0.043589946,
-0.12035478, -0.08361797, -0.050666027, -0.1248618,
-0.1275799, -0.071875185, 0.07377272, 0.09944291,
-0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444,
0.041546922, -0.20424393, 0.06907816, 0.050412357,
0.00724631, 0.039827548, 0.12449835, 0.10747581,
0.13708383, 0.09134148, -0.12617786, -0.06428341,
0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063,
0.022913318, -0.042050496, 0.16842307, -0.060597885,
0.10531834, -0.06411776, -0.07451711, -0.03410368,
-0.13393489, 0.06534304, 0.003620307, 0.04490757,
0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227,
-0.024575593, -0.036445823, 0.07155557, 0.009672501,
-0.02328883, 0.009533515, -0.03606021, -0.07421458,
-0.028082801, -0.2678904, -0.13221288, 0.18419984,
-0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874,
0.14494097, -0.12522776, -0.098633975, -0.10766018,
-0.08317623, 0.08594209, 0.07749552, 0.039474737,
0.1776665, -0.07409566, -0.0477268, 0.29323658,
0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189,
0.10034707, 0.045594677, 0.0635285, -0.0715442,
-0.089667566, -0.10811871, 0.00026344223, 0.08298446,
-0.009525053, 0.006585689, -0.24567553, -0.09450807,
0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363,
0.067035615, 0.19271925, -0.0032889997, -0.043264326,
0.09663576, -0.057112187, -0.10100678, 0.0628376,
0.04447668, 0.017961001, -0.10094388, -0.10190601,
0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151,
-0.1947263, 0.02251204, 0.11216432, -0.10307853,
0.17351969, -0.039091777, 0.08066188, -0.00561982,
0.12633002, 0.11335965, -0.0088127935, -0.019777594,
0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895,
-0.07468996, -0.0855457, 0.099339016, -0.07580735,
-0.13775392, 0.08434318, 0.08330512, -0.12131499,
0.031935584, 0.09180414, -0.08876437, -0.08049874,
0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513,
0.04331237, 0.04299654, -0.036394123, -0.12915532,
0.09793732, 0.07512415, -0.11319543, -0.032502122,
0.15661901, 0.07671967, -0.005491124, -0.19379048,
-0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725,
-0.09334311, 0.15026465, -0.15493552, -0.057762887,
-0.11604192, -0.262013, -0.01391798, 0.012185008,
0.11156489, -0.07483202, 0.06693364, -0.26151478,
0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075,
0.030635102, 0.010969227, 0.11109743, 0.010919218,
0.027526086, 0.13519906, 0.01891392, -0.046839405,
-0.040167913, 0.017953383, -0.09700955, 0.0061885654,
-0.07000971, 0.026893595, -0.038844477, 0.14543656};
lstm_input_ = {
        {// Batch0: 4 (input_sequence_size) * 5 (n_input)
0.787926, 0.151646, 0.071352, 0.118426, 0.458058,
0.596268, 0.998386, 0.568695, 0.864524, 0.571277,
0.073204, 0.296072, 0.743333, 0.069199, 0.045348,
0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
        {// Batch1: 4 (input_sequence_size) * 5 (n_input)
0.295743, 0.544053, 0.690064, 0.858138, 0.497181,
0.642421, 0.524260, 0.134799, 0.003639, 0.162482,
0.640394, 0.930399, 0.050782, 0.432485, 0.988078,
0.082922, 0.563329, 0.865614, 0.333232, 0.259916}
};
lstm_golden_output_ = {
        {// Batch0: 4 (input_sequence_size) * 16 (n_output)
-0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
-0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
-0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
-0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
-0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
-0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
-0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169},
        {// Batch1: 4 (input_sequence_size) * 16 (n_output)
-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
-0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
-0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
-0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
-0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
-0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009}};
}
};
TEST_F(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {n_cell, n_input},   // input_to_input_weight tensor
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {n_cell},  // cell_to_input_weight tensor (peephole)
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)
          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {n_output, n_cell},  // projection_weight tensor
          {0},                 // projection_bias tensor
          {n_batch, n_output},  // output_state tensor
          {n_batch, n_cell},    // cell_state tensor
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
TEST_P(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestUint8) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
  // GetParam() is forwarded as the asymmetric_quantize_inputs flag; this
  // projection test is skipped for the asymmetric-quantization variant.
  if (GetParam()) {
    return;
  }
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_UINT8, GetParam());
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.00467);
}
TEST_P(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestInt8) {
  // Skipped for the asymmetric-quantization variant (see the uint8 test).
  if (GetParam()) {
    return;
  }
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_INT8, GetParam());
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.00467);
}
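// The final fixture reuses the same projection setup but, as its
// "AndBiasClipping" name suggests, also supplies a projection bias and
// exercises cell/projection clipping.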
class NoCifgPeepholeProjectionAndBiasClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_input_weights_ = {
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
input_to_forget_weights_ = {
-0.0018401089, -0.004852237, 0.03698424, 0.014181704,
0.028273236, -0.016726194, -0.05249759, -0.10204261,
0.00861066, -0.040979505, -0.009899187, 0.01923892,
-0.028177269, -0.08535103, -0.14585495, 0.10662567,
-0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395,
0.0814421, -0.12257899, -0.033945758, -0.031303465,
0.045630626, 0.06843887, -0.13492945, -0.012480007,
-0.0811829, -0.07224499, -0.09628791, 0.045100946,
0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997,
0.052625068, 0.12784666, 0.07077897, 0.025725935,
0.04165009, 0.07241905, 0.018668644, -0.037377294,
-0.06277783, -0.08833636, -0.040120605, -0.011405586,
-0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839,
0.13396506, -0.08402166, -0.01901462, -0.044678304,
-0.07720565, 0.014350063, -0.11757958, -0.0652038,
-0.08185733, -0.076754324, -0.092614375, 0.10405491,
0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447,
-0.054523353, 0.02582715, 0.02327355, -0.011857179,
-0.0011980024, -0.034641717, -0.026125094, -0.17582615,
-0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
-8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
input_to_cell_weights_ = {
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
input_to_output_weights_ = {
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
0.053110216, -0.06928846, -0.13942584, -0.11816189,
0.19483899, 0.03652339, -0.10250295, 0.036714908,
-0.18426876, 0.036065217, 0.21810818, 0.02383196,
-0.043370757, 0.08690144, -0.04444982, 0.00030581196};
forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739};
cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027};
output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
0.027195795, 0.35373217, -0.018957434, 0.008907322,
-0.0762701, 0.12018895, 0.04216877, 0.0022856654,
0.040952638, 0.3147856, 0.08225149, -0.057416286,
-0.14995944, -0.008040261, 0.13208859, 0.029760877};
recurrent_to_input_weights_ = {
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447};
recurrent_to_cell_weights_ = {
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404};
recurrent_to_forget_weights_ = {
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027};
recurrent_to_output_weights_ = {
0.025825322, -0.05813119, 0.09495884, -0.045984812,
-0.01255415, -0.0026479573, -0.08196161, -0.054914974,
-0.0046604523, -0.029587349, -0.044576716, -0.07480124,
-0.082868785, 0.023254942, 0.027502948, -0.0039728214,
-0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307,
-0.08829125, -0.005139627, -0.08989442, -0.0555066,
0.13596267, -0.025062224, -0.048351806, -0.03850004,
0.07266485, -0.022414139, 0.05940088, 0.075114764,
0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916,
0.014416728, 0.043229222, 0.034178585, -0.07530371,
0.035837382, -0.085607, -0.007721233, -0.03287832,
-0.043848954, -0.06404588, -0.06632928, -0.073643476,
0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091,
-0.030063879, 0.008801774, -0.023021035, -0.019558564,
0.05158114, -0.010947698, -0.011825728, 0.0075720972,
0.0699727, -0.0039981045, 0.069350146, 0.08799282,
0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699,
-0.00924166, 0.0046702605, -0.036598757, -0.08811812,
0.10522024, -0.032441203, 0.008176899, -0.04454919,
0.07058152, 0.0067963637, 0.039206743, 0.03259838,
0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724,
0.036879618, 0.043357447, 0.028362012, -0.05908629,
0.0059240665, -0.04995891, -0.019187413, 0.0276265,
-0.01628143, 0.0025863599, 0.08800015, 0.035250366,
-0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454,
-0.009660886, 0.019076364, 0.018299393, -0.046004917,
0.08891175, 0.0431396, -0.026327137, -0.051502608,
0.08979574, -0.051670972, 0.04940282, -0.07491107,
-0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988,
-0.035936575, -0.011681591, 0.064818054, 0.0073146066,
-0.021745546, -0.043124277, -0.06471268, -0.07053354,
-0.029321948, -0.05330136, 0.016933719, -0.053782392,
0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434,
-0.07924483, 0.06936997, 0.0034815092, -0.007305279,
-0.037325785, -0.07251102, -0.033633437, -0.08677009,
0.091591336, -0.14165086, 0.021752775, 0.019683983,
0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828,
0.1183656, -0.0010731248, -0.023590032, -0.072285876,
-0.0724771, -0.026382286, -0.0014920527, 0.042667855,
0.0018776858, 0.02986552, 0.009814309, 0.0733756,
0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798,
-0.010036754, 0.02576849, -0.08307328, 0.010112348,
0.042521734, -0.05869831, -0.071689695, 0.03876447,
-0.13275425, -0.0352966, -0.023077697, 0.10285965,
0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767,
-0.08271222, -0.0030240538, -0.016368777, 0.1070414,
0.042672627, 0.013456989, -0.0437609, -0.022309763,
0.11576483, 0.04108048, 0.061026827, -0.0190714,
-0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893,
-0.023771819, -0.01965048, 0.007955471, -0.043740474,
0.03346837, -0.10549954, 0.090567775, 0.042013682,
-0.03176985, 0.12569028, -0.02421228, -0.029526481,
0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367,
-0.06861939, -0.021256343, -0.041093912, -0.06669611,
0.035498552, 0.021757556, -0.09302526, -0.015403468,
-0.06614931, -0.051798206, -0.013874718, 0.03630673,
0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424,
-0.020674974, -0.03944324, -0.008110165, -0.11113267,
0.08484226, 0.043586485, 0.040582247, 0.0968012,
-0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844,
-0.11768019, 0.085926116, -0.08251791, -0.045081906,
0.0948852, 0.068401024, 0.024856757, 0.06978981,
-0.057309967, -0.012775832, -0.0032452994, 0.01977615,
-0.041040014, -0.024264973, 0.063464895, 0.05431621,
};
cell_to_input_weights_ = {
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
cell_to_forget_weights_ = {
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
cell_to_output_weights_ = {
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
projection_weights_ = {
-0.009802181, 0.09401916, 0.0717386, -0.13895074,
0.09641832, 0.060420845, 0.08539281, 0.054285463,
0.061395317, 0.034448683, -0.042991187, 0.019801661,
-0.16840284, -0.015726732, -0.23041931, -0.024478018,
-0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914,
0.16169067, 0.22465782, -0.03993472, -0.004017731,
0.08633481, -0.28869787, 0.08682067, 0.17240396,
0.014975425, 0.056431185, 0.031037588, 0.16702051,
0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434,
0.014777949, -0.20203483, 0.094781205, 0.19100232,
0.13987629, -0.036132768, -0.06426278, -0.05108664,
0.13221376, 0.009441198, -0.16715929, 0.15859416,
-0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123,
0.0046453946, 0.050794356, 0.10770313, -0.20790008,
-0.07149004, -0.11425117, 0.008225835, -0.035802525,
0.14374903, 0.15262283, 0.048710253, 0.1847461,
-0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216,
0.016261552, 0.022461696, 0.12689082, -0.043589946,
-0.12035478, -0.08361797, -0.050666027, -0.1248618,
-0.1275799, -0.071875185, 0.07377272, 0.09944291,
-0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444,
0.041546922, -0.20424393, 0.06907816, 0.050412357,
0.00724631, 0.039827548, 0.12449835, 0.10747581,
0.13708383, 0.09134148, -0.12617786, -0.06428341,
0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063,
0.022913318, -0.042050496, 0.16842307, -0.060597885,
0.10531834, -0.06411776, -0.07451711, -0.03410368,
-0.13393489, 0.06534304, 0.003620307, 0.04490757,
0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227,
-0.024575593, -0.036445823, 0.07155557, 0.009672501,
-0.02328883, 0.009533515, -0.03606021, -0.07421458,
-0.028082801, -0.2678904, -0.13221288, 0.18419984,
-0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874,
0.14494097, -0.12522776, -0.098633975, -0.10766018,
-0.08317623, 0.08594209, 0.07749552, 0.039474737,
0.1776665, -0.07409566, -0.0477268, 0.29323658,
0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189,
0.10034707, 0.045594677, 0.0635285, -0.0715442,
-0.089667566, -0.10811871, 0.00026344223, 0.08298446,
-0.009525053, 0.006585689, -0.24567553, -0.09450807,
0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363,
0.067035615, 0.19271925, -0.0032889997, -0.043264326,
0.09663576, -0.057112187, -0.10100678, 0.0628376,
0.04447668, 0.017961001, -0.10094388, -0.10190601,
0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151,
-0.1947263, 0.02251204, 0.11216432, -0.10307853,
0.17351969, -0.039091777, 0.08066188, -0.00561982,
0.12633002, 0.11335965, -0.0088127935, -0.019777594,
0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895,
-0.07468996, -0.0855457, 0.099339016, -0.07580735,
-0.13775392, 0.08434318, 0.08330512, -0.12131499,
0.031935584, 0.09180414, -0.08876437, -0.08049874,
0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513,
0.04331237, 0.04299654, -0.036394123, -0.12915532,
0.09793732, 0.07512415, -0.11319543, -0.032502122,
0.15661901, 0.07671967, -0.005491124, -0.19379048,
-0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725,
-0.09334311, 0.15026465, -0.15493552, -0.057762887,
-0.11604192, -0.262013, -0.01391798, 0.012185008,
0.11156489, -0.07483202, 0.06693364, -0.26151478,
0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075,
0.030635102, 0.010969227, 0.11109743, 0.010919218,
0.027526086, 0.13519906, 0.01891392, -0.046839405,
-0.040167913, 0.017953383, -0.09700955, 0.0061885654,
-0.07000971, 0.026893595, -0.038844477, 0.14543656};
projection_bias_ = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6};
lstm_input_ = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058,
0.596268, 0.998386, 0.568695, 0.864524, 0.571277,
0.073204, 0.296072, 0.743333, 0.069199, 0.045348,
0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181,
0.642421, 0.524260, 0.134799, 0.003639, 0.162482,
0.640394, 0.930399, 0.050782, 0.432485, 0.988078,
0.082922, 0.563329, 0.865614, 0.333232, 0.259916}
};
lstm_golden_output_ = {
{
0.0960319489, 0.229351997, 0.297207743, 0.415997744, 0.491644233,
0.578822136, 0.728351235, 0.788540304, 0.909073055, 0.975599587,
1.08478093, 1.17409372, 1.30914319, 1.4041512, 1.51714694,
1.61342025, 0.0634541437, 0.190279216, 0.317923307, 0.415168911,
0.458113253, 0.609743774, 0.731511116, 0.795806408, 0.876155913,
0.960330188, 1.12396312, 1.22149014, 1.33917773, 1.43213499,
1.54139447, 1.65451813, 0.0485293195, 0.160991609, 0.337073475,
0.428976893, 0.459505379, 0.617044866, 0.743735075, 0.790821671,
0.85271728, 0.946818829, 1.12779701, 1.23345077, 1.35309088,
1.44595909, 1.56173062, 1.67839324, 0.0445971154, 0.156434938,
0.341761589, 0.425259203, 0.449760497, 0.633765697, 0.745093822,
0.791106999, 0.84820503, 0.952787101, 1.13438797, 1.24063754,
1.34668994, 1.44879568, 1.57038593, 1.67956686},
{
0.0861309841, 0.228726774, 0.296653062, 0.40733397, 0.47120741,
0.581307411, 0.719366193, 0.788456261, 0.904226124, 0.965476751,
1.10223258, 1.19042683, 1.32106233, 1.41333091, 1.51509535,
1.62168002, 0.0652779415, 0.18218407, 0.324066937, 0.42611438,
0.47292757, 0.602282405, 0.739310443, 0.791508496, 0.870626807,
0.955534995, 1.10976851, 1.21598971, 1.34197009, 1.43256509,
1.54804492, 1.65581059, 0.0492607877, 0.169714347, 0.332315415,
0.419173867, 0.44699502, 0.630063772, 0.737177074, 0.792844594,
0.858417571, 0.956391335, 1.13453305, 1.23976779, 1.34693861,
1.4410423, 1.55988359, 1.67204297, 0.0390465111, 0.15099439,
0.3439475, 0.424439192, 0.444207728, 0.632501483, 0.742233515,
0.791400731, 0.845713973, 0.944575012, 1.14116096, 1.24791968,
1.35954499, 1.45086145, 1.56633317, 1.68943977}};
}
};
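// Float black-box test; unlike the preceding tests, this one also sets a
// projection bias before verifying the goldens.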
TEST_F(NoCifgPeepholeProjectionAndBiasClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/true,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {n_cell, n_input},   // input_to_input_weight tensor
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {n_cell},  // cell_to_input_weight tensor
          {n_cell},  // cell_to_forget_weight tensor
          {n_cell},  // cell_to_output_weight tensor
          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {n_output, n_cell},   // projection_weight tensor
          {n_output},           // projection_bias tensor
          {n_batch, n_output},  // output (activation) state tensor
          {n_batch, n_cell},    // cell state tensor
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
lstm.SetProjectionBias(projection_bias_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
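// Wrapper that builds the base op model with layer normalization enabled
// (the trailing `true` passed to UnidirectionalLSTMOpModel).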
class LayerNormUnidirectionalLSTMOpModel : public UnidirectionalLSTMOpModel {
public:
LayerNormUnidirectionalLSTMOpModel(
int n_batch, int n_input, int n_cell, int n_output, int sequence_length,
bool time_major, bool use_cifg, bool use_peephole,
bool use_projection_weights, bool use_projection_bias, float cell_clip,
float proj_clip, const std::vector<std::vector<int>>& input_shapes,
const TensorType& weights_type = TensorType_FLOAT32)
: UnidirectionalLSTMOpModel(
n_batch, n_input, n_cell, n_output, sequence_length, time_major,
use_cifg, use_peephole, use_projection_weights, use_projection_bias,
            cell_clip, proj_clip, input_shapes, TensorType_FLOAT32,
            /*is_layer_norm=*/true) {}
};
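// Base fixture for the layer-norm tests; holds the float weight, bias, and
// layer-norm coefficient vectors shared by the fixtures below.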
class BaseLayerNormUnidirectionalLstmTest : public ::testing::Test {
protected:
std::vector<float> input_to_input_weights_;
std::vector<float> input_to_cell_weights_;
std::vector<float> input_to_forget_weights_;
std::vector<float> input_to_output_weights_;
std::vector<float> input_gate_bias_;
std::vector<float> cell_gate_bias_;
std::vector<float> forget_gate_bias_;
std::vector<float> output_gate_bias_;
std::vector<float> recurrent_to_input_weights_;
std::vector<float> recurrent_to_cell_weights_;
std::vector<float> recurrent_to_forget_weights_;
std::vector<float> recurrent_to_output_weights_;
std::vector<float> cell_to_input_weights_;
std::vector<float> cell_to_forget_weights_;
std::vector<float> cell_to_output_weights_;
std::vector<float> projection_weights_;
std::vector<float> projection_bias_;
std::vector<float> input_layer_norm_coefficients_;
std::vector<float> forget_layer_norm_coefficients_;
std::vector<float> cell_layer_norm_coefficients_;
std::vector<float> output_layer_norm_coefficients_;
std::vector<std::vector<float>> lstm_input_;
std::vector<std::vector<float>> lstm_golden_output_;
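  // Feeds `input` to the model in time-major order, invokes it, and checks
  // the flattened output against `output` within `tolerance`.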
void VerifyGoldens(const std::vector<std::vector<float>>& input,
const std::vector<std::vector<float>>& output,
UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5) {
const int num_batches = input.size();
EXPECT_GT(num_batches, 0);
const int num_inputs = lstm->num_inputs();
EXPECT_GT(num_inputs, 0);
const int input_sequence_size = input[0].size() / num_inputs;
EXPECT_GT(input_sequence_size, 0);
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data() + i * num_inputs;
const float* batch_end = batch_start + num_inputs;
lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
batch_end);
}
}
ASSERT_EQ(lstm->Invoke(), kTfLiteOk);
const int num_outputs = lstm->num_outputs();
EXPECT_GT(num_outputs, 0);
std::vector<float> expected;
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* golden_start_batch = output[b].data() + i * num_outputs;
const float* golden_end_batch = golden_start_batch + num_outputs;
expected.insert(expected.end(), golden_start_batch, golden_end_batch);
}
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
};
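// Fixture for a small CIFG + peephole LSTM with layer normalization and no
// projection or clipping.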
class CifgPeepholeNoProjectionNoClippingLayerNormUnidirectionalLstmTest
: public BaseLayerNormUnidirectionalLstmTest {
void SetUp() override {
input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
0.05100781, 0.04717243, 0.48944736,
-0.38535351, -0.17212132};
input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698,
0.24407166, 0.33826375};
input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_cell_weights_ = {
0.54066205, -0.32668582, -0.43562764, -0.56094903,
0.42957711, 0.01841056, -0.32764608, -0.33027974,
-0.10826075, 0.20675004, 0.19069612, -0.03026325,
-0.54532051, 0.33003211, 0.44901288, 0.21193194};
recurrent_to_forget_weights_ = {
-0.13832897, -0.0515101, -0.2359007, -0.16661474,
-0.14340827, 0.36986142, 0.23414481, 0.55899,
0.10798943, -0.41174671, 0.17751795, -0.34484994,
-0.35874045, -0.11352962, 0.27268326, 0.54058349};
recurrent_to_output_weights_ = {
0.41613156, 0.42610586, -0.16495961, -0.5663873,
0.30579174, -0.05115908, -0.33941799, 0.23364776,
0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987};
cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
0.31544167};
cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
-0.77109635};
input_layer_norm_coefficients_ = {0.1, 0.2, 0.3, 0.5};
forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3};
cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8};
output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.102089, 0.00653987, 0.0515139, -0.0630045,
-0.173317, 0.0109206, 0.0903292, -0.109497,
-0.23827, 0.0119514, 0.119525, -0.12748}};
}
};
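// Layer-norm black-box test: the {0} / {0, 0} shapes below mark tensors that
// are absent because of CIFG and the lack of projection.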
TEST_F(CifgPeepholeNoProjectionNoClippingLayerNormUnidirectionalLstmTest,
LayerNormLstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
LayerNormUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {0, 0},             // input_to_input_weight tensor (CIFG: absent)
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor
          {0, 0},              // recurrent_to_input_weight tensor (CIFG)
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},       // cell_to_input_weight tensor (CIFG)
          {n_cell},  // cell_to_forget_weight tensor
          {n_cell},  // cell_to_output_weight tensor
          {0},       // input_gate_bias tensor (CIFG)
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {0, 0},  // projection_weight tensor (no projection)
          {0},     // projection_bias tensor
          {n_batch, n_output},  // output (activation) state tensor
          {n_batch, n_cell},    // cell state tensor
          {0},       // input_layer_norm_coefficient tensor (CIFG)
          {n_cell},  // forget_layer_norm_coefficient tensor
          {n_cell},  // cell_layer_norm_coefficient tensor
          {n_cell},  // output_layer_norm_coefficient tensor
});
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients_);
lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_);
lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
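// Runs the layer-norm op model with all four layer-norm coefficient tensors
// given empty ({0}) shapes, so it behaves as a plain LSTM; goldens come from
// the non-layer-norm CIFG/peephole fixture.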
TEST_F(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
NonLayerNormLstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
LayerNormUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {0, 0},             // input_to_input_weight tensor (CIFG: absent)
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor
          {0, 0},              // recurrent_to_input_weight tensor (CIFG)
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},       // cell_to_input_weight tensor (CIFG)
          {n_cell},  // cell_to_forget_weight tensor
          {n_cell},  // cell_to_output_weight tensor
          {0},       // input_gate_bias tensor (CIFG)
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {0, 0},  // projection_weight tensor (no projection)
          {0},     // projection_bias tensor
          {n_batch, n_output},  // output (activation) state tensor
          {n_batch, n_cell},    // cell state tensor
          {0},  // input_layer_norm_coefficient tensor (absent)
          {0},  // forget_layer_norm_coefficient tensor (absent)
          {0},  // cell_layer_norm_coefficient tensor (absent)
          {0},  // output_layer_norm_coefficient tensor (absent)
});
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
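// Op model for the fully integer-quantized UNIDIRECTIONAL_SEQUENCE_LSTM
// kernel: INT8 input/weights/output, INT16 state, peephole, and layer-norm
// tensors, and INT32 biases. `ranges` gives the (min, max) quantization range
// for each tensor in declaration order, and `intermediates` gives the
// (scale, zero_point) pair for each of the five intermediate tensors. Only
// the 8x8_16 path is exercised here (the 8x8_8 implementation is CHECKed
// off).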
class UnidirectionalSequenceLSTMIntegerOpModel : public SingleOpModel {
public:
UnidirectionalSequenceLSTMIntegerOpModel(
int n_batch, int n_input, int n_cell, int n_output, int sequence_length,
bool time_major, bool use_cifg, bool use_peephole,
bool use_projection_weights, bool use_projection_bias,
bool use_layer_norm, bool use_8x8_8_implementation,
const std::vector<std::pair<float, float>>& ranges,
const std::vector<std::pair<float, int>>& intermediates,
bool asymmetric_quantize_inputs = false)
: n_input_(n_input), n_output_(n_output) {
input_ = AddInput({TensorType_INT8,
{sequence_length, n_batch, n_input},
ranges[0].first,
ranges[0].second});
if (use_cifg) {
input_to_input_weights_ = AddNullInput();
} else {
input_to_input_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[1].first,
ranges[1].second});
}
input_to_forget_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[2].first,
ranges[2].second});
input_to_cell_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[3].first,
ranges[3].second});
input_to_output_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[4].first,
ranges[4].second});
if (use_cifg) {
recurrent_to_input_weights_ = AddNullInput();
} else {
recurrent_to_input_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[5].first,
ranges[5].second});
}
recurrent_to_forget_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[6].first,
ranges[6].second});
recurrent_to_cell_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[7].first,
ranges[7].second});
recurrent_to_output_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[8].first,
ranges[8].second});
if (use_peephole) {
if (use_cifg) {
cell_to_input_weights_ = AddNullInput();
} else {
cell_to_input_weights_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[9].first, ranges[9].second});
}
cell_to_forget_weights_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[10].first, ranges[10].second});
cell_to_output_weights_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[11].first, ranges[11].second});
} else {
cell_to_input_weights_ = AddNullInput();
cell_to_forget_weights_ = AddNullInput();
cell_to_output_weights_ = AddNullInput();
}
if (use_cifg) {
input_gate_bias_ = AddNullInput();
} else {
input_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[12].first, ranges[12].second});
}
forget_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[13].first, ranges[13].second});
cell_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[14].first, ranges[14].second});
output_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[15].first, ranges[15].second});
if (use_projection_weights) {
projection_weights_ = AddInput({TensorType_INT8,
{n_output, n_cell},
ranges[16].first,
ranges[16].second});
} else {
projection_weights_ = AddNullInput();
}
if (use_projection_bias) {
CHECK(use_projection_weights);
projection_bias_ = AddInput(
{TensorType_INT32, {n_output}, ranges[17].first, ranges[17].second});
} else {
projection_bias_ = AddNullInput();
}
AddVariableInput({TensorType_INT16,
{n_batch, n_output},
ranges[18].first,
ranges[18].second});
AddVariableInput({TensorType_INT16,
{n_batch, n_cell},
ranges[19].first,
ranges[19].second});
if (use_layer_norm) {
if (use_cifg) {
input_layer_norm_coefficients_ = AddNullInput();
} else {
input_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[20].first, ranges[20].second});
}
forget_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[21].first, ranges[21].second});
cell_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[22].first, ranges[22].second});
output_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[23].first, ranges[23].second});
}
CHECK(!use_8x8_8_implementation);
EXPECT_EQ(intermediates.size(), 5);
for (int i = 0; i < intermediates.size(); ++i) {
AddIntermediate(TensorType_INT16, {intermediates[i].first},
{intermediates[i].second});
}
output_ = AddOutput({TensorType_INT8,
{n_batch, n_output},
ranges[24].first,
ranges[24].second});
SetBuiltinOp(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
BuiltinOptions_UnidirectionalSequenceLSTMOptions,
CreateUnidirectionalSequenceLSTMOptions(
builder_, ActivationFunctionType_TANH, 0.0f,
0.0f, time_major, asymmetric_quantize_inputs)
.Union());
    BuildInterpreter({}, /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/true, /*allocate_and_delegate=*/false);
}
void PerformAllocateAndDelegate() { AllocateAndDelegate(true); }
void SetInputToInputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_input_weights_, f);
}
void SetInputToForgetWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_forget_weights_, f);
}
void SetInputToCellWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_cell_weights_, f);
}
void SetInputToOutputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_output_weights_, f);
}
void SetRecurrentToInputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_input_weights_, f);
}
void SetRecurrentToForgetWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_forget_weights_, f);
}
void SetRecurrentToCellWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_cell_weights_, f);
}
void SetRecurrentToOutputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_output_weights_, f);
}
void SetCellToInputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_to_input_weights_, f);
}
void SetCellToForgetWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_to_forget_weights_, f);
}
void SetCellToOutputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_to_output_weights_, f);
}
void SetInputLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(input_layer_norm_coefficients_, f);
}
void SetForgetLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(forget_layer_norm_coefficients_, f);
}
void SetCellLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_layer_norm_coefficients_, f);
}
void SetOutputLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(output_layer_norm_coefficients_, f);
}
void SetInputGateBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(input_gate_bias_, f);
}
void SetForgetGateBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(forget_gate_bias_, f);
}
void SetCellBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(cell_gate_bias_, f);
}
void SetOutputGateBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(output_gate_bias_, f);
}
void SetProjectionWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(projection_weights_, f);
}
void SetProjectionBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(projection_bias_, f);
}
void SetInput(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_, f);
}
std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); }
int num_inputs() { return n_input_; }
int num_outputs() { return n_output_; }
protected:
int input_;
int input_to_input_weights_;
int input_to_forget_weights_;
int input_to_cell_weights_;
int input_to_output_weights_;
int recurrent_to_input_weights_;
int recurrent_to_forget_weights_;
int recurrent_to_cell_weights_;
int recurrent_to_output_weights_;
int cell_to_input_weights_;
int cell_to_forget_weights_;
int cell_to_output_weights_;
int input_layer_norm_coefficients_;
int forget_layer_norm_coefficients_;
int cell_layer_norm_coefficients_;
int output_layer_norm_coefficients_;
int input_gate_bias_;
int forget_gate_bias_;
int cell_gate_bias_;
int output_gate_bias_;
int projection_weights_;
int projection_bias_;
int output_;
int n_input_;
int n_output_;
};
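// Integer (8x8_16) test with input gate, projection, and layer normalization
// but no peephole connections.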
TEST(IntegerUnidirectionalSequenceLstmOpTest,
NoCifg_NoPeephole_Projection_LayerNorm) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
const int sequence_length = 3;
const std::vector<float> input_to_input_weights = {
0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5,
-0.8, 0.7, -0.6, 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1};
const std::vector<float> input_to_forget_weights = {
-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8,
-0.4, 0.3, -0.5, -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
const std::vector<float> input_to_cell_weights = {
-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6,
0.6, -0.1, -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
const std::vector<float> input_to_output_weights = {
-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2,
0.6, -0.2, 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
const std::vector<float> input_gate_bias = {0.03, 0.15, 0.22, 0.38};
const std::vector<float> forget_gate_bias = {0.1, -0.3, -0.2, 0.1};
const std::vector<float> cell_gate_bias = {-0.05, 0.72, 0.25, 0.08};
const std::vector<float> output_gate_bias = {0.05, -0.01, 0.2, 0.1};
const std::vector<float> recurrent_to_input_weights = {
-0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6};
const std::vector<float> recurrent_to_cell_weights = {
-0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
const std::vector<float> recurrent_to_forget_weights = {
-0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
const std::vector<float> recurrent_to_output_weights = {
0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
const std::vector<float> input_layer_norm_coefficients = {0.1, 0.2, 0.3, 0.5};
const std::vector<float> forget_layer_norm_coefficients = {0.2, 0.2, 0.4,
0.3};
const std::vector<float> cell_layer_norm_coefficients = {0.7, 0.2, 0.3, 0.8};
const std::vector<float> output_layer_norm_coefficients = {0.6, 0.2, 0.2,
0.5};
const std::vector<float> projection_weights = {
-0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
const std::vector<std::pair<float, float>> ranges = {
      {-1.0, 127.0 / 128},      // input tensor
      {-1.0, 1.0},              // input_to_input_weight tensor
      {-1.0, 1.0},              // input_to_forget_weight tensor
      {-1.0, 1.0},              // input_to_cell_weight tensor
      {-1.0, 1.0},              // input_to_output_weight tensor
      {-1.0, 1.0},              // recurrent_to_input_weight tensor
      {-1.0, 1.0},              // recurrent_to_forget_weight tensor
      {-1.0, 1.0},              // recurrent_to_cell_weight tensor
      {-1.0, 1.0},              // recurrent_to_output_weight tensor
      {-1, 1},                  // cell_to_input_weight tensor
      {-1, 1},                  // cell_to_forget_weight tensor
      {-1, 1},                  // cell_to_output_weight tensor
      {-100, 100},              // input_gate_bias tensor
      {-100, 100},              // forget_gate_bias tensor
      {-100, 100},              // cell_gate_bias tensor
      {-100, 100},              // output_gate_bias tensor
      {-0.5, 0.5},              // projection_weight tensor
      {-1, 1},                  // projection_bias tensor
      {-1.0, 32767.0 / 32768},  // output_state tensor
      {-1, 1},                  // cell_state tensor
      {-1.00001, 1.0},          // input_layer_norm_coefficient tensor
      {-1.00001, 1.0},          // forget_layer_norm_coefficient tensor
      {-1.00001, 1.0},          // cell_layer_norm_coefficient tensor
      {-1.00001, 1.0},          // output_layer_norm_coefficient tensor
      {-1.0, 32767.0 / 32768},  // output tensor
};
std::vector<std::pair<float, int>> intermediates = {
{0.007059, 0}, {0.007812, 0}, {0.007059, 0}, {0.007812, 0}, {0.007, 0}};
UnidirectionalSequenceLSTMIntegerOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*use_layer_norm=*/true,
      /*use_8x8_8_implementation=*/false, ranges, intermediates);
lstm.PerformAllocateAndDelegate();
lstm.SetInputToInputWeights(input_to_input_weights);
lstm.SetInputToCellWeights(input_to_cell_weights);
lstm.SetInputToForgetWeights(input_to_forget_weights);
lstm.SetInputToOutputWeights(input_to_output_weights);
lstm.SetInputGateBias(input_gate_bias);
lstm.SetCellBias(cell_gate_bias);
lstm.SetForgetGateBias(forget_gate_bias);
lstm.SetOutputGateBias(output_gate_bias);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights);
lstm.SetProjectionWeights(projection_weights);
lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients);
lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients);
lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients);
lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients);
const std::vector<float> lstm_input = {
      0.7, 0.8, 0.1, 0.2, 0.3,  // t = 0, batch 0
      0.8, 0.1, 0.2, 0.4, 0.5,  // t = 0, batch 1
      0.2, 0.7, 0.7, 0.1, 0.7,  // t = 1, batch 0
      0.3, 0.2, 0.9, 0.8, 0.1,  // t = 1, batch 1
      0.7, 0.8, 0.1, 0.2, 0.3,  // t = 2, batch 0
      0.3, 0.2, 0.9, 0.8, 0.1,  // t = 2, batch 1
};
const std::vector<int8_t> expected_output = {
127, 127, -108, -67, 127, 127, -128, 127, 127,
-128, 127, 127, 127, 127, 127, -128, 127, 127,
};
lstm.SetInput(lstm_input);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(expected_output));
}
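// As above, with peephole connections added and correspondingly tighter
// quantization ranges for the recurrent and peephole tensors; skipped when
// NNAPI is forced.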
TEST(IntegerUnidirectionalSequenceLstmOpTest,
NoCifg_Peephole_Projection_LayerNorm) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
const int sequence_length = 3;
const std::vector<float> input_to_input_weights = {
0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5,
-0.8, 0.7, -0.6, 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1};
const std::vector<float> input_to_forget_weights = {
-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8,
-0.4, 0.3, -0.5, -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
const std::vector<float> input_to_cell_weights = {
-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6,
0.6, -0.1, -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
const std::vector<float> input_to_output_weights = {
-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2,
0.6, -0.2, 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
const std::vector<float> input_gate_bias = {0.03, 0.15, 0.22, 0.38};
const std::vector<float> forget_gate_bias = {0.1, -0.3, -0.2, 0.1};
const std::vector<float> cell_gate_bias = {-0.05, 0.72, 0.25, 0.08};
const std::vector<float> output_gate_bias = {0.05, -0.01, 0.2, 0.1};
const std::vector<float> recurrent_to_input_weights = {
-0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6};
const std::vector<float> recurrent_to_cell_weights = {
-0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
const std::vector<float> recurrent_to_forget_weights = {
-0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
const std::vector<float> recurrent_to_output_weights = {
0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
const std::vector<float> cell_to_input_weights = {0.3, -0.1, 0.1, -0.2};
const std::vector<float> cell_to_forget_weights = {0.2, -0.1, 0.1, -0.2};
const std::vector<float> cell_to_output_weights = {0.3, -0.1, 0.1, -0.3};
const std::vector<float> input_layer_norm_coefficients = {0.1, 0.2, 0.3, 0.5};
const std::vector<float> forget_layer_norm_coefficients = {0.2, 0.2, 0.4,
0.3};
const std::vector<float> cell_layer_norm_coefficients = {0.7, 0.2, 0.3, 0.8};
const std::vector<float> output_layer_norm_coefficients = {0.6, 0.2, 0.2,
0.5};
const std::vector<float> projection_weights = {
-0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
const std::vector<std::pair<float, float>> ranges = {
      {-1.0, 127.0 / 128},      // input tensor
      {-1.0, 1.0},              // input_to_input_weight tensor
      {-1.0, 1.0},              // input_to_forget_weight tensor
      {-1.0, 1.0},              // input_to_cell_weight tensor
      {-1.0, 1.0},              // input_to_output_weight tensor
      {-1.0, 1.0},              // recurrent_to_input_weight tensor
      {-0.9, 0.9},              // recurrent_to_forget_weight tensor
      {-1.0, 1.0},              // recurrent_to_cell_weight tensor
      {-1.0, 1.0},              // recurrent_to_output_weight tensor
      {-0.3, 0.3},              // cell_to_input_weight tensor
      {-0.3, 0.3},              // cell_to_forget_weight tensor
      {-0.3, 0.3},              // cell_to_output_weight tensor
      {-100, 100},              // input_gate_bias tensor
      {-100, 80},               // forget_gate_bias tensor
      {-100, 100},              // cell_gate_bias tensor
      {-100, 100},              // output_gate_bias tensor
      {-0.5, 0.5},              // projection_weight tensor
      {-1, 1},                  // projection_bias tensor
      {-1.0, 32767.0 / 32768},  // output_state tensor
      {-1, 1},                  // cell_state tensor
      {-0.5, 0.5},              // input_layer_norm_coefficient tensor
      {-0.5, 0.5},              // forget_layer_norm_coefficient tensor
      {-1.0, 1.0},              // cell_layer_norm_coefficient tensor
      {-1.0, 1.0},              // output_layer_norm_coefficient tensor
      {-1.0, 32767.0 / 32768},  // output tensor
};
std::vector<std::pair<float, int>> intermediates = {
{0.007059, 0}, {0.007812, 0}, {0.007059, 0}, {0.007812, 0}, {0.007, 0}};
UnidirectionalSequenceLSTMIntegerOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*use_layer_norm=*/true,
      /*use_8x8_8_implementation=*/false, ranges, intermediates);
lstm.PerformAllocateAndDelegate();
lstm.SetInputToInputWeights(input_to_input_weights);
lstm.SetInputToCellWeights(input_to_cell_weights);
lstm.SetInputToForgetWeights(input_to_forget_weights);
lstm.SetInputToOutputWeights(input_to_output_weights);
lstm.SetInputGateBias(input_gate_bias);
lstm.SetCellBias(cell_gate_bias);
lstm.SetForgetGateBias(forget_gate_bias);
lstm.SetOutputGateBias(output_gate_bias);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights);
lstm.SetCellToInputWeights(cell_to_input_weights);
lstm.SetCellToForgetWeights(cell_to_forget_weights);
lstm.SetCellToOutputWeights(cell_to_output_weights);
lstm.SetProjectionWeights(projection_weights);
lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients);
lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients);
lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients);
lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients);
const std::vector<float> lstm_input = {
      0.7, 0.8, 0.1, 0.2, 0.3,  // t = 0, batch 0
      0.8, 0.1, 0.2, 0.4, 0.5,  // t = 0, batch 1
      0.2, 0.7, 0.7, 0.1, 0.7,  // t = 1, batch 0
      0.3, 0.2, 0.9, 0.8, 0.1,  // t = 1, batch 1
      0.7, 0.8, 0.1, 0.2, 0.3,  // t = 2, batch 0
      0.3, 0.2, 0.9, 0.8, 0.1,  // t = 2, batch 1
};
const std::vector<int8_t> expected_output = {
127, 127, -16, -21, 127, 127, 23, 127, 127,
-128, 127, 127, 127, 127, 127, -128, 127, 127,
};
lstm.SetInput(lstm_input);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(expected_output));
}
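// IndyLSTM uses diagonal recurrent weight matrices, i.e. each cell sees only
// its own previous output, so the recurrent weights reduce to one vector per
// gate. The test below builds a regular hybrid LSTM whose recurrent matrices
// are diagonal and an IndyLSTM holding just the diagonals, then checks that
// both produce the same output. Parameters: (use_cifg, use_peephole,
// asymmetric_quantize_inputs).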
class IndyLSTMOpTest
: public ::testing::TestWithParam<std::tuple<bool, bool, bool>> {};
INSTANTIATE_TEST_SUITE_P(
PeepHoleAndCifg, IndyLSTMOpTest,
testing::Combine(testing::Bool(),
testing::Bool(),
testing::Bool()));
TEST_P(IndyLSTMOpTest, HybridCheckThatDiagAndNonDiagRecurrentWeightsAreEqual) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool use_cifg = std::get<0>(params);
const bool use_peephole = std::get<1>(params);
const bool asymmetric_quantize_inputs = std::get<2>(params);
auto SetLstmWeights = [&](HybridUnidirectionalLSTMOpModel& model) -> void {
if (!use_cifg) {
model.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
}
model.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
model.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
model.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578});
if (!use_cifg) {
model.SetInputGateBias({0., 0., 0., 0.});
}
model.SetCellBias({0., 0., 0., 0.});
model.SetForgetGateBias({1., 1., 1., 1.});
model.SetOutputGateBias({0., 0., 0., 0.});
if (use_peephole) {
if (!use_cifg) {
model.SetCellToInputWeights(
{0.040369894, 0.030746894, 0.24704495, 0.018586371,
-0.037586458, -0.15312155, -0.11812848, -0.11465643,
0.20259799, 0.11418174, -0.10116027, -0.011334949,
0.12411352, -0.076769054, -0.052169047, 0.21198851,
-0.38871562, -0.09061183, -0.09683246, -0.21929175});
}
model.SetCellToForgetWeights(
{0.47485286, -0.51955009, -0.24458408, 0.31544167});
model.SetCellToOutputWeights(
{-0.17135078, 0.82760304, 0.85573703, -0.77109635});
}
};
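  // Compare two models with identical gate weights: a regular LSTM whose
  // recurrent matrices are diagonal, and an IndyLSTM that stores only those
  // diagonals. Their outputs should agree to within quantization error.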
std::vector<int> input_weights_shape{n_cell, n_input};
if (use_cifg) {
input_weights_shape = std::vector<int>{0, 0};
}
std::vector<int> recurrent_to_input_weights_shape{n_cell, n_output};
  if (use_cifg) {
    recurrent_to_input_weights_shape = std::vector<int>{0, 0};
  }
std::vector<std::vector<int>> input_shapes = {
{sequence_length, n_batch, n_input},
input_weights_shape,
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
recurrent_to_input_weights_shape,
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
      {(use_peephole && !use_cifg) ? n_cell : 0},
{use_peephole ? n_cell : 0},
{use_peephole ? n_cell : 0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
};
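  // (The shapes above follow the harness's LSTM tensor order: input,
  // input/recurrent/peephole weights, gate biases, projection, states;
  // zero-sized shapes mark omitted optional tensors.)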
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length, true,
use_cifg,
use_peephole,
false,
false,
0.0,
0.0, input_shapes, TensorType_UINT8,
asymmetric_quantize_inputs, false);
if (!use_cifg) {
lstm.SetRecurrentToInputWeights({-0.0063535, 0.0, 0.0, 0.0,
0.0, 0.08183324, 0.0, 0.0,
0.0, 0.0, 0.48091322, 0.0,
0.0, 0.0, 0.0, 0.10629296});
}
lstm.SetRecurrentToCellWeights({-0.3407414, 0.0, 0.0, 0.0,
0.0, -0.00123841, 0.0, 0.0,
0.0, 0.0, -0.501764, 0.0,
0.0, 0.0, 0.0, -0.16368064});
lstm.SetRecurrentToForgetWeights({-0.48684245, 0.0, 0.0, 0.0,
0.0, 0.20864892, 0.0, 0.0,
0.0, 0.0, 0.36447752, 0.0,
0.0, 0.0, 0.0, -0.01140004});
lstm.SetRecurrentToOutputWeights({0.43385774, 0.0, 0.0, 0.0,
0.0, -0.39835793, 0.0, 0.0,
0.0, 0.0, 0.20047462, 0.0,
0.0, 0.0, 0.0, 0.39922136});
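  // The IndyLSTM variant stores each recurrent weight matrix as its
  // length-n_cell diagonal, so shrink those four shape entries before
  // building the second model.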
input_shapes[5] = {n_cell};
input_shapes[6] = {n_cell};
input_shapes[7] = {n_cell};
input_shapes[8] = {n_cell};
HybridUnidirectionalLSTMOpModel indy_lstm(
n_batch, n_input, n_cell, n_output, sequence_length, true,
use_cifg,
use_peephole,
false,
false,
0.0,
0.0, input_shapes, TensorType_UINT8,
asymmetric_quantize_inputs, true);
SetLstmWeights(lstm);
SetLstmWeights(indy_lstm);
if (!use_cifg) {
indy_lstm.SetRecurrentToInputWeights(
{-0.0063535, 0.08183324, 0.48091322, 0.10629296});
}
indy_lstm.SetRecurrentToCellWeights(
{-0.3407414, -0.00123841, -0.501764, -0.16368064});
indy_lstm.SetRecurrentToForgetWeights(
{-0.48684245, 0.20864892, 0.36447752, -0.01140004});
indy_lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.39835793, 0.20047462, 0.39922136});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
indy_lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
ASSERT_EQ(indy_lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(indy_lstm.GetOutput(),
ElementsAreArray(ArrayFloatNear(lstm.GetOutput(), 1e-3)));
}
TEST_P(IndyLSTMOpTest, CheckThatDiagAndNonDiagRecurrentWeightsAreEqual) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool use_cifg = std::get<0>(params);
const bool use_peephole = std::get<1>(params);
auto SetLstmWeights = [&](UnidirectionalLSTMOpModel& model) -> void {
if (!use_cifg) {
model.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
}
model.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
model.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
model.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578});
if (!use_cifg) {
model.SetInputGateBias({0., 0., 0., 0.});
}
model.SetCellBias({0., 0., 0., 0.});
model.SetForgetGateBias({1., 1., 1., 1.});
model.SetOutputGateBias({0., 0., 0., 0.});
if (use_peephole) {
if (!use_cifg) {
model.SetCellToInputWeights(
{0.040369894, 0.030746894, 0.24704495, 0.018586371,
-0.037586458, -0.15312155, -0.11812848, -0.11465643,
0.20259799, 0.11418174, -0.10116027, -0.011334949,
0.12411352, -0.076769054, -0.052169047, 0.21198851,
-0.38871562, -0.09061183, -0.09683246, -0.21929175});
}
model.SetCellToForgetWeights(
{0.47485286, -0.51955009, -0.24458408, 0.31544167});
model.SetCellToOutputWeights(
{-0.17135078, 0.82760304, 0.85573703, -0.77109635});
}
};
std::vector<int> input_weights_shape{n_cell, n_input};
if (use_cifg) {
input_weights_shape = std::vector<int>{0, 0};
}
std::vector<int> recurrent_to_input_weights_shape{n_cell, n_output};
  if (use_cifg) {
    recurrent_to_input_weights_shape = std::vector<int>{0, 0};
  }
std::vector<std::vector<int>> input_shapes = {
{sequence_length, n_batch, n_input},
input_weights_shape,
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
recurrent_to_input_weights_shape,
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
      {(use_peephole && !use_cifg) ? n_cell : 0},
{use_peephole ? n_cell : 0},
{use_peephole ? n_cell : 0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
};
UnidirectionalLSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
sequence_length, true,
use_cifg,
use_peephole,
false,
false,
0.0,
0.0, input_shapes);
SetLstmWeights(lstm);
if (!use_cifg) {
lstm.SetRecurrentToInputWeights({-0.0063535, 0.0, 0.0, 0.0,
0.0, 0.08183324, 0.0, 0.0,
0.0, 0.0, 0.48091322, 0.0,
0.0, 0.0, 0.0, 0.10629296});
}
lstm.SetRecurrentToCellWeights({-0.3407414, 0.0, 0.0, 0.0,
0.0, -0.00123841, 0.0, 0.0,
0.0, 0.0, -0.501764, 0.0,
0.0, 0.0, 0.0, -0.16368064});
lstm.SetRecurrentToForgetWeights({-0.48684245, 0.0, 0.0, 0.0,
0.0, 0.20864892, 0.0, 0.0,
0.0, 0.0, 0.36447752, 0.0,
0.0, 0.0, 0.0, -0.01140004});
lstm.SetRecurrentToOutputWeights({0.43385774, 0.0, 0.0, 0.0,
0.0, -0.39835793, 0.0, 0.0,
0.0, 0.0, 0.20047462, 0.0,
0.0, 0.0, 0.0, 0.39922136});
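  // As above, shrink the recurrent weight shapes to diagonals for IndyLSTM.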
input_shapes[5] = {n_cell};
input_shapes[6] = {n_cell};
input_shapes[7] = {n_cell};
input_shapes[8] = {n_cell};
UnidirectionalLSTMOpModel indy_lstm(
n_batch, n_input, n_cell, n_output, sequence_length, true,
use_cifg,
use_peephole,
false,
false,
0.0,
0.0, input_shapes, TensorType_FLOAT32,
false, false,
true);
SetLstmWeights(indy_lstm);
if (!use_cifg) {
indy_lstm.SetRecurrentToInputWeights(
{-0.0063535, 0.08183324, 0.48091322, 0.10629296});
}
indy_lstm.SetRecurrentToCellWeights(
{-0.3407414, -0.00123841, -0.501764, -0.16368064});
indy_lstm.SetRecurrentToForgetWeights(
{-0.48684245, 0.20864892, 0.36447752, -0.01140004});
indy_lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.39835793, 0.20047462, 0.39922136});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
indy_lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
ASSERT_EQ(indy_lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(indy_lstm.GetOutput(),
ElementsAreArray(ArrayFloatNear(lstm.GetOutput(), 1e-6)));
}
#define QUANTIZE_PARAMETER_TEST(test) \
INSTANTIATE_TEST_SUITE_P(test, test, ::testing::ValuesIn({false, true}));
QUANTIZE_PARAMETER_TEST(
CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest);
QUANTIZE_PARAMETER_TEST(
NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest);
QUANTIZE_PARAMETER_TEST(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest);
#undef QUANTIZE_PARAMETER_TEST
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
862266e5-6eee-42ce-88d0-6997e0b227b4 | cpp | tensorflow/tensorflow | split | tensorflow/lite/delegates/gpu/common/tasks/split.cc | tensorflow/lite/delegates/xnnpack/split_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/split.h"
#include <map>
#include <string>
#include <vector>
namespace tflite {
namespace gpu {
Split::Split(const GpuInfo& gpu_info, const OperationDef& definition,
const SplitAttributes& attr, const std::vector<int>& channels)
: GPUOperation(definition), attr_(attr) {
work_group_size_ = int3(8, 4, 1);
code_ = attr.axis == Axis::CHANNELS ? GetSplitChannelsCode(gpu_info, channels)
: GetSplitCode();
}
std::string Split::GetSplitCode() {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
AddDstTensor("dst_tensor_" + std::to_string(i), definition_.dst_tensors[i]);
}
const std::string task_width =
attr_.axis == Axis::WIDTH ? "1" : "args.src_tensor.Width()";
const std::string task_height =
attr_.axis == Axis::HEIGHT ? "1" : "args.src_tensor.Height()";
const std::string task_depth =
attr_.axis == Axis::DEPTH ? "1" : "args.src_tensor.Depth()";
const std::string task_batch =
attr_.axis == Axis::BATCH ? "1" : "args.src_tensor.Batch()";
const std::string task_slices =
attr_.axis == Axis::CHANNELS ? "1" : "args.src_tensor.Slices()";
std::map<Axis, std::string> axis_to_selector = {
{Axis::WIDTH, "Width"}, {Axis::HEIGHT, "Height"},
{Axis::DEPTH, "Depth"}, {Axis::CHANNELS, "Slices"},
{Axis::BATCH, "Batch"},
};
std::map<Axis, std::string> axis_to_coord = {
{Axis::WIDTH, "X"}, {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "D"},
{Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
};
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / " + task_batch + ";\n";
c += " int B = linear_id % " + task_batch + ";\n";
c += " if (X >= " + task_width + ") return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= " + task_width + ") return;\n";
}
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % " + task_height + ";\n";
c += " int D = linear_id / " + task_height + ";\n";
c += " if (D >= " + task_depth + ") return;\n";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= " + task_height + ") return;\n";
}
c += " int S = GLOBAL_ID_2;\n";
c += " if (S >= " + task_slices + ") return;\n";
c += " int src_counter = 0;\n";
std::vector<std::string> src_coords;
for (auto axis :
{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS, Axis::BATCH}) {
if (definition_.src_tensors[0].HasAxis(axis)) {
const std::string coord_name =
attr_.axis == axis ? "src_counter" : axis_to_coord[axis];
src_coords.push_back(coord_name);
}
}
std::string src_coords_str = src_coords[0];
for (int i = 1; i < src_coords.size(); ++i) {
src_coords_str += ", " + src_coords[i];
}
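  // Emit one copy loop per destination tensor: iterate its extent along the
  // split axis while src_counter advances along the same axis of the source.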
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
std::vector<std::string> dst_coords;
for (auto axis : {Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS,
Axis::BATCH}) {
if (definition_.dst_tensors[i].HasAxis(axis)) {
const std::string coord_name =
attr_.axis == axis ? "i" : axis_to_coord[axis];
dst_coords.push_back(coord_name);
}
}
std::string dst_coords_str = dst_coords[0];
for (int j = 1; j < dst_coords.size(); ++j) {
dst_coords_str += ", " + dst_coords[j];
}
const std::string dst_name = "args.dst_tensor_" + std::to_string(i);
c += " for (int i = 0; i < " + dst_name + "." +
axis_to_selector[attr_.axis] + "(); ++i, src_counter++) {\n";
c += " args.src_tensor::type result = args.src_tensor.Read(" +
src_coords_str + ");\n";
c += " " + dst_name + ".Write(result, " + dst_coords_str + ");\n";
c += " }\n";
}
c += "}\n";
return c;
}
std::string Split::GetSplitChannelsCode(const GpuInfo& gpu_info,
const std::vector<int>& channels) {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
AddDstTensor("dst_tensor_" + std::to_string(i), definition_.dst_tensors[i]);
}
const std::string batch_coord =
definition_.src_tensors[0].HasAxis(Axis::BATCH) ? ", B" : "";
std::string coords = "X, Y";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.src_tensor.Batch();\n";
c += " int B = linear_id % args.src_tensor.Batch();\n";
c += " if (X >= args.src_tensor.Width()) return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= args.src_tensor.Width()) return;\n";
}
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % args.src_tensor.Height();\n";
c += " int Z = linear_id / args.src_tensor.Height();\n";
c += " if (Z >= args.src_tensor.Depth()) return;\n";
coords += ", Z";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= args.src_tensor.Height()) return;\n";
}
int src_channels = 0;
for (auto dst_ch : channels) {
src_channels += dst_ch;
}
const int src_slices = DivideRoundUp(src_channels, 4);
int dst_ch = 0;
int dst_slice = 0;
int dst_tensor = 0;
const std::string postfix[] = {".x", ".y", ".z", ".w"};
c += " args.src_tensor::type dst_val;\n";
for (int s = 0; s < src_slices; ++s) {
c += " if (" + std::to_string(s) + " < args.src_tensor.Slices()) {\n";
c += " args.src_tensor::type src_val = args.src_tensor.Read(" + coords +
", " + std::to_string(s) + batch_coord + ");\n";
for (int k = 0; k < 4 && s * 4 + k < src_channels; ++k) {
c += " dst_val" + postfix[dst_ch % 4] + " = src_val" + postfix[k] +
";\n";
dst_ch++;
if (dst_ch == channels[dst_tensor]) {
const std::string dst_name =
"args.dst_tensor_" + std::to_string(dst_tensor);
c += " " + dst_name + ".Write(dst_val, " + coords + ", " +
std::to_string(dst_slice) + batch_coord + ");\n";
dst_tensor += 1;
dst_ch = 0;
dst_slice = 0;
}
if (dst_ch != 0 && dst_ch % 4 == 0) {
const std::string dst_name =
"args.dst_tensor_" + std::to_string(dst_tensor);
c += " " + dst_name + ".Write(dst_val, " + coords + ", " +
std::to_string(dst_slice) + batch_coord + ");\n";
dst_slice += 1;
}
}
if (gpu_info.IsMali()) {
c += " } else { return; }\n";
} else {
c += " }\n";
}
}
c += "}\n";
return c;
}
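// The grid collapses the split axis to 1: each work item loops over that
// axis itself in the generated code above.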
int3 Split::GetGridSize() const {
const int width = attr_.axis == Axis::WIDTH ? 1 : src_[0]->Width();
const int height = attr_.axis == Axis::HEIGHT ? 1 : src_[0]->Height();
const int depth = attr_.axis == Axis::DEPTH ? 1 : src_[0]->Depth();
const int batch = attr_.axis == Axis::BATCH ? 1 : src_[0]->Batch();
const int slices = attr_.axis == Axis::CHANNELS ? 1 : src_[0]->Slices();
const int grid_x = width * batch;
const int grid_y = height * depth;
const int grid_z = slices;
return int3(grid_x, grid_y, grid_z);
}
Split CreateSplit(const GpuInfo& gpu_info, const OperationDef& definition,
const SplitAttributes& attr,
const std::vector<int>& channels) {
return Split(gpu_info, definition, attr, channels);
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/split_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
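// Each test sweeps the split dimension (negative and non-negative indexing)
// and sizes that dimension as a multiple of the split count so the split is
// always even.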
TEST(Split, 1D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
const std::vector<int32_t> shape({shape_rng() * 2});
for (int i = -1; i < 1; i++) {
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 2D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -2; i < 2; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 2;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 3D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -3; i < 3; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 2;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 4D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -4; i < 4; i++) {
std::vector<int32_t> shape(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 2;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 1D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
const std::vector<int32_t> shape({shape_rng() * 3});
for (int i = -1; i < 1; i++) {
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 2D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -2; i < 2; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 3;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 3D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -3; i < 3; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 3;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 4D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -4; i < 4; i++) {
std::vector<int32_t> shape(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 3;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 1D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
const std::vector<int32_t> shape({shape_rng() * 4});
for (int i = -1; i < 1; i++) {
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 2D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -2; i < 2; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 4;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 3D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -3; i < 3; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 4;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 4D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -4; i < 4; i++) {
std::vector<int32_t> shape(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 4;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/split.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/split_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93519a1a-1102-481f-8561-353d6512ea43 | cpp | tensorflow/tensorflow | random_standard_normal_custom | tensorflow/lite/kernels/random_standard_normal_custom.cc | tensorflow/lite/kernels/random_standard_normal_custom_test.cc | #include <cmath>
#include <algorithm>
#include <random>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace random_standard_normal {
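// Per-node RNG state. std::default_random_engine is default-seeded, so the
// sample stream is deterministic across runs but advances between Eval calls.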
struct OpData {
std::default_random_engine rng;
};
namespace {
constexpr int kShapeTensor = 0;
constexpr int kOutputTensor = 0;
template <typename T>
TfLiteStatus RandomStandardNormalSample(std::default_random_engine& rng,
T* output, size_t output_size) {
std::normal_distribution<T> dist;
std::generate(output, output + output_size, [&]() { return dist(rng); });
return kTfLiteOk;
}
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData();
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
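  // If the shape tensor's value is unknown at prepare time, defer resizing
  // the output until Eval().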
if (!IsConstantOrPersistentTensor(shape)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
TfLiteIntArray* output_shape;
TF_LITE_ENSURE_OK(context,
GetOutputShapeFromInput(context, shape, &output_shape));
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* params = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, params != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
TfLiteIntArray* output_shape;
TF_LITE_ENSURE_OK(context,
GetOutputShapeFromInput(context, shape, &output_shape));
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
}
const size_t output_size = NumElements(output);
switch (output->type) {
case kTfLiteFloat32:
RandomStandardNormalSample<float>(
params->rng, GetTensorData<float>(output), output_size);
break;
case kTfLiteFloat64:
RandomStandardNormalSample<double>(
params->rng, GetTensorData<double>(output), output_size);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Unsupported output datatype for RandomStandardNormal: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RANDOM_STANDARD_NORMAL() {
static TfLiteRegistration r = {
random_standard_normal::Init, random_standard_normal::Free,
random_standard_normal::Prepare, random_standard_normal::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
TensorType GetTTEnum();
template <>
TensorType GetTTEnum<float>() {
return TensorType_FLOAT32;
}
template <>
TensorType GetTTEnum<double>() {
return TensorType_FLOAT64;
}
class RandomStandardNormalOpModel : public SingleOpModel {
public:
RandomStandardNormalOpModel(const std::initializer_list<int>& input,
TensorData output, bool dynamic_input) {
if (dynamic_input) {
input_ = AddInput({TensorType_INT32, {3}});
} else {
input_ = AddConstInput(TensorType_INT32, input,
{static_cast<int>(input.size())});
}
output_ = AddOutput(output);
SetCustomOp("RandomStandardNormal", {},
ops::custom::Register_RANDOM_STANDARD_NORMAL);
BuildInterpreter({GetShape(input_)});
if (dynamic_input) {
PopulateTensor<int32_t>(input_, std::vector<int32_t>(input));
}
}
int input_;
int output_;
int input() { return input_; }
int output() { return output_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
};
}
}
template <typename InputType>
struct RandomStandardNormalTest : public ::testing::Test {
using Type = InputType;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(RandomStandardNormalTest, TestTypes);
TYPED_TEST(RandomStandardNormalTest, TestOutput) {
using Type = typename TestFixture::Type;
for (const auto dynamic : {false, true}) {
tflite::RandomStandardNormalOpModel m(
{1000, 50, 5}, {tflite::GetTTEnum<Type>(), {}}, dynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Type>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
double sum = 0;
for (auto r : output) {
sum += r;
}
double avg = sum / output.size();
ASSERT_LT(std::abs(avg), 0.05);
double sum_squared = 0;
for (auto r : output) {
sum_squared += std::pow(r - avg, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1 - var), 0.05);
}
}
TYPED_TEST(RandomStandardNormalTest, TestOutputDistributionRange) {
using Type = typename TestFixture::Type;
tflite::RandomStandardNormalOpModel m({1000, 50, 5},
{tflite::GetTTEnum<Type>(), {}}, false);
const std::vector<Type> output_data(1000 * 50 * 5,
std::numeric_limits<Type>::infinity());
m.PopulateTensor(m.output(), output_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Type>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
double sum = 0;
for (auto r : output) {
sum += r;
}
double avg = sum / output.size();
ASSERT_LT(std::abs(avg), 0.05);
double sum_squared = 0;
for (auto r : output) {
sum_squared += std::pow(r - avg, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1 - var), 0.05);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_standard_normal_custom.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_standard_normal_custom_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bbcb9a75-88c7-401a-a921-5db53309d6d1 | cpp | tensorflow/tensorflow | kernel_util | tensorflow/lite/kernels/kernel_util.cc | tensorflow/lite/kernels/kernel_util_test.cc | #include "tensorflow/lite/kernels/kernel_util.h"
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <complex>
#include <limits>
#include <memory>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#include "tensorflow/lite/array.h"
#endif
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif
namespace tflite {
namespace {
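// Resolves a tensor either directly from the flat tensors array or, when the
// context does not expose one (e.g. some delegate contexts), through the
// GetTensor callback.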
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
int tensor_index) {
if (context->tensors != nullptr) {
return &context->tensors[tensor_index];
} else {
return context->GetTensor(context, tensor_index);
}
}
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
int index, int max_size,
const int* tensor_indices,
int* tensor_index) {
if (index < 0 || index >= max_size) {
TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
"Invalid tensor index %d (not in [0, %d))\n", index,
max_size);
return kTfLiteError;
}
if (tensor_indices[index] == kTfLiteOptionalTensor) {
TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
"Tensor at index %d was optional but was expected\n",
index);
return kTfLiteError;
}
*tensor_index = tensor_indices[index];
return kTfLiteOk;
}
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
int max_size, const int* tensor_indices) {
if (index >= 0 && index < max_size) {
const int tensor_index = tensor_indices[index];
if (tensor_index != kTfLiteOptionalTensor) {
return tensor_index;
}
}
return -1;
}
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->inputs->size, node->inputs->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
const TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(
context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
node->inputs->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
}
const TfLiteTensor* GetInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
return GetMutableInput(context, node, index);
}
TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
int index, const TfLiteTensor** tensor) {
return GetMutableInputSafe(context, node, index, tensor);
}
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
int index) {
TfLiteTensor* tensor = GetMutableInput(context, node, index);
if (tensor == nullptr) return nullptr;
return tensor->is_variable ? tensor : nullptr;
}
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->outputs->size, node->outputs->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
int index, TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(
context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
node->outputs->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
const TfLiteNode* node, int index) {
return GetInput(context, node, index);
}
#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->temporaries->size, node->temporaries->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
context, index, node->temporaries->size,
node->temporaries->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
const TfLiteTensor* GetIntermediates(TfLiteContext* context,
const TfLiteNode* node, int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->intermediates->size, node->intermediates->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
context, index, node->intermediates->size,
node->intermediates->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
#endif
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
const auto* affine_quantization =
reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
return PopulateConvolutionQuantizationParams(
context, input, filter, bias, output, activation, multiplier, shift,
output_activation_min, output_activation_max, per_channel_multiplier,
per_channel_shift, affine_quantization->scale->size);
}
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
int32_t* per_channel_multiplier, int32_t* per_channel_shift,
int num_channels) {
TF_LITE_ENSURE_EQ(context, input->quantization.type,
kTfLiteAffineQuantization);
TF_LITE_ENSURE_EQ(context, filter->quantization.type,
kTfLiteAffineQuantization);
const auto* affine_quantization =
reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
TF_LITE_ENSURE(context, affine_quantization);
TF_LITE_ENSURE(context, affine_quantization->scale);
const bool is_per_channel = affine_quantization->scale->size > 1;
if (is_per_channel) {
TF_LITE_ENSURE(context,
input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
TF_LITE_ENSURE(context,
filter->type == kTfLiteInt8 || filter->type == kTfLiteInt4);
TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
TF_LITE_ENSURE_EQ(
context, num_channels,
filter->dims->data[affine_quantization->quantized_dimension]);
}
const float input_scale = input->params.scale;
const float output_scale = output->params.scale;
const float* filter_scales = affine_quantization->scale->data;
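  // Quantize the effective scale (input_scale * filter_scale / output_scale)
  // into a fixed-point multiplier and shift per channel; per-tensor
  // quantization reuses filter_scales[0] for every channel.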
for (int i = 0; i < num_channels; ++i) {
const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
const double filter_scale = static_cast<double>(scale);
const double effective_output_scale = static_cast<double>(input_scale) *
filter_scale /
static_cast<double>(output_scale);
int32_t significand;
int channel_shift;
QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
per_channel_multiplier[i] = significand;
per_channel_shift[i] = channel_shift;
}
if (input->type == kTfLiteUInt8) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, input, filter, bias, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, multiplier, &exponent);
*shift = -exponent;
}
if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
input->type == kTfLiteInt16) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, activation, output, output_activation_min,
output_activation_max));
}
return kTfLiteOk;
}
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias,
TfLiteTensor* output,
double* multiplier) {
const double input_product_scale = static_cast<double>(input->params.scale) *
static_cast<double>(filter->params.scale);
if (bias) {
const double bias_scale = static_cast<double>(bias->params.scale);
const double scale_diff = std::abs(input_product_scale - bias_scale);
const double output_scale = static_cast<double>(output->params.scale);
TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
}
return GetQuantizedConvolutionMultipler(context, input, filter, output,
multiplier);
}
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
TfLiteTensor* output,
double* multiplier) {
const double input_product_scale =
static_cast<double>(input->params.scale * filter->params.scale);
TF_LITE_ENSURE(context, input_product_scale >= 0);
*multiplier = input_product_scale / static_cast<double>(output->params.scale);
return kTfLiteOk;
}
namespace {
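// Round-to-nearest quantization of f at the given scale, guarding that the
// rounded value fits in int32 before the zero point is applied.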
inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
int32_t zero_point, float f, int32_t& q) {
const float tmp = TfLiteRound(f / scale);
const bool no_integer_overflow_from_quantization =
(tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
q = zero_point + static_cast<int32_t>(tmp);
return kTfLiteOk;
}
TfLiteStatus CalculateActivationRangeQuantizedImpl(
TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
const auto scale = output->params.scale;
const auto zero_point = output->params.zero_point;
int32_t tmp_q;
if (activation == kTfLiteActRelu) {
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 0.0, tmp_q));
*act_min = std::max(qmin, tmp_q);
*act_max = qmax;
} else if (activation == kTfLiteActRelu6) {
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 0.0, tmp_q));
*act_min = std::max(qmin, tmp_q);
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 6.0, tmp_q));
*act_max = std::min(qmax, tmp_q);
} else if (activation == kTfLiteActReluN1To1) {
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, -1.0, tmp_q));
*act_min = std::max(qmin, tmp_q);
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 1.0, tmp_q));
*act_max = std::min(qmax, tmp_q);
} else {
*act_min = qmin;
*act_max = qmax;
}
return kTfLiteOk;
}
}
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
TfLiteFusedActivation activation,
TfLiteTensor* output,
int32_t* act_min,
int32_t* act_max) {
int32_t qmin = 0;
int32_t qmax = 0;
if (output->type == kTfLiteUInt8) {
qmin = std::numeric_limits<uint8_t>::min();
qmax = std::numeric_limits<uint8_t>::max();
} else if (output->type == kTfLiteInt8) {
qmin = std::numeric_limits<int8_t>::min();
qmax = std::numeric_limits<int8_t>::max();
} else if (output->type == kTfLiteInt16) {
qmin = std::numeric_limits<int16_t>::min();
qmax = std::numeric_limits<int16_t>::max();
} else {
TF_LITE_ENSURE(context, false);
}
return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
output, act_min, act_max);
}
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
return TfLiteIntArrayEqual(input1->dims, input2->dims);
}
#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteIntArray** output_shape) {
if (NumDimensions(input) != 1) {
TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
"Invalid %dD input tensor (must be a 1D tensor).",
NumDimensions(input));
return kTfLiteError;
}
const int output_dims = SizeOfDimension(input, 0);
IntArrayUniquePtr shape(TfLiteIntArrayCreate(output_dims));
for (int i = 0; i < output_dims; i++) {
shape->data[i] = input->data.i32[i];
}
*output_shape = shape.release();
return kTfLiteOk;
}
std::string GetShapeDebugString(const TfLiteIntArray* shape) {
std::string str;
for (int d = 0; d < shape->size; ++d) {
if (str.empty())
str = "[" + std::to_string(shape->data[d]);
else
str += "," + std::to_string(shape->data[d]);
}
if (str.empty()) {
str = "[]";
} else {
str += "]";
}
return str;
}
std::string GetTensorDebugString(const TfLiteTensor* tensor) {
return std::string("{\n type: ") + TfLiteTypeGetName(tensor->type) +
"\n data: {...}\n dims: " + GetShapeDebugString(tensor->dims) +
"\n}";
}
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
TfLiteIntArray** output_shape) {
const int dims1 = NumDimensions(input1);
const int dims2 = NumDimensions(input2);
const int out_dims = std::max(dims1, dims2);
IntArrayUniquePtr shape(TfLiteIntArrayCreate(out_dims));
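  // NumPy-style broadcasting: align shapes from the trailing axis, treat
  // missing leading dims as 1, and let a zero-sized dim yield a zero-sized
  // output dim.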
for (int i = 0; i < out_dims; ++i) {
const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
if (!(d1 == d2 || d1 == 1 || d2 == 1)) {
TF_LITE_KERNEL_LOG(context,
"Given shapes, %s and %s, are not broadcastable.",
GetShapeDebugString(input1->dims).c_str(),
GetShapeDebugString(input2->dims).c_str());
return kTfLiteError;
}
if (d1 == 0 || d2 == 0) {
shape->data[out_dims - i - 1] = 0;
} else {
shape->data[out_dims - i - 1] = std::max(d1, d2);
}
}
*output_shape = shape.release();
return kTfLiteOk;
}
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
const TfLiteTensor* input3,
TfLiteIntArray** output_shape) {
const int dims1 = NumDimensions(input1);
const int dims2 = NumDimensions(input2);
const int dims3 = NumDimensions(input3);
const int out_dims = std::max(std::max(dims1, dims2), dims3);
IntArrayUniquePtr shape(TfLiteIntArrayCreate(out_dims));
for (int i = 0; i < out_dims; ++i) {
const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
const int min_value = std::min(std::min(d1, d2), d3);
int max_value = std::max(std::max(d1, d2), d3);
if (min_value == 0) max_value = 0;
if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) ||
!(d3 == 1 || d3 == max_value)) {
TF_LITE_KERNEL_LOG(context,
"Given shapes, %s, %s and %s, are not broadcastable.",
GetShapeDebugString(input1->dims).c_str(),
GetShapeDebugString(input2->dims).c_str(),
GetShapeDebugString(input3->dims).c_str());
return kTfLiteError;
}
shape->data[out_dims - i - 1] = max_value;
}
*output_shape = shape.release();
return kTfLiteOk;
}
#endif
int TfLiteTypeGetSize(TfLiteType type) {
switch (type) {
case kTfLiteUInt8:
static_assert(sizeof(uint8_t) == 1, "");
return 1;
case kTfLiteInt8:
static_assert(sizeof(int8_t) == 1, "");
return 1;
case kTfLiteBool:
return sizeof(bool);
case kTfLiteUInt16:
static_assert(sizeof(uint16_t) == 2, "");
return 2;
case kTfLiteInt16:
static_assert(sizeof(int16_t) == 2, "");
return 2;
case kTfLiteFloat16:
static_assert(sizeof(int16_t) == 2, "");
return 2;
case kTfLiteFloat32:
static_assert(sizeof(float) == 4, "");
return 4;
case kTfLiteInt32:
static_assert(sizeof(int32_t) == 4, "");
return 4;
case kTfLiteUInt32:
static_assert(sizeof(uint32_t) == 4, "");
return 4;
case kTfLiteInt64:
static_assert(sizeof(int64_t) == 8, "");
return 8;
case kTfLiteUInt64:
static_assert(sizeof(uint64_t) == 8, "");
return 8;
case kTfLiteFloat64:
static_assert(sizeof(double) == 8, "");
return 8;
case kTfLiteComplex64:
static_assert(sizeof(std::complex<float>) == 8, "");
return 8;
case kTfLiteComplex128:
static_assert(sizeof(std::complex<double>) == 16, "");
return 16;
default:
return 0;
}
}
bool IsMobilePlatform() {
#if defined(ANDROID) || defined(__ANDROID__)
return true;
#elif defined(__APPLE__) && (TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE)
return true;
#else
return false;
#endif
}
bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
#ifndef TF_LITE_STATIC_MEMORY
if (tensor->dims_signature) {
for (int i : TfLiteIntArrayView(tensor->dims_signature)) {
if (i == -1) return true;
}
}
#endif
return false;
}
} | #include "tensorflow/lite/kernels/kernel_util.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
struct TestContext : public TfLiteContext {
string error;
};
void ReportError(TfLiteContext* context, const char* format, ...) {
TestContext* c = static_cast<TestContext*>(context);
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
c->error = temp_buffer;
}
class TestWithTfLiteContext : public ::testing::Test {
public:
TestWithTfLiteContext() { context_.ReportError = ReportError; }
TensorUniquePtr BuildTfLiteTensorForTest(std::initializer_list<int> dims) {
return BuildTfLiteTensor(kTfLiteInt32, dims, kTfLiteDynamic);
}
protected:
TestContext context_;
};
class HaveSameShapeTest : public TestWithTfLiteContext {};
TEST_F(HaveSameShapeTest, NullPointerIsSameShape) {
TensorUniquePtr t1 = BuildTfLiteTensor();
t1->dims = nullptr;
TensorUniquePtr t2 = BuildTfLiteTensor();
t2->dims = nullptr;
EXPECT_TRUE(HaveSameShapes(t1.get(), t2.get()));
}
TEST_F(HaveSameShapeTest, NotSameShapeFalse) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({2, 3});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3});
EXPECT_FALSE(HaveSameShapes(t1.get(), t2.get()));
}
TEST_F(HaveSameShapeTest, EmptyShapeEqualTrue) {
TensorUniquePtr t1 = BuildTfLiteTensor();
TensorUniquePtr t2 = BuildTfLiteTensor();
EXPECT_TRUE(HaveSameShapes(t1.get(), t2.get()));
}
class BroadcastShapeTest : public TestWithTfLiteContext {};
TEST_F(BroadcastShapeTest, IncompatibleDimNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* output = nullptr;
EXPECT_NE(kTfLiteOk,
CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &output));
EXPECT_EQ(output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,2] and [1,3], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, IncompatibleDimWithZeroNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 0});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* output = nullptr;
EXPECT_NE(kTfLiteOk,
CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &output));
EXPECT_EQ(output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,0] and [1,3], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, BroadCastSecondDimension) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* raw_output;
auto status =
CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &raw_output);
ASSERT_EQ(kTfLiteOk, status);
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 3}));
}
TEST_F(BroadcastShapeTest, ScalarAnd2dBroadcastsTo2d) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 2}));
}
TEST_F(BroadcastShapeTest, DifferentRankBroadcastsToHigherRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 2});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 1, 2}));
}
TEST_F(BroadcastShapeTest, ZeroDimDifferentRankBroadcastsToHigherRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 0, 2});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
TEST_F(BroadcastShapeTest, ZeroDimSameRankBroadcastsToHigherRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 0, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
TEST_F(BroadcastShapeTest, IncompatibleDimOnThreeTensorsNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 4});
TfLiteIntArray* raw_output = nullptr;
EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
EXPECT_EQ(raw_output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,2], [1,3] and [1,4], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, IncompatibleDimWithZeroOnThreeTensorsNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 0});
TfLiteIntArray* raw_output = nullptr;
EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
EXPECT_EQ(raw_output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,1], [1,3] and [1,0], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, ThreeTensorsBroadcastToLarger2ndDim) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 3}));
}
TEST_F(BroadcastShapeTest, TwoScalarsBroadcastTo2d) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 2}));
}
TEST_F(BroadcastShapeTest, DifferentSizesOnThreeTensorsBroadcastToLargerRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({3, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 3, 2}));
}
TEST_F(BroadcastShapeTest,
DifferentSizesOnThreeTensors4dBroadcastToLargerRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({3, 4});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 2, 1, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 2, 3, 4}));
}
TEST_F(BroadcastShapeTest, ZeroOnThreeTensorsBroadcastToLargerRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({0, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
TEST(GetShapeDebugStringTest, GetShapeDebugString) {
IntArrayUniquePtr dims0 = BuildTfLiteArray({});
EXPECT_EQ("[]", GetShapeDebugString(dims0.get()));
IntArrayUniquePtr dims1 = BuildTfLiteArray({1});
dims1->data[0] = 1;
EXPECT_EQ("[1]", GetShapeDebugString(dims1.get()));
IntArrayUniquePtr dims2 = BuildTfLiteArray({2, 3});
dims2->data[0] = 2;
dims2->data[1] = 3;
EXPECT_EQ("[2,3]", GetShapeDebugString(dims2.get()));
IntArrayUniquePtr dims3 = BuildTfLiteArray({4, 5, 6});
dims3->data[0] = 4;
dims3->data[1] = 5;
dims3->data[2] = 6;
EXPECT_EQ("[4,5,6]", GetShapeDebugString(dims3.get()));
}
class QuantizationParamsTest : public TestWithTfLiteContext {};
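// Builds per-channel-quantized conv tensors by hand and checks the derived
// per-channel fixed-point multipliers and shifts.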
TEST_F(QuantizationParamsTest, PerChannelConvolution) {
TensorUniquePtr input = BuildTfLiteTensor();
input->type = kTfLiteInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {0.5, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 0.5;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
TensorUniquePtr filter = BuildTfLiteTensor();
filter->type = kTfLiteInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {0.25, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(3);
filter_params->scale->data[0] = 0.25;
filter_params->scale->data[1] = 0.125;
filter_params->scale->data[2] = 0.25;
filter_params->zero_point = TfLiteIntArrayCreate(3);
filter_params->zero_point->data[0] = 0;
filter_params->zero_point->data[1] = 0;
filter_params->zero_point->data[2] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
TensorUniquePtr bias = BuildTfLiteTensor();
bias->type = kTfLiteInt32;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {0.125, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(3);
bias_params->scale->data[0] = 0.125;
bias_params->scale->data[1] = 0.0625;
bias_params->scale->data[2] = 0.125;
bias_params->zero_point = TfLiteIntArrayCreate(3);
bias_params->zero_point->data[0] = 11;
bias_params->zero_point->data[1] = 12;
bias_params->zero_point->data[2] = 15;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
TensorUniquePtr output = BuildTfLiteTensor();
output->type = kTfLiteInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {0.5, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 0.5;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int32_t> per_channel_shift(3);
auto status = PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data());
EXPECT_EQ(kTfLiteOk, status);
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-1, -2, -1));
}
TEST_F(QuantizationParamsTest, CheckAndPopulateShift) {
TensorUniquePtr input = BuildTfLiteTensor();
input->type = kTfLiteUInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {0.5, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 0.5;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
TensorUniquePtr filter = BuildTfLiteTensor();
filter->type = kTfLiteUInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {0.25, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(1);
filter_params->scale->data[0] = 0.25;
filter_params->zero_point = TfLiteIntArrayCreate(1);
filter_params->zero_point->data[0] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
TensorUniquePtr bias = BuildTfLiteTensor();
bias->type = kTfLiteUInt8;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {0.125, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(3);
bias_params->scale->data[0] = 0.125;
bias_params->scale->data[1] = 0.0625;
bias_params->scale->data[2] = 0.125;
bias_params->zero_point = TfLiteIntArrayCreate(3);
bias_params->zero_point->data[0] = 11;
bias_params->zero_point->data[1] = 12;
bias_params->zero_point->data[2] = 15;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
TensorUniquePtr output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {0.5, 128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 0.5;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = 128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-1, -1, -1));
EXPECT_EQ(shift, 1);
EXPECT_EQ(multiplier, 1073741824);
}
#ifndef __APPLE__
TEST_F(QuantizationParamsTest, CheckAndPopulateZeroValue) {
auto input = BuildTfLiteTensor();
input->type = kTfLiteInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {1, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 1;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
auto filter = BuildTfLiteTensor();
filter->type = kTfLiteInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(3);
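// Scales of 2^-31, 2^-32 and 2^-33: the last needs a shift below -31, which the
// Q31 representation cannot hold, so that channel flushes to multiplier 0, shift 0.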
filter_params->scale->data[0] = std::ldexp(1.0f, -31);
filter_params->scale->data[1] = std::ldexp(1.0f, -32);
filter_params->scale->data[2] = std::ldexp(1.0f, -33);
filter_params->zero_point = TfLiteIntArrayCreate(3);
filter_params->zero_point->data[0] = 0;
filter_params->zero_point->data[1] = 0;
filter_params->zero_point->data[2] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
auto bias = BuildTfLiteTensor();
bias->type = kTfLiteInt32;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(3);
bias_params->scale->data[0] = std::ldexp(1.0f, -31);
bias_params->scale->data[1] = std::ldexp(1.0f, -32);
bias_params->scale->data[2] = std::ldexp(1.0f, -33);
bias_params->zero_point = TfLiteIntArrayCreate(3);
bias_params->zero_point->data[0] = 11;
bias_params->zero_point->data[1] = 12;
bias_params->zero_point->data[2] = 15;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
auto output = BuildTfLiteTensor();
output->type = kTfLiteInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier, ElementsAre(1073741824, 1073741824, 0));
EXPECT_THAT(per_channel_shift, ElementsAre(-30, -31, 0));
}
#endif
TEST_F(QuantizationParamsTest, CheckAndPopulateUint8) {
auto input = BuildTfLiteTensor();
input->type = kTfLiteUInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {1, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 1;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
auto filter = BuildTfLiteTensor();
filter->type = kTfLiteUInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(1);
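// 0x30000000 is the IEEE-754 bit pattern of 2^-31f; the bits are reinterpreted
// as a float scale below.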
int32_t two_pow_neg_31 = 0x30000000;
filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
filter_params->zero_point = TfLiteIntArrayCreate(1);
filter_params->zero_point->data[0] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
auto bias = BuildTfLiteTensor();
bias->type = kTfLiteInt32;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(1);
bias_params->scale->data[0] = 4.6566129e-10;
bias_params->zero_point = TfLiteIntArrayCreate(1);
bias_params->zero_point->data[0] = 11;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
auto output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));
}
TEST_F(QuantizationParamsTest, CheckAndPopulateWithoutBias) {
auto input = BuildTfLiteTensor();
input->type = kTfLiteUInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {1, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 1;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
auto filter = BuildTfLiteTensor();
filter->type = kTfLiteUInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(1);
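// Same trick as above: 0x30000000 is the bit pattern of 2^-31f.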
int32_t two_pow_neg_31 = 0x30000000;
filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
filter_params->zero_point = TfLiteIntArrayCreate(1);
filter_params->zero_point->data[0] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
auto output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), nullptr, output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));
}
TEST_F(QuantizationParamsTest, ActivationRangeQuantizedOverflow) {
auto output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1e-10, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t act_min, act_max;
ASSERT_EQ(kTfLiteOk,
CalculateActivationRangeQuantized(
&context_, kTfLiteActRelu, output.get(), &act_min, &act_max));
ASSERT_NE(kTfLiteOk,
CalculateActivationRangeQuantized(
&context_, kTfLiteActRelu6, output.get(), &act_min, &act_max));
EXPECT_TRUE(absl::StrContains(
context_.error, "no_integer_overflow_from_quantization was not true"));
ASSERT_NE(kTfLiteOk, CalculateActivationRangeQuantized(
&context_, kTfLiteActReluN1To1, output.get(),
&act_min, &act_max));
EXPECT_TRUE(absl::StrContains(
context_.error, "no_integer_overflow_from_quantization was not true"));
}
TEST_F(QuantizationParamsTest, IsMobilePlatform) {
#if defined(__ANDROID__)
EXPECT_TRUE(IsMobilePlatform());
#elif defined(__linux__)
EXPECT_FALSE(IsMobilePlatform());
#elif defined(_WIN32)
EXPECT_FALSE(IsMobilePlatform());
#endif
}
TEST(HasUnspecifiedDimensions, ReturnsTrueIfADimIsMinusOne) {
auto tensor = BuildTfLiteTensor(kTfLiteInt32, {1, 1, 3}, kTfLiteDynamic);
tensor->dims_signature = ConvertVectorToTfLiteIntArray({1, -1, 3});
EXPECT_TRUE(HasUnspecifiedDimension(tensor.get()));
}
TEST(HasUnspecifiedDimensions, ReturnsFalseIfAllPositiveDims) {
auto tensor = BuildTfLiteTensor(kTfLiteInt32, {1, 1, 3}, kTfLiteDynamic);
tensor->dims_signature = ConvertVectorToTfLiteIntArray({1, 1, 3});
EXPECT_FALSE(HasUnspecifiedDimension(tensor.get()));
}
class SetTensorAllocationTypeTest : public testing::Test {
public:
SetTensorAllocationTypeTest() {
tensor_->type = kTfLiteInt32;
tensor_->allocation_type = kTfLiteDynamic;
}
protected:
Interpreter interpreter_;
TfLiteContext& context_ = *interpreter_.primary_subgraph().context();
IntArrayUniquePtr dims_ = BuildTfLiteArray({2, 3, 4});
TensorUniquePtr tensor_ = BuildTfLiteTensor();
};
TEST_F(SetTensorAllocationTypeTest,
SetUnallocatedDynamicTensorToDynamicIsANoop) {
tensor_->allocation_type = kTfLiteDynamic;
SetTensorToDynamic(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLiteDynamic);
}
TEST_F(SetTensorAllocationTypeTest, SetAllocatedDynamicTensorToDynamicIsANoop) {
tensor_->allocation_type = kTfLiteDynamic;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
const void* const original_data = tensor_->data.data;
SetTensorToDynamic(tensor_.get());
EXPECT_EQ(tensor_->data.data, original_data);
EXPECT_EQ(tensor_->allocation_type, kTfLiteDynamic);
}
TEST_F(SetTensorAllocationTypeTest,
SetAllocatedPersistentRoTensorToDynamicFreesExistingTensorData) {
tensor_->allocation_type = kTfLitePersistentRo;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
SetTensorToDynamic(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLiteDynamic);
}
TEST_F(SetTensorAllocationTypeTest,
SetUnallocatedPersistentRoTensorToPersistentRoIsANoop) {
tensor_->allocation_type = kTfLitePersistentRo;
SetTensorToPersistentRo(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLitePersistentRo);
}
TEST_F(SetTensorAllocationTypeTest,
SetAllocatedPersistentRoTensorToPersistentRoIsANoop) {
tensor_->allocation_type = kTfLitePersistentRo;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
const void* const original_data = tensor_->data.data;
SetTensorToPersistentRo(tensor_.get());
EXPECT_EQ(tensor_->data.data, original_data);
EXPECT_EQ(tensor_->allocation_type, kTfLitePersistentRo);
}
TEST_F(SetTensorAllocationTypeTest,
SetAllocatedDynamicTensorToPersistentRoFreesExistingTensorData) {
tensor_->allocation_type = kTfLiteDynamic;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
SetTensorToPersistentRo(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLitePersistentRo);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/kernel_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/kernel_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
598ed75f-f71c-4968-831b-ad85395fbcd0 | cpp | tensorflow/tensorflow | expand_dims | tensorflow/lite/kernels/expand_dims.cc | tensorflow/lite/kernels/expand_dims_test.cc | #include <stdint.h>
#include <string.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace expand_dims {
enum { kInput = 0, kAxis };
namespace {
TfLiteStatus ExpandTensorDim(TfLiteContext* context, const TfLiteTensor& input,
int axis, TfLiteTensor* output) {
const TfLiteIntArray& input_dims = *input.dims;
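// Negative axes index from the back of the *expanded* shape: for a rank-R
// input, axis == -1 maps to R, appending the new unit dimension.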
if (axis < 0) {
axis = input_dims.size + 1 + axis;
}
TF_LITE_ENSURE(context, axis <= input_dims.size);
TF_LITE_ENSURE(context, axis >= 0);
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(input_dims.size + 1);
for (int i = 0; i < output_dims->size; ++i) {
if (i < axis) {
output_dims->data[i] = input_dims.data[i];
} else if (i == axis) {
output_dims->data[i] = 1;
} else {
output_dims->data[i] = input_dims.data[i - 1];
}
}
return context->ResizeTensor(context, output, output_dims);
}
TfLiteStatus GetAxisValueFromTensor(TfLiteContext* context,
const TfLiteTensor& axis, int* axis_value) {
TF_LITE_ENSURE_EQ(context, NumElements(&axis), 1);
switch (axis.type) {
case kTfLiteInt32:
*axis_value = *GetTensorData<int32_t>(&axis);
return kTfLiteOk;
case kTfLiteInt64:
*axis_value = *GetTensorData<int64_t>(&axis);
return kTfLiteOk;
default:
// Report the unsupported dtype instead of failing silently.
TF_LITE_KERNEL_LOG(context, "Axis type '%s' is not supported by ExpandDims.",
                   TfLiteTypeGetName(axis.type));
return kTfLiteError;
}
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInput, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
output->type = input->type;
TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale);
TF_LITE_ENSURE_EQ(context, input->params.zero_point,
output->params.zero_point);
if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
}
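// With a constant axis the output shape is computable now; otherwise mark the
// output dynamic and defer the resize to Eval.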
if (IsConstantOrPersistentTensor(axis)) {
int axis_value;
TF_LITE_ENSURE_OK(context,
GetAxisValueFromTensor(context, *axis, &axis_value));
return ExpandTensorDim(context, *input, axis_value, output);
}
SetTensorToDynamic(output);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInput, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
if (IsDynamicTensor(output)) {
int axis_value;
TF_LITE_ENSURE_OK(context,
GetAxisValueFromTensor(context, *axis, &axis_value));
TF_LITE_ENSURE_OK(context,
ExpandTensorDim(context, *input, axis_value, output));
}
if (output->type == kTfLiteString) {
// Surface allocation failures rather than copying into an unresized buffer.
TF_LITE_ENSURE_OK(context, TfLiteTensorRealloc(input->bytes, output));
}
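// Skip the copy when the runtime already shares the input buffer with the
// output (see the kTfLiteInplaceOp* flags in the registration below).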
if (output->data.data != input->data.data) {
memcpy(output->data.data, input->data.data, input->bytes);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXPAND_DIMS() {
static TfLiteRegistration r = {
nullptr,
nullptr,
expand_dims::Prepare,
expand_dims::Eval,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
template <typename InputType>
class ExpandDimsOpModel : public SingleOpModel {
public:
ExpandDimsOpModel(int axis, std::initializer_list<int> input_shape,
std::initializer_list<InputType> input_data,
TestType input_tensor_types) {
if (input_tensor_types == TestType::kDynamic) {
input_ = AddInput(GetTensorType<InputType>());
axis_ = AddInput(TensorType_INT32);
} else {
input_ =
AddConstInput(GetTensorType<InputType>(), input_data, input_shape);
axis_ = AddConstInput(TensorType_INT32, {axis}, {1});
}
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_EXPAND_DIMS, BuiltinOptions_ExpandDimsOptions,
0);
BuildInterpreter({input_shape, {1}});
if (input_tensor_types == TestType::kDynamic) {
PopulateTensor<InputType>(input_, input_data);
PopulateTensor<int32_t>(axis_, {axis});
}
}
std::vector<InputType> GetValues() {
return ExtractVector<InputType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int axis_;
int output_;
};
template <typename T>
class ExpandDimsOpTest : public ::testing::Test {
public:
static std::vector<TestType> range_;
};
template <>
std::vector<TestType> ExpandDimsOpTest<TestType>::range_{TestType::kConst,
TestType::kDynamic};
using DataTypes = ::testing::Types<float, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(ExpandDimsOpTest, DataTypes);
TYPED_TEST(ExpandDimsOpTest, PositiveAxisInplace) {
std::initializer_list<TypeParam> values = {-1, 1, -2, 2};
ExpandDimsOpModel<TypeParam> axis_0(0, {2, 2}, values, TestType::kConst);
const int kInplaceInputTensorIdx = 0;
const int kInplaceOutputTensorIdx = 0;
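// Alias the output buffer to the input buffer to emulate the runtime sharing
// tensors for an in-place op.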
const TfLiteTensor* input_tensor =
axis_0.GetInputTensor(kInplaceInputTensorIdx);
TfLiteTensor* output_tensor = axis_0.GetOutputTensor(kInplaceOutputTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(axis_0.Invoke(), kTfLiteOk);
EXPECT_THAT(axis_0.GetValues(), ElementsAreArray(values));
EXPECT_THAT(axis_0.GetOutputShape(), ElementsAreArray({1, 2, 2}));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TYPED_TEST(ExpandDimsOpTest, PositiveAxis) {
for (TestType test_type : ExpandDimsOpTest<TestType>::range_) {
std::initializer_list<TypeParam> values = {-1, 1, -2, 2};
ExpandDimsOpModel<TypeParam> axis_0(0, {2, 2}, values, test_type);
ASSERT_EQ(axis_0.Invoke(), kTfLiteOk);
EXPECT_THAT(axis_0.GetValues(), ElementsAreArray(values));
EXPECT_THAT(axis_0.GetOutputShape(), ElementsAreArray({1, 2, 2}));
ExpandDimsOpModel<TypeParam> axis_1(1, {2, 2}, values, test_type);
ASSERT_EQ(axis_1.Invoke(), kTfLiteOk);
EXPECT_THAT(axis_1.GetValues(), ElementsAreArray(values));
EXPECT_THAT(axis_1.GetOutputShape(), ElementsAreArray({2, 1, 2}));
ExpandDimsOpModel<TypeParam> axis_2(2, {2, 2}, values, test_type);
ASSERT_EQ(axis_2.Invoke(), kTfLiteOk);
EXPECT_THAT(axis_2.GetValues(), ElementsAreArray(values));
EXPECT_THAT(axis_2.GetOutputShape(), ElementsAreArray({2, 2, 1}));
}
}
TYPED_TEST(ExpandDimsOpTest, NegativeAxis) {
for (TestType test_type : ExpandDimsOpTest<TestType>::range_) {
std::initializer_list<TypeParam> values = {-1, 1, -2, 2};
ExpandDimsOpModel<TypeParam> m(-1, {2, 2}, values, test_type);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetValues(), ElementsAreArray(values));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1}));
}
}
TEST(ExpandDimsOpTest, StrTensor) {
std::initializer_list<std::string> values = {"abc", "de", "fghi"};
ExpandDimsOpModel<std::string> m(0, {3}, values, TestType::kDynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetValues(), ElementsAreArray(values));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/expand_dims.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/expand_dims_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84344003-afcb-4e8e-aaae-11c12bb58ce0 | cpp | tensorflow/tensorflow | unidirectional_sequence_gru | tensorflow/lite/kernels/unidirectional_sequence_gru.cc | tensorflow/lite/kernels/unidirectional_sequence_gru_test.cc | #include <limits>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/gru_cell.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace unidirectional_sequence_gru {
namespace {
void GruImpl(const TfLiteTensor* input, const TfLiteTensor* input_state,
const TfLiteTensor* gate_weight, const TfLiteTensor* gate_bias,
const TfLiteTensor* candidate_weight,
const TfLiteTensor* candidate_bias, TfLiteTensor* output,
TfLiteTensor* output_state, TfLiteTensor* activation,
TfLiteTensor* concat,
tflite::CpuBackendContext* cpu_backend_context) {
const int n_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
const int n_output = output->dims->data[2];
const int n_batch_input = n_batch * n_input;
const int n_batch_output = n_batch * n_output;
const RuntimeShape input_shape({n_batch, n_input});
const float* input_data = GetTensorData<float>(input);
const RuntimeShape state_shape = GetTensorShape(input_state);
const float* input_state_data = GetTensorData<float>(input_state);
const RuntimeShape gate_weight_shape = GetTensorShape(gate_weight);
const float* gate_weight_data = GetTensorData<float>(gate_weight);
const RuntimeShape gate_bias_shape = GetTensorShape(gate_bias);
const float* gate_bias_data = GetTensorData<float>(gate_bias);
const RuntimeShape candidate_weight_shape = GetTensorShape(candidate_weight);
const float* candidate_weight_data = GetTensorData<float>(candidate_weight);
const RuntimeShape candidate_bias_shape = GetTensorShape(candidate_bias);
const float* candidate_bias_data = GetTensorData<float>(candidate_bias);
const RuntimeShape activation_shape = GetTensorShape(activation);
const RuntimeShape output_shape = RuntimeShape({n_batch, n_output});
float* output_data = GetTensorData<float>(output);
float* output_state_data = GetTensorData<float>(output_state);
float* activation_data = GetTensorData<float>(activation);
const RuntimeShape concat_shape = GetTensorShape(concat);
float* concat_data = GetTensorData<float>(concat);
tflite::FullyConnectedParams fc_params;
fc_params.float_activation_min = std::numeric_limits<float>::lowest();
fc_params.float_activation_max = std::numeric_limits<float>::max();
fc_params.lhs_cacheable =
IsConstantTensor(gate_weight) && IsConstantTensor(candidate_weight);
fc_params.rhs_cacheable = false;
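// Unrolled over time: each step consumes one [batch, input] slice, and the
// state written at step t becomes the input state for step t + 1.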
for (int i = 0; i < n_time; ++i) {
gru_cell::GruCell(
input_shape, input_data, state_shape, input_state_data,
gate_weight_shape, gate_weight_data, gate_bias_shape, gate_bias_data,
candidate_weight_shape, candidate_weight_data, candidate_bias_shape,
candidate_bias_data, output_shape, output_data, output_state_data,
activation_shape, activation_data, concat_shape, concat_data, fc_params,
cpu_backend_context);
input_data += n_batch_input;
output_data += n_batch_output;
input_state_data = output_state_data;
}
}
}
enum InputTensor {
kInput = 0,
kInputState = 1,
kGateWeight = 2,
kGateBias = 3,
kCandidateWeight = 4,
kCandidateBias = 5,
kInputNum = 6
};
enum OutputTensor {
kOutput = 0,
kOutputState = 1,
kOutputNum = 2
};
enum TemporaryTensor {
kActivation = 0,
kConcat = 1,
kTemporaryNum = 2
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* scratch_tensor_index = new int;
context->AddTensors(context, kTemporaryNum, scratch_tensor_index);
return scratch_tensor_index;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int* scratch_tensor_index = reinterpret_cast<int*>(node->user_data);
TF_LITE_ENSURE_EQ(context, node->inputs->size, kInputNum);
TF_LITE_ENSURE_EQ(context, node->outputs->size, kOutputNum);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInput, &input));
TF_LITE_ENSURE_EQ(context, input->dims->size, 3);
const int n_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
const TfLiteTensor* input_state;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputState, &input_state));
TF_LITE_ENSURE_EQ(context, input_state->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_state->dims->data[0], n_batch);
const int n_output = input_state->dims->data[1];
const TfLiteTensor* gate_weight;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kGateWeight, &gate_weight));
TF_LITE_ENSURE_EQ(context, gate_weight->dims->size, 2);
TF_LITE_ENSURE_EQ(context, gate_weight->dims->data[0], 2 * n_output);
TF_LITE_ENSURE_EQ(context, gate_weight->dims->data[1], n_input + n_output);
const TfLiteTensor* gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kGateBias, &gate_bias));
TF_LITE_ENSURE_EQ(context, gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, gate_bias->dims->data[0], 2 * n_output);
const TfLiteTensor* candidate_weight;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kCandidateWeight,
&candidate_weight));
TF_LITE_ENSURE_EQ(context, candidate_weight->dims->size, 2);
TF_LITE_ENSURE_EQ(context, candidate_weight->dims->data[0], n_output);
TF_LITE_ENSURE_EQ(context, candidate_weight->dims->data[1],
n_input + n_output);
const TfLiteTensor* candidate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kCandidateBias, &candidate_bias));
TF_LITE_ENSURE_EQ(context, candidate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, candidate_bias->dims->data[0], n_output);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutput, &output));
TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
output_size->data[0] = n_time;
output_size->data[1] = n_batch;
output_size->data[2] = n_output;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
TfLiteTensor* output_state;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputState, &output_state));
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, output_state,
TfLiteIntArrayCopy(input_state->dims)));
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(kTemporaryNum);
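// Two arena scratch buffers: 'activation' holds the [batch, 2 * output] gate
// activations and 'concat' holds the [batch, input + output] concatenation of
// input and state.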
node->temporaries->data[kActivation] = *scratch_tensor_index;
TfLiteTensor* activation;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kActivation, &activation));
activation->type = input->type;
activation->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* activation_size = TfLiteIntArrayCreate(2);
activation_size->data[0] = n_batch;
activation_size->data[1] = 2 * n_output;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, activation, activation_size));
node->temporaries->data[kConcat] = (*scratch_tensor_index) + kConcat;
TfLiteTensor* concat;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kConcat, &concat));
concat->type = input->type;
concat->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* concat_size = TfLiteIntArrayCreate(2);
concat_size->data[0] = n_batch;
concat_size->data[1] = n_input + n_output;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, concat, concat_size));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInput, &input));
const TfLiteTensor* input_state;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputState, &input_state));
const TfLiteTensor* gate_weight;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kGateWeight, &gate_weight));
const TfLiteTensor* gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kGateBias, &gate_bias));
const TfLiteTensor* candidate_weight;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kCandidateWeight,
&candidate_weight));
const TfLiteTensor* candidate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kCandidateBias, &candidate_bias));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutput, &output));
TfLiteTensor* output_state;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputState, &output_state));
TfLiteTensor* activation;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kActivation, &activation));
TfLiteTensor* concat;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kConcat, &concat));
auto cpu_backend_context = CpuBackendContext::GetFromContext(context);
if (gate_weight->type == kTfLiteFloat32) {
GruImpl(input, input_state, gate_weight, gate_bias, candidate_weight,
candidate_bias, output, output_state, activation, concat,
cpu_backend_context);
} else {
TF_LITE_KERNEL_LOG(context,
"Unsupported combination of data types for GruCell");
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_GRU() {
static TfLiteRegistration r = {
unidirectional_sequence_gru::Init, unidirectional_sequence_gru::Free,
unidirectional_sequence_gru::Prepare, unidirectional_sequence_gru::Eval};
return &r;
}
}
}
} | #include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_GRU();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class GRUOpModel : public SingleOpModel {
public:
explicit GRUOpModel(int n_batch, int n_input, int n_output,
const std::vector<std::vector<int>>& input_shapes,
const TensorType& weight_type = TensorType_FLOAT32)
: n_batch_(n_batch), n_input_(n_input), n_output_(n_output) {
input_ = AddInput(TensorType_FLOAT32);
input_state_ =
AddVariableInput(TensorData{TensorType_FLOAT32, {n_batch, n_output}});
gate_weight_ = AddInput(TensorType_FLOAT32);
gate_bias_ = AddInput(TensorType_FLOAT32);
candidate_weight_ = AddInput(TensorType_FLOAT32);
candidate_bias_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
output_state_ = AddOutput(TensorType_FLOAT32);
SetCustomOp("UNIDIRECTIONAL_SEQUENCE_GRU", {},
Register_UNIDIRECTIONAL_SEQUENCE_GRU);
BuildInterpreter(input_shapes);
}
void SetInput(const std::vector<float>& f) { PopulateTensor(input_, f); }
void SetInputState(const std::vector<float>& f) {
PopulateTensor(input_state_, f);
}
void SetGateWeight(const std::vector<float>& f) {
PopulateTensor(gate_weight_, f);
}
void SetGateBias(const std::vector<float>& f) {
PopulateTensor(gate_bias_, f);
}
void SetCandidateWeight(const std::vector<float>& f) {
PopulateTensor(candidate_weight_, f);
}
void SetCandidateBias(const std::vector<float>& f) {
PopulateTensor(candidate_bias_, f);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int num_batches() { return n_batch_; }
int num_inputs() { return n_input_; }
int num_outputs() { return n_output_; }
private:
int input_;
int input_state_;
int gate_weight_;
int gate_bias_;
int candidate_weight_;
int candidate_bias_;
int output_;
int output_state_;
int n_batch_;
int n_input_;
int n_output_;
};
TEST(GRUTest, SimpleTest) {
const int n_time = 2;
const int n_batch = 2;
const int n_input = 2;
const int n_output = 3;
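// Shapes follow the op's input order: input, input state, gate weight,
// gate bias, candidate weight, candidate bias.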
GRUOpModel m(n_batch, n_input, n_output,
{{n_time, n_batch, n_input},
{n_batch, n_output},
{2 * n_output, n_input + n_output},
{2 * n_output},
{n_output, n_input + n_output},
{n_output}});
m.SetInput({0.89495724, 0.34482682, 0.68505806, 0.7135783, 0.3167085,
0.93647677, 0.47361764, 0.39643127});
m.SetInputState(
{0.09992421, 0.3028481, 0.78305984, 0.50438094, 0.11269058, 0.10244724});
m.SetGateWeight({0.7256918, 0.8945897, 0.03285786, 0.42637166, 0.119376324,
0.83035135, 0.16997327, 0.42302176, 0.77598256, 0.2660894,
0.9587266, 0.6218451, 0.88164485, 0.12272458, 0.2699055,
0.18399088, 0.21930052, 0.3374841, 0.70866305, 0.9523419,
0.25170696, 0.60988617, 0.79823977, 0.64477515, 0.2602957,
0.5053131, 0.93722224, 0.8451359, 0.97905475, 0.38669217});
m.SetGateBias(
{0.032708533, 0.018445263, 0.15320699, 0.8163046, 0.26683575, 0.1412022});
m.SetCandidateWeight({0.96165305, 0.95572084, 0.11534478, 0.96965164,
0.33562955, 0.8680755, 0.003066936, 0.057793964,
0.8671354, 0.33354893, 0.7313398, 0.78492093,
0.19530584, 0.116550304, 0.13599132});
m.SetCandidateBias({0.89837056, 0.54769796, 0.63364106});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(n_time, n_batch, n_output));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.20112592, 0.45286041, 0.80842507, 0.59567153, 0.2619998,
0.22922856, 0.27715868, 0.5247152, 0.82300174, 0.65812796,
0.38217607, 0.3401444})));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_gru.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_gru_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7e686392-98dc-4e35-9be8-7b119eee4999 | cpp | tensorflow/tensorflow | irfft2d | tensorflow/lite/kernels/irfft2d.cc | tensorflow/lite/kernels/irfft2d_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <complex>
#include "third_party/fft2d/fft2d.h"
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace irfft2d {
using std::complex;
constexpr int kInputTensor = 0;
constexpr int kFftLengthTensor = 1;
constexpr int kOutputTensor = 0;
constexpr int kFftIntegerWorkingAreaTensor = 0;
constexpr int kFftDoubleWorkingAreaTensor = 1;
constexpr int kTensorNotAllocated = -1;
struct OpData {
int fft_integer_working_area_id = kTensorNotAllocated;
int fft_double_working_area_id = kTensorNotAllocated;
};
bool IsPowerOfTwo(uint32_t v) { return v && !(v & (v - 1)); }
static TfLiteStatus InitTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (data->fft_integer_working_area_id != kTensorNotAllocated &&
data->fft_double_working_area_id != kTensorNotAllocated) {
return kTfLiteOk;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(2);
int first_new_index;
TF_LITE_ENSURE_STATUS(context->AddTensors(context, 2, &first_new_index));
node->temporaries->data[kFftIntegerWorkingAreaTensor] = first_new_index;
data->fft_integer_working_area_id = first_new_index;
node->temporaries->data[kFftDoubleWorkingAreaTensor] = first_new_index + 1;
data->fft_double_working_area_id = first_new_index + 1;
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
fft_integer_working_area->type = kTfLiteInt32;
fft_integer_working_area->allocation_type = kTfLiteArenaRw;
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
fft_double_working_area->type = kTfLiteInt64;
fft_double_working_area->allocation_type = kTfLiteArenaRw;
return kTfLiteOk;
}
TfLiteStatus ResizeOutputandTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const int num_dims = NumDimensions(input);
TF_LITE_ENSURE(context, num_dims >= 2);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[0]));
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[1]));
int fft_height, fft_width;
fft_height = fft_length_data[0];
fft_width = fft_length_data[1];
int fft_working_length = std::max(fft_height, fft_width / 2);
int half_fft_working_length = fft_working_length / 2;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
output_shape->data[num_dims - 2] = fft_length_data[0];
output_shape->data[num_dims - 1] = fft_length_data[1];
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteIntArray* fft_integer_working_area_shape = TfLiteIntArrayCreate(1);
fft_integer_working_area_shape->data[0] =
2 + static_cast<int>(sqrt(fft_working_length));
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_integer_working_area,
fft_integer_working_area_shape));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
TfLiteIntArray* fft_double_working_area_shape = TfLiteIntArrayCreate(1);
fft_double_working_area_shape->data[0] =
half_fft_working_length + fft_width / 4;
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_double_working_area,
fft_double_working_area_shape));
return kTfLiteOk;
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 2);
if (input->type != kTfLiteComplex64) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for input is not supported by irfft2.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const RuntimeShape fft_length_shape = GetTensorShape(fft_length);
TF_LITE_ENSURE_EQ(context, NumDimensions(fft_length), 1);
TF_LITE_ENSURE_EQ(context, fft_length_shape.Dims(0), 2);
if (fft_length->type != kTfLiteInt32) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for fft_length is not supported by irfft2.",
TfLiteTypeGetName(fft_length->type));
return kTfLiteError;
}
TF_LITE_ENSURE_STATUS(InitTemporaryTensors(context, node));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteFloat32;
if (!IsConstantOrPersistentTensor(fft_length)) {
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
SetTensorToDynamic(fft_integer_working_area);
SetTensorToDynamic(fft_double_working_area);
SetTensorToDynamic(output);
return kTfLiteOk;
}
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
return kTfLiteOk;
}
void Irfft2dReorder(int fft_height, int fft_width, double** fft_input_output) {
ruy::profiler::ScopeLabel label("Irfft2dReorder");
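// Negate the imaginary components to account for fft2d's FFT sign convention,
// then let rdft2dsort repack the half-spectrum into rdft2d's in-place layout.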
for (int i = 0; i < fft_height; ++i) {
for (int j = 1; j < fft_width + 2; j += 2) {
fft_input_output[i][j] = -fft_input_output[i][j];
}
}
const int kBackwardFft = -1;
rdft2dsort(fft_height, fft_width, kBackwardFft, fft_input_output);
}
void Irfft2dImpl(int fft_height, int fft_width, double** fft_input_output,
int* fft_integer_working_area_data,
double* fft_double_working_area_data) {
ruy::profiler::ScopeLabel label("Irfft2dImpl");
Irfft2dReorder(fft_height, fft_width, fft_input_output);
double* fft_dynamic_working_area = nullptr;
const int kBackwardFft = -1;
rdft2d(fft_height, fft_width, kBackwardFft, fft_input_output,
fft_dynamic_working_area, fft_integer_working_area_data,
fft_double_working_area_data);
}
void PrepareInputBuffer(const complex<float>* input_data, int input_height,
int input_width, int fft_height, int fft_width,
double** fft_input_output) {
int valid_input_height = std::min(input_height, fft_height);
int valid_input_width = std::min(input_width, fft_width / 2 + 1);
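// Copy the valid half-spectrum (width / 2 + 1 complex values per row);
// regions beyond the input extent are zero-padded.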
for (int i = 0; i < valid_input_height; ++i) {
int in_pos = i * input_width;
for (int j = 0; j < valid_input_width; ++j) {
fft_input_output[i][2 * j] = input_data[in_pos].real();
fft_input_output[i][2 * j + 1] = input_data[in_pos].imag();
++in_pos;
}
for (int j = valid_input_width; j < fft_width / 2 + 1; ++j) {
fft_input_output[i][2 * j] = 0;
fft_input_output[i][2 * j + 1] = 0;
}
}
for (int i = valid_input_height; i < fft_height; ++i) {
for (int j = 0; j < fft_width / 2 + 1; ++j) {
fft_input_output[i][2 * j] = 0;
fft_input_output[i][2 * j + 1] = 0;
}
}
}
void PrepareOutputBuffer(float* output_data, int fft_height, int fft_width,
double** fft_input_output) {
int cnt = 0;
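// rdft2d's backward pass is unnormalized up to a factor of (height * width) / 2;
// multiply by 2 / (height * width) to recover the inverse transform.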
float norm = 2.0 / static_cast<float>(fft_height * fft_width);
for (int i = 0; i < fft_height; ++i) {
for (int j = 0; j < fft_width; ++j) {
output_data[cnt++] = fft_input_output[i][j] * norm;
}
}
}
TfLiteStatus Irfft2dHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const complex<float>* input_data = GetTensorData<complex<float>>(input);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
float* output_data = GetTensorData<float>(output);
int fft_height, fft_width;
fft_height = fft_length_data[0];
fft_width = fft_length_data[1];
const RuntimeShape input_shape = GetTensorShape(input);
const int input_dims_count = input_shape.DimensionsCount();
const auto* input_dims_data = input_shape.DimsData();
int num_slices = 1;
for (int i = 0; i < input_dims_count - 2; ++i) {
num_slices *= input_dims_data[i];
}
int input_height = input_dims_data[input_dims_count - 2];
int input_width = input_dims_data[input_dims_count - 1];
int input_slice_size = input_height * input_width;
int output_slice_size = fft_height * fft_width;
double** fft_input_output = new double*[fft_height];
for (int i = 0; i < fft_height; ++i) {
fft_input_output[i] = new double[fft_width + 2];
}
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
int* fft_integer_working_area_data =
GetTensorData<int>(fft_integer_working_area);
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
double* fft_double_working_area_data = reinterpret_cast<double*>(
GetTensorData<int64_t>(fft_double_working_area));
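// Run the 2-D inverse FFT independently over each outer (batch) slice.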
for (int i = 0; i < num_slices; ++i) {
PrepareInputBuffer(input_data, input_height, input_width, fft_height,
fft_width, fft_input_output);
memset(fft_integer_working_area_data, 0, fft_integer_working_area->bytes);
memset(fft_double_working_area_data, 0, fft_double_working_area->bytes);
Irfft2dImpl(fft_height, fft_width, fft_input_output,
fft_integer_working_area_data, fft_double_working_area_data);
PrepareOutputBuffer(output_data, fft_height, fft_width, fft_input_output);
input_data += input_slice_size;
output_data += output_slice_size;
}
for (int i = 0; i < fft_height; ++i) {
delete[] fft_input_output[i];
}
delete[] fft_input_output;
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type != kTfLiteFloat32) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for output is not supported by irfft2.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
if (!IsConstantTensor(fft_length)) {
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
} else {
int num_dims_output = NumDimensions(output);
const RuntimeShape output_shape = GetTensorShape(output);
TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
TF_LITE_ENSURE(context, num_dims_output >= 2);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
fft_length_data[0]);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
fft_length_data[1]);
}
return Irfft2dHelper(context, node);
}
}
TfLiteRegistration* Register_IRFFT2D() {
static TfLiteRegistration r = {irfft2d::Init, irfft2d::Free, irfft2d::Prepare,
irfft2d::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <complex>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using ::testing::ElementsAreArray;
class Irfft2dOpModel : public SingleOpModel {
public:
Irfft2dOpModel(const TensorData& input, const TensorData& fft_lengths) {
input_ = AddInput(input);
fft_lengths_ = AddInput(fft_lengths);
TensorType output_type = TensorType_FLOAT32;
output_ = AddOutput({output_type, {}});
const std::vector<uint8_t> custom_option;
SetCustomOp("Irfft2d", custom_option, Register_IRFFT2D);
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
int fft_lengths() { return fft_lengths_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int fft_lengths_;
int output_;
};
TEST(Irfft2dOpTest, FftLengthMatchesInputSize) {
Irfft2dOpModel model({TensorType_COMPLEX64, {4, 3}}, {TensorType_INT32, {2}});
model.PopulateTensor<std::complex<float>>(model.input(), {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}
});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
float expected_result[16] = {1, 2, 3, 4, 3, 8, 6, 3, 5, 2, 7, 6, 9, 5, 8, 3};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Irfft2dOpTest, FftLengthSmallerThanInputSize) {
Irfft2dOpModel model({TensorType_COMPLEX64, {4, 3}}, {TensorType_INT32, {2}});
model.PopulateTensor<std::complex<float>>(model.input(), {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}
});
model.PopulateTensor<int32_t>(model.fft_lengths(), {2, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
float expected_result[4] = {14, 18.5, 20.5, 22};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Irfft2dOpTest, FftLengthGreaterThanInputSize) {
Irfft2dOpModel model({TensorType_COMPLEX64, {4, 3}}, {TensorType_INT32, {2}});
model.PopulateTensor<std::complex<float>>(model.input(), {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}
});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
float expected_result[32] = {
0.25, 0.54289322, 1.25, 1.25, 1.25, 1.95710678, 2.25, 1.25,
1.25, 2.85355339, 4.25, 3.91421356, 2.75, 2.14644661, 1.75, 1.08578644,
3., 1.43933983, 0.5, 2.14644661, 4., 3.56066017, 2.5, 2.85355339,
5.625, 3.65533009, 1.375, 3.3017767, 5.125, 2.59466991, 0.375, 2.9482233
};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Irfft2dOpTest, InputDimsGreaterThan2) {
Irfft2dOpModel model({TensorType_COMPLEX64, {2, 2, 3}},
{TensorType_INT32, {2}});
model.PopulateTensor<std::complex<float>>(model.input(), {
{30., 0.}, {-5, -3.}, { -4., 0.},
{-10., 0.}, {1., 7.}, { 0., 0.},
{58., 0.}, {-18., 6.}, { 26., 0.},
{-18., 0.}, { 14., 2.}, {-18., 0.}
});
model.PopulateTensor<int32_t>(model.fft_lengths(), {2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
float expected_result[16] = {1., 2., 3., 4., 3., 8., 6., 3.,
5., 2., 7., 6., 7., 3., 23., 5.};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/irfft2d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/irfft2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a31fc2ca-d988-44e3-8fc8-687ff14810e6 | cpp | tensorflow/tensorflow | transpose_conv | tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc | tensorflow/lite/delegates/xnnpack/transpose_conv_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
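// Emits a GLSL shader that computes transposed convolution gather-style: each
// output pixel accumulates the weighted input pixels whose strided footprint
// covers it, rather than scattering contributions from the input side.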
class ConvolutionTransposedBuffers : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution Transposed does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const ConvolutionTransposedAttributes&>(ctx.op_attr);
auto weights = attr.weights.shape;
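    // The "padding" parameter below is the gather-side offset,
    // kernel_size - 1 minus the prepended transposed-conv padding, used when
    // mapping an output pixel back onto the input grid.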
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"kernel_size", int2(weights.w, weights.h)},
{"stride", int2(attr.stride.w, attr.stride.h)},
{"padding", int2(weights.w - 1 - attr.padding.prepended.w,
weights.h - 1 - attr.padding.prepended.h)},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights",
MakeReadonlyObject(Get3DSizeForPHWO4I4(attr.weights.shape),
ConvertToPHWO4I4Transposed(attr.weights))}};
std::string source = R"(
#define IN_BOUNDS(p, p0, p1) (all(greaterThanEqual(p, p0)) && all(lessThan(p, p1)))
ivec2 p0 = ($padding$ + $stride$ - gid.xy % $stride$) % $stride$;
for (int y = p0.y; y < $kernel_size.y$; y += $stride.y$) {
for (int x = p0.x; x < $kernel_size.x$; x += $stride.x$) {
int i = int(float(y * $kernel_size.x$) + float(x));
ivec2 idx = ivec2(vec2(gid.xy + ivec2(x, y)) - vec2($padding$));
if (IN_BOUNDS(idx, ivec2(0), ivec2($input_data_0_w$, $input_data_0_h$) * $stride$)) {
ivec2 coord = idx / $stride$;
for (int l = 0; l < $src_depth$; ++l) {
vec4 src_color = $input_data_0[coord.x, coord.y, l]$;
value_0.x += dot(src_color, $weights[l * 4 + 0, i, gid.z]$);
value_0.y += dot(src_color, $weights[l * 4 + 1, i, gid.z]$);
value_0.z += dot(src_color, $weights[l * 4 + 2, i, gid.z]$);
value_0.w += dot(src_color, $weights[l * 4 + 3, i, gid.z]$);
}
}
}
}
)";
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
uint3(),
source,
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewConvolutionTransposedNodeShader() {
return std::make_unique<ConvolutionTransposedBuffers>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/transpose_conv_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
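// Each test below builds a TRANSPOSE_CONV model with randomized dimensions
// and runs it through TransposeConvTester, which checks the XNNPACK
// delegate's output against the reference execution.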
TEST(TransposeConvTest, 2x2Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(2)
.KernelWidth(2)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 2x2Stride2NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(2)
.KernelWidth(2)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 3x3Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(3)
.KernelWidth(3)
.StrideHeight(2)
.StrideWidth(2)
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 3x3Stride2NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(3)
.KernelWidth(3)
.StrideHeight(2)
.StrideWidth(2)
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride2NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(4)
.StrideWidth(4)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride4NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(4)
.StrideWidth(4)
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithSamePaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithValidPaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithSamePaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithValidPaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, FP16WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.FP16Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, TensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, TensorWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.TensorWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, ChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, ChannelWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.ChannelWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseWeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseFP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseFP16WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.FP16Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseTensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseTensorWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.TensorWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseChannelWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.ChannelWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, MultiThreadingNoBias) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, WeightsCache) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
std::unique_ptr<TfLiteXNNPackDelegateWeightsCache,
decltype(&TfLiteXNNPackDelegateWeightsCacheDelete)>
weights_cache(TfLiteXNNPackDelegateWeightsCacheCreate(),
TfLiteXNNPackDelegateWeightsCacheDelete);
delegate_options.weights_cache = weights_cache.get();
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.WeightsCache(weights_cache.get())
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/transpose_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
154f17c3-5ac5-44f5-807f-66c9ebfb7ed3 | cpp | tensorflow/tensorflow | while | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/while.cc | tensorflow/lite/kernels/while_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/while.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
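// Replaces the terminator of every block in `region` with a tfl.yield
// carrying the same operands, as tfl.while regions require.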
void TFLReplaceReturnOp(Region& region, PatternRewriter& rewriter) {
OpBuilder::InsertionGuard guard(rewriter);
for (auto& block : region.getBlocks()) {
Operation* terminator = block.getTerminator();
rewriter.setInsertionPoint(terminator);
rewriter.replaceOpWithNewOp<TFL::YieldOp>(terminator,
terminator->getOperands());
}
}
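// Lowers mhlo.while to tfl.while by transplanting the cond and body regions
// onto the new op and rewriting their terminators into tfl.yield.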
class LegalizeWhileOp : public OpConversionPattern<mhlo::WhileOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::WhileOp while_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
auto is_stateless = rewriter.getBoolAttr(false);
auto new_while = rewriter.create<TFL::WhileOp>(
while_op.getLoc(), while_op->getResultTypes(), while_op->getOperands(),
is_stateless);
new_while.getCond().takeBody(while_op.getCond());
new_while.getBody().takeBody(while_op.getBody());
TFLReplaceReturnOp(new_while.getCond(), rewriter);
TFLReplaceReturnOp(new_while.getBody(), rewriter);
rewriter.replaceOp(while_op, new_while.getResults());
return success();
}
};
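// mhlo.while ops with any tuple-typed operand are reported as legal, i.e.
// they are deliberately left unconverted by this pattern set.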
bool IsWhileLegal(mhlo::WhileOp while_op) {
for (auto type : while_op->getOperandTypes()) {
if (mlir::isa<TupleType>(type)) return true;
}
return false;
}
}
void PopulateWhilePatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
target.addDynamicallyLegalOp<mhlo::WhileOp>(IsWhileLegal);
  patterns.add<LegalizeWhileOp>(ctx);
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::CheckScalarStringTensor;
using subgraph_test_util::CheckStringTensor;
using subgraph_test_util::ControlFlowOpTest;
using subgraph_test_util::FillIntTensor;
using subgraph_test_util::FillScalarStringTensor;
namespace {
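// Exercises the TFLite WHILE kernel: each test wires a condition subgraph and
// a body subgraph into a primary graph containing a WHILE op, then checks the
// outputs across repeated Invoke() calls.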
class WhileTest : public ControlFlowOpTest {};
TEST_F(WhileTest, TestWithXNNPACK) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildFloatLessCondSubgraph(interpreter_->subgraph(1), 100);
builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(2));
builder_->BuildFloatWhileSubgraph(&interpreter_->primary_subgraph(), 2);
const auto opt = TfLiteXNNPackDelegateOptionsDefault();
TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&opt);
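  // Only the body subgraph is expected to be delegated to XNNPACK; the
  // primary and condition subgraphs are marked as skippable by the delegate.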
interpreter_->primary_subgraph().MarkAsDelegationSkippable();
interpreter_->subgraph(1)->MarkAsDelegationSkippable();
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(xnnpack_delegate), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
float* input0 =
GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[0]));
input0[0] = 1;
float* input1 =
GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[1]));
input1[0] = 1;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
float* output0_data = GetTensorData<float>(output0);
ASSERT_EQ(output0_data[0], 256);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
float* output1_data = GetTensorData<float>(output1);
ASSERT_EQ(output1_data[0], 256);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteXNNPackDelegateDelete(xnnpack_delegate);
}
TEST_F(WhileTest, TestInputIsOutput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 3);
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestInputIsOutputButDifferent) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 2);
builder_->BuildInputIsDifferentOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 2);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {5});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {8});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestFlexOutput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 2);
builder_->BuildFlexOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 2);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestCounterOnly) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 1);
builder_->BuildCounterOnlySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 1);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestAllCases) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 5);
builder_->BuildAllInplaceScenariosSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 5);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {5});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {6}, {2, 2, 2, 2, 2, 2});
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
CheckIntTensor(output3, {6}, {4, 4, 4, 4, 4, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestStaticUnconsumedOutputs) {
for (bool dynamic_tensors : {true, false}) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 3);
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraphWithUnconsumedOutput(
&interpreter_->primary_subgraph(), 3);
InterpreterOptions options;
if (dynamic_tensors) {
options.OptimizeMemoryForLargeTensors(1);
interpreter_->ApplyOptions(&options);
}
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {8});
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
CheckIntTensor(output1, {2}, {8, 8});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestDynamicOpTriggersAllocationOfUnusedInput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 2, 3);
builder_->BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {3});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {4, 4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {2}, {2, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestStaticInPlace) {
const std::vector<int> expected = {6, 10, 15, 21, 28};
for (int i = 0; i < expected.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), i + 1);
builder_->BuildDeepBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {0});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {i + 2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {expected[i]});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestStaticInPlaceLarge) {
int size = 10000;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), 60000);
builder_->BuildLargeBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {size}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]),
std::vector<int>(size, 1));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {}, {10010 * size});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {size}, std::vector<int>(size, 70014));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestTriangularNumberSequence) {
const std::vector<int> expected = {1, 3, 6, 10, 15, 21, 28};
for (int i = 0; i < expected.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), i);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
auto body_subgraph = interpreter_->subgraph(2);
TfLiteTensor* subgraph_input2 =
body_subgraph->tensor(body_subgraph->inputs()[1]);
EXPECT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {i + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {expected[i]});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestTriangularNumberSequenceWithShallowCopy) {
const std::vector<int> expected = {1, 3, 6, 10, 15, 21, 28};
for (int i = 0; i < expected.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), i);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1000000});
InterpreterOptions options;
options.OptimizeMemoryForLargeTensors(1000000);
ASSERT_EQ(interpreter_->ApplyOptions(&options), kTfLiteOk);
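    // With OptimizeMemoryForLargeTensors enabled, the large loop-carried
    // tensor should be shallow-copied rather than duplicated, so the extra
    // footprint from AllocateTensors must stay below 9000 KB.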
const size_t initial_mem_usage =
profiling::memory::GetMemoryUsage().mem_footprint_kb;
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_LE(profiling::memory::GetMemoryUsage().mem_footprint_kb -
initial_mem_usage,
9000);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
const std::vector<int> input_vector(1000000, 1);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]),
input_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
auto body_subgraph = interpreter_->subgraph(2);
TfLiteTensor* subgraph_input2 =
body_subgraph->tensor(body_subgraph->inputs()[1]);
ASSERT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {i + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
const std::vector<int> expected2(1000000, expected[i]);
CheckIntTensor(output2, {1000000}, expected2);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
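// Each of the 4 iterations pads the vector with 1 zero in front and 2 behind,
// growing the 2-element input {5, 7} into the 14-element output checked below.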
TEST_F(WhileTest, TestPadLoop) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), 4);
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {5});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {14}, {0, 0, 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestDynamicBodyWithSharingEarlyExit) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 0, 4);
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 4);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {10000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {1});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {3}, {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestDynamicBodyWithSharing) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 4);
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 4);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1000000});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1000000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {18},
{4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
EXPECT_EQ(output2->dims->data[0], 1000000);
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
EXPECT_EQ(output3->dims->data[0], 1000000);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestDynamicBodyWithSharingAndAliases) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 0, 5);
builder_->BuildDynamicBodySubgraphWithAliases(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 5);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {0});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {3});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {1});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {11});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {1}, {12});
  TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
CheckIntTensor(output3, {1}, {13});
TfLiteTensor* output4 = interpreter_->tensor(interpreter_->outputs()[4]);
CheckIntTensor(output4, {1}, {13});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestOutputNotConsumed) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 11, 3);
builder_->BuildOutputNotConsumedSubgraph(*interpreter_->subgraph(2));
builder_->BuildOutputNotConsumedWhileSubgraph(
&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {3}, {18, 18, 18});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestPadLoopWithSharing) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 3);
builder_->BuildLargePadSubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {3, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {5});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {5}, {4, 9, 10, 4, 4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {8}, {0, 4, 9, 10, 4, 4, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestPadLoopWithShallowCopy) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), 3);
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1000000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
std::vector<int> input_vector(1000000, 0);
input_vector[0] = 5;
input_vector[1] = 7;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), input_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
std::vector<int> output_vector(1000009, 0);
output_vector[3] = 5;
output_vector[4] = 7;
CheckIntTensor(output2, {1000009}, output_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestWhileLoopWithDynamicTensor) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraphWithDynamicTensor(
interpreter_->subgraph(1), 3);
builder_->BuildBodySubgraphWithDynamicTensor(interpreter_->subgraph(2));
builder_->BuildWhileSubgraphWithDynamicTensor(
&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[0]), "A");
FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[1]), "A");
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* string_output1 =
interpreter_->tensor(interpreter_->outputs()[0]);
CheckScalarStringTensor(string_output1, "A");
TfLiteTensor* string_output2 =
interpreter_->tensor(interpreter_->outputs()[1]);
CheckStringTensor(string_output2, {4}, {"A", "A", "A", "A"});
TfLiteTensor* integer_output =
interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(integer_output, {1}, {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/while.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/while_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a180767-e286-4079-a73a-03335853fab7 | cpp | tensorflow/tensorflow | random_uniform_custom | tensorflow/lite/kernels/random_uniform_custom.cc | tensorflow/lite/kernels/random_uniform_custom_test.cc | #include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <random>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace random_uniform {
struct OpData {
std::default_random_engine rng;
};
namespace {
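// Fills `buffer` with `buffer_size` values drawn from `dist_type` over
// [min_value, max_value], advancing the op's persistent RNG state.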
template <typename T, typename dist_type>
void RandomUniformSample(std::default_random_engine& rng, T* buffer,
size_t buffer_size, T min_value, T max_value) {
dist_type dist(min_value, max_value);
std::generate(buffer, buffer + buffer_size, [&]() { return dist(rng); });
}
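// Converts the 1-D shape tensor (int32 or int64) into a freshly allocated
// TfLiteIntArray; ownership is taken by context->ResizeTensor.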
TfLiteIntArray* CreateDimensionsFromTensor(const TfLiteTensor* tensor) {
const int output_dims = tflite::SizeOfDimension(tensor, 0);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_dims);
for (int i = 0; i < output_dims; i++) {
if (tensor->type == kTfLiteInt32) {
output_shape->data[i] = tensor->data.i32[i];
} else {
output_shape->data[i] = tensor->data.i64[i];
}
}
return output_shape;
}
}  // namespace
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData();
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
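// The output shape comes from the 1-D shape input. If that input is not a
// constant/persistent tensor, the output is marked dynamic and resized at
// eval time instead.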
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, tflite::NumInputs(node) >= 1);
TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TF_LITE_ENSURE(context,
input->type == kTfLiteInt32 || input->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, tflite::NumDimensions(input), 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
if (!IsConstantOrPersistentTensor(input)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return context->ResizeTensor(context, output,
CreateDimensionsFromTensor(input));
}
TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node) {
OpData* params = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, params != nullptr);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
if (IsDynamicTensor(output)) {
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output,
CreateDimensionsFromTensor(input)));
}
const size_t output_size = tflite::NumElements(output);
switch (output->type) {
case kTfLiteFloat32:
RandomUniformSample<float, std::uniform_real_distribution<float>>(
params->rng, GetTensorData<float>(output), output_size, 0.f, 1.f);
break;
case kTfLiteFloat64:
RandomUniformSample<double, std::uniform_real_distribution<double>>(
params->rng, GetTensorData<double>(output), output_size, 0.f, 1.f);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported output datatype for RandomUniform: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
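// Reads a scalar bound from an int8/int32/int64 tensor; unsupported types
// yield -1.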
int64_t IntValueFromTensor(const TfLiteTensor* tensor) {
switch (tensor->type) {
case kTfLiteInt8:
return *GetTensorData<int8_t>(tensor);
case kTfLiteInt32:
return *GetTensorData<int32_t>(tensor);
case kTfLiteInt64:
return *GetTensorData<int64_t>(tensor);
default:
return -1;
}
}
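// Note that std::uniform_int_distribution treats max_value as inclusive,
// unlike TF's RandomUniformInt, which excludes the upper bound.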
TfLiteStatus EvalInt(TfLiteContext* context, TfLiteNode* node) {
OpData* params = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, params != nullptr);
TF_LITE_ENSURE(context, tflite::NumInputs(node) >= 3);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
if (IsDynamicTensor(output)) {
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output,
CreateDimensionsFromTensor(input)));
}
int64_t min_value = IntValueFromTensor(tflite::GetInput(context, node, 1));
int64_t max_value = IntValueFromTensor(tflite::GetInput(context, node, 2));
TF_LITE_ENSURE(context, min_value < max_value);
size_t output_size = tflite::NumElements(output);
switch (output->type) {
case kTfLiteInt8:
RandomUniformSample<int8_t, std::uniform_int_distribution<int32_t>>(
params->rng, GetTensorData<int8_t>(output), output_size, min_value,
max_value);
break;
case kTfLiteInt32:
RandomUniformSample<int32_t, std::uniform_int_distribution<int32_t>>(
params->rng, GetTensorData<int32_t>(output), output_size, min_value,
max_value);
break;
case kTfLiteInt64:
RandomUniformSample<int64_t, std::uniform_int_distribution<int64_t>>(
params->rng, GetTensorData<int64_t>(output), output_size, min_value,
max_value);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported output datatype for RandomUniformInt: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}  // namespace random_uniform
TfLiteRegistration* Register_RANDOM_UNIFORM() {
static TfLiteRegistration r = {random_uniform::Init, random_uniform::Free,
random_uniform::Prepare,
random_uniform::EvalFloat};
return &r;
}
TfLiteRegistration* Register_RANDOM_UNIFORM_INT() {
static TfLiteRegistration r = {random_uniform::Init, random_uniform::Free,
random_uniform::Prepare,
random_uniform::EvalInt};
return &r;
}
}  // namespace custom
}  // namespace ops
} | #include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
template <>
tflite::TensorType GetTTEnum<int8_t>() {
return tflite::TensorType_INT8;
}
template <>
tflite::TensorType GetTTEnum<int32_t>() {
return tflite::TensorType_INT32;
}
template <>
tflite::TensorType GetTTEnum<int64_t>() {
return tflite::TensorType_INT64;
}
template <typename INPUT_TYPE>
class RandomUniformOpModel : public tflite::SingleOpModel {
public:
RandomUniformOpModel(const std::initializer_list<INPUT_TYPE>& input,
TensorType input_type, tflite::TensorData output,
bool dynamic_input) {
if (dynamic_input) {
input_ = AddInput({input_type, {3}});
} else {
input_ =
AddConstInput(input_type, input, {static_cast<int>(input.size())});
}
output_ = AddOutput(output);
SetCustomOp("RandomUniform", {}, ops::custom::Register_RANDOM_UNIFORM);
BuildInterpreter({GetShape(input_)});
if (dynamic_input) {
PopulateTensor<INPUT_TYPE>(input_, std::vector<INPUT_TYPE>(input));
}
}
int input_;
int output_;
int input() { return input_; }
int output() { return output_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
};
template <typename INPUT_TYPE>
class RandomUniformIntOpModel : public tflite::SingleOpModel {
public:
RandomUniformIntOpModel(const std::initializer_list<INPUT_TYPE>& input,
TensorType input_type, tflite::TensorData output,
INPUT_TYPE min_val, INPUT_TYPE max_val) {
input_ = AddConstInput(input_type, input, {static_cast<int>(input.size())});
input_minval_ = AddConstInput(input_type, {min_val}, {1});
input_maxval_ = AddConstInput(input_type, {max_val}, {1});
output_ = AddOutput(output);
SetCustomOp("RandomUniformInt", {},
ops::custom::Register_RANDOM_UNIFORM_INT);
BuildInterpreter({GetShape(input_)});
}
int input_;
int input_minval_;
int input_maxval_;
int output_;
int input() { return input_; }
int output() { return output_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
};
}  // namespace
}  // namespace tflite
template <typename FloatType>
class RandomUniformTest : public ::testing::Test {
public:
using Float = FloatType;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(RandomUniformTest, TestTypes);
TYPED_TEST(RandomUniformTest, TestOutput) {
using Float = typename TestFixture::Float;
for (const auto dynamic : {true, false}) {
tflite::RandomUniformOpModel<int32_t> m(
{1000, 50, 5}, tflite::TensorType_INT32,
{tflite::GetTTEnum<Float>(), {}}, dynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Float>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
double sum = 0;
for (const auto r : output) {
sum += r;
}
double avg = sum / output.size();
ASSERT_LT(std::abs(avg - 0.5), 0.05);
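    // A uniform sample on [0, 1) has mean 1/2 and variance 1/12.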
double sum_squared = 0;
for (const auto r : output) {
sum_squared += std::pow(r - avg, 2);
}
double var = sum_squared / output.size();
    EXPECT_LT(std::abs(1. / 12 - var), 0.05);
}
}
TYPED_TEST(RandomUniformTest, TestOutputInt64) {
using Float = typename TestFixture::Float;
for (const auto dynamic : {true, false}) {
tflite::RandomUniformOpModel<int64_t> m(
{1000, 50, 5}, tflite::TensorType_INT64,
{tflite::GetTTEnum<Float>(), {}}, dynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Float>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
double sum = 0;
for (const auto r : output) {
sum += r;
}
double avg = sum / output.size();
ASSERT_LT(std::abs(avg - 0.5), 0.05);
double sum_squared = 0;
for (const auto r : output) {
sum_squared += std::pow(r - avg, 2);
}
double var = sum_squared / output.size();
    EXPECT_LT(std::abs(1. / 12 - var), 0.05);
}
}
template <typename IntType>
class RandomUniformIntTest : public ::testing::Test {
public:
using Int = IntType;
};
using TestTypesInt = ::testing::Types<int8_t, int32_t, int64_t>;
TYPED_TEST_SUITE(RandomUniformIntTest, TestTypesInt);
TYPED_TEST(RandomUniformIntTest, TestOutput) {
using Int = typename TestFixture::Int;
tflite::RandomUniformIntOpModel<int32_t> m(
{1000, 50, 5}, tflite::TensorType_INT32, {tflite::GetTTEnum<Int>(), {}},
0, 5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
int counters[] = {0, 0, 0, 0, 0, 0};
for (const auto r : output) {
ASSERT_GE(r, 0);
ASSERT_LE(r, 5);
++counters[r];
}
for (int i = 1; i < 6; ++i) {
EXPECT_LT(std::abs(counters[i] - counters[0]), 1000);
}
}
TYPED_TEST(RandomUniformIntTest, TestOutputInt64) {
using Int = typename TestFixture::Int;
tflite::RandomUniformIntOpModel<int64_t> m(
{1000, 50, 5}, tflite::TensorType_INT64, {tflite::GetTTEnum<Int>(), {}},
0, 5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
int counters[] = {0, 0, 0, 0, 0, 0};
for (const auto r : output) {
ASSERT_GE(r, 0);
ASSERT_LE(r, 5);
++counters[r];
}
for (int i = 1; i < 6; ++i) {
EXPECT_LT(std::abs(counters[i] - counters[0]), 1000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_uniform_custom.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_uniform_custom_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb10c7dd-f1a5-419f-af6d-78be1ca2f1ea | cpp | tensorflow/tensorflow | fully_connected | tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc | tensorflow/lite/delegates/xnnpack/fully_connected_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class FullyConnectedBuffers : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const FullyConnectedAttributes&>(ctx.op_attr);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
constexpr int kWorkgroupHintX = 4;
constexpr int kWorkgroupHintY = 4;
std::vector<Variable> parameters = {
{"src_depth", src_depth},
{"dst_depth", dst_depth},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(ConvertToPHWO4I4(attr.weights))}};
std::string source = R"(
const int threads = int(gl_WorkGroupSize.y);
const int workers = int(gl_WorkGroupSize.x);
ivec3 tid = ivec3(gl_LocalInvocationID);
if (gid.x < $dst_depth$) {
int offset = 4 * gid.x * $src_depth$ + 4 * tid.y;
for (int d = tid.y; d < $src_depth$; d += threads, offset += 4 * threads) {
vec4 src = $input_data_0[0, 0, d]$;
value_0.x += dot(src, $weights[offset + 0]$);
value_0.y += dot(src, $weights[offset + 1]$);
value_0.z += dot(src, $weights[offset + 2]$);
value_0.w += dot(src, $weights[offset + 3]$);
}
sh_mem[workers * tid.y + tid.x] = value_0;
}
memoryBarrierShared();
barrier();
if (tid.y > 0 || gid.x >= $dst_depth$) {
return;
}
for (int t = 1; t < threads; t++) {
value_0 += sh_mem[workers * t + tid.x];
}
)";
if (!attr.bias.data.empty()) {
source += " value_0 += $bias[gid.x]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
source += " $output_data_0[0, 0, gid.x] = value_0$;";
std::vector<Variable> shared_variables = {
#ifdef __APPLE__
{"sh_mem", std::vector<float4>(32)},
#else
{"sh_mem", std::vector<float4>(0)},
#endif
};
*generated_code = {
std::move(parameters),
std::move(objects),
std::move(shared_variables),
uint3(dst_depth, kWorkgroupHintY, 1),
uint3(kWorkgroupHintX, kWorkgroupHintY, 1),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::ONLY_DEFINITIONS,
};
return absl::OkStatus();
}
};
}  // namespace
std::unique_ptr<NodeShader> NewFullyConnectedNodeShader() {
return std::make_unique<FullyConnectedBuffers>();
}
}  // namespace gl
}  // namespace gpu
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/fully_connected_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
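// Each test below randomizes batch/channel sizes, builds a fully connected
// model with FullyConnectedTester, and compares the XNNPACK-delegated output
// against the reference TFLite kernel.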
TEST(FullyConnected, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 1DKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.KeepDims(true)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 2DKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.KeepDims(true)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, width, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 3DReshape) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, width, input_channels})
.InputChannels(width * input_channels)
.OutputChannels(output_channels)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 3DKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, width, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.KeepDims(true)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, height, width, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, 4DKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, height, width, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.KeepDims(true)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, FP16WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.FP16Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, DynamicWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.DynamicWeights()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, DynamicWeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.DynamicWeights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, DynamicBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.DynamicBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, DynamicWeightsAndBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.DynamicWeights()
.DynamicBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, TensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, TensorWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.TensorWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, ChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, ChannelWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.ChannelWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.ReluActivation()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.Relu6Activation()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.ReluMinus1To1Activation()
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.Test(xnnpack_delegate.get());
}
TEST(FullyConnected, WeightsCache) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
std::unique_ptr<TfLiteXNNPackDelegateWeightsCache,
decltype(&TfLiteXNNPackDelegateWeightsCacheDelete)>
weights_cache(TfLiteXNNPackDelegateWeightsCacheCreate(),
TfLiteXNNPackDelegateWeightsCacheDelete);
delegate_options.weights_cache = weights_cache.get();
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
auto channels_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 9), std::ref(rng));
const auto batch = batch_rng();
const auto input_channels = channels_rng();
const auto output_channels = channels_rng();
FullyConnectedTester()
.InputShape({batch, input_channels})
.InputChannels(input_channels)
.OutputChannels(output_channels)
.WeightsCache(weights_cache.get())
.Test(xnnpack_delegate.get());
}
}  // namespace xnnpack
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/fully_connected_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff2eab9f-255b-41b1-b6ad-6edd45bec668 | cpp | tensorflow/tensorflow | pad | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/pad.cc | tensorflow/lite/delegates/xnnpack/pad_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/pad.h"
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/pad_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
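// An mhlo.pad op stays legal (is not rewritten) when it uses negative edge
// padding or non-trivial interior padding, neither of which tfl.pad/padv2
// can express.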
bool IsPadLegal(mhlo::PadOp op) {
return AnyNegativePads(op) || !TrivialInterior(op);
}
bool IsPadValCstZero(mhlo::PadOp op) {
if (matchPattern(op.getPaddingValue(), m_AnyZeroFloat())) {
return true;
}
if (matchPattern(op.getPaddingValue(), m_Zero())) {
return true;
}
return false;
}
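// Interleaves the low/high edge paddings into the [rank, 2] layout that the
// TFL pad ops take as their `paddings` operand.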
DenseIntElementsAttr BuildTFLPaddingAttr(OpBuilder& b, mhlo::PadOp op) {
auto lows = UnrollI64Splat(op.getEdgePaddingLow());
auto highs = UnrollI64Splat(op.getEdgePaddingHigh());
llvm::SmallVector<int64_t> res;
for (auto [l, h] : llvm::zip(lows, highs)) {
res.push_back(l);
res.push_back(h);
}
const int64_t n_dims = res.size();
auto tfl_padding_type =
RankedTensorType::get({n_dims / 2, 2}, b.getI64Type());
return DenseIntElementsAttr::get(tfl_padding_type, res);
}
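// A constant-zero padding value lowers to tfl.pad; any other padding value
// requires tfl.padv2, which carries the value as an extra operand.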
class LegalizePad : public OpConversionPattern<mhlo::PadOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::PadOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizePad::matchAndRewrite(
mhlo::PadOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsPadLegal(op)) {
return rewriter.notifyMatchFailure(op, "Matching an already legal pad op.");
}
if (!IsPadValCstZero(op)) {
return rewriter.notifyMatchFailure(
op, "Legalizing to padv1 requires zero const padding values.");
}
auto tfl_paddings = BuildTFLPaddingAttr(rewriter, op);
auto paddings_op =
rewriter.create<arith::ConstantOp>(op->getLoc(), tfl_paddings);
rewriter.replaceOpWithNewOp<TFL::PadOp>(op, op.getType(), op.getOperand(),
paddings_op);
return success();
}
class LegalizePadV2 : public OpConversionPattern<mhlo::PadOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::PadOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizePadV2::matchAndRewrite(
mhlo::PadOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsPadLegal(op)) {
return rewriter.notifyMatchFailure(op, "Matching an already legal pad op.");
}
if (IsPadValCstZero(op)) {
return rewriter.notifyMatchFailure(
op, "Legalizing to padv2 requires non zero const padding values.");
}
auto tfl_paddings = BuildTFLPaddingAttr(rewriter, op);
auto paddings_op =
rewriter.create<arith::ConstantOp>(op->getLoc(), tfl_paddings);
rewriter.replaceOpWithNewOp<TFL::PadV2Op>(op, op.getType(), op.getOperand(),
paddings_op, op.getPaddingValue());
return success();
}
}  // namespace
void PopulatePadPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizePad>(ctx);
patterns.add<LegalizePadV2>(ctx);
target.addDynamicallyLegalOp<mhlo::PadOp>(IsPadLegal);
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/pad_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
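// Each test pads a randomly shaped tensor along a subset of dimensions and
// checks the XNNPACK-delegated result against the reference kernel via
// PadTester.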
TEST(Pad, Full4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({pad_rng(), pad_rng(), pad_rng(), pad_rng()})
.InputPostPaddings({pad_rng(), pad_rng(), pad_rng(), pad_rng()})
.InputShape({shape_rng(), shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Batch4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({pad_rng(), 0, 0, 0})
.InputPostPaddings({pad_rng(), 0, 0, 0})
.InputShape({shape_rng(), shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, HeightAndWidth4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({0, pad_rng(), pad_rng(), 0})
.InputPostPaddings({0, pad_rng(), pad_rng(), 0})
.InputShape({shape_rng(), shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Channels4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({0, 0, 0, pad_rng()})
.InputPostPaddings({0, 0, 0, pad_rng()})
.InputShape({shape_rng(), shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Full3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({pad_rng(), pad_rng(), pad_rng()})
.InputPostPaddings({pad_rng(), pad_rng(), pad_rng()})
.InputShape({shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Batch3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({pad_rng(), 0, 0})
.InputPostPaddings({pad_rng(), 0, 0})
.InputShape({shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Width3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({0, pad_rng(), 0})
.InputPostPaddings({0, pad_rng(), 0})
.InputShape({shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Channels3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({0, 0, pad_rng()})
.InputPostPaddings({0, 0, pad_rng()})
.InputShape({shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Full2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({pad_rng(), pad_rng()})
.InputPostPaddings({pad_rng(), pad_rng()})
.InputShape({shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Batch2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({pad_rng(), 0})
.InputPostPaddings({pad_rng(), 0})
.InputShape({shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, Channels2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({0, pad_rng()})
.InputPostPaddings({0, pad_rng()})
.InputShape({shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
      .InputPrePaddings({pad_rng()})
      .InputPostPaddings({pad_rng()})
      .InputShape({shape_rng()})
.Test(xnnpack_delegate.get());
}
TEST(Pad, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto pad_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 3), std::ref(rng));
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
PadTester()
.InputPrePaddings({0, 0, 0, pad_rng()})
.InputPostPaddings({0, 0, 0, pad_rng()})
.InputShape({shape_rng(), shape_rng(), shape_rng(), shape_rng()})
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/pad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/pad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4851473d-8a48-4b60-bc60-b61715739037 | cpp | tensorflow/tensorflow | stablehlo_pad | tensorflow/lite/kernels/stablehlo_pad.cc | tensorflow/lite/kernels/stablehlo_pad_test.cc | #include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <numeric>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_pad {
namespace {
static constexpr int kMaxDims = TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT;
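// Fills `buffer` with repeated copies of the `data` pattern. After seeding
// the first copy, each memcpy duplicates the already-filled prefix, doubling
// it on every iteration, so the fill completes in
// O(log(buffer_bytes / data_bytes)) copies instead of one per repetition.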
void FillBuffer(char* buffer, int64_t buffer_bytes, const char* data,
int64_t data_bytes) {
if (buffer_bytes == 0) {
return;
}
assert(buffer_bytes % data_bytes == 0);
std::memcpy(buffer, data, data_bytes);
buffer_bytes -= data_bytes;
while (buffer_bytes) {
const int64_t bytes = std::min(buffer_bytes, data_bytes);
std::memcpy(buffer + data_bytes, buffer, bytes);
buffer_bytes -= bytes;
data_bytes += bytes;
}
}
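// Recursively copies a tensor dimension by dimension using per-dimension
// strides. The output strides account for interior padding (they can be
// larger than the contiguous element size), so copied elements land in their
// dilated positions while the gaps keep the padding value.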
void StridedCopy(const int rank, const char* input, const int64_t* input_shape,
const int64_t* input_strides, char* output,
const int64_t* output_strides, const int64_t element_size,
const int depth) {
if (depth + 1 == rank) {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
std::memcpy(output, input, element_size);
input += input_strides[depth];
output += output_strides[depth];
}
} else {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
StridedCopy(rank, input, input_shape, input_strides, output,
output_strides, element_size, depth + 1);
input += input_strides[depth];
output += output_strides[depth];
}
}
}
class PadData {
public:
enum { kInput, kPaddingValue, kInputTensorCount };
enum { kOutput, kOutputTensorCount };
explicit PadData(const TfLiteStablehloPadParams& params) {
std::memcpy(
edge_pad_low_, params.edge_padding_low,
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT * sizeof(int64_t));
std::memcpy(
edge_pad_high_, params.edge_padding_high,
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT * sizeof(int64_t));
std::memcpy(
interior_pad_, params.interior_padding,
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT * sizeof(int64_t));
}
void Setup(const int* dims, const int rank, const int64_t element_size) {
rank_ = rank;
element_size_ = element_size;
input_offset_ = 0;
output_offset_ = 0;
output_size_ = 0;
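    // StableHLO pad semantics: each output dimension is the dilated input
    // extent plus the edge paddings, i.e.
    //   out[i] = (in[i] - 1) * (interior[i] + 1) + 1 + low[i] + high[i].
    // Negative edge paddings crop; if any dimension collapses to a
    // non-positive size, the output is empty.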
for (int i = 0; i < rank; ++i) {
output_shape_[i] = (dims[i] - 1) * (interior_pad_[i] + 1) + 1 +
edge_pad_low_[i] + edge_pad_high_[i];
}
if (std::any_of(output_shape_, output_shape_ + rank,
[](auto s) { return s <= 0; })) {
std::memset(input_shape_, 0, sizeof(input_shape_));
std::memset(output_shape_, 0, sizeof(output_shape_));
output_size_ = 0;
return;
}
output_dimension_sizes_[rank - 1] = element_size;
for (int i = rank - 2; i >= 0; --i) {
output_dimension_sizes_[i] =
output_shape_[i + 1] * output_dimension_sizes_[i + 1];
}
output_strides_[rank - 1] = element_size * (interior_pad_[rank - 1] + 1);
for (int i = rank - 2; i >= 0; --i) {
output_strides_[i] = output_dimension_sizes_[i] * (interior_pad_[i] + 1);
}
for (int i = 0; i < rank; ++i) {
output_offset_ +=
std::max<int64_t>(edge_pad_low_[i], 0) * output_dimension_sizes_[i];
}
output_size_ = std::accumulate(output_shape_, output_shape_ + rank,
element_size, std::multiplies<>());
input_strides_[rank - 1] = element_size;
for (int i = rank - 1; i >= 1; --i) {
input_strides_[i - 1] = dims[i] * input_strides_[i];
}
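    // Negative edge paddings remove elements. With interior padding, only
    // every (interior + 1)-th position holds a real input element, so the
    // number of dropped input elements is the padding divided by
    // (interior + 1), rounded away from zero. Non-negative paddings drop
    // nothing.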
    auto DivNegRoundAwayOrZero = [](int64_t num, int64_t denom) -> int64_t {
      assert(denom > 0);
      return num < 0 ? (num - denom + 1) / denom : 0;
    };
for (int i = 0; i < rank; ++i) {
input_shape_[i] =
dims[i] +
DivNegRoundAwayOrZero(edge_pad_low_[i], interior_pad_[i] + 1) +
DivNegRoundAwayOrZero(edge_pad_high_[i], interior_pad_[i] + 1);
}
for (int i = 0; i < rank; ++i) {
input_offset_ -=
DivNegRoundAwayOrZero(edge_pad_low_[i], interior_pad_[i] + 1) *
input_strides_[i];
if (edge_pad_low_[i] < 0) {
int64_t tmp_offset = ((interior_pad_[i] + 1 + edge_pad_low_[i]) %
(interior_pad_[i] + 1));
if (tmp_offset < 0) {
tmp_offset += interior_pad_[i] + 1;
}
output_offset_ += tmp_offset * output_dimension_sizes_[i];
}
}
}
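  // Writes the padding value across the whole output, then strided-copies the
  // (possibly cropped) input over it so each element lands in its dilated,
  // edge-padded position.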
void Apply(const char* input, const char* padding_value, char* output) const {
FillBuffer(output, output_size_, padding_value, element_size_);
    StridedCopy(rank_, input + input_offset_, input_shape_, input_strides_,
                output + output_offset_, output_strides_, element_size_,
                /*depth=*/0);
}
  TfLiteIntArray* BuildOutputTensorDims() const {
TfLiteIntArray* dims = TfLiteIntArrayCreate(rank_);
for (int64_t i = 0; i < rank_; ++i) {
dims->data[i] = output_shape_[i];
}
return dims;
}
private:
int64_t edge_pad_low_[kMaxDims];
int64_t edge_pad_high_[kMaxDims];
int64_t interior_pad_[kMaxDims];
int64_t rank_ = 0;
int64_t element_size_ = 0;
int64_t input_shape_[kMaxDims];
int64_t output_shape_[kMaxDims];
int64_t input_strides_[kMaxDims];
int64_t output_strides_[kMaxDims];
int64_t output_dimension_sizes_[kMaxDims];
int64_t input_offset_ = 0;
int64_t output_offset_ = 0;
int64_t output_size_ = 0;
};
void* Init(TfLiteContext* context, const char* options, size_t options_len) {
return new PadData(
*reinterpret_cast<const TfLiteStablehloPadParams*>(options));
}
void Free(TfLiteContext* context, void* node_data) {
delete reinterpret_cast<PadData*>(node_data);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor = GetInput(context, node, PadData::kInput);
const TfLiteTensor* padding_value_tensor =
GetInput(context, node, PadData::kPaddingValue);
TF_LITE_ENSURE(context, input_tensor->type == padding_value_tensor->type);
size_t element_size;
TF_LITE_ENSURE(context, GetSizeOfType(context, input_tensor->type,
&element_size) == kTfLiteOk);
PadData& pad_data = *reinterpret_cast<PadData*>(node->user_data);
pad_data.Setup(input_tensor->dims->data, input_tensor->dims->size,
element_size);
TfLiteTensor* output_tensor = GetOutput(context, node, PadData::kOutput);
TF_LITE_ENSURE(context, input_tensor->type == output_tensor->type);
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output_tensor,
                                          pad_data.BuildOutputTensorDims()));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor = GetInput(context, node, PadData::kInput);
const TfLiteTensor* padding_value_tensor =
GetInput(context, node, PadData::kPaddingValue);
TfLiteTensor* output_tensor = GetOutput(context, node, PadData::kOutput);
PadData& pad_data = *reinterpret_cast<PadData*>(node->user_data);
pad_data.Apply(input_tensor->data.raw_const,
padding_value_tensor->data.raw_const, output_tensor->data.raw);
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_STABLEHLO_PAD() {
static TfLiteRegistration r = {stablehlo_pad::Init,
stablehlo_pad::Free,
stablehlo_pad::Prepare,
stablehlo_pad::Eval};
return &r;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_pad {
namespace {
using testing::ElementsAre;
using testing::ElementsAreArray;
using testing::HasSubstr;
template <class T>
class StablehloPadModel : public SingleOpModel {
public:
static constexpr TensorType kTensorType = GetTensorType<T>();
void SetEdgePadding(std::vector<int64_t> low, std::vector<int64_t> high) {
edge_padding_low_ = std::move(low);
edge_padding_high_ = std::move(high);
}
const std::vector<int64_t>& GetEdgePaddingLow() const {
return edge_padding_low_;
}
const std::vector<int64_t>& GetEdgePaddingHigh() const {
return edge_padding_high_;
}
void SetInteriorPadding(std::vector<int64_t> padding) {
interior_padding_ = std::move(padding);
}
const std::vector<int64_t>& GetInteriorPadding() const {
return interior_padding_;
}
void SetInput(std::vector<int64_t> shape) {
input_.shape = shape;
input_.data.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_iota(input_.data, static_cast<T>(1));
}
void SetInput(std::vector<int64_t> shape, std::vector<T> data) {
input_.shape = shape;
input_.data = data;
}
void SetInput(absl::Span<const int64_t> shape, absl::BitGenRef bitgen, T min,
T max) {
input_.shape.assign(shape.begin(), shape.end());
input_.data.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_generate(input_.data, [&] {
return absl::Uniform(absl::IntervalClosed, bitgen, min, max);
});
}
const reduce_window::reference::Tensor<T>& GetInput() const { return input_; }
void SetPaddingValue(const T& v) { padding_value_ = v; }
T GetPaddingValue() const { return padding_value_; }
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_tensor_id_),
GetTensorSize(output_tensor_id_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape =
*(interpreter_->tensor(output_tensor_id_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
absl::Status CheckPreconditions() {
const size_t rank = input_.shape.size();
if (rank == 0) {
return absl::FailedPreconditionError("Input rank is 0.");
}
if (edge_padding_low_.empty()) {
edge_padding_low_ = std::vector<int64_t>(rank, 0);
} else if (edge_padding_low_.size() != rank) {
return absl::FailedPreconditionError(
"Low edge padding does not have the right size.");
}
if (edge_padding_high_.empty()) {
edge_padding_high_ = std::vector<int64_t>(rank, 0);
} else if (edge_padding_high_.size() != rank) {
return absl::FailedPreconditionError(
"High edge padding does not have the right size.");
}
if (interior_padding_.empty()) {
interior_padding_ = std::vector<int64_t>(rank, 0);
} else if (interior_padding_.size() != rank) {
return absl::FailedPreconditionError(
"Interior padding does not have the right size.");
}
return absl::OkStatus();
}
absl::Status Build() {
if (absl::Status status = CheckPreconditions(); !status.ok()) {
return status;
}
input_tensor_id_ =
AddInput({kTensorType,
std::vector<int>(input_.shape.begin(), input_.shape.end())});
padding_value_tensor_id_ =
AddConstInput(kTensorType, {padding_value_}, {1});
output_tensor_id_ = AddOutput(kTensorType);
SetBuiltinOp(BuiltinOperator_STABLEHLO_PAD,
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_, builder_.CreateVector(edge_padding_low_),
builder_.CreateVector(edge_padding_high_),
builder_.CreateVector(interior_padding_))
.Union());
    BuildInterpreter(
        {std::vector<int>(input_.shape.begin(), input_.shape.end())},
        /*num_threads=*/-1, /*allow_fp32_relax_to_fp16=*/false,
        /*apply_delegate=*/true, /*allocate_and_delegate=*/false,
        /*use_simple_allocator=*/false);
AllocateAndDelegate(true);
PopulateTensor(input_tensor_id_, input_.data);
return absl::OkStatus();
}
absl::Status BuildAndInvoke() {
if (absl::Status status = Build(); !status.ok()) {
return status;
}
if (TfLiteStatus status = Invoke(); status != kTfLiteOk) {
const std::string msg =
absl::StrFormat("Invoke failed with status %d.", status);
return absl::InternalError(msg);
}
return absl::OkStatus();
}
friend std::ostream& operator<<(std::ostream& os,
const StablehloPadModel& model) {
auto print_vec = [&os](const auto& vec) {
os << "[";
if (!vec.empty()) {
auto it = vec.begin();
os << +*(it++);
for (; it != vec.end(); ++it) {
os << ", " << +*it;
}
}
os << "]";
};
os << " edge_padding_low: ";
print_vec(model.GetEdgePaddingLow());
os << "\n edge_padding_high: ";
print_vec(model.GetEdgePaddingHigh());
os << "\n interior_padding: ";
print_vec(model.GetInteriorPadding());
os << "\n padding_value: " << +model.GetPaddingValue();
os << "\n input shape: ";
print_vec(model.GetInput().shape);
return os;
}
private:
std::vector<int64_t> edge_padding_low_;
std::vector<int64_t> edge_padding_high_;
std::vector<int64_t> interior_padding_;
reduce_window::reference::Tensor<T> input_;
T padding_value_ = 0;
int input_tensor_id_;
int padding_value_tensor_id_;
int output_tensor_id_;
};
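// Computes the expected result by composing the reduce_window reference
// helpers: interior padding is modeled as a dilation, and the same
// edge-padding vector is then given to both Pad and Crop, which are expected
// to apply only the positive and negative entries respectively.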
template <class T>
absl::StatusOr<reduce_window::reference::Tensor<T>> ComputeReference(
StablehloPadModel<T>& model) {
if (absl::Status status = model.CheckPreconditions(); !status.ok()) {
return status;
}
std::vector<int64_t> dilations, padding;
for (size_t i = 0; i < model.GetInput().shape.size(); ++i) {
padding.push_back(model.GetEdgePaddingLow()[i]);
padding.push_back(model.GetEdgePaddingHigh()[i]);
dilations.push_back(model.GetInteriorPadding()[i] + 1);
}
auto dilated_tensor = reduce_window::reference::Dilate(
model.GetInput(), dilations, model.GetPaddingValue());
auto padded_tensor = reduce_window::reference::Pad(dilated_tensor, padding,
model.GetPaddingValue());
return reduce_window::reference::Crop(padded_tensor, padding);
}
TEST(StablehloPadModelTest, DefaultModelFails) {
StablehloPadModel<int> model;
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("Input rank is 0."));
}
TEST(StablehloPadModelTest, DefaultModelReturnsIdentity) {
StablehloPadModel<int> model;
model.SetInput({3, 1});
EXPECT_THAT(model.GetInput().shape, ElementsAre(3, 1));
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
EXPECT_THAT(expected_status.value().data,
ElementsAreArray(model.GetInput().data));
}
TEST(StablehloPadModelTest, WrongEdgePaddingSizeIsAnError) {
StablehloPadModel<int> model;
model.SetInput({3, 1});
model.SetEdgePadding({3, 4, 5}, {6, 7});
{
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("Low edge padding does not have the right size."));
}
model.SetEdgePadding({3, 4}, {5, 6, 7});
{
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("High edge padding does not have the right size."));
}
}
TEST(StablehloPadModelTest, WrongInteriorPaddingSizeIsAnError) {
StablehloPadModel<int> model;
model.SetInput({3, 1});
model.SetInteriorPadding({3, 4, 5});
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("Interior padding does not have the right size."));
}
TEST(StablehloPadTest, IdentityParams) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(model.GetInput().shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(model.GetInput().data));
}
TEST(StablehloPadTest, InteriorPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetInteriorPadding({1, 2});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, LowPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({1, 1}, {0, 0});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, HighPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({0, 0}, {1, 1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, AllPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({1, 1}, {1, 1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, LowCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -1}, {0, 0});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, HighCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({0, 0}, {-1, -1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, AllCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -1}, {-1, -1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, PadCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({1, -1}, {1, -1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, InteriorEdgePadding) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -4}, {0, 0});
model.SetInteriorPadding({1, 2});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, CallPrepareTwiceDoesNotFail) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -4}, {0, 0});
model.SetInteriorPadding({1, 2});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
model.SetApplyDefaultDelegates();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
template <class T>
std::vector<T> RandomVector(absl::BitGen& bitgen, size_t size, T min, T max) {
std::vector<T> vec(size);
for (T& v : vec) {
v = absl::Uniform(absl::IntervalClosed, bitgen, min, max);
}
return vec;
}
template <class T>
class StablehloPadFuzzyTest : public testing::Test {};
using TestList =
testing::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, float, double>;
TYPED_TEST_SUITE(StablehloPadFuzzyTest, TestList);
TYPED_TEST(StablehloPadFuzzyTest, FuzzyTest) {
absl::BitGen bitgen;
for (size_t iteration = 0; iteration < 10000; ++iteration) {
const int rank = absl::Uniform(absl::IntervalClosed, bitgen, 1, 2);
StablehloPadModel<TypeParam> model;
    model.SetInput(/*shape=*/RandomVector<int64_t>(bitgen, rank, 1, 3), bitgen,
                   /*min=*/-5, /*max=*/5);
    model.SetInteriorPadding(RandomVector<int64_t>(bitgen, rank, 0, 2));
    model.SetEdgePadding(/*low=*/RandomVector<int64_t>(bitgen, rank, -5, 5),
                         /*high=*/RandomVector<int64_t>(bitgen, rank, -5, 5));
model.SetPaddingValue(
absl::Uniform(absl::IntervalClosed, bitgen, -127, 127));
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape))
<< model;
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data))
<< model;
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_pad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_pad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f9ac04a7-075e-4cf7-8f9c-30f4b82ef78e | cpp | tensorflow/tensorflow | stablehlo_min_max | tensorflow/lite/kernels/stablehlo_min_max.cc | tensorflow/lite/kernels/stablehlo_min_max_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
TfLiteRegistration* Register_STABLEHLO_MAXIMUM() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kMax>};
return &r;
}
TfLiteRegistration* Register_STABLEHLO_MINIMUM() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kMin>};
return &r;
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::Pointwise;
enum class ModelType { kMax, kMin };
class MinMaxOpModel : public SingleOpModel {
public:
MinMaxOpModel(ModelType model_type, const TensorData& input1,
const TensorData& input2, const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
model_type_ = model_type;
switch (model_type_) {
case ModelType::kMax:
SetBuiltinOp(BuiltinOperator_STABLEHLO_MAXIMUM, BuiltinOptions_NONE, 0);
break;
case ModelType::kMin:
SetBuiltinOp(BuiltinOperator_STABLEHLO_MINIMUM, BuiltinOptions_NONE, 0);
break;
default:
ABSL_LOG(FATAL) << "Unknown model type.";
}
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
ModelType model_type_;
};
TEST(StablehloElementwise, MaxWorks) {
MinMaxOpModel model(ModelType::kMax, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {1.2, 2.5, -1.2, 1});
model.PopulateTensor<float>(model.input2(), {0.1, 3, 2, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(), {1.2, 3.0, 2.0, 1.0}));
}
TEST(StablehloElementwise, MinWorks) {
MinMaxOpModel model(ModelType::kMin, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {1.2, 2.5, -1.2, 1});
model.PopulateTensor<float>(model.input2(), {0.1, 3, 2, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(), {0.1, 2.5, -1.2, 0.5}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_min_max.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_min_max_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e0114625-d98e-428a-bcf2-b720b35e5a92 | cpp | tensorflow/tensorflow | stablehlo_composite | tensorflow/lite/kernels/stablehlo_composite.cc | tensorflow/lite/kernels/stablehlo_composite_test.cc | #include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/control_flow_common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_composite {
struct State {
int32_t subgraph_index;
bool subgraph_has_dynamic_output_tensors = false;
};
void* Init(TfLiteContext* context, const char* options, size_t options_len) {
auto data = std::make_unique<State>();
const TfLiteStablehloCompositeParams* params =
reinterpret_cast<const TfLiteStablehloCompositeParams*>(options);
data->subgraph_index = params->subgraph_index;
return data.release();
}
void Free(TfLiteContext* context, void* node_data) {
delete static_cast<State*>(node_data);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
State* op_state = reinterpret_cast<State*>(node->user_data);
TF_LITE_ENSURE(context, node->inputs->size > 0);
const int num_inputs = node->inputs->size;
const int num_outputs = node->outputs->size;
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
const auto* subgraphs = this_subgraph->GetSubgraphs();
TF_LITE_ENSURE(context, op_state->subgraph_index < subgraphs->size());
Subgraph* decomposition_subgraph =
(*subgraphs)[op_state->subgraph_index].get();
TF_LITE_ENSURE_EQ(context, num_inputs,
decomposition_subgraph->inputs().size());
TF_LITE_ENSURE_EQ(context, num_outputs,
decomposition_subgraph->outputs().size());
decomposition_subgraph->RemoveUnusedInputs();
std::vector<int> node_inputs(node->inputs->data,
node->inputs->data + num_inputs);
TF_LITE_ENSURE_OK(context,
CopyTensorsShapeAndType(context, this_subgraph, node_inputs,
decomposition_subgraph,
decomposition_subgraph->inputs(),
                                            /*resize_subgraph_inputs=*/true));
for (int i = 0; i < num_inputs; ++i) {
int input_idx = decomposition_subgraph->inputs()[i];
if (input_idx == kTfLiteOptionalTensor) {
continue;
}
TfLiteTensor* subgraph_input = decomposition_subgraph->tensor(input_idx);
if (!IsResourceOrVariant(subgraph_input)) {
subgraph_input->allocation_type = kTfLiteCustom;
}
}
TF_LITE_ENSURE_OK(context, decomposition_subgraph->AllocateTensors());
op_state->subgraph_has_dynamic_output_tensors |=
decomposition_subgraph->HasDynamicTensors();
for (int i = 0; i < num_outputs; ++i) {
if (node->outputs->data[i] == kTfLiteOptionalTensor) {
continue;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
if (op_state->subgraph_has_dynamic_output_tensors) {
SetTensorToDynamic(output);
} else {
TfLiteTensor* subgraph_output =
decomposition_subgraph->tensor(decomposition_subgraph->outputs()[i]);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(subgraph_output->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
}
}
return kTfLiteOk;
}
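// Dynamic path: the decomposition subgraph's output shapes are only known
// after Invoke, so inputs are copied (or aliased) in, the subgraph runs, and
// the outputs are deep-copied back into this subgraph's tensors.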
TfLiteStatus Eval_dynamic(TfLiteContext* context, TfLiteNode* node,
Subgraph* this_subgraph,
Subgraph* decomposition_subgraph) {
TF_LITE_ENSURE_OK(context, decomposition_subgraph->AllocateTensors());
const int num_inputs = node->inputs->size;
const int num_outputs = node->outputs->size;
const int* const start = node->inputs->data;
std::vector<int> node_inputs(start, start + num_inputs);
TF_LITE_ENSURE_OK(
context, DeepOrShallowCopyTensorsShapeTypeData(
context, node, this_subgraph, node_inputs,
decomposition_subgraph, decomposition_subgraph->inputs()));
TF_LITE_ENSURE_OK(context, decomposition_subgraph->Invoke());
for (int tensor_index : decomposition_subgraph->outputs()) {
decomposition_subgraph->EnsureTensorDataIsReadable(tensor_index);
}
TF_LITE_ENSURE_OK(context,
DeepCopyTensorsShapeTypeData(
context, node, decomposition_subgraph,
decomposition_subgraph->outputs(), this_subgraph,
TfLiteIntArrayView(node->outputs), true));
for (int i = 0; i < num_outputs; ++i) {
const int input_pos = OutputIsInput(decomposition_subgraph->outputs()[i],
decomposition_subgraph->inputs());
if (input_pos != -1) {
TfLiteTensor* this_input =
this_subgraph->tensor(node->inputs->data[input_pos]);
TfLiteTensor* this_output = this_subgraph->tensor(node->outputs->data[i]);
TfLiteTensorCopy(this_input, this_output);
}
}
return kTfLiteOk;
}
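// Static path: output shapes were fixed during Prepare, so the decomposition
// subgraph's outputs can be pointed directly at this subgraph's output
// buffers (kTfLiteCustom allocation) and the final copy is avoided.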
TfLiteStatus Eval_static(TfLiteContext* context, TfLiteNode* node,
Subgraph* this_subgraph,
Subgraph* decomposition_subgraph) {
const int num_inputs = node->inputs->size;
const int num_outputs = node->outputs->size;
const int* const start = node->inputs->data;
std::vector<int> node_inputs(start, start + num_inputs);
for (int i = 0; i < num_outputs; ++i) {
int output_idx = decomposition_subgraph->outputs()[i];
if (output_idx == kTfLiteOptionalTensor) continue;
TfLiteTensor* subgraph_output = decomposition_subgraph->tensor(output_idx);
if (!IsResourceOrVariant(subgraph_output) &&
!IsConstantTensor(subgraph_output)) {
subgraph_output->allocation_type = kTfLiteCustom;
}
}
TF_LITE_ENSURE_OK(
context, DeepOrShallowCopyTensorsShapeTypeData(
context, node, this_subgraph, node_inputs,
decomposition_subgraph, decomposition_subgraph->inputs()));
TF_LITE_ENSURE_OK(
context,
CopyTensorsShapeAndType(context, decomposition_subgraph,
decomposition_subgraph->outputs(), this_subgraph,
TfLiteIntArrayView(node->outputs), false));
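  // Resolve buffer ownership per output: optional outputs and outputs that
  // alias a subgraph input are materialized by copying the corresponding
  // input; constant outputs are copied out; everything else shares this
  // subgraph's output buffer directly.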
for (int i = 0; i < num_outputs; ++i) {
TfLiteTensor* this_output = this_subgraph->tensor(node->outputs->data[i]);
TfLiteTensor* subgraph_output =
decomposition_subgraph->tensor(decomposition_subgraph->outputs()[i]);
if (decomposition_subgraph->outputs()[i] == kTfLiteOptionalTensor) {
TfLiteTensor* this_input = this_subgraph->tensor(node->inputs->data[i]);
TfLiteTensorResizeMaybeCopy(this_input->bytes, this_output, false);
TfLiteTensorCopy(this_input, this_output);
} else {
const int input_pos = OutputIsInput(decomposition_subgraph->outputs()[i],
decomposition_subgraph->inputs());
if (input_pos != -1) {
TfLiteTensor* this_input =
this_subgraph->tensor(node->inputs->data[input_pos]);
TfLiteTensorResizeMaybeCopy(this_input->bytes, this_output, false);
TfLiteTensorCopy(this_input, this_output);
} else if (IsConstantTensor(subgraph_output)) {
TfLiteTensorCopy(subgraph_output, this_output);
} else {
subgraph_output->data = this_output->data;
}
}
}
TF_LITE_ENSURE_OK(context, decomposition_subgraph->Invoke());
for (int tensor_index : decomposition_subgraph->outputs()) {
decomposition_subgraph->EnsureTensorDataIsReadable(tensor_index);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
State* op_state = reinterpret_cast<State*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto* subgraphs = this_subgraph->GetSubgraphs();
Subgraph* decomposition_subgraph =
(*subgraphs)[op_state->subgraph_index].get();
if (op_state->subgraph_has_dynamic_output_tensors) {
TF_LITE_ENSURE_OK(context, Eval_dynamic(context, node, this_subgraph,
decomposition_subgraph));
} else {
TF_LITE_ENSURE_OK(context, Eval_static(context, node, this_subgraph,
decomposition_subgraph));
}
if (!this_subgraph->ShouldPreserveAllTensors()) {
TF_LITE_ENSURE_OK(context, decomposition_subgraph->ReleaseMemory());
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_STABLEHLO_COMPOSITE() {
static TfLiteRegistration r = {stablehlo_composite::Init,
stablehlo_composite::Free,
stablehlo_composite::Prepare,
stablehlo_composite::Eval};
return &r;
}
}
}
} | #include <cstddef>
#include <memory>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
using testing::ElementsAreArray;
using testing::FloatEq;
using testing::Pointwise;
namespace tflite {
namespace {
class CompositeTest : public subgraph_test_util::ControlFlowOpTest {
protected:
template <class IndirectionVector>
TfLiteTensor* GetTensorWithIndirection(int id,
const IndirectionVector& tensor_map) {
return interpreter_->tensor(tensor_map[id]);
}
TfLiteTensor* GetInputTensor(int id) {
return GetTensorWithIndirection(id, interpreter_->inputs());
}
TfLiteTensor* GetOutputTensor(int id) {
return GetTensorWithIndirection(id, interpreter_->outputs());
}
template <class T, class IndirectionVector>
absl::Span<T> GetTensorDataWithIndirection(
int id, const IndirectionVector& tensor_map) {
TfLiteTensor* const tensor = GetTensorWithIndirection(id, tensor_map);
const size_t size = NumElements(tensor);
return absl::Span<T>(GetTensorData<T>(tensor), size);
}
template <class T>
absl::Span<T> GetInputData(int id) {
return GetTensorDataWithIndirection<T>(id, interpreter_->inputs());
}
template <class T>
absl::Span<T> GetOutputData(int id) {
return GetTensorDataWithIndirection<T>(id, interpreter_->outputs());
}
};
TEST_F(CompositeTest, TestInvokeWorks) {
AddSubgraphs(1);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildCompositeSubgraph(&interpreter_->primary_subgraph(),
interpreter_->subgraph(1));
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2, 3});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(GetInputTensor(0), {1, 2, 3, 4, 5, 6});
subgraph_test_util::FillIntTensor(GetInputTensor(1), {7, 8, 9, 10, 11, 12});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
const TfLiteTensor* const output = GetOutputTensor(0);
ASSERT_THAT(output, DimsAre({2, 3}));
EXPECT_THAT(GetOutputData<int>(0), ElementsAreArray({8, 10, 12, 14, 16, 18}));
}
TEST_F(CompositeTest, TestXNNPACKDelegation) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(1);
builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(1));
builder_->BuildCompositeSubgraph(&interpreter_->primary_subgraph(),
interpreter_->subgraph(1));
const auto opt = TfLiteXNNPackDelegateOptionsDefault();
TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&opt);
interpreter_->primary_subgraph().MarkAsDelegationSkippable();
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(xnnpack_delegate), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
absl::Span<float> input0 = GetInputData<float>(0);
std::iota(input0.begin(), input0.end(), 1.0f);
absl::Span<float> input1 = GetInputData<float>(1);
std::iota(input1.begin(), input1.end(), 7.0f);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
const std::vector<float> expected_values = {16, 20, 24, 28, 32, 36};
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
const absl::Span<float> output0_data(GetTensorData<float>(output0), 6);
ASSERT_THAT(output0, DimsAre({2, 3}));
EXPECT_THAT(output0_data, Pointwise(FloatEq(), expected_values));
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
const absl::Span<float> output1_data(GetTensorData<float>(output1), 6);
ASSERT_THAT(output1, DimsAre({2, 3}));
EXPECT_THAT(output1_data, Pointwise(FloatEq(), expected_values));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteXNNPackDelegateDelete(xnnpack_delegate);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_composite.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_composite_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0f7e370-4018-4e23-a4da-4d004db961cd | cpp | tensorflow/tensorflow | mul | tensorflow/lite/delegates/gpu/gl/kernels/mul.cc | tensorflow/lite/delegates/xnnpack/mul_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/mul.h"
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
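// Picks the read coordinate of the second runtime tensor along `dim` to
// implement broadcasting: "0" when the second tensor's extent is 1 (the
// value is broadcast) and the regular gid coordinate when the extents match.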
absl::Status GetCoordinate(const NodeShader::GenerationContext& ctx, int dim,
const std::string& default_coord,
std::string* coord) {
std::string result;
if (ctx.input_shapes[1][dim] == 1 && ctx.input_shapes[0][dim] != 1) {
result = "0";
} else if (ctx.input_shapes[0][dim] == ctx.input_shapes[1][dim]) {
result = default_coord;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Second runtime tensor dimension ", dim,
" must either match "
"first tensor's dimensions or be 1."));
}
*coord = result;
return absl::OkStatus();
}
absl::Status GenerateMultiplyRuntimeTensorCode(
const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
std::string x_coord, y_coord, z_coord;
RETURN_IF_ERROR(
GetCoordinate(ctx, 2, "gid.x", &x_coord));
RETURN_IF_ERROR(
GetCoordinate(ctx, 1, "gid.y", &y_coord));
RETURN_IF_ERROR(
GetCoordinate(ctx, 3, "gid.z", &z_coord));
std::string source =
absl::StrCat("vec4 input1_value = $input_data_1[", x_coord, ", ", y_coord,
", ", z_coord, "]$;");
if (ctx.input_shapes[1][3] == 1 && ctx.input_shapes[0][3] != 1) {
absl::StrAppend(
&source,
"\ninput1_value = vec4(input1_value.x, input1_value.x, input1_value.x, "
"input1_value.x);\n");
}
absl::StrAppend(
&source, "value_0 = $input_data_0[gid.x, gid.y, gid.z]$ * input1_value;");
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
absl::Status GenerateMultiplyConstantTensorCode(
const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
const auto& attr = std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
if (std::holds_alternative<float>(attr.param)) {
*generated_code = {
{{"scalar", std::get<float>(attr.param)}},
{},
{},
uint3(),
uint3(),
"value_0 *= $scalar$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(attr.param)) {
*generated_code = {
{},
{{"mul_buffer",
MakeReadonlyObject(
std::get<Tensor<Linear, DataType::FLOAT32>>(attr.param).data)}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
"value_0 *= $mul_buffer[gid.z]$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (std::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(attr.param)) {
std::string source;
if (ctx.input_shapes[0][1] == 1 && ctx.input_shapes[0][2] == 1 &&
ctx.input_shapes[0][3] == 1) {
source = R"(
value_0 = $input_data_0[0, 0, 0]$;
value_0 = vec4(value_0.x, value_0.x, value_0.x, value_0.x);
)";
}
auto param_shape =
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param).shape;
if (param_shape.c == 1) {
if (param_shape.h == 1 && param_shape.w == 1) {
absl::StrAppend(&source, "vec4 const_val = $hwc_buffer[0, 0, 0]$;");
} else {
absl::StrAppend(&source,
"vec4 const_val = $hwc_buffer[gid.x, gid.y, 0]$;");
}
absl::StrAppend(&source,
"const_val = vec4(const_val.x, const_val.x, const_val.x, "
"const_val.x);");
} else {
source += "vec4 const_val = $hwc_buffer[gid.x, gid.y, gid.z]$;";
}
absl::StrAppend(&source, "value_0 *= const_val;");
*generated_code = {
{},
{{"hwc_buffer",
MakeReadonlyObject(
uint3(param_shape.w, param_shape.h,
DivideRoundUp(param_shape.c, 4)),
ConvertToPHWC4(
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param)))}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
std::move(source),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
return absl::InvalidArgumentError("Unsupported Multiplication case.");
}
class Multiply : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() == 2) {
return GenerateMultiplyRuntimeTensorCode(ctx, generated_code);
} else {
return GenerateMultiplyConstantTensorCode(ctx, generated_code);
}
}
};
}
std::unique_ptr<NodeShader> NewMultiplyNodeShader() {
return std::make_unique<Multiply>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Mul, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
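// The weight-compression tests check that a static MUL operand stored in a
// compressed encoding (half precision, signed 8-bit, per-channel signed
// 8-bit, or sparse) is expanded back to float32 before the multiply is
// delegated to XNNPACK.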
TEST(Mul, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
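// Fused-activation coverage. The TANH and SIGN_BIT variants are DISABLED_,
// presumably because the XNNPACK delegate does not support these activations
// fused into MUL.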
TEST(Mul, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/mul.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/mul_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2091f3ec-d32a-44a7-96a0-c83bf61ad3d7 | cpp | tensorflow/tensorflow | dequantize | tensorflow/lite/toco/graph_transformations/dequantize.cc | tensorflow/lite/kernels/dequantize_test.cc | #include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
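// Rewrites a quantized constant buffer in place as float32 using the affine
// mapping real = scale * (quantized - zero_point). For example, with
// scale = 0.5 and zero_point = 127, the uint8 value 255 dequantizes to
// 0.5 * (255 - 127) = 64.0.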
template <ArrayDataType A>
void DequantizeBuffer(Array* array) {
const auto old_data = array->GetBuffer<A>().data;
array->buffer = nullptr;
array->data_type = ArrayDataType::kFloat;
auto& new_data = array->GetMutableBuffer<ArrayDataType::kFloat>().data;
new_data.resize(old_data.size());
const auto& qparams = array->GetQuantizationParams();
for (int i = 0, end = old_data.size(); i < end; i++) {
new_data[i] = qparams.scale * (old_data[i] - qparams.zero_point);
}
}
std::vector<std::unique_ptr<Operator>>::iterator FindFirstOpWithInput(
Model* model, const std::string& array_name) {
for (auto it = model->operators.begin(); it != model->operators.end(); ++it) {
for (const auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model->operators.end();
}
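// Drops an array's quantization parameters. If the array is a model input,
// the parameters are first mirrored into the input flags as
// std_value = 1 / scale and mean_value = zero_point (or checked, within a
// small tolerance, against values already set there).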
void ClearArrayQuantizationParams(const std::string& array_name, Model* model) {
auto* array = &model->GetArray(array_name);
CHECK(array->quantization_params);
for (auto& input_array : *model->flags.mutable_input_arrays()) {
if (input_array.name() == array_name) {
auto& qparams = *array->quantization_params;
const double new_std_value = 1. / qparams.scale;
const double new_mean_value = qparams.zero_point;
if (input_array.has_std_value()) {
CHECK_LE(std::abs(new_std_value - input_array.std_value()), 0.001);
} else {
input_array.set_std_value(new_std_value);
}
if (input_array.has_mean_value()) {
CHECK_LE(std::abs(new_mean_value - input_array.mean_value()), 0.001);
} else {
input_array.set_mean_value(new_mean_value);
}
}
}
array->quantization_params = nullptr;
}
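// Converts a single quantized array back to float. Constant buffers are
// dequantized in place; other arrays just have their element type switched.
// A non-constant array that still carries min/max information then gets a
// FakeQuant op inserted to preserve its range: before the array for graph
// outputs and RNN back-edge sources, after it otherwise.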
bool DequantizeArray(const std::string& array_name,
GraphTransformation* transformation, Model* model) {
auto* array = &model->GetArray(array_name);
if (!array->quantization_params) {
return false;
}
transformation->AddMessageF("Dequantizing array: %s", array_name);
if (array->buffer) {
if (array->data_type == ArrayDataType::kUint8) {
DequantizeBuffer<ArrayDataType::kUint8>(array);
} else if (array->data_type == ArrayDataType::kInt32) {
DequantizeBuffer<ArrayDataType::kInt32>(array);
} else {
LOG(FATAL) << "Unhandled data type";
}
CHECK(array->data_type == ArrayDataType::kFloat);
CHECK(array->buffer->type == ArrayDataType::kFloat);
ClearArrayQuantizationParams(array_name, model);
return true;
} else {
array->data_type = ArrayDataType::kFloat;
}
ClearArrayQuantizationParams(array_name, model);
if (array->buffer) {
return true;
}
auto* op_outputting_array = GetOpWithOutput(*model, array_name);
if (op_outputting_array) {
if (op_outputting_array->type == OperatorType::kReshape) {
return true;
}
}
if (!array->minmax) {
return true;
}
bool must_insert_fakequant_before = false;
bool must_insert_fakequant_after = false;
if (IsInputArray(*model, array_name)) {
must_insert_fakequant_after = true;
}
for (const std::string& output_array : model->flags.output_arrays()) {
if (array_name == output_array) {
must_insert_fakequant_before = true;
}
}
for (const auto& rnn_state : model->flags.rnn_states()) {
if (array_name == rnn_state.state_array()) {
must_insert_fakequant_after = true;
}
if (array_name == rnn_state.back_edge_source_array()) {
must_insert_fakequant_before = true;
}
}
CHECK(!(must_insert_fakequant_before && must_insert_fakequant_after));
auto* fakequant_op = new FakeQuantOperator;
model->operators.emplace(FindFirstOpWithInput(model, array_name),
fakequant_op);
const std::string& new_array_name = AvailableArrayName(*model, array_name);
auto& new_array = model->GetOrCreateArray(new_array_name);
new_array.data_type = ArrayDataType::kFloat;
new_array.copy_shape(array->shape());
new_array.GetOrCreateMinMax() = array->GetMinMax();
fakequant_op->minmax = std::make_unique<MinMax>();
*fakequant_op->minmax = array->GetMinMax();
fakequant_op->narrow_range = array->narrow_range;
if (must_insert_fakequant_before) {
for (const auto& op : model->operators) {
for (std::string& output : op->outputs) {
if (output == array_name) {
output = new_array_name;
}
}
}
fakequant_op->inputs = {new_array_name};
fakequant_op->outputs = {array_name};
} else {
for (const auto& op : model->operators) {
for (std::string& input : op->inputs) {
if (input == array_name) {
input = new_array_name;
}
}
}
fakequant_op->inputs = {array_name};
fakequant_op->outputs = {new_array_name};
}
return true;
}
}
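// Pass entry point. An explicit Dequantize op whose input's final data type
// is float is removed as a trivial passthrough; for any other op, every
// non-optional input and output array is dequantized via DequantizeArray.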
::tensorflow::Status Dequantize::Run(Model* model, std::size_t op_index,
bool* modified) {
*modified = false;
const auto op_it = model->operators.begin() + op_index;
auto* op = op_it->get();
if (op->type == OperatorType::kDequantize) {
auto& input_array = model->GetArray(op->inputs[0]);
if (input_array.data_type == ArrayDataType::kFloat) {
return absl::OkStatus();
}
if (input_array.final_data_type != ArrayDataType::kFloat) {
return absl::OkStatus();
}
input_array.data_type = ArrayDataType::kFloat;
input_array.quantization_params = nullptr;
auto& output_array = model->GetArray(op->outputs[0]);
output_array.data_type = ArrayDataType::kFloat;
output_array.quantization_params = nullptr;
*modified = RemoveTrivialPassthroughOp(this, model, op_index);
return absl::OkStatus();
}
std::vector<std::string> arrays;
  arrays.reserve(op->inputs.size() + op->outputs.size());
for (const std::string& input : op->inputs) {
arrays.push_back(input);
}
for (const std::string& output : op->outputs) {
arrays.push_back(output);
}
bool changed = false;
for (const std::string& array : arrays) {
if (!model->IsOptionalArray(array)) {
changed |= DequantizeArray(array, this, model);
}
}
*modified = changed;
return absl::OkStatus();
}
} | #include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "Eigen/Core"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEQUANTIZE();
}
}
namespace {
using ::testing::ElementsAreArray;
class DequantizeOpModel : public SingleOpModel {
public:
explicit DequantizeOpModel() {}
DequantizeOpModel(TensorType type, std::initializer_list<int> shape,
float scale, int32_t zero_point, int version) {
const TensorData input_tensor_data = {type, shape, 0, 0, scale, zero_point};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
template <typename T>
  void SetInputInt4(int input, const std::vector<T>& data) {
    std::vector<int8_t> data_int8(data.begin(), data.end());
PopulateTensor4bit(input, 0, data_int8.data(),
data_int8.data() + data_int8.size());
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
TEST(DequantizeOpTest, Int4) {
DequantizeOpModel m(TensorType_INT4, {2, 2}, 0.5, -1, 6);
m.SetInputInt4<int8_t>(0, {7, 6, -7, -8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({4, 3.5, -3, -3.5})));
}
TEST(DequantizeOpTest, Uint8) {
DequantizeOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Int8) {
DequantizeOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Float16) {
DequantizeOpModel m(TensorType_FLOAT16, {2, 3}, 1.0f, 0, 3);
std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
Eigen::half{-1.0f}, Eigen::half{0.f},
Eigen::half{1.0f}, Eigen::half{100.32f}};
m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f},
0.1f)));
}
TEST(DequantizeOpTest, Int16) {
DequantizeOpModel m(TensorType_INT16, {2, 5}, 0.5, 0, 4);
m.SetInput<int16_t>({-129, -126, -125, -124, -123, 124, 125, 126, 127, 131});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5})));
}
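// Variant of DequantizeOpModel whose input is per-channel quantized: one
// (scale, zero_point) pair per slice along channel_dim instead of a single
// per-tensor pair.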
class DequantizePerChannelOpModel : public DequantizeOpModel {
public:
DequantizePerChannelOpModel(TensorType type, std::initializer_list<int> shape,
std::initializer_list<float> scales,
std::initializer_list<int64_t> zero_points,
int channel_dim, int version) {
std::vector<float> per_channel_scales(scales);
std::vector<int64_t> input_offsets(zero_points);
const TensorData input_tensor_data = {
type, shape, 0, 0, 0.0f, 0, true, per_channel_scales,
input_offsets, channel_dim};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
};
TEST(DequantizePerChannelOpTest, Uint8) {
DequantizePerChannelOpModel m(TensorType_UINT8, {2, 5}, {0.5, 0.5},
{127, 127}, 0, 5);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizePerChannelOpTest, Int8) {
DequantizePerChannelOpModel m(TensorType_INT8, {2, 5}, {0.5, 0.5}, {-1, -1},
0, 5);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/dequantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dequantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
586bc274-4a10-4d17-bc26-e31c7ec062a0 | cpp | tensorflow/tensorflow | resize_nearest_neighbor | tensorflow/lite/kernels/resize_nearest_neighbor.cc | tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc | #include "tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace resize_nearest_neighbor {
enum KernelType {
kReference,
kGenericOptimized,
kNeonOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kSizeTensor = 1;
constexpr int kOutputTensor = 0;
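// The output keeps the input's batch and channel dimensions; the 1-D size
// tensor supplies the new [height, width].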
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* size,
TfLiteTensor* output) {
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
const int32* size_data = GetTensorData<int32>(size);
output_size->data[1] = size_data[0];
output_size->data[2] = size_data[1];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_TYPES_EQ(context, size->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2);
output->type = input->type;
if (!IsConstantOrPersistentTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, input, size, output);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteResizeNearestNeighborParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, input, size, output));
}
tflite::ResizeNearestNeighborParams op_params;
op_params.align_corners = params->align_corners;
op_params.half_pixel_centers = params->half_pixel_centers;
if (output->type == kTfLiteFloat32) {
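    // Resizing only moves elements around, so the float32 tensor can be
    // processed as int32 data (both are 4 bytes wide); this reuses the int32
    // kernel instantiation instead of adding a float specialization.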
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int32>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int32>(output));
} else if (output->type == kTfLiteUInt8) {
if (kernel_type == kReference) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
optimized_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
} else if (output->type == kTfLiteInt8) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else if (output->type == kTfLiteInt16) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int16_t>(output));
} else {
TF_LITE_KERNEL_LOG(
context, "Output type is %s, requires float, uint8, int8 or int16.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_nearest_neighbor::Prepare,
resize_nearest_neighbor::Eval<resize_nearest_neighbor::kReference>};
return &r;
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_nearest_neighbor::Prepare,
resize_nearest_neighbor::Eval<
resize_nearest_neighbor::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR_NEON_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_nearest_neighbor::Prepare,
resize_nearest_neighbor::Eval<resize_nearest_neighbor::kNeonOptimized>};
return &r;
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR() {
#ifdef USE_NEON
return Register_RESIZE_NEAREST_NEIGHBOR_NEON_OPT();
#else
return Register_RESIZE_NEAREST_NEIGHBOR_GENERIC_OPT();
#endif
}
}
}
} | #include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace {
template <typename T>
void TestReferenceResizeNearestNeighbor(
const RuntimeShape& input_shape, const std::vector<T>& input_data,
const std::vector<int32_t>& output_size_data,
const RuntimeShape& output_shape,
const std::vector<T>& expected_output_data, bool align_corners = false,
bool half_pixel_centers = false) {
ResizeNearestNeighborParams op_params{align_corners, half_pixel_centers};
RuntimeShape output_size_shape({1, 1, 1, 2});
std::vector<T> output_data(expected_output_data.size());
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(expected_output_data, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To1x1) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {1, 1};
RuntimeShape output_shape = {1, 1, 1, 1};
std::vector<float> output_data = {1};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To1x1_AlignCorners) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {1, 1};
RuntimeShape output_shape = {1, 1, 1, 1};
std::vector<float> output_data = {1};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test2x2To1x1_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {1, 1};
RuntimeShape output_shape = {1, 1, 1, 1};
std::vector<float> output_data = {4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8_t> output_data = {1, 1, 2, 1, 1, 2, 3, 3, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3Int16) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<int16_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<int16_t> output_data = {1, 1, 2, 1, 1, 2, 3, 3, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3_AlignCorners) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8_t> output_data = {1, 2, 2, 3, 4, 4, 3, 4, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8_t> output_data = {1, 2, 2, 3, 4, 4, 3, 4, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test3x3To2x2) {
RuntimeShape input_shape = {1, 3, 3, 1};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int32_t> output_size_data = {2, 2};
RuntimeShape output_shape = {1, 2, 2, 1};
std::vector<float> output_data = {1, 2, 4, 5};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test3x3To2x2_AlignCorners) {
RuntimeShape input_shape = {1, 3, 3, 1};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int32_t> output_size_data = {2, 2};
RuntimeShape output_shape = {1, 2, 2, 1};
std::vector<float> output_data = {1, 3, 7, 9};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test3x3To2x2_HalfPixelCenters) {
RuntimeShape input_shape = {1, 3, 3, 1};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int32_t> output_size_data = {2, 2};
RuntimeShape output_shape = {1, 2, 2, 1};
std::vector<float> output_data = {1, 3, 7, 9};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To2x5) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {2, 5};
RuntimeShape output_shape = {1, 2, 5, 1};
std::vector<uint8_t> output_data = {1, 1, 1, 2, 2, 3, 3, 3, 4, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To2x5_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {2, 5};
RuntimeShape output_shape = {1, 2, 5, 1};
std::vector<uint8_t> output_data = {1, 1, 2, 2, 2, 3, 3, 4, 4, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test4x4To3x3) {
RuntimeShape input_shape = {1, 4, 4, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8_t> output_data = {1, 2, 3, 5, 6, 7, 9, 10, 11};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test4x4To3x3_AlignCorners) {
RuntimeShape input_shape = {1, 4, 4, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8_t> output_data = {1, 3, 4, 9, 11, 12, 13, 15, 16};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test4x4To3x3_HalfPixelCenters) {
RuntimeShape input_shape = {1, 4, 4, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8_t> output_data = {1, 3, 4, 9, 11, 12, 13, 15, 16};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To5x2) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {5, 2};
RuntimeShape output_shape = {1, 5, 2, 1};
std::vector<float> output_data = {1, 2, 1, 2, 1, 2, 3, 4, 3, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To5x2_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {5, 2};
RuntimeShape output_shape = {1, 5, 2, 1};
std::vector<float> output_data = {1, 2, 1, 2, 3, 4, 3, 4, 3, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference,
Test2x2To5x2_HalfPixelCenters_AlignCorners) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {5, 2};
RuntimeShape output_shape = {1, 5, 2, 1};
std::vector<float> output_data = {2, 2, 2, 2, 4, 4, 4, 4, 4, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
true, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To4x4) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8_t> input_data = {1, 2, 3, 4};
std::vector<int32_t> output_size_data = {4, 4};
RuntimeShape output_shape = {1, 4, 4, 1};
std::vector<uint8_t> output_data = {1, 1, 2, 2, 1, 1, 2, 2,
3, 3, 4, 4, 3, 3, 4, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2x2x2To2x3x3x2) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 1, 2, 2, 3, 3, 4, 4,
5, 5, 6, 6, 7, 7, 8, 8};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2,
3, 3, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6,
5, 5, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2x2x2To2x3x3x2_AlignCorners) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {
1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 5, 6, 7, 8, 7, 8,
1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 5, 6, 7, 8, 7, 8,
};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
true, false);
}
TEST(ResizeNearestNeighborReference, Test2x2x2x2To2x3x3x2_HalfPixelCenters) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 1, 2, 2, 3, 3, 4, 4,
5, 5, 6, 6, 7, 7, 8, 8};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4,
3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6,
7, 7, 8, 8, 8, 8, 7, 7, 8, 8, 8, 8};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference,
Test2x2x2x2To2x3x3x2_HalfPixelCenters_AlignCorners) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32_t> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8,
5, 6, 7, 8, 7, 8, 1, 2, 3, 4, 3, 4,
5, 6, 7, 8, 7, 8, 5, 6, 7, 8, 7, 8};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
true, true);
}
void TestOptimizedResizeNearestNeighbor(int batch, int depth, int input_width,
int input_height, int output_width,
int output_height) {
RuntimeShape output_size_shape({1, 1, 1, 2});
RuntimeShape input_shape({batch, input_height, input_width, depth});
RuntimeShape output_shape({batch, output_height, output_width, depth});
std::vector<uint8_t> input_data(input_shape.FlatSize(), 0);
FillRandom(&input_data, static_cast<uint8_t>(0), static_cast<uint8_t>(255));
std::vector<uint8_t> reference_output_data(output_shape.FlatSize(), 0);
std::vector<uint8_t> output_data(output_shape.FlatSize(), 3);
std::vector<int32_t> output_size_data = {output_height, output_width};
  ResizeNearestNeighborParams op_params{/*align_corners=*/false,
                                        /*half_pixel_centers=*/false};
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
op_params.align_corners = true;
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
op_params.align_corners = false;
op_params.half_pixel_centers = true;
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
op_params.align_corners = true;
op_params.half_pixel_centers = true;
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
}
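// The optimized kernel computes source coordinates in Q16.16 fixed point:
// scale_int = (input_size << 16) / output_size + 1 and
// in_x = (x * scale_int) >> 16. This helper checks that, for a given shape
// combination, the fixed-point result matches the float computation
// floor(x * input_size / output_size) at every output position; shape
// combinations where they diverge are skipped by the parity test below.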
bool is_valid_scale(int input_width, int input_height, int output_width,
int output_height) {
const float height_scale_float =
static_cast<float>(input_height) / output_height;
const float width_scale_float =
static_cast<float>(input_width) / output_width;
int32_t height_scale_int = (input_height << 16) / output_height + 1;
int32_t width_scale_int = (input_width << 16) / output_width + 1;
for (int y = 0; y < output_height; ++y) {
int32_t in_y_float =
std::min(static_cast<int32_t>(std::floor(y * height_scale_float)),
input_height - 1);
int32_t in_y_int = std::min((y * height_scale_int) >> 16, input_height - 1);
if (in_y_int != in_y_float) {
return false;
}
for (int x = 0; x < output_width; ++x) {
int32_t in_x_float =
std::min(static_cast<int32_t>(std::floor(x * width_scale_float)),
input_width - 1);
int32_t in_x_int = std::min((x * width_scale_int) >> 16, input_width - 1);
if (in_x_int != in_x_float) {
return false;
}
}
}
return true;
}
TEST(ResizeNearestNeighborOptimized, TestReferenceParity) {
int invalid_count = 0;
const int kTestsToRun = 10000;
for (int i = 0; i < kTestsToRun; i++) {
const int batch = ExponentialRandomPositiveInt(0.9f, 3, 20);
const int depth = ExponentialRandomPositiveInt(0.9f, 6, 50);
const int input_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int input_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int output_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int output_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
if (is_valid_scale(input_width, input_height, output_width,
output_height)) {
TestOptimizedResizeNearestNeighbor(
batch, depth, input_width, input_height, output_width, output_height);
} else {
invalid_count++;
}
}
ASSERT_LT(static_cast<float>(invalid_count) / kTestsToRun, 0.001f);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/resize_nearest_neighbor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d7f0d8b-ca02-430c-84bd-cbb68e3a0073 | cpp | tensorflow/tensorflow | unpack | tensorflow/lite/kernels/unpack.cc | tensorflow/lite/kernels/unpack_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unpack {
namespace {
constexpr int kInputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteUnpackParams* data =
reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), data->num);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE(context, NumElements(input) > 0);
int axis = data->axis;
if (axis < 0) {
axis += NumDimensions(input);
}
TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt8 &&
input->type != kTfLiteInt16 && input->type != kTfLiteBool) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
const TfLiteIntArray* input_shape = input->dims;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(input) - 1);
int o = 0;
for (int index = 0; index < NumDimensions(input); ++index) {
if (index != axis) {
output_shape->data[o++] = input_shape->data[index];
}
}
TF_LITE_ENSURE_EQ(context, data->num, input_shape->data[axis]);
for (int i = 0; i < data->num; ++i) {
TfLiteIntArray* copied_output_shape = TfLiteIntArrayCopy(output_shape);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
TF_LITE_ENSURE_EQ(context, input->params.zero_point,
output->params.zero_point);
TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, output, copied_output_shape));
}
TfLiteIntArrayFree(output_shape);
return kTfLiteOk;
}
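// Slices the input into `output_count` tensors along `axis` with the
// reference Unpack kernel; VectorOfTensors gathers the output buffers so
// they can be passed as a single array of pointers.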
template <typename T>
void UnpackImpl(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input, int output_count, int axis) {
tflite::UnpackParams op_params;
op_params.axis = axis;
op_params.num_split = output_count;
VectorOfTensors<T> all_outputs(*context, *node->outputs);
reference_ops::Unpack<T>(op_params, GetTensorShape(input),
GetTensorData<T>(input), **all_outputs.shapes(),
all_outputs.data());
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteUnpackParams* data =
reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
switch (input->type) {
case kTfLiteFloat32: {
UnpackImpl<float>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt32: {
UnpackImpl<int32_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteUInt8: {
UnpackImpl<uint8_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt8: {
UnpackImpl<int8_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteBool: {
UnpackImpl<bool>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt16: {
UnpackImpl<int16_t>(context, node, input, data->num, data->axis);
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_UNPACK() {
static TfLiteRegistration r = {nullptr, nullptr, unpack::Prepare,
unpack::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <iostream>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <typename T>
class UnpackOpModel : public SingleOpModel {
public:
UnpackOpModel(const TensorData& input, int axis) {
if (axis < 0) {
axis += input.shape.size();
}
const int num_outputs = input.shape[axis];
input_ = AddInput(input);
for (int i = 0; i < num_outputs; ++i) {
outputs_.push_back(AddOutput(input.type));
}
SetBuiltinOp(BuiltinOperator_UNPACK, BuiltinOptions_UnpackOptions,
CreateUnpackOptions(builder_, num_outputs, axis).Union());
BuildInterpreter({GetShape(input_)});
}
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
std::vector<std::vector<T>> GetOutputDatas() {
std::vector<std::vector<T>> output_datas;
for (const int output : outputs_) {
output_datas.push_back(ExtractVector<T>(output));
}
return output_datas;
}
std::vector<std::vector<int>> GetOutputShapes() {
std::vector<std::vector<int>> output_shapes;
for (const int output : outputs_) {
output_shapes.push_back(GetTensorShape(output));
}
return output_shapes;
}
private:
int input_;
std::vector<int> outputs_;
};
template <typename T>
void Check(int axis, const std::initializer_list<int>& input_shape,
const std::initializer_list<T>& input_data,
const std::vector<std::vector<int>>& exp_output_shape,
const std::vector<std::vector<T>>& exp_output_data,
const TensorType& type = TensorType_FLOAT32) {
UnpackOpModel<T> m({type, input_shape}, axis);
m.SetInput(input_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShapes(), ElementsAreArray(exp_output_shape));
EXPECT_THAT(m.GetOutputDatas(), ElementsAreArray(exp_output_data));
}
template <typename InputType>
struct UnpackOpTest : public ::testing::Test {
using TypeToTest = InputType;
TensorType TENSOR_TYPE =
(std::is_same<InputType, int16_t>::value
? TensorType_INT16
: (std::is_same<InputType, uint8_t>::value
? TensorType_UINT8
: (std::is_same<InputType, int8_t>::value
? TensorType_INT8
: (std::is_same<InputType, int32_t>::value
? TensorType_INT32
: TensorType_FLOAT32))));
};
using TestTypes = testing::Types<float, int32_t, int8_t, uint8_t, int16_t>;
TYPED_TEST_SUITE(UnpackOpTest, TestTypes);
TYPED_TEST(UnpackOpTest, ThreeOutputs) {
Check<typename TestFixture::TypeToTest>(
0, {3, 2},
{1, 2, 3, 4, 5, 6},
{{2}, {2}, {2}},
{{1, 2}, {3, 4}, {5, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeOutputsAxisOne) {
Check<typename TestFixture::TypeToTest>(
1, {3, 2},
{1, 2, 3, 4, 5, 6},
{{3}, {3}},
{{1, 3, 5}, {2, 4, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeOutputsNegativeAxisOne) {
Check<typename TestFixture::TypeToTest>(
-1, {3, 2},
{1, 2, 3, 4, 5, 6},
{{3}, {3}},
{{1, 3, 5}, {2, 4, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, OneOutput) {
Check<typename TestFixture::TypeToTest>(
0, {1, 6},
{1, 2, 3, 4, 5, 6},
{{6}},
{{1, 2, 3, 4, 5, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeDimensionsOutputs) {
Check<typename TestFixture::TypeToTest>(
2, {2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
{{2, 2}, {2, 2}},
{{1, 3, 5, 7}, {2, 4, 6, 8}},
TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, FiveDimensionsOutputs) {
Check<typename TestFixture::TypeToTest>(
2, {2, 2, 2, 2, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{{2, 2, 2, 1}, {2, 2, 2, 1}},
{{1, 2, 5, 6, 9, 10, 13, 14}, {3, 4, 7, 8, 11, 12, 15, 16}},
TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, VectorToScalar) {
Check<typename TestFixture::TypeToTest>(
0, {5},
{1, 2, 3, 4, 5},
{{}, {}, {}, {}, {}},
{{1}, {2}, {3}, {4}, {5}}, TestFixture::TENSOR_TYPE);
}
TEST(UnpackOpTestBool, BoolThreeOutputs) {
Check<bool>(
0, {3, 2},
{true, false, true, false, true, false},
{{2}, {2}, {2}},
{{true, false}, {true, false}, {true, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsAxisOne) {
Check<bool>(
1, {3, 2},
{true, false, true, false, true, false},
{{3}, {3}},
{{true, true, true}, {false, false, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsNegativeAxisOne) {
Check<bool>(
-1, {3, 2},
{true, false, true, false, true, false},
{{3}, {3}},
{{true, true, true}, {false, false, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsNegativeAxisTwo) {
Check<bool>(
-2, {3, 2},
{true, false, true, false, true, false},
{{2}, {2}, {2}},
{{true, false}, {true, false}, {true, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolOneOutput) {
Check<bool>(
0, {1, 6},
{true, false, true, false, true, false},
{{6}},
{{true, false, true, false, true, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeDimensionsOutputs) {
Check<bool>(
2, {2, 2, 2},
{true, false, true, false, true, false, true, false},
{{2, 2}, {2, 2}},
{{true, true, true, true}, {false, false, false, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolFiveDimensionsOutputs) {
Check<bool>(
2, {2, 2, 2, 2, 1},
{true, false, true, false, true, false, true, false, true, true, true,
true, true, true, true, true},
{{2, 2, 2, 1}, {2, 2, 2, 1}},
{{true, false, true, false, true, true, true, true},
{true, false, true, false, true, true, true, true}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolVectorToScalar) {
Check<bool>(0, {5},
{true, false, true, false, true},
{{}, {}, {}, {}, {}},
{{true}, {false}, {true}, {false}, {true}},
TensorType_BOOL);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unpack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unpack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8005be0-efb5-4e02-b34f-67e3f01f5cb9 | cpp | tensorflow/tensorflow | non_max_suppression | tensorflow/lite/kernels/non_max_suppression.cc | tensorflow/lite/kernels/internal/non_max_suppression_test.cc | #include "tensorflow/lite/kernels/internal/reference/non_max_suppression.h"
#include <initializer_list>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace non_max_suppression {
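// Input tensor indices follow the TF NonMaxSuppressionV4/V5 signatures:
// boxes, scores, max_output_size, iou_threshold, score_threshold, and
// (V5 / soft-NMS only) the sigma input.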
constexpr int kInputTensorBoxes = 0;
constexpr int kInputTensorScores = 1;
constexpr int kInputTensorMaxOutputSize = 2;
constexpr int kInputTensorIouThreshold = 3;
constexpr int kInputTensorScoreThreshold = 4;
constexpr int kInputTensorSigma = 5;
constexpr int kNMSOutputTensorSelectedIndices = 0;
constexpr int kNMSOutputTensorNumSelectedIndices = 1;
constexpr int kSoftNMSOutputTensorSelectedIndices = 0;
constexpr int kSoftNMSOutputTensorSelectedScores = 1;
constexpr int kSoftNMSOutputTensorNumSelectedIndices = 2;
TfLiteStatus SetTensorSizes(TfLiteContext* context, TfLiteTensor* tensor,
std::initializer_list<int> values) {
TfLiteIntArray* size = TfLiteIntArrayCreate(values.size());
int index = 0;
for (const auto& v : values) {
size->data[index++] = v;
}
return context->ResizeTensor(context, tensor, size);
}
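// Validates ranks and types and sizes the outputs. When max_output_size is
// a constant tensor the outputs get static shapes here; otherwise they are
// marked dynamic and resized during Eval.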
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int num_inputs = NumInputs(node);
const bool is_soft_nms = num_inputs == 6;
if (num_inputs != 5 && num_inputs != 6) {
TF_LITE_KERNEL_LOG(context, "Found NMS op with invalid num inputs: %d",
NumInputs(node));
return kTfLiteError;
}
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
TF_LITE_ENSURE_EQ(context, input_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_boxes), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_boxes, 1), 4);
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
TF_LITE_ENSURE_EQ(context, input_scores->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_scores), 1);
TF_LITE_ENSURE_EQ(context, num_boxes, SizeOfDimension(input_scores, 0));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
TF_LITE_ENSURE_EQ(context, input_max_output_size->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_max_output_size), 0);
const bool is_max_output_size_const =
IsConstantOrPersistentTensor(input_max_output_size);
int max_output_size_value = 0;
if (is_max_output_size_const) {
max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
}
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
TF_LITE_ENSURE_EQ(context, input_iou_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_iou_threshold), 0);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
TF_LITE_ENSURE_EQ(context, input_score_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_score_threshold), 0);
if (is_soft_nms) {
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
TF_LITE_ENSURE_EQ(context, input_sigma->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_sigma), 0);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_selected_scores;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
output_selected_scores->type = kTfLiteFloat32;
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});
if (is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
} else {
SetTensorToDynamic(output_selected_indices);
SetTensorToDynamic(output_selected_scores);
}
} else {
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});
if (is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
} else {
SetTensorToDynamic(output_selected_indices);
}
}
return kTfLiteOk;
}
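// Zero-fills the output tail beyond num_selected_indices so the padded
// region is deterministic for consumers reading max_output_size elements.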
void ResetUnusedElementsToZeroes(const int max_output_size,
const int num_selected_indices,
int* selected_indices,
float* selected_scores) {
for (int i = num_selected_indices; i < max_output_size; ++i) {
selected_indices[i] = 0;
if (selected_scores) {
selected_scores[i] = 0.0;
}
}
}
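// Reads the scalar threshold inputs at eval time (they may be
// non-constant) and dispatches to the reference kernel; the soft-NMS
// branch additionally validates sigma >= 0 and emits selected scores.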
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const bool is_soft_nms = NumInputs(node) == 6;
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
const int max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
const bool is_max_output_size_const =
IsConstantOrPersistentTensor(input_max_output_size);
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
const float iou_threshold = *GetTensorData<float>(input_iou_threshold);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
const float score_threshold = *GetTensorData<float>(input_score_threshold);
TfLiteTensor* output_selected_indices = nullptr;
TfLiteTensor* output_selected_scores = nullptr;
TfLiteTensor* output_num_selected_indices = nullptr;
if (is_soft_nms) {
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
const float soft_nms_sigma = *GetTensorData<float>(input_sigma);
if (soft_nms_sigma < 0) {
TF_LITE_KERNEL_LOG(context, "Invalid sigma value for soft NMS: %f",
soft_nms_sigma);
return kTfLiteError;
}
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
}
reference_ops::NonMaxSuppression(
input_boxes->data.f, num_boxes, input_scores->data.f,
max_output_size_value, iou_threshold, score_threshold, soft_nms_sigma,
output_selected_indices->data.i32, output_selected_scores->data.f,
output_num_selected_indices->data.i32);
ResetUnusedElementsToZeroes(
max_output_size_value, *output_num_selected_indices->data.i32,
output_selected_indices->data.i32, output_selected_scores->data.f);
} else {
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
}
reference_ops::NonMaxSuppression(
input_boxes->data.f, num_boxes, input_scores->data.f,
max_output_size_value, iou_threshold, score_threshold, 0.0,
output_selected_indices->data.i32, nullptr,
output_num_selected_indices->data.i32);
ResetUnusedElementsToZeroes(max_output_size_value,
*output_num_selected_indices->data.i32,
output_selected_indices->data.i32, nullptr);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V4() {
static TfLiteRegistration r = {nullptr, nullptr, non_max_suppression::Prepare,
non_max_suppression::Eval};
return &r;
}
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V5() {
static TfLiteRegistration r = {nullptr, nullptr, non_max_suppression::Prepare,
non_max_suppression::Eval};
return &r;
}
}
}
} | #include "tensorflow/lite/kernels/internal/reference/non_max_suppression.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
constexpr int kNumBoxes = 6;
void InitializeCandidates(std::vector<float>* boxes, std::vector<float>* scores,
bool flip_coordinates = false) {
if (!flip_coordinates) {
*boxes = {
0, 0, 1, 1,
0, 0.1, 1, 1.1,
0, -0.1, 1, 0.9,
0, 10, 1, 11,
0, 10.1, 1, 11.1,
0, 100, 1, 101
};
} else {
*boxes = {
1, 1, 0, 0,
0, 0.1, 1, 1.1,
0, .9f, 1, -0.1,
0, 10, 1, 11,
1, 10.1f, 0, 11.1,
1, 101, 0, 100
};
}
*scores = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
}
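// The six candidates above form three clusters: boxes 0-2 overlap heavily
// (e.g. boxes 0 and 1 share 0.9 of their unit area, IoU = 0.9 / 1.1 ~= 0.82),
// boxes 3-4 form a second cluster, and box 5 stands alone.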
template <typename T>
void MatchFirstNElements(int num_elements, const std::vector<T>& test_values,
const std::vector<T>& reference_values) {
EXPECT_LT(num_elements, test_values.size());
EXPECT_EQ(num_elements, reference_values.size());
for (int i = 0; i < num_elements; ++i) {
EXPECT_EQ(test_values[i], reference_values[i]);
}
}
TEST(NonMaxSuppression, TestZeroBoxes) {
std::vector<float> boxes(1);
std::vector<float> scores(1);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
const int max_output_size = 4;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), 0, scores.data(), max_output_size,
iou_threshold, score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromIdenticalBoxes) {
std::vector<float> boxes(kNumBoxes * 4);
std::vector<float> scores(kNumBoxes);
for (int i = 0; i < kNumBoxes; ++i) {
boxes[i * 4 + 0] = 0;
boxes[i * 4 + 1] = 0;
boxes[i * 4 + 2] = 1;
boxes[i * 4 + 3] = 1;
scores[i] = 0.75;
}
const float iou_threshold = 0.5;
float score_threshold = 0.5;
const int max_output_size = kNumBoxes;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_scores, {.75});
score_threshold = 0.95;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithZeroScoreThreshold) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
int max_output_size;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 3);
MatchFirstNElements(3, selected_indices, {3, 0, 5});
MatchFirstNElements(3, selected_scores, {0.95, 0.9, 0.3});
max_output_size = 2;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, max_output_size);
MatchFirstNElements(max_output_size, selected_indices, {3, 0});
MatchFirstNElements(max_output_size, selected_scores, {0.95, 0.9});
max_output_size = 0;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithScoreThreshold) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
int max_output_size;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
MatchFirstNElements(2, selected_indices, {3, 0});
MatchFirstNElements(2, selected_scores, {0.95, 0.9});
max_output_size = 1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_indices, {3});
MatchFirstNElements(1, selected_scores, {0.95});
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithFlippedCoordinates) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores, true);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
const int max_output_size = 3;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
MatchFirstNElements(2, selected_indices, {3, 0});
MatchFirstNElements(2, selected_scores, {0.95, 0.9});
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 3);
MatchFirstNElements(3, selected_indices, {3, 0, 5});
MatchFirstNElements(3, selected_scores, {0.95, 0.9, 0.3});
}
TEST(NonMaxSuppression, TestIoUThresholdBoundaryCases) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float score_threshold = 0.4;
const int max_output_size = 4;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size,
0.0, score_threshold, 0.0,
selected_indices.data(), selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_indices, {3});
MatchFirstNElements(1, selected_scores, {0.95});
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size,
0.9999,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, max_output_size);
MatchFirstNElements(max_output_size, selected_indices, {3, 0, 1, 2});
MatchFirstNElements(max_output_size, selected_scores, {0.95, 0.9, 0.75, 0.6});
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithSoftNMS) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 1.0;
float score_threshold = 0.0;
const float soft_nms_sigma = 0.5;
int max_output_size = 6;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, soft_nms_sigma, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 6);
EXPECT_THAT(selected_indices, ElementsAreArray({3, 0, 1, 5, 4, 2}));
EXPECT_THAT(selected_scores,
ElementsAreArray(
ArrayFloatNear({0.95, 0.9, 0.384, 0.3, 0.256, 0.197}, 1e-3)));
score_threshold = 0.299;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, soft_nms_sigma, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 4);
MatchFirstNElements(4, selected_indices, {3, 0, 1, 5});
}
TEST(NonMaxSuppression, TestNullSelectedScoresOutput) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
int max_output_size;
std::vector<int> selected_indices(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
nullptr, &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/non_max_suppression.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/non_max_suppression_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5bec101-66d0-4856-83f0-6cc668d1a910 | cpp | tensorflow/tensorflow | pooling3d | tensorflow/lite/kernels/pooling3d.cc | tensorflow/lite/kernels/pooling3d_test.cc | #include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace custom {
namespace pooling_3d {
namespace {
struct Pool3DParams {
TfLiteFusedActivation activation;
TfLitePadding padding_type;
Padding3DValues padding_values;
int stride_depth;
int stride_height;
int stride_width;
int filter_depth;
int filter_height;
int filter_width;
int32_t quantized_activation_min;
int32_t quantized_activation_max;
float float_activation_min;
float float_activation_max;
};
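// Integer pools round the average half away from zero, e.g. a window sum
// of 7 over 2 elements yields (7 + 1) / 2 = 4 and a sum of -7 yields
// (-7 - 1) / 2 = -4; the float specialization below divides exactly.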
template <typename T, typename ActivationT>
inline T RoundAndAverage(ActivationT sum, int count) {
return sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count;
}
template <>
inline float RoundAndAverage(float sum, int count) {
return sum / count;
}
template <typename T, typename ActivationT>
inline void AveragePool3D(const Pool3DParams& params,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5);
ActivationT activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int channels = MatchingDim(input_shape, 4, output_shape, 4);
const int in_spatial_dim_1 = input_shape.Dims(1);
const int in_spatial_dim_2 = input_shape.Dims(2);
const int in_spatial_dim_3 = input_shape.Dims(3);
const int out_spatial_dim_1 = output_shape.Dims(1);
const int out_spatial_dim_2 = output_shape.Dims(2);
const int out_spatial_dim_3 = output_shape.Dims(3);
const int stride_spatial_dim_1 = params.stride_depth;
const int stride_spatial_dim_2 = params.stride_height;
const int stride_spatial_dim_3 = params.stride_width;
const int filter_spatial_dim_1 = params.filter_depth;
const int filter_spatial_dim_2 = params.filter_height;
const int filter_spatial_dim_3 = params.filter_width;
const int padding_spatial_dim_1 = params.padding_values.depth;
const int padding_spatial_dim_2 = params.padding_values.height;
const int padding_spatial_dim_3 = params.padding_values.width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_d1 = 0; out_d1 < out_spatial_dim_1; ++out_d1) {
const int in_d1_origin =
(out_d1 * stride_spatial_dim_1) - padding_spatial_dim_1;
const int filter_d1_start = std::max(0, -in_d1_origin);
const int filter_d1_end =
std::min(filter_spatial_dim_1, in_spatial_dim_1 - in_d1_origin);
for (int out_d2 = 0; out_d2 < out_spatial_dim_2; ++out_d2) {
const int in_d2_origin =
(out_d2 * stride_spatial_dim_2) - padding_spatial_dim_2;
const int filter_d2_start = std::max(0, -in_d2_origin);
const int filter_d2_end =
std::min(filter_spatial_dim_2, in_spatial_dim_2 - in_d2_origin);
for (int out_d3 = 0; out_d3 < out_spatial_dim_3; ++out_d3) {
const int in_d3_origin =
(out_d3 * stride_spatial_dim_3) - padding_spatial_dim_3;
const int filter_d3_start = std::max(0, -in_d3_origin);
const int filter_d3_end =
std::min(filter_spatial_dim_3, in_spatial_dim_3 - in_d3_origin);
for (int channel = 0; channel < channels; ++channel) {
ActivationT total = 0;
for (int filter_d1 = filter_d1_start; filter_d1 < filter_d1_end;
++filter_d1) {
const int in_d1 = in_d1_origin + filter_d1;
for (int filter_d2 = filter_d2_start; filter_d2 < filter_d2_end;
++filter_d2) {
const int in_d2 = in_d2_origin + filter_d2;
for (int filter_d3 = filter_d3_start; filter_d3 < filter_d3_end;
++filter_d3) {
const int in_d3 = in_d3_origin + filter_d3;
total += input_data[Offset(input_shape, batch, in_d1, in_d2,
in_d3, channel)];
}
}
}
const int filter_count = (filter_d1_end - filter_d1_start) *
(filter_d2_end - filter_d2_start) *
(filter_d3_end - filter_d3_start);
T average = pooling_3d::RoundAndAverage<T, ActivationT>(
total, filter_count);
average = std::max<T>(average, activation_min);
average = std::min<T>(average, activation_max);
output_data[Offset(output_shape, batch, out_d1, out_d2, out_d3,
channel)] = average;
}
}
}
}
}
}
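// MaxPool3D mirrors AveragePool3D but takes the window maximum instead of
// the rounded average; padded positions never contribute because the
// filter ranges are clamped to valid input coordinates.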
template <typename T, typename ActivationT>
inline void MaxPool3D(const Pool3DParams& params,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5);
ActivationT activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int channels = MatchingDim(input_shape, 4, output_shape, 4);
const int in_spatial_dim_1 = input_shape.Dims(1);
const int in_spatial_dim_2 = input_shape.Dims(2);
const int in_spatial_dim_3 = input_shape.Dims(3);
const int out_spatial_dim_1 = output_shape.Dims(1);
const int out_spatial_dim_2 = output_shape.Dims(2);
const int out_spatial_dim_3 = output_shape.Dims(3);
const int stride_spatial_dim_1 = params.stride_depth;
const int stride_spatial_dim_2 = params.stride_height;
const int stride_spatial_dim_3 = params.stride_width;
const int filter_spatial_dim_1 = params.filter_depth;
const int filter_spatial_dim_2 = params.filter_height;
const int filter_spatial_dim_3 = params.filter_width;
const int padding_spatial_dim_1 = params.padding_values.depth;
const int padding_spatial_dim_2 = params.padding_values.height;
const int padding_spatial_dim_3 = params.padding_values.width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_d1 = 0; out_d1 < out_spatial_dim_1; ++out_d1) {
const int in_d1_origin =
(out_d1 * stride_spatial_dim_1) - padding_spatial_dim_1;
const int filter_d1_start = std::max(0, -in_d1_origin);
const int filter_d1_end =
std::min(filter_spatial_dim_1, in_spatial_dim_1 - in_d1_origin);
for (int out_d2 = 0; out_d2 < out_spatial_dim_2; ++out_d2) {
const int in_d2_origin =
(out_d2 * stride_spatial_dim_2) - padding_spatial_dim_2;
const int filter_d2_start = std::max(0, -in_d2_origin);
const int filter_d2_end =
std::min(filter_spatial_dim_2, in_spatial_dim_2 - in_d2_origin);
for (int out_d3 = 0; out_d3 < out_spatial_dim_3; ++out_d3) {
const int in_d3_origin =
(out_d3 * stride_spatial_dim_3) - padding_spatial_dim_3;
const int filter_d3_start = std::max(0, -in_d3_origin);
const int filter_d3_end =
std::min(filter_spatial_dim_3, in_spatial_dim_3 - in_d3_origin);
for (int channel = 0; channel < channels; ++channel) {
T max = std::numeric_limits<T>::lowest();
for (int filter_d1 = filter_d1_start; filter_d1 < filter_d1_end;
++filter_d1) {
const int in_d1 = in_d1_origin + filter_d1;
for (int filter_d2 = filter_d2_start; filter_d2 < filter_d2_end;
++filter_d2) {
const int in_d2 = in_d2_origin + filter_d2;
for (int filter_d3 = filter_d3_start; filter_d3 < filter_d3_end;
++filter_d3) {
const int in_d3 = in_d3_origin + filter_d3;
max =
std::max(max, input_data[Offset(input_shape, batch, in_d1,
in_d2, in_d3, channel)]);
}
}
}
max = std::max<T>(max, activation_min);
max = std::min<T>(max, activation_max);
output_data[Offset(output_shape, batch, out_d1, out_d2, out_d3,
channel)] = max;
}
}
}
}
}
}
}
enum PoolType {
kAverage,
kMax,
};
constexpr const char kPoolSizeStr[] = "ksize";
constexpr const char kStridesStr[] = "strides";
constexpr const char kPaddingStr[] = "padding";
constexpr const char kDataFormatStr[] = "data_format";
constexpr const char kPaddingSameStr[] = "SAME";
constexpr const char kPaddingValidStr[] = "VALID";
struct OpData {
Pool3DParams params;
};
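// Parses the FlexBuffer custom options, which mirror the TF attributes:
// "data_format" (only NDHWC is supported), "padding" (SAME or VALID), and
// five-element "ksize"/"strides" vectors whose batch and channel entries
// must be 1.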
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
OpData* opdata = new OpData;
opdata->params.activation = kTfLiteActNone;
const flexbuffers::Map& m =
flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(buffer), length)
.AsMap();
const std::string data_format = m[kDataFormatStr].AsString().str();
TFLITE_CHECK_EQ(data_format, "NDHWC");
const std::string padding = m[kPaddingStr].AsString().str();
if (padding == kPaddingValidStr) {
opdata->params.padding_type = kTfLitePaddingValid;
} else if (padding == kPaddingSameStr) {
opdata->params.padding_type = kTfLitePaddingSame;
} else {
opdata->params.padding_type = kTfLitePaddingUnknown;
}
const auto pool_size = m[kPoolSizeStr].AsTypedVector();
TFLITE_CHECK_EQ(pool_size.size(), 5);
TFLITE_CHECK_EQ(pool_size[0].AsInt32(), 1);
TFLITE_CHECK_EQ(pool_size[4].AsInt32(), 1);
opdata->params.filter_depth = pool_size[1].AsInt32();
opdata->params.filter_height = pool_size[2].AsInt32();
opdata->params.filter_width = pool_size[3].AsInt32();
const auto strides = m[kStridesStr].AsTypedVector();
TFLITE_CHECK_EQ(strides.size(), 5);
TFLITE_CHECK_EQ(strides[0].AsInt32(), 1);
TFLITE_CHECK_EQ(strides[4].AsInt32(), 1);
opdata->params.stride_depth = strides[1].AsInt32();
opdata->params.stride_height = strides[2].AsInt32();
opdata->params.stride_width = strides[3].AsInt32();
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
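// Shared Prepare: validates the 5-D NDHWC input, computes the padding and
// output shape, and for int8 requires matching input/output quantization
// parameters.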
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
Pool3DParams& params = opdata->params;
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 5);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 ||
                            input->type == kTfLiteInt16 ||
                            input->type == kTfLiteInt8);
int batches = input->dims->data[0];
int depth = input->dims->data[1];
int height = input->dims->data[2];
int width = input->dims->data[3];
int channels = input->dims->data[4];
TF_LITE_ENSURE(context, params.stride_depth > 0);
TF_LITE_ENSURE(context, params.stride_height > 0);
TF_LITE_ENSURE(context, params.stride_width > 0);
int out_width, out_height, out_depth;
params.padding_values = ComputePadding3DValues(
params.stride_height, params.stride_width, params.stride_depth, 1, 1, 1,
height, width, depth, params.filter_height, params.filter_width,
params.filter_depth, params.padding_type, &out_height, &out_width,
&out_depth);
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_NEAR(context, input->params.scale, output->params.scale,
1.0e-6);
TF_LITE_ENSURE_EQ(context, input->params.zero_point,
                  output->params.zero_point);
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(5);
output_size->data[0] = batches;
output_size->data[1] = out_depth;
output_size->data[2] = out_height;
output_size->data[3] = out_width;
output_size->data[4] = channels;
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
Pool3DParams& params = opdata->params;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
#define TF_LITE_AVERAGE_POOL_3D(type, activation_type) \
SetActivationParams(activation_min, activation_max, ¶ms); \
AveragePool3D<type, activation_type>( \
params, GetTensorShape(input), GetTensorData<type>(input), \
GetTensorShape(output), GetTensorData<type>(output))
switch (input->type) {
case kTfLiteFloat32: {
float activation_min, activation_max;
CalculateActivationRange(params.activation, &activation_min,
&activation_max);
TF_LITE_AVERAGE_POOL_3D(float, float);
} break;
case kTfLiteInt8: {
int32_t activation_min;
int32_t activation_max;
CalculateActivationRangeQuantized(context, params.activation, output,
&activation_min, &activation_max);
TF_LITE_AVERAGE_POOL_3D(int8_t, int32_t);
} break;
case kTfLiteInt16: {
int32_t activation_min;
int32_t activation_max;
CalculateActivationRangeQuantized(context, params.activation, output,
&activation_min, &activation_max);
TF_LITE_AVERAGE_POOL_3D(int16_t, int32_t);
} break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
#undef TF_LITE_AVERAGE_POOL_3D
return kTfLiteOk;
}
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
Pool3DParams& params = opdata->params;
#define TF_LITE_MAX_POOL_3D(type, activation_type) \
SetActivationParams(activation_min, activation_max, ¶ms); \
MaxPool3D<type, activation_type>( \
params, GetTensorShape(input), GetTensorData<type>(input), \
GetTensorShape(output), GetTensorData<type>(output))
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
switch (input->type) {
case kTfLiteFloat32: {
float activation_min, activation_max;
CalculateActivationRange(params.activation, &activation_min,
&activation_max);
TF_LITE_MAX_POOL_3D(float, float);
} break;
case kTfLiteInt8: {
int32_t activation_min;
int32_t activation_max;
CalculateActivationRangeQuantized(context, params.activation, output,
&activation_min, &activation_max);
TF_LITE_MAX_POOL_3D(int8_t, int32_t);
} break;
case kTfLiteInt16: {
int32_t activation_min;
int32_t activation_max;
CalculateActivationRangeQuantized(context, params.activation, output,
&activation_min, &activation_max);
TF_LITE_MAX_POOL_3D(int16_t, int32_t);
} break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
#undef TF_LITE_MAX_POOL_3D
return kTfLiteOk;
}
}
TfLiteRegistration* Register_AVG_POOL_3D() {
static TfLiteRegistration r = {pooling_3d::Init, pooling_3d::Free,
pooling_3d::GenericPrepare,
pooling_3d::AverageEval};
return &r;
}
TfLiteRegistration* Register_MAX_POOL_3D() {
static TfLiteRegistration r = {pooling_3d::Init, pooling_3d::Free,
pooling_3d::GenericPrepare,
pooling_3d::MaxEval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
using ::testing::ElementsAreArray;
enum PoolType {
kAverage,
kMax,
};
template <typename T>
class BasePoolingOpModel : public SingleOpModel {
public:
BasePoolingOpModel(PoolType pool_type, TensorData input, int filter_d,
int filter_h, int filter_w, TensorData output,
TfLitePadding padding = kTfLitePaddingValid,
int stride_d = 2, int stride_h = 2, int stride_w = 2) {
if (input.type == TensorType_FLOAT32) {
input.min = input.max = 0.f;
output.min = output.max = 0.f;
}
input_ = AddInput(input);
output_ = AddOutput(output);
std::vector<uint8_t> custom_option = CreateCustomOptions(
stride_d, stride_h, stride_w, filter_d, filter_h, filter_w, padding);
if (pool_type == kAverage) {
SetCustomOp("AveragePool3D", custom_option,
ops::custom::Register_AVG_POOL_3D);
} else {
SetCustomOp("MaxPool3D", custom_option,
ops::custom::Register_MAX_POOL_3D);
}
BuildInterpreter({GetShape(input_)});
}
void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<T>(input_, data);
}
std::vector<float> GetOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
protected:
int input_;
int output_;
private:
std::vector<uint8_t> CreateCustomOptions(int stride_depth, int stride_height,
int stride_width, int filter_depth,
int filter_height, int filter_width,
TfLitePadding padding) {
auto flex_builder = std::make_unique<flexbuffers::Builder>();
size_t map_start = flex_builder->StartMap();
flex_builder->String("data_format", "NDHWC");
if (padding == kTfLitePaddingValid) {
flex_builder->String("padding", "VALID");
} else {
flex_builder->String("padding", "SAME");
}
auto start = flex_builder->StartVector("ksize");
flex_builder->Add(1);
flex_builder->Add(filter_depth);
flex_builder->Add(filter_height);
flex_builder->Add(filter_width);
flex_builder->Add(1);
flex_builder->EndVector(start, true, false);
auto strides_start = flex_builder->StartVector("strides");
flex_builder->Add(1);
flex_builder->Add(stride_depth);
flex_builder->Add(stride_height);
flex_builder->Add(stride_width);
flex_builder->Add(1);
flex_builder->EndVector(strides_start, true, false);
flex_builder->EndMap(map_start);
flex_builder->Finish();
return flex_builder->GetBuffer();
}
};
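// Float models bypass the quantize/dequantize round trip used above: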
template <>
void BasePoolingOpModel<float>::SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
template <>
std::vector<float> BasePoolingOpModel<float>::GetOutput() {
return ExtractVector<float>(output_);
}
#if GTEST_HAS_DEATH_TEST
TEST(AveragePoolingOpTest, InvalidDimSize) {
EXPECT_DEATH(BasePoolingOpModel<float> m(
kAverage,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2,
2, 2,
{TensorType_FLOAT32, {}},
kTfLitePaddingValid, 1,
1, 1),
"NumDimensions.input. != 5 .4 != 5.");
}
TEST(AveragePoolingOpTest, ZeroStride) {
EXPECT_DEATH(BasePoolingOpModel<float> m(
kAverage,
{TensorType_FLOAT32, {1, 2, 2, 4, 1}},
2,
2, 2,
{TensorType_FLOAT32, {}},
kTfLitePaddingValid, 0,
0, 0),
"Cannot allocate tensors");
}
#endif
template <typename T>
class AveragePoolingOpTest : public ::testing::Test {};
template <typename T>
class MaxPoolingOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, int8_t, int16_t>;
TYPED_TEST_SUITE(AveragePoolingOpTest, DataTypes);
TYPED_TEST_SUITE(MaxPoolingOpTest, DataTypes);
TYPED_TEST(AveragePoolingOpTest, AveragePool) {
BasePoolingOpModel<TypeParam> m(
kAverage,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
2, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375});
m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {3.125, 4.25}));
}
TYPED_TEST(AveragePoolingOpTest, AveragePoolFilterH1) {
BasePoolingOpModel<TypeParam> m(
kAverage,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
1, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375});
m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {2.75, 5.75}));
}
TYPED_TEST(AveragePoolingOpTest, AveragePoolPaddingSameStride1) {
BasePoolingOpModel<TypeParam> m(
kAverage,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
2, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375},
kTfLitePaddingSame,
1, 1,
1);
m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
Pointwise(FloatingPointEq(),
{2.875, 4.125, 4.5, 4.5, 3.0, 3.25, 3.25, 3.5, 2.5, 4.0,
5.75, 5.5, 2.5, 2.0, 3.0, 4.0}));
}
TYPED_TEST(AveragePoolingOpTest, AveragePoolPaddingValidStride1) {
BasePoolingOpModel<TypeParam> m(
kAverage,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
2, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375},
kTfLitePaddingValid,
1, 1,
1);
m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {2.875, 4.125, 4.5}));
}
TYPED_TEST(MaxPoolingOpTest, MaxPool) {
BasePoolingOpModel<TypeParam> m(
kMax,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
2, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375});
m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {6.0, 10.0}));
}
TYPED_TEST(MaxPoolingOpTest, MaxPoolFilterH1) {
BasePoolingOpModel<TypeParam> m(
kMax,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
1, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375});
m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10}));
}
TYPED_TEST(MaxPoolingOpTest, MaxPoolPaddingSameStride1) {
BasePoolingOpModel<TypeParam> m(
kMax,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
2, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375},
kTfLitePaddingSame,
1, 1,
1);
m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10, 10, 7, 5, 5, 4, 4, 3, 10,
10, 7, 3, 2, 4, 4}));
}
TYPED_TEST(MaxPoolingOpTest, MaxPoolPaddingValidStride1) {
BasePoolingOpModel<TypeParam> m(
kMax,
{GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375},
2,
2, 2,
{GetTensorType<TypeParam>(), {}, 0, 15.9375},
kTfLitePaddingValid,
1, 1,
1);
m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {6.0, 10.0, 10.0}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pooling3d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pooling3d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4142408e-4266-428e-ac32-9ee5cf8ea20c | cpp | tensorflow/tensorflow | arg_min_max | tensorflow/lite/kernels/arg_min_max.cc | tensorflow/lite/delegates/hexagon/builders/tests/arg_min_max_test.cc | #include "tensorflow/lite/kernels/internal/reference/arg_min_max.h"
#include <stdint.h>
#include <functional>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace arg_min_max {
constexpr int kInputTensor = 0;
constexpr int kAxis = 1;
constexpr int kOutputTensor = 0;
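// Builds the output shape by dropping the reduced axis from the input
// shape; a negative axis is wrapped NumPy-style (axis += rank).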
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* axis, TfLiteTensor* output) {
int axis_value;
if (axis->type == kTfLiteInt64) {
axis_value = static_cast<int>(*GetTensorData<int64_t>(axis));
} else {
axis_value = *GetTensorData<int>(axis);
}
if (axis_value < 0) {
axis_value += NumDimensions(input);
}
TF_LITE_ENSURE(context, axis_value >= 0);
TF_LITE_ENSURE(context, axis_value < NumDimensions(input));
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1);
int j = 0;
for (int i = 0; i < NumDimensions(input); ++i) {
if (i != axis_value) {
output_dims->data[j] = SizeOfDimension(input, i);
++j;
}
}
return context->ResizeTensor(context, output, output_dims);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
TF_LITE_ENSURE(context,
axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
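// TfLiteArgMinParams and TfLiteArgMaxParams are layout-compatible (each
// holds a single output_type field), so the shared Prepare reads the
// attribute through the ArgMax variant for both ops.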
auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
switch (params->output_type) {
case kTfLiteInt32:
output->type = kTfLiteInt32;
break;
case kTfLiteInt64:
output->type = kTfLiteInt64;
break;
default:
TF_LITE_KERNEL_LOG(context, "Unknown index output data type: %d",
params->output_type);
return kTfLiteError;
}
switch (input->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
case kTfLiteBool:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unknown input type: %d, only float32, int types "
"and bool are supported",
input->type);
return kTfLiteError;
}
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
if (IsConstantOrPersistentTensor(axis)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
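// Dispatches over (axis index type) x (output index type) x (input element
// type); is_arg_max selects max vs. min semantics in the shared kernel.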
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
}
#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \
optimized_ops::ArgMinMax( \
GetTensorShape(input), GetTensorData<data_type>(input), \
GetTensorData<axis_type>(axis), GetTensorShape(output), \
GetTensorData<output_type>(output), is_arg_max)
if (axis->type == kTfLiteInt32) {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int32_t, int32_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int32_t, int64_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only int32 and int64 are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
} else {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int64_t, int32_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int64_t, int64_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only int32 and int64 are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
#undef TF_LITE_ARG_MIN_MAX
return kTfLiteOk;
}
TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) {
return Eval(context, node, false);
}
TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) {
return Eval(context, node, true);
}
}
TfLiteRegistration* Register_ARG_MAX() {
static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
arg_min_max::ArgMaxEval};
return &r;
}
TfLiteRegistration* Register_ARG_MIN() {
static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
arg_min_max::ArgMinEval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class ArgBaseOpModel : public SingleOpModelWithHexagon {
public:
explicit ArgBaseOpModel(TensorType input_type) {
input_ = AddInput(input_type);
output_ = AddOutput(TensorType_INT32);
}
int input() const { return input_; }
std::vector<int> GetInt32Output() const {
return ExtractVector<int>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
using SingleOpModelWithHexagon::builder_;
int input_;
int output_;
};
class ArgMinOpModel : public ArgBaseOpModel {
public:
ArgMinOpModel(std::initializer_list<int> input_shape, TensorType input_type)
: ArgBaseOpModel(input_type), input_shape_(input_shape) {}
void Build() {
SetBuiltinOp(BuiltinOperator_ARG_MIN, BuiltinOptions_ArgMinOptions,
CreateArgMinOptions(builder_, TensorType_INT32).Union());
BuildInterpreter({input_shape_, {1}});
}
private:
std::vector<int> input_shape_;
};
class ArgMaxOpModel : public ArgBaseOpModel {
public:
ArgMaxOpModel(std::initializer_list<int> input_shape, TensorType input_type)
: ArgBaseOpModel(input_type), input_shape_(input_shape) {}
void Build() {
SetBuiltinOp(BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
CreateArgMaxOptions(builder_, TensorType_INT32).Union());
BuildInterpreter({input_shape_, {1}});
}
private:
std::vector<int> input_shape_;
};
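// Each test impl quantizes the float input (symmetric for uint8,
// signed-symmetric otherwise), runs the graph through the Hexagon
// delegate, and checks the int32 index output.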
template <typename integer_type, TensorType tensor_dtype>
void ArgMinTestImpl() {
ArgMinOpModel model({1, 1, 1, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {3}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1}));
}
template <typename integer_type, TensorType tensor_dtype>
void ArgMinNegativeTestImpl() {
ArgMinOpModel model({1, 1, 2, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {-2}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 2, 7, 8, 1, 9, 7, 3});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(),
{1, 2, 7, 8, 1, 9, 7, 3});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({0, 0, 0, 1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 4}));
}
template <typename integer_type, TensorType tensor_dtype>
void ArgMaxTestImpl() {
ArgMaxOpModel model({1, 1, 1, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {3}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({3}));
}
TEST(ArgMinTest, GetArgMin_UInt8) {
ArgMinTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMinTest, GetArgMin_Int8) { ArgMinTestImpl<int8_t, TensorType_INT8>(); }
TEST(ArgMinTest, GetArgMinNegative_UInt8) {
ArgMinNegativeTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMinTest, GetArgMinNegative_Int8) {
ArgMinNegativeTestImpl<int8_t, TensorType_INT8>();
}
TEST(ArgMaxTest, GetArgMax_UInt8) {
ArgMaxTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMaxTest, GetArgMax_Int8) { ArgMaxTestImpl<int8_t, TensorType_INT8>(); }
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/arg_min_max.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/arg_min_max_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
411065d3-9fec-4494-900d-a7c9d8538492 | cpp | tensorflow/tensorflow | bitcast | tensorflow/c/kernels/ops/bitcast.cc | tensorflow/lite/kernels/bitcast_test.cc | #include <sstream>
#include <string>
#include "tensorflow/c/ops.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
static void ComputeNewShape(TF_ShapeInferenceContext* ctx,
TF_ShapeHandle* shape, TF_DataType input_type,
TF_DataType output_type, TF_Status* status) {
size_t input_type_size = TF_DataTypeSize(input_type);
size_t output_type_size = TF_DataTypeSize(output_type);
if (input_type_size == 0 || output_type_size == 0) {
std::ostringstream err;
err << "Cannot bitcast type " << input_type << " to " << output_type
<< " because one of the type sizes is zero";
TF_SetStatus(status, TF_INVALID_ARGUMENT, err.str().c_str());
return;
}
TF_SetStatus(status, TF_OK, "");
if (input_type_size < output_type_size) {
TF_ShapeInferenceContextWithRankAtLeast(ctx, shape, 1, shape, status);
if (TF_GetCode(status) == TF_OK) {
TF_DimensionHandle* last_dim = TF_NewDimensionHandle();
size_t divisor_val = output_type_size / input_type_size;
TF_ShapeInferenceContextDim(ctx, shape, -1, last_dim);
if (!TF_DimensionHandleValueKnown(last_dim) ||
TF_DimensionHandleValue(last_dim) == divisor_val) {
TF_ShapeInferenceContextSubshape(ctx, shape, 0, -1, shape, status);
} else {
std::ostringstream err;
err << "Cannot bitcast from " << input_type << " to " << output_type
<< " due to shape. " << TF_DimensionHandleValue(last_dim)
<< " does not match " << divisor_val;
TF_SetStatus(status, TF_INVALID_ARGUMENT, err.str().c_str());
}
TF_DeleteDimensionHandle(last_dim);
}
} else if (input_type_size > output_type_size) {
size_t divisor_val = input_type_size / output_type_size;
TF_ShapeHandle* extension =
TF_ShapeInferenceContextVectorFromSize(ctx, divisor_val);
TF_ShapeInferenceContextConcatenateShapes(ctx, shape, extension, shape,
status);
TF_DeleteShapeHandle(extension);
}
}
static void bitcast_shape_inference_fn(TF_ShapeInferenceContext* ctx,
TF_Status* status) {
TF_ShapeHandle* result = TF_NewShapeHandle();
TF_ShapeInferenceContextGetInput(ctx, 0, result, status);
if (TF_GetCode(status) == TF_OK &&
!TF_ShapeInferenceContextRankKnown(ctx, result)) {
TF_ShapeInferenceContextSetUnknownShape(ctx, status);
TF_DeleteShapeHandle(result);
return;
}
TF_DataType input_type;
TF_DataType output_type;
if (TF_GetCode(status) == TF_OK) {
TF_ShapeInferenceContext_GetAttrType(ctx, "T", &input_type, status);
}
if (TF_GetCode(status) == TF_OK) {
TF_ShapeInferenceContext_GetAttrType(ctx, "type", &output_type, status);
}
if (TF_GetCode(status) == TF_OK) {
ComputeNewShape(ctx, result, input_type, output_type, status);
}
if (TF_GetCode(status) == TF_OK) {
TF_ShapeInferenceContextSetOutput(ctx, 0, result, status);
}
TF_DeleteShapeHandle(result);
}
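// Builds and registers the Bitcast op definition (inputs, attrs, shape
// function) through the C API; invoked once at load time by the static
// initializer below.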
void RegisterBitcastOp() {
TF_Status* status = TF_NewStatus();
TF_OpDefinitionBuilder* op_builder = TF_NewOpDefinitionBuilder("Bitcast");
TF_OpDefinitionBuilderAddInput(op_builder, "input: T");
TF_OpDefinitionBuilderAddOutput(op_builder, "output: type");
TF_OpDefinitionBuilderAddAttr(
op_builder,
"T: {bfloat16, half, float, double, int64, int32, uint8, uint16, "
"uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, "
"qint16, quint16, qint32}");
TF_OpDefinitionBuilderAddAttr(
op_builder,
"type: {bfloat16, half, float, double, int64, int32, uint8, uint16, "
"uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, "
"qint16, quint16, qint32}");
TF_OpDefinitionBuilderSetShapeInferenceFunction(op_builder,
&bitcast_shape_inference_fn);
TF_RegisterOpDefinition(op_builder, status);
CHECK_EQ(TF_GetCode(status), TF_OK)
<< "Bitcast op registration failed: " << TF_Message(status);
TF_DeleteStatus(status);
}
TF_ATTRIBUTE_UNUSED static bool IsBitcastOpRegistered = []() {
if ((&TF_NewStatus != nullptr) && SHOULD_REGISTER_OP("Bitcast")) {
RegisterBitcastOp();
}
return true;
}(); | #include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
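// Minimal stand-in for absl::bit_cast: reinterprets the object
// representation via memcpy, which is well-defined for trivially copyable
// types of equal size.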
template <
typename Dest, typename Source,
typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
std::is_trivially_copyable<Source>::value &&
std::is_trivially_copyable<Dest>::value &&
std::is_default_constructible<Dest>::value,
int>::type = 0>
inline Dest bit_cast(const Source& source) {
Dest dest;
memcpy(static_cast<void*>(std::addressof(dest)),
static_cast<const void*>(std::addressof(source)), sizeof(dest));
return dest;
}
class BitcastOpModel : public SingleOpModel {
public:
BitcastOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_BITCAST, BuiltinOptions_BitcastOptions,
CreateBitcastOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() const { return input_; }
int output() const { return output_; }
protected:
int input_;
int output_;
};
TEST(BitcastOpModel, BitcastInt32ToUint32) {
BitcastOpModel m({TensorType_INT32, {2, 3}}, {TensorType_UINT32, {2, 3}});
std::vector<int32_t> input = {INT32_MIN, -100, -1, 0, 100, INT32_MAX};
m.PopulateTensor<int32_t>(m.input(), input);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<uint32_t> output;
std::transform(input.cbegin(), input.cend(), std::back_inserter(output),
[](int32_t a) { return bit_cast<std::uint32_t>(a); });
EXPECT_THAT(m.ExtractVector<uint32_t>(m.output()), ElementsAreArray(output));
}
TEST(BitcastOpModel, BitcastUInt32ToInt32Inplace) {
BitcastOpModel m({TensorType_UINT32, {2, 3}}, {TensorType_INT32, {2, 3}});
std::vector<uint32_t> input = {0,
1,
100,
bit_cast<uint32_t>(INT32_MAX),
bit_cast<uint32_t>(INT32_MIN),
UINT32_MAX};
m.PopulateTensor<uint32_t>(m.input(), input);
const int kInplaceTensorIdx = 0;
const TfLiteTensor* input_tensor = m.GetInputTensor(kInplaceTensorIdx);
TfLiteTensor* output_tensor = m.GetOutputTensor(kInplaceTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int32_t> output;
std::transform(input.cbegin(), input.cend(), std::back_inserter(output),
                 [](uint32_t a) { return bit_cast<std::int32_t>(a); });
EXPECT_THAT(m.ExtractVector<int32_t>(m.output()), ElementsAreArray(output));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TEST(BitcastOpModel, BitcastUInt32ToInt32) {
BitcastOpModel m({TensorType_UINT32, {2, 3}}, {TensorType_INT32, {2, 3}});
std::vector<uint32_t> input = {0,
1,
100,
bit_cast<uint32_t>(INT32_MAX),
bit_cast<uint32_t>(INT32_MIN),
UINT32_MAX};
m.PopulateTensor<uint32_t>(m.input(), input);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int32_t> output;
std::transform(input.cbegin(), input.cend(), std::back_inserter(output),
                 [](uint32_t a) { return bit_cast<std::int32_t>(a); });
EXPECT_THAT(m.ExtractVector<int32_t>(m.output()), ElementsAreArray(output));
}
TEST(BitcastOpModel, BitcastUInt32ToInt16) {
BitcastOpModel m({TensorType_UINT32, {2, 1}}, {TensorType_INT16, {2, 1, 2}});
std::vector<uint32_t> input = {(uint32_t)UINT16_MAX + 1,
(uint32_t)UINT16_MAX};
m.PopulateTensor<uint32_t>(m.input(), input);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
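  // The expected int16 pattern depends on host byte order: each uint32 value
  // splits into two int16 halves in memory order.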
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
std::vector<int16_t> output = {1, 0, 0, -1};
#else
std::vector<int16_t> output = {0, 1, -1, 0};
#endif
EXPECT_THAT(m.ExtractVector<int16_t>(m.output()), ElementsAreArray(output));
}
TEST(BitcastOpModel, BitcastInt16ToUint32) {
BitcastOpModel m({TensorType_INT16, {2, 1, 2}}, {TensorType_UINT32, {2, 1}});
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
std::vector<int16_t> input = {1, 0, 0, -1};
#else
std::vector<int16_t> input = {0, 1, -1, 0};
#endif
m.PopulateTensor<int16_t>(m.input(), input);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<uint32_t> output = {(uint32_t)UINT16_MAX + 1,
(uint32_t)UINT16_MAX};
EXPECT_THAT(m.ExtractVector<uint32_t>(m.output()), ElementsAreArray(output));
}
TEST(BitcastOpModel, BitcastInt16ToUint32WrongShape) {
#if GTEST_HAS_DEATH_TEST
EXPECT_DEATH(BitcastOpModel m({TensorType_INT16, {2, 2, 7}},
{TensorType_UINT32, {2, 7}}),
"7 != 2");
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/ops/bitcast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bitcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9314abd7-8c5d-42d1-a964-8ca3231083e7 | cpp | tensorflow/tensorflow | unidirectional_sequence_rnn | tensorflow/lite/kernels/unidirectional_sequence_rnn.cc | tensorflow/lite/kernels/unidirectional_sequence_rnn_test.cc | #include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unidirectional_sequence_rnn {
namespace {
struct OpData {
int scratch_tensor_index;
bool compute_row_sums = false;
};
}
constexpr int kInputTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kRecurrentWeightsTensor = 2;
constexpr int kBiasTensor = 3;
constexpr int kHiddenStateTensor = 4;
constexpr int kOutputTensor = 0;
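// Per-node state; six scratch tensors are reserved for the hybrid
// (quantized-weight) execution path set up in Prepare.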
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
  context->AddTensors(context, /*tensors_to_add=*/6,
                      &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* input_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTensor, &input_weights));
const TfLiteTensor* recurrent_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights));
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias));
const TfLiteTensor* hidden_state;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kHiddenStateTensor, &hidden_state));
auto* params = reinterpret_cast<TfLiteSequenceRNNParams*>(node->builtin_data);
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int num_units = input_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input->dims->data[2],
input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, input_weights->dims->data[0], bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, recurrent_weights->dims->data[0],
bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, recurrent_weights->dims->data[1],
bias->dims->data[0]);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input_weights->type,
recurrent_weights->type);
TF_LITE_ENSURE_EQ(context, NumDimensions(hidden_state), 2);
TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[1], num_units);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(3);
output_size_array->data[0] = (time_major) ? max_time : batch_size;
output_size_array->data[1] = (time_major) ? batch_size : max_time;
output_size_array->data[2] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
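  // The hybrid path (float activations, quantized weights) needs temporaries
  // for quantized inputs/state, scaling factors, accumulators, zero points,
  // and row sums.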
const bool is_hybrid = IsHybridOp(input, input_weights);
if (is_hybrid) {
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->compute_row_sums = true;
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(6);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 0,
&input_quantized));
input_quantized->type = input_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* hidden_state_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1,
&hidden_state_quantized));
hidden_state_quantized->type = input_weights->type;
hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(hidden_state_quantized->dims,
hidden_state->dims)) {
TfLiteIntArray* hidden_state_quantized_size =
TfLiteIntArrayCopy(hidden_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, hidden_state_quantized,
hidden_state_quantized_size));
}
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[3] = op_data->scratch_tensor_index + 3;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 3, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2);
accum_scratch_size->data[0] = accum_scratch_dims[0];
accum_scratch_size->data[1] = accum_scratch_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch,
accum_scratch_size));
}
node->temporaries->data[4] = op_data->scratch_tensor_index + 4;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &zero_points));
zero_points->type = kTfLiteInt32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
node->temporaries->data[5] = op_data->scratch_tensor_index + 5;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[2] = {2, num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2);
row_sums_size->data[0] = row_sums_dims[0];
row_sums_size->data[1] = row_sums_dims[1];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
return kTfLiteOk;
}
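// Float path: one RNN step per timestep. Time-major layout processes all
// batches of a step with a shared contiguous hidden state; batch-major
// layout walks each sequence independently.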
TfLiteStatus EvalFloat(const TfLiteTensor* input,
const TfLiteTensor* input_weights,
const TfLiteTensor* recurrent_weights,
const TfLiteTensor* bias,
const TfLiteSequenceRNNParams* params,
TfLiteTensor* hidden_state, TfLiteTensor* output) {
const float* bias_ptr = GetTensorData<float>(bias);
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int num_units = input_weights->dims->data[0];
const int input_size = input->dims->data[2];
const float* input_weights_ptr = GetTensorData<float>(input_weights);
const float* recurrent_weights_ptr = GetTensorData<float>(recurrent_weights);
if (time_major) {
float* hidden_state_ptr_batch = GetTensorData<float>(hidden_state);
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch =
GetTensorData<float>(input) + s * input_size * batch_size;
float* output_ptr_batch =
GetTensorData<float>(output) + s * num_units * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, recurrent_weights_ptr, bias_ptr,
input_size, num_units, batch_size, num_units, params->activation,
hidden_state_ptr_batch, output_ptr_batch);
}
} else {
for (int b = 0; b < batch_size; b++) {
float* hidden_state_ptr_batch =
GetTensorData<float>(hidden_state) + b * num_units;
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
float* output_ptr_batch = GetTensorData<float>(output) +
b * num_units * max_time + s * num_units;
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, recurrent_weights_ptr, bias_ptr,
          input_size, num_units, /*batch_size=*/1, num_units,
params->activation, hidden_state_ptr_batch, output_ptr_batch);
}
}
}
return kTfLiteOk;
}
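// Hybrid path: quantizes the float activations on the fly, runs each step
// against the int8 weights, and writes float results back out.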
TfLiteStatus EvalHybrid(
const TfLiteTensor* input, const TfLiteTensor* input_weights,
const TfLiteTensor* recurrent_weights, const TfLiteTensor* bias,
const TfLiteSequenceRNNParams* params, TfLiteTensor* input_scratch,
TfLiteTensor* hidden_state_scratch, TfLiteTensor* scaling_factors,
TfLiteTensor* hidden_state, TfLiteTensor* output, TfLiteTensor* zero_points,
TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
bool* compute_row_sums) {
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int num_units = input_weights->dims->data[0];
const int input_size = input->dims->data[2];
const float* bias_ptr = GetTensorData<float>(bias);
const int8_t* input_weights_ptr = GetTensorData<int8_t>(input_weights);
const int8_t* recurrent_weights_ptr =
GetTensorData<int8_t>(recurrent_weights);
int8_t* quantized_input_ptr = GetTensorData<int8_t>(input_scratch);
int8_t* quantized_hidden_state_ptr =
GetTensorData<int8_t>(hidden_state_scratch);
float input_weights_scale = input_weights->params.scale;
float recurrent_weights_scale = recurrent_weights->params.scale;
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* accum_scratch_ptr = GetTensorData<int32_t>(accum_scratch);
int32_t* zero_points_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
if (time_major) {
float* hidden_state_ptr_batch = GetTensorData<float>(hidden_state);
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch =
GetTensorData<float>(input) + s * input_size * batch_size;
float* output_ptr_batch =
GetTensorData<float>(output) + s * num_units * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, input_weights_scale,
recurrent_weights_ptr, recurrent_weights_scale, bias_ptr, input_size,
num_units, batch_size, num_units, params->activation,
quantized_input_ptr, quantized_hidden_state_ptr, scaling_factors_ptr,
hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, row_sums_ptr, compute_row_sums);
}
} else {
for (int b = 0; b < batch_size; b++) {
float* hidden_state_ptr_batch =
GetTensorData<float>(hidden_state) + b * num_units;
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
float* output_ptr_batch = GetTensorData<float>(output) +
b * num_units * max_time + s * num_units;
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, input_weights_scale,
recurrent_weights_ptr, recurrent_weights_scale, bias_ptr,
            input_size, num_units, /*batch_size=*/1, num_units,
params->activation, quantized_input_ptr, quantized_hidden_state_ptr,
scaling_factors_ptr, hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, row_sums_ptr, compute_row_sums);
}
}
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSequenceRNNParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* input_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTensor, &input_weights));
const TfLiteTensor* recurrent_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights));
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias));
TfLiteTensor* hidden_state =
GetVariableInput(context, node, kHiddenStateTensor);
TF_LITE_ENSURE(context, hidden_state != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input_weights->type) {
case kTfLiteFloat32:
return EvalFloat(input, input_weights, recurrent_weights, bias, params,
hidden_state, output);
case kTfLiteUInt8:
case kTfLiteInt8: {
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &input_quantized));
TfLiteTensor* hidden_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &hidden_state_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 2, &scaling_factors));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 3, &accum_scratch));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 4, &zero_points));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums));
return EvalHybrid(input, input_weights, recurrent_weights, bias, params,
input_quantized, hidden_state_quantized,
scaling_factors, hidden_state, output, zero_points,
accum_scratch, row_sums, &op_data->compute_row_sums);
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input_weights->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_RNN() {
static TfLiteRegistration r = {
unidirectional_sequence_rnn::Init, unidirectional_sequence_rnn::Free,
unidirectional_sequence_rnn::Prepare, unidirectional_sequence_rnn::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
static float rnn_golden_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
static std::initializer_list<float> rnn_weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
static std::initializer_list<float> rnn_recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
static std::initializer_list<float> rnn_bias = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
class UnidirectionalRNNOpModel : public SingleOpModel {
public:
UnidirectionalRNNOpModel(
int batches, int sequence_len, int units, int size, bool time_major,
const TensorType& weights = TensorType_FLOAT32,
const TensorType& recurrent_weights = TensorType_FLOAT32,
bool asymmetric_quantize_inputs = false)
: batches_(batches),
sequence_len_(sequence_len),
units_(units),
input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
BuiltinOptions_SequenceRNNOptions,
CreateSequenceRNNOptions(builder_, time_major,
ActivationFunctionType_RELU,
asymmetric_quantize_inputs)
.Union());
if (time_major) {
BuildInterpreter({{sequence_len_, batches_, input_size_},
{units_, input_size_},
{units_, units_},
{units_},
                        {batches_, units_}});
} else {
BuildInterpreter({{batches_, sequence_len_, input_size_},
{units_, input_size_},
{units_, units_},
{units_},
{batches_, units_}});
}
}
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetWeights(std::initializer_list<float> f) {
PopulateTensor(weights_, f);
}
void SetRecurrentWeights(std::initializer_list<float> f) {
PopulateTensor(recurrent_weights_, f);
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
int sequence_len() { return sequence_len_; }
protected:
int input_;
int weights_;
int recurrent_weights_;
int bias_;
int hidden_state_;
int output_;
int batches_;
int sequence_len_;
int units_;
int input_size_;
};
class HybridUnidirectionalRNNOpModel : public UnidirectionalRNNOpModel {
public:
HybridUnidirectionalRNNOpModel(int batches, int sequence_len, int units,
int size, bool time_major,
TensorType tensor_type,
bool asymmetric_quantize_inputs)
: UnidirectionalRNNOpModel(batches, sequence_len, units, size, time_major,
tensor_type, tensor_type,
asymmetric_quantize_inputs) {
tensor_type_ = tensor_type;
}
void SetWeights(int weights_idx, const std::vector<float>& f) {
if (tensor_type_ == TensorType_UINT8) {
SymmetricQuantizeAndPopulate(weights_idx, f);
} else {
SignedSymmetricQuantizeAndPopulate(weights_idx, f);
}
}
void SetWeights(std::initializer_list<float> f) { SetWeights(weights_, f); }
void SetRecurrentWeights(std::initializer_list<float> f) {
SetWeights(recurrent_weights_, f);
}
protected:
TensorType tensor_type_;
};
TEST(UnidirectionalRNNOpTest, BlackBoxTest) {
  UnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                               /*units=*/16, /*size=*/8, /*time_major=*/false);
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output;
float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
}
class HybridUnidirectionalRNNOpModelOpTest
: public ::testing::TestWithParam<bool> {};
TEST_P(HybridUnidirectionalRNNOpModelOpTest, BlackBoxTestUint8) {
  HybridUnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                                     /*units=*/16, /*size=*/8,
                                     /*time_major=*/false, TensorType_UINT8,
                                     GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output;
float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(
expected, 0.013)));
}
TEST_P(HybridUnidirectionalRNNOpModelOpTest, BlackBoxTestInt8) {
  HybridUnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                                     /*units=*/16, /*size=*/8,
                                     /*time_major=*/false, TensorType_INT8,
                                     GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output;
float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(
expected, 0.013)));
}
TEST(UnidirectionalRNNOpTest, TimeMajorBlackBoxTest) {
  UnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                               /*units=*/16, /*size=*/8,
                               /*time_major=*/true);
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_batch_start = rnn_golden_output + i * rnn.num_units();
float* golden_batch_end = golden_batch_start + rnn.num_units();
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
}
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
}
TEST_P(HybridUnidirectionalRNNOpModelOpTest, TimeMajorBlackBoxTestUint8) {
  HybridUnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                                     /*units=*/16, /*size=*/8,
                                     /*time_major=*/true, TensorType_UINT8,
                                     GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_batch_start = rnn_golden_output + i * rnn.num_units();
float* golden_batch_end = golden_batch_start + rnn.num_units();
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
}
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(
expected, 0.013)));
}
TEST_P(HybridUnidirectionalRNNOpModelOpTest, TimeMajorBlackBoxTestInt8) {
  HybridUnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                                     /*units=*/16, /*size=*/8,
                                     /*time_major=*/true, TensorType_INT8,
                                     GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_batch_start = rnn_golden_output + i * rnn.num_units();
float* golden_batch_end = golden_batch_start + rnn.num_units();
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
}
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(
expected, 0.013)));
}
INSTANTIATE_TEST_SUITE_P(HybridUnidirectionalRNNOpModelOpTest,
HybridUnidirectionalRNNOpModelOpTest,
::testing::ValuesIn({true, false}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_rnn.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_rnn_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3394c0a7-db8c-4a2f-8b1d-18e30532a010 | cpp | tensorflow/tensorflow | sparse_to_dense | tensorflow/lite/kernels/sparse_to_dense.cc | tensorflow/lite/kernels/sparse_to_dense_test.cc | #include <stdint.h>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace sparse_to_dense {
constexpr int kIndicesTensor = 0;
constexpr int kOutputShapeTensor = 1;
constexpr int kValueInputTensor = 2;
constexpr int kDefaultValueTensor = 3;
constexpr int kOutputTensor = 0;
constexpr int kMaxDimensions = 4;
template <typename T>
TfLiteStatus Resize(TfLiteContext* context, const TfLiteTensor* output_shape,
TfLiteTensor* output) {
const int output_dimensions = NumElements(output_shape);
TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(output_dimensions);
for (int i = 0; i < output_dimensions; ++i) {
output_shape_array->data[i] = GetTensorData<T>(output_shape)[i];
}
return context->ResizeTensor(context, output, output_shape_array);
}
TfLiteStatus CheckDimensionsMatch(TfLiteContext* context,
const TfLiteTensor* indices,
const TfLiteTensor* output_shape,
const TfLiteTensor* values) {
switch (NumDimensions(indices)) {
case 0:
case 1: {
if (NumDimensions(values) == 0) {
TF_LITE_ENSURE_EQ(context, NumElements(indices), NumElements(values));
}
TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 1);
break;
}
case 2: {
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 1),
NumElements(output_shape));
if (NumDimensions(values) == 0)
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
NumElements(values));
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"Wrong indices dimensions %d, should be less than 3.",
NumDimensions(indices));
return kTfLiteError;
}
return kTfLiteOk;
}
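// Expands each sparse index into an explicit 4-D coordinate, left-padded
// with zeros, so the dense kernel can assume kMaxDimensions dimensions.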
template <typename T>
TfLiteStatus GetIndicesVector(TfLiteContext* context,
const TfLiteTensor* indices,
const int num_indices,
std::vector<std::vector<T>>* indices_vector) {
switch (NumDimensions(indices)) {
case 0:
case 1: {
const auto indices_data = GetTensorData<T>(indices);
for (int i = 0; i < num_indices; ++i) {
std::vector<T> index({0, 0, 0, indices_data[i]});
indices_vector->push_back(index);
}
break;
}
case 2: {
const int true_dimensions = SizeOfDimension(indices, 1);
TF_LITE_ENSURE(context, true_dimensions <= kMaxDimensions);
for (int i = 0; i < num_indices; ++i) {
std::vector<T> index;
index.reserve(kMaxDimensions);
for (int j = 0; j < kMaxDimensions - true_dimensions; ++j) {
index.push_back(0);
}
for (int j = 0; j < true_dimensions; ++j) {
index.push_back(GetTensorData<T>(indices)[i * true_dimensions + j]);
}
indices_vector->push_back(index);
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"Indices dimensions problem, got %d dimensions",
NumDimensions(indices));
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus ResizeOutputShape(TfLiteContext* context,
const TfLiteTensor* output_shape,
TfLiteTensor* output) {
if (output_shape->type == kTfLiteInt32) {
return Resize<int32_t>(context, output_shape, output);
} else if (output_shape->type == kTfLiteInt64) {
return Resize<int64_t>(context, output_shape, output);
} else {
TF_LITE_KERNEL_LOG(context, "Dense shape type %d not supported.",
output_shape->type);
return kTfLiteError;
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kIndicesTensor, &indices));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kOutputShapeTensor, &output_shape));
const TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueInputTensor, &values));
const TfLiteTensor* default_value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDefaultValueTensor,
&default_value));
TF_LITE_ASSERT(NumDimensions(indices) >= 0);
TF_LITE_ENSURE(context, NumDimensions(indices) < 3);
TF_LITE_ASSERT(NumDimensions(output_shape) >= 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
TF_LITE_ASSERT(NumDimensions(values) >= 0);
TF_LITE_ENSURE(context, NumDimensions(values) < 2);
TF_LITE_ENSURE_EQ(context, NumElements(default_value), 1);
TF_LITE_ENSURE(
context, indices->type == kTfLiteInt32 || indices->type == kTfLiteInt64);
TF_LITE_ENSURE(context, output_shape->type == kTfLiteInt32 ||
output_shape->type == kTfLiteInt64);
TF_LITE_ENSURE(context, values->type == kTfLiteInt32 ||
values->type == kTfLiteInt64 ||
values->type == kTfLiteInt8 ||
values->type == kTfLiteUInt8 ||
values->type == kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, values->type, default_value->type);
TF_LITE_ENSURE_OK(
context, CheckDimensionsMatch(context, indices, output_shape, values));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = values->type;
TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
if (!IsConstantOrPersistentTensor(output_shape)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputShape(context, output_shape, output);
}
template <typename T, typename TI>
TfLiteStatus SparseToDenseImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kIndicesTensor, &indices));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kOutputShapeTensor, &output_shape));
const TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueInputTensor, &values));
const TfLiteTensor* default_value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDefaultValueTensor,
&default_value));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputShape(context, output_shape, output));
}
const int num_indices = SizeOfDimension(indices, 0);
const bool value_is_scalar = NumDimensions(values) == 0;
std::vector<std::vector<TI>> indices_vector;
indices_vector.reserve(num_indices);
TF_LITE_ENSURE_OK(context, GetIndicesVector<TI>(context, indices, num_indices,
&indices_vector));
reference_ops::SparseToDense(indices_vector, GetTensorData<T>(values),
*GetTensorData<T>(default_value),
value_is_scalar, GetTensorShape(output),
GetTensorData<T>(output));
return kTfLiteOk;
}
template <typename T>
TfLiteStatus EvalForIndexType(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* indices) {
switch (indices->type) {
case kTfLiteInt32: {
return SparseToDenseImpl<T, int32_t>(context, node);
}
case kTfLiteInt64: {
return SparseToDenseImpl<T, int64_t>(context, node);
}
default:
TF_LITE_KERNEL_LOG(
context,
"Indice type %s is currently not supported by sparse to dense.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kIndicesTensor, &indices));
const TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueInputTensor, &values));
switch (values->type) {
case kTfLiteFloat32:
return EvalForIndexType<float>(context, node, indices);
case kTfLiteInt32:
return EvalForIndexType<int32_t>(context, node, indices);
case kTfLiteInt64:
return EvalForIndexType<int64_t>(context, node, indices);
case kTfLiteInt8:
return EvalForIndexType<int8_t>(context, node, indices);
case kTfLiteUInt8:
return EvalForIndexType<uint8_t>(context, node, indices);
default:
TF_LITE_KERNEL_LOG(
context,
"Value type %s is currently not supported by sparse to dense.",
TfLiteTypeGetName(values->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_SPARSE_TO_DENSE() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 sparse_to_dense::Prepare,
                                 sparse_to_dense::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
enum class TestType {
kPersistentRo = 0,
kConstant = 1,
kDynamic = 2,
};
template <typename T>
class SparseToDenseOpModel : public SingleOpModel {
public:
SparseToDenseOpModel(std::initializer_list<int> indices_shape,
std::initializer_list<int> output_shape_shape,
std::initializer_list<int> values_shape, T default_value,
TensorType tensor_index_type,
TensorType tensor_input_type,
std::initializer_list<int> output_shape_data,
TestType test_type)
: test_type_(test_type) {
indices_ = AddInput(tensor_index_type);
output_shape_ = test_type == TestType::kConstant
? AddConstInput(TensorType_INT32, output_shape_data,
output_shape_shape)
: AddInput(TensorType_INT32);
values_ = AddInput(tensor_input_type);
default_value_ = AddInput(tensor_input_type);
output_ = AddOutput(tensor_input_type);
SetBuiltinOp(BuiltinOperator_SPARSE_TO_DENSE,
BuiltinOptions_SparseToDenseOptions,
CreateSparseToDenseOptions(builder_, false).Union());
BuildInterpreter({indices_shape, output_shape_shape, values_shape, {1}},
                     /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/true, /*allocate_and_delegate=*/false);
if (test_type == TestType::kPersistentRo) {
interpreter_->tensor(output_shape_)->allocation_type =
kTfLitePersistentRo;
interpreter_->ResizeInputTensorStrict(output_shape_, output_shape_shape);
PopulateTensor<int32_t>(output_shape_, output_shape_data);
}
AllocateAndDelegate(true);
PopulateTensor<T>(default_value_, {default_value});
}
int indices() { return indices_; }
int output_shape() { return output_shape_; }
int values() { return values_; }
bool IsDynamicOutput() {
const TfLiteTensor* tensor = interpreter_->tensor(output_);
return tensor->allocation_type == kTfLiteDynamic;
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int indices_;
int output_shape_;
int values_;
int default_value_;
int output_;
TestType test_type_;
};
class SparseToDenseOpModelTest : public ::testing::TestWithParam<TestType> {};
TEST_P(SparseToDenseOpModelTest, ZeroDimensionTest) {
SparseToDenseOpModel<float> m({1}, {1}, {1}, 0, TensorType_INT32,
TensorType_FLOAT32, {5}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {3});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {5});
}
m.PopulateTensor<float>(m.values(), {7});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 7, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({5}));
}
TEST_P(SparseToDenseOpModelTest, OneDimensionTest) {
SparseToDenseOpModel<float> m({3}, {1}, {3}, 0, TensorType_INT32,
TensorType_FLOAT32, {7}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {1, 3, 5});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {7});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 2, 0, 4, 0, 6, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({7}));
}
TEST_P(SparseToDenseOpModelTest, TwoDimensionsTest) {
SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, 0, TensorType_INT32,
TensorType_FLOAT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int64IndexTest) {
SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, -1, TensorType_INT64,
TensorType_FLOAT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int64_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, DefaultValueTest) {
SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_FLOAT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int32ValueTest) {
SparseToDenseOpModel<int32_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_INT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<int32_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int64ValueTest) {
SparseToDenseOpModel<int64_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_INT64, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<int64_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int8ValueTest) {
SparseToDenseOpModel<int8_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_INT8, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<int8_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, UInt8ValueTest) {
SparseToDenseOpModel<uint8_t> m({3, 3}, {3}, {3}, 1, TensorType_INT32,
TensorType_UINT8, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<uint8_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 4, 1, 1, 6, 1, 1, 1, 1, 1, 1, 1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
INSTANTIATE_TEST_SUITE_P(SparseToDenseOpModelTest, SparseToDenseOpModelTest,
::testing::Values(TestType::kPersistentRo,
TestType::kConstant,
TestType::kDynamic));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sparse_to_dense.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sparse_to_dense_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
46e44543-59fa-42da-9f9d-9bb7acad2bdd | cpp | tensorflow/tensorflow | one_hot | tensorflow/lite/delegates/gpu/common/tasks/one_hot.cc | tensorflow/lite/delegates/gpu/cl/kernels/one_hot_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/one_hot.h"
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
namespace tflite {
namespace gpu {
std::string GetOneHotCode(const OperationDef& op_def,
const OneHotAttributes& attr, GPUOperation* op) {
op->AddSrcTensor("src_tensor", op_def.src_tensors[0]);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " int idx = Z * 4;\n";
c += " int hot_idx = args.src_tensor.Read(0, 0, 0).x;\n";
c += " FLT4 res = INIT_FLT4(args.off_value);\n";
c += " if ((hot_idx >= idx) && (hot_idx < (idx + 4))) {\n";
c += " res.x = (idx + 0) == hot_idx ? args.on_value : args.off_value;\n";
c += " res.y = (idx + 1) == hot_idx ? args.on_value : args.off_value;\n";
c += " res.z = (idx + 2) == hot_idx ? args.on_value : args.off_value;\n";
c += " res.w = (idx + 3) == hot_idx ? args.on_value : args.off_value;\n";
c += " }\n";
c += " args.dst_tensor.Write(res, X, Y, Z);\n";
c += "}\n";
return c;
}
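// Assembles the OneHot GPU operation; the on/off values are bound as scalar
// arguments at the precision chosen for the kernel.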
GPUOperation CreateOneHot(const OperationDef& definition,
const OneHotAttributes& attr) {
GPUOperation op(definition);
op.code_ = GetOneHotCode(definition, attr, &op);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
if (definition.precision == CalculationsPrecision::F32) {
op.args_.AddFloat("on_value", attr.on_value);
op.args_.AddFloat("off_value", attr.off_value);
} else {
op.args_.AddHalf("on_value", half(attr.on_value));
op.args_.AddHalf("off_value", half(attr.off_value));
}
return op;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/one_hot_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, OneHot) {
auto status = OneHotTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, OneHotBatch) {
auto status = OneHotBatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/one_hot.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/one_hot_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e9734e8-2cbe-4ac0-bf10-2ea388d9cbf8 | cpp | tensorflow/tensorflow | gather_nd | tensorflow/lite/kernels/gather_nd.cc | tensorflow/lite/kernels/gather_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace gather_nd {
constexpr int kParams = 0;
constexpr int kIndices = 1;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* params;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, ¶ms));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (params->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt16:
case kTfLiteInt64:
case kTfLiteInt32:
case kTfLiteString:
case kTfLiteBool:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Params of type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(params->type));
return kTfLiteError;
}
switch (indices->type) {
case kTfLiteInt64:
case kTfLiteInt32:
case kTfLiteInt16:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Indices of type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
const int params_rank = NumDimensions(params);
const int indices_rank = NumDimensions(indices);
  if (params_rank < 1) {
    TF_LITE_KERNEL_LOG(context, "Params must be at least a vector.");
    return kTfLiteError;
  }
  if (indices_rank < 1) {
    TF_LITE_KERNEL_LOG(context, "Indices must be at least a vector.");
    return kTfLiteError;
  }
  // Read the innermost indices dimension only after validating the ranks, so
  // scalar indices cannot index dims->data out of bounds.
  const int indices_nd = SizeOfDimension(indices, indices_rank - 1);
if (indices_nd > params_rank) {
TF_LITE_KERNEL_LOG(
context, "Index innermost dimension length must be <= params rank.");
return kTfLiteError;
}
output->type = params->type;
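  // The output keeps the outer dimensions of indices and appends the params
  // dimensions not addressed by the innermost index.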
const int output_rank = indices_rank + params_rank - indices_nd - 1;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
int output_index = 0;
for (int i = 0; i < indices_rank - 1; ++i) {
output_shape->data[output_index++] = indices->dims->data[i];
}
for (int i = indices_nd; i < params_rank; ++i) {
output_shape->data[output_index++] = params->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
template <typename ParamsT, typename IndicesT>
TfLiteStatus GatherNd(const TfLiteTensor* params, const TfLiteTensor* indices,
TfLiteTensor* output) {
return reference_ops::GatherNd(
GetTensorShape(params), GetTensorData<ParamsT>(params),
GetTensorShape(indices), GetTensorData<IndicesT>(indices),
GetTensorShape(output), GetTensorData<ParamsT>(output));
}
template <typename IndicesT>
TfLiteStatus GatherNdString(const TfLiteTensor* params,
const TfLiteTensor* indices, TfLiteTensor* output) {
return reference_ops::GatherNdString(
GetTensorShape(params), params, GetTensorShape(indices),
GetTensorData<IndicesT>(indices), GetTensorShape(output), output);
}
template <typename IndicesT>
TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,
const TfLiteTensor* indices, TfLiteTensor* output) {
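  // Editorial note: despite its name, the scan below checks that every index
  // element is non-negative (zero is allowed). Out-of-range positive indices
  // are caught later by the reference GatherNd implementations, which report
  // failure back through the returned status.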
bool indices_has_only_positive_elements = true;
const auto* indices_values = GetTensorData<IndicesT>(indices);
const size_t num_indices = indices->bytes / sizeof(IndicesT);
for (size_t i = 0; i < num_indices; i++) {
if (indices_values[i] < 0) {
indices_has_only_positive_elements = false;
break;
}
}
TF_LITE_ENSURE(context, indices_has_only_positive_elements);
TfLiteStatus status = kTfLiteError;
switch (params->type) {
case kTfLiteFloat32:
status = GatherNd<float, IndicesT>(params, indices, output);
break;
case kTfLiteUInt8:
status = GatherNd<uint8_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt8:
status = GatherNd<int8_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt16:
status = GatherNd<int16_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt32:
status = GatherNd<int32_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt64:
status = GatherNd<int64_t, IndicesT>(params, indices, output);
break;
case kTfLiteString:
status = GatherNdString<IndicesT>(params, indices, output);
break;
case kTfLiteBool:
status = GatherNd<bool, IndicesT>(params, indices, output);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Params type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(params->type));
return kTfLiteError;
}
if (status != kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "gather_nd index out of bounds");
}
return status;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* params;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, ¶ms));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
(NumElements(params) == 0 && NumElements(indices) == 0) ||
NumElements(params) > 0);
switch (indices->type) {
case kTfLiteInt16:
return EvalGatherNd<int16_t>(context, params, indices, output);
case kTfLiteInt32:
return EvalGatherNd<int32_t>(context, params, indices, output);
case kTfLiteInt64:
return EvalGatherNd<int64_t>(context, params, indices, output);
default:
TF_LITE_KERNEL_LOG(context,
"Indices of type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
}
}  // namespace gather_nd
TfLiteRegistration* Register_GATHER_ND() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 gather_nd::Prepare, gather_nd::Eval};
return &r;
}
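// Usage sketch (an assumption about typical client code, not taken from this
// file): a resolver returns the registration above for the builtin, e.g.
//   tflite::MutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_GATHER_ND,
//                       tflite::ops::builtin::Register_GATHER_ND());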
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class GatherNdOpModel : public SingleOpModel {
public:
GatherNdOpModel(const TensorData& params, const TensorData& indices) {
params_ = AddInput(params);
indices_ = AddInput(indices);
output_ = AddOutput(params.type);
SetBuiltinOp(BuiltinOperator_GATHER_ND, BuiltinOptions_GatherNdOptions,
CreateGatherNdOptions(builder_).Union());
BuildInterpreter({GetShape(params_), GetShape(indices_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(params_, data);
}
template <typename T>
void SetPositions(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int params_;
int indices_;
int output_;
};
TEST(GatherNdOpTest, ElementIndexingIntoMatrix) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));
}
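// How the expected values above fall out (editorial note): the flat index
// data {0, 0, 1, 1} is read as an indices tensor of shape [2, 2], i.e. the
// two index rows [0, 0] and [1, 1], which select m[0][0] == 1.1 and
// m[1][1] == 2.2 from the 2x2 params matrix.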
TEST(GatherNdOpTest, ErrorOnOutOfBoundsTooLarge) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 2, 0});
EXPECT_EQ(m.Invoke(), kTfLiteError);
m.SetPositions<int32_t>({0, 0, 1, 2});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, ErrorOnOutOfBoundsNegative) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({1, -1, 1, 1});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, SliceIndexingIntoMatrix) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 1}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoMatrix1) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},
{TensorType_INT32, {2, 1, 1}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoMatrix2) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},
{TensorType_INT32, {2, 1, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));
}
TEST(GatherNdOpTest, DuplicateIndexingIntoMatrix) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 1.1}));
}
TEST(GatherNdOpTest, ElementIndexingIntoRank3Tensor) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {1, 2, 3}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 0, 1, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));
}
TEST(GatherNdOpTest, SliceIndexingIntoRank3Tensor) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 1}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {1.1, -1.2, 1.3, -2.1, 2.2, 2.3, 5.1,
-5.2, 5.3, 6.1, -6.2, 6.3}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor1) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 1, 3}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 0, 1, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor2) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 1, 1}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
1.1, -1.2, 1.3, -2.1, 2.2, 2.3}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor3) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 1, 1, 0, 0, 0, 2, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3, 1.1,
-1.2, 1.3, 6.1, -6.2, 6.3}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor4) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2, 3}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 0, 1, 1, 0, 1, 1, 1, 2, 2, 1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-1.2, 3.2, 4.3, 6.3}));
}
TEST(GatherNdOpTest, DuplicateIndexingIntoRank3Tensor) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 1, 0, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, -2.1, 2.2, 2.3}));
}
TEST(GatherNdOpTest, Float32Int32) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}
TEST(GatherNdOpTest, Float32Int64) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT64, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}
TEST(GatherNdOpTest, Int32Int32) {
GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int32_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int32Int64) {
GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int32_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Uint8Int32) {
GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<uint8_t>({1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4,
5, 5, 5, 6, 6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(), ElementsAreArray({2, 2, 2, 3, 3, 3}));
}
TEST(GatherNdOpTest, Uint8Int64) {
GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<uint8_t>({1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4,
5, 5, 5, 6, 6, 6});
m.SetPositions<int64_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(), ElementsAreArray({2, 2, 2, 3, 3, 3}));
}
TEST(GatherNdOpTest, Int8Int32) {
GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int8_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int8Int64) {
GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int8_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int16Int32) {
GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int16_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int16Int64) {
GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int16_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int64Int32) {
GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int64_t>({1LL, -1LL, 1LL, -2LL, 2LL, 2LL,
3LL, 3LL, -3LL, -4LL, -4LL, 4LL,
5LL, -5LL, 5LL, 6LL, -6LL, 6LL});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));
}
TEST(GatherNdOpTest, Int64Int64) {
GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int64_t>({1LL, -1LL, 1LL, -2LL, 2LL, 2LL,
3LL, 3LL, -3LL, -4LL, -4LL, 4LL,
5LL, -5LL, 5LL, 6LL, -6LL, 6LL});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));
}
TEST(GatherNdOpTest, Float32Int16) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT16, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int16_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}
TEST(GatherNdOpTest, StringInt32) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<std::string>(),
ElementsAreArray({"D", "E", "F", "G", "H", "I"}));
}
TEST(GatherNdOpTest, StringInt64) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<std::string>(),
ElementsAreArray({"D", "E", "F", "G", "H", "I"}));
}
TEST(GatherNdOpTest, StringOutOfBoundsTooLarge) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int32_t>({0, 0, 3, 0});
ASSERT_EQ(m.Invoke(), kTfLiteError);
m.SetPositions<int32_t>({0, 0, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, StringOutOfBoundsNegative) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int32_t>({1, -1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, EmptyParamsAndIndex) {
GatherNdOpModel m({TensorType_FLOAT32, {1, 0}}, {TensorType_INT32, {0, 2}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({0}));
}
}  // namespace
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8d3ccf57-d872-467d-90c4-231d9eca64f4 | cpp | tensorflow/tensorflow | fake_quant | tensorflow/lite/kernels/fake_quant.cc | tensorflow/lite/kernels/fake_quant_test.cc | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace fake_quant {
enum KernelType {
kReference,
};
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const auto* params =
reinterpret_cast<TfLiteFakeQuantParams*>(node->builtin_data);
if (params->narrow_range) {
TF_LITE_KERNEL_LOG(
context,
"narrow_range FakeQuant is not currently supported at runtime. "
"narrow_range is only meant to be applied to weights, not activations");
return kTfLiteError;
}
OpContext op_context(context, node);
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input->dims);
op_context.output->type = op_context.input->type;
return context->ResizeTensor(context, op_context.output, output_dims);
}
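// In outline (an editorial paraphrase of reference_ops::FakeQuant, not
// authoritative documentation): the op nudges [min, max] so that zero is
// exactly representable, derives the step scale = (max - min) /
// (2^num_bits - 1), and emulates quantization in float as
//   output = round(clamp(x, min, max) / scale) * scale
// for ranges like [0, 1] where the nudge is a no-op. Outputs therefore land
// on the quantization grid rather than reproducing the inputs bit-for-bit.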
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
const auto* params =
reinterpret_cast<TfLiteFakeQuantParams*>(node->builtin_data);
tflite::FakeQuantParams op_params;
op_params.num_bits = params->num_bits;
op_params.minmax.min = params->min;
op_params.minmax.max = params->max;
reference_ops::FakeQuant(op_params, GetTensorShape(op_context.input),
GetTensorData<float>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<float>(op_context.output));
return kTfLiteOk;
}
}  // namespace fake_quant
TfLiteRegistration* Register_FAKE_QUANT_REF() {
static TfLiteRegistration r = {nullptr, nullptr, fake_quant::Prepare,
fake_quant::Eval<fake_quant::kReference>};
return &r;
}
TfLiteRegistration* Register_FAKE_QUANT() { return Register_FAKE_QUANT_REF(); }
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class FakeQuantOpModel : public SingleOpModel {
public:
FakeQuantOpModel(const TensorData& input, const TensorType& output, float min,
float max, int num_bits) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_FAKE_QUANT, BuiltinOptions_FakeQuantOptions,
CreateFakeQuantOptions(builder_, min, max, num_bits).Union());
BuildInterpreter({GetShape(input_)});
}
template <class T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
template <class T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int output_;
};
TEST(FakeQuantOpTest, FloatPositiveRange8Test) {
std::initializer_list<float> data = {0.0, 1.0, 0.25,
0.50, 0.4444444, 0.00001};
FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, 0.0f,
1.0f, 8);
m.SetInput<float>(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({0, 1, 0.25098, 0.498039, 0.443137, 0})));
}
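// Spot-check of the expected values (editorial): with min = 0, max = 1 and
// num_bits = 8 the step is 1 / 255, so
//   0.25      -> round(0.25 * 255)  = 64  -> 64 / 255  ~= 0.250980,
//   0.4444444 -> round(113.333)     = 113 -> 113 / 255 ~= 0.443137,
//   0.00001   -> round(0.00255)     = 0   -> 0,
// matching the ArrayFloatNear expectations above.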
TEST(FakeQuantOpTest, FloatNegativeRange8Test) {
std::initializer_list<float> data = {0.0, -0.9, 0.25,
0.50, 0.4444444, -0.00001};
FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, -0.9f,
0.9f, 8);
m.SetInput<float>(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{0, -0.896471, 0.247059, 0.501176, 0.444706, 0})));
}
TEST(FakeQuantOpTest, FloatPositiveRange16Test) {
std::initializer_list<float> data = {0.0, 1.0, 0.25,
0.50, 0.4444444, 0.00001};
FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, 0.0f,
1.0f, 16);
m.SetInput<float>(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{0, 1, 0.250004, 0.500008, 0.44445, 1.5259e-05})));
}
TEST(FakeQuantOpTest, FloatNegativeRange16Test) {
std::initializer_list<float> data = {0.0, -0.9, 0.25,
0.50, 0.4444444, -0.00001};
FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, -0.9f,
0.9f, 16);
m.SetInput<float>(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{0, -0.900014, 0.249998, 0.499995, 0.444431, 0})));
}
}  // namespace
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/fake_quant.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/fake_quant_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b0342d3b-45bb-49f1-8468-d8b5280b1d89 | cpp | tensorflow/tensorflow | rfft2d | tensorflow/lite/kernels/rfft2d.cc | tensorflow/lite/kernels/rfft2d_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <complex>
#include "third_party/fft2d/fft2d.h"
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace rfft2d {
using std::complex;
constexpr int kInputTensor = 0;
constexpr int kFftLengthTensor = 1;
constexpr int kOutputTensor = 0;
constexpr int kFftIntegerWorkingAreaTensor = 0;
constexpr int kFftDoubleWorkingAreaTensor = 1;
constexpr int kTensorNotAllocated = -1;
struct OpData {
int fft_integer_working_area_id = kTensorNotAllocated;
int fft_double_working_area_id = kTensorNotAllocated;
};
bool IsPowerOfTwo(uint32_t v) { return v && !(v & (v - 1)); }
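// Bit-trick note (editorial) on IsPowerOfTwo above: a power of two has
// exactly one set bit, so v & (v - 1) clears it to zero
// (8 & 7 == 0b1000 & 0b0111 == 0), while any other nonzero v keeps at least
// one bit set (6 & 5 == 0b110 & 0b101 == 0b100). The leading `v &&`
// excludes v == 0.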
static TfLiteStatus InitTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (data->fft_integer_working_area_id != kTensorNotAllocated &&
data->fft_double_working_area_id != kTensorNotAllocated) {
return kTfLiteOk;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(2);
int first_new_index;
TF_LITE_ENSURE_STATUS(context->AddTensors(context, 2, &first_new_index));
node->temporaries->data[kFftIntegerWorkingAreaTensor] = first_new_index;
data->fft_integer_working_area_id = first_new_index;
node->temporaries->data[kFftDoubleWorkingAreaTensor] = first_new_index + 1;
data->fft_double_working_area_id = first_new_index + 1;
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
fft_integer_working_area->type = kTfLiteInt32;
fft_integer_working_area->allocation_type = kTfLiteArenaRw;
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
fft_double_working_area->type = kTfLiteInt64;
fft_double_working_area->allocation_type = kTfLiteArenaRw;
return kTfLiteOk;
}
TfLiteStatus ResizeOutputandTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const int num_dims = NumDimensions(input);
TF_LITE_ENSURE(context, num_dims >= 2);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[0]));
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[1]));
int fft_height, fft_width;
fft_height = fft_length_data[0];
fft_width = fft_length_data[1];
int fft_working_length = std::max(fft_height, fft_width / 2);
int half_fft_working_length = fft_working_length / 2;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
output_shape->data[num_dims - 2] = fft_length_data[0];
output_shape->data[num_dims - 1] = fft_length_data[1] / 2 + 1;
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteIntArray* fft_integer_working_area_shape = TfLiteIntArrayCreate(1);
fft_integer_working_area_shape->data[0] =
2 + static_cast<int>(sqrt(fft_working_length));
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_integer_working_area,
fft_integer_working_area_shape));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
TfLiteIntArray* fft_double_working_area_shape = TfLiteIntArrayCreate(1);
fft_double_working_area_shape->data[0] =
half_fft_working_length + fft_width / 4;
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_double_working_area,
fft_double_working_area_shape));
return kTfLiteOk;
}
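// Why the last output dimension is fft_length[1] / 2 + 1 (editorial note):
// for a real-valued input the 2-D DFT is Hermitian-symmetric,
//   X[k1][k2] == conj(X[(H - k1) % H][(W - k2) % W]),
// so only the non-redundant half of each row is produced. E.g. fft_length
// {4, 8} yields complex output rows of width 8 / 2 + 1 == 5.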
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 2);
if (input->type != kTfLiteFloat32) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for input is not supported by rfft2d.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const RuntimeShape fft_length_shape = GetTensorShape(fft_length);
TF_LITE_ENSURE_EQ(context, NumDimensions(fft_length), 1);
TF_LITE_ENSURE_EQ(context, fft_length_shape.Dims(0), 2);
if (fft_length->type != kTfLiteInt32) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for fft_length is not supported by rfft2d.",
TfLiteTypeGetName(fft_length->type));
return kTfLiteError;
}
TF_LITE_ENSURE_STATUS(InitTemporaryTensors(context, node));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteComplex64;
if (!IsConstantOrPersistentTensor(fft_length)) {
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
SetTensorToDynamic(fft_integer_working_area);
SetTensorToDynamic(fft_double_working_area);
SetTensorToDynamic(output);
return kTfLiteOk;
}
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
return kTfLiteOk;
}
void Rfft2dReorder(int fft_height, int fft_width, double** fft_input_output) {
int fft_height_half;
ruy::profiler::ScopeLabel label("Rfft2dReorder");
double real, img;
fft_height_half = fft_height >> 1;
for (int i = fft_height_half + 1; i < fft_height; ++i) {
real = fft_input_output[i][0];
img = fft_input_output[i][1];
fft_input_output[i][fft_width] = img;
fft_input_output[i][fft_width + 1] = real;
fft_input_output[fft_height - i][fft_width] = img;
fft_input_output[fft_height - i][fft_width + 1] = -real;
fft_input_output[i][0] = fft_input_output[fft_height - i][0];
fft_input_output[i][1] = -fft_input_output[fft_height - i][1];
}
double temp = fft_input_output[0][1];
fft_input_output[0][fft_width + 1] = 0;
fft_input_output[0][1] = 0;
fft_input_output[fft_height_half][fft_width] =
fft_input_output[fft_height_half][1];
fft_input_output[fft_height_half][fft_width + 1] = 0;
fft_input_output[fft_height_half][1] = 0;
fft_input_output[0][fft_width] = temp;
for (int i = 0; i < fft_height; ++i) {
for (int j = 1; j < fft_width + 2; j += 2) {
fft_input_output[i][j] = -fft_input_output[i][j];
}
}
}
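// In outline (an editorial reading of the code above, not authoritative
// documentation of the fft2d package): rdft2d emits the real-input spectrum
// in a packed layout in which the fft_width / 2 ("Nyquist") column and parts
// of the zero-frequency column share storage in columns 0 and 1.
// Rfft2dReorder unpacks this into fft_height rows of fft_width / 2 + 1
// interleaved (real, imag) pairs, recovering the redundant entries through
// conjugate symmetry of a real-input DFT and negating imaginary parts to
// match TensorFlow's sign convention for RFFT2D.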
void Rfft2dImpl(int fft_height, int fft_width, double** fft_input_output,
int* fft_integer_working_area_data,
double* fft_double_working_area_data) {
ruy::profiler::ScopeLabel label("Rfft2dImpl");
double* fft_dynamic_working_area = nullptr;
const int kForwardFft = 1;
rdft2d(fft_height, fft_width, kForwardFft, fft_input_output,
fft_dynamic_working_area, fft_integer_working_area_data,
fft_double_working_area_data);
Rfft2dReorder(fft_height, fft_width, fft_input_output);
}
void PrepareInputBuffer(const float* input_data, int input_height,
int input_width, int fft_height, int fft_width,
double** fft_input_output) {
int valid_input_height = std::min(input_height, fft_height);
int valid_input_width = std::min(input_width, fft_width);
for (int i = 0; i < valid_input_height; ++i) {
int in_pos = i * input_width;
for (int j = 0; j < valid_input_width; ++j) {
fft_input_output[i][j] = input_data[in_pos++];
}
for (int j = valid_input_width; j < fft_width + 2; ++j) {
fft_input_output[i][j] = 0;
}
}
for (int i = valid_input_height; i < fft_height; ++i) {
for (int j = 0; j < fft_width + 2; ++j) {
fft_input_output[i][j] = 0;
}
}
}
void PrepareOutputBuffer(complex<float>* output_data, int fft_height,
int fft_width, double** fft_input_output) {
int cnt = 0;
for (int i = 0; i < fft_height; ++i) {
for (int j = 0; j < fft_width / 2 + 1; ++j) {
output_data[cnt++] = complex<float>(fft_input_output[i][j * 2],
fft_input_output[i][j * 2 + 1]);
}
}
}
TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const float* input_data = GetTensorData<float>(input);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
complex<float>* output_data = GetTensorData<complex<float>>(output);
int fft_height, fft_width;
fft_height = fft_length_data[0];
fft_width = fft_length_data[1];
const RuntimeShape input_shape = GetTensorShape(input);
const int input_dims_count = input_shape.DimensionsCount();
const auto* input_dims_data = input_shape.DimsData();
int num_slices = 1;
for (int i = 0; i < input_dims_count - 2; ++i) {
num_slices *= input_dims_data[i];
}
int input_height = input_dims_data[input_dims_count - 2];
int input_width = input_dims_data[input_dims_count - 1];
int input_slice_size = input_height * input_width;
int output_slice_size = fft_height * (fft_width / 2 + 1);
double** fft_input_output = new double*[fft_height];
for (int i = 0; i < fft_height; ++i) {
fft_input_output[i] = new double[fft_width + 2];
}
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
int* fft_integer_working_area_data =
GetTensorData<int>(fft_integer_working_area);
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
double* fft_double_working_area_data = reinterpret_cast<double*>(
GetTensorData<int64_t>(fft_double_working_area));
for (int i = 0; i < num_slices; ++i) {
PrepareInputBuffer(input_data, input_height, input_width, fft_height,
fft_width, fft_input_output);
memset(fft_integer_working_area_data, 0, fft_integer_working_area->bytes);
memset(fft_double_working_area_data, 0, fft_double_working_area->bytes);
Rfft2dImpl(fft_height, fft_width, fft_input_output,
fft_integer_working_area_data, fft_double_working_area_data);
PrepareOutputBuffer(output_data, fft_height, fft_width, fft_input_output);
input_data += input_slice_size;
output_data += output_slice_size;
}
for (int i = 0; i < fft_height; ++i) {
delete[] fft_input_output[i];
}
delete[] fft_input_output;
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type != kTfLiteComplex64) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for output is not supported by rfft2d.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
if (!IsConstantTensor(fft_length)) {
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
} else {
int num_dims_output = NumDimensions(output);
const RuntimeShape output_shape = GetTensorShape(output);
TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
TF_LITE_ENSURE(context, num_dims_output >= 2);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
fft_length_data[0]);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
fft_length_data[1] / 2 + 1);
}
return Rfft2dHelper(context, node);
}
}  // namespace rfft2d
TfLiteRegistration* Register_RFFT2D() {
static TfLiteRegistration r = {rfft2d::Init, rfft2d::Free, rfft2d::Prepare,
rfft2d::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | #include <stdint.h>
#include <complex>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
using std::complex;
using ::testing::ElementsAreArray;
class Rfft2dOpModel : public SingleOpModel {
public:
Rfft2dOpModel(const TensorData& input, const TensorData& fft_lengths) {
input_ = AddInput(input);
fft_lengths_ = AddInput(fft_lengths);
TensorType output_type = TensorType_COMPLEX64;
output_ = AddOutput({output_type, {}});
SetBuiltinOp(BuiltinOperator_RFFT2D, BuiltinOptions_Rfft2dOptions,
CreateRfft2dOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
int fft_lengths() { return fft_lengths_; }
std::vector<complex<float>> GetOutput() {
return ExtractVector<complex<float>>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int fft_lengths_;
int output_;
};
TEST(Rfft2dOpTest, FftLengthMatchesInputSize) {
Rfft2dOpModel model({TensorType_FLOAT32, {4, 4}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4,
3, 8, 6, 3,
5, 2, 7, 6,
9, 5, 8, 3});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[12] = {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
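// Sanity check on the expected spectrum (editorial): the output has shape
// 4 x (4 / 2 + 1) == 4 x 3, i.e. 12 complex values, and the DC term {75, 0}
// is simply the sum of all sixteen inputs (10 + 20 + 20 + 25 == 75).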
TEST(Rfft2dOpTest, FftLengthSmallerThanInputSize) {
Rfft2dOpModel model({TensorType_FLOAT32, {4, 5}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4, 0,
3, 8, 6, 3, 0,
5, 2, 7, 6, 0,
9, 5, 8, 3, 0});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[12] = {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Rfft2dOpTest, FftLengthGreaterThanInputSize) {
Rfft2dOpModel model({TensorType_FLOAT32, {3, 4}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4,
3, 8, 6, 3,
5, 2, 7, 6});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[20] = {
{50, 0}, {8.29289341, -33.6776695}, {-7, 1}, {9.70710659, -1.67766953},
{0, 0},
{-10, -20}, {-16.3639603, -1.12132037}, {-5, 1}, {-7.19238806, -2.05025244},
{-6, 2},
{10, 0}, {-4.7781744, -6.12132025}, {-1, 11}, {10.7781744, 1.87867963},
{4, 0},
{-10, 20}, {11.1923885, 11.9497471}, {5, -5}, {-3.63603902, -3.12132025},
{-6, -2}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Rfft2dOpTest, InputDimsGreaterThan2) {
Rfft2dOpModel model({TensorType_FLOAT32, {2, 2, 4}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1., 2., 3., 4.,
3., 8., 6., 3.,
5., 2., 7., 6.,
7., 3., 23., 5.});
model.PopulateTensor<int32_t>(model.fft_lengths(), {2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[12] = {
{30., 0.}, {-5, -3.}, { -4., 0.},
{-10., 0.}, {1., 7.}, { 0., 0.},
{58., 0.}, {-18., 6.}, { 26., 0.},
{-18., 0.}, { 14., 2.}, {-18., 0.}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
}  // namespace
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rfft2d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rfft2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2b964149-b89f-41ad-bc48-4df56d45ea38 | cpp | tensorflow/tensorflow | sub | tensorflow/lite/kernels/sub.cc | tensorflow/lite/delegates/xnnpack/sub_test.cc | #include "tensorflow/lite/kernels/internal/reference/sub.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/sub.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/add.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace sub {
enum KernelType {
kReference,
kGenericOptimized,
kNeonOptimized,
};
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
int input1_shift;
int input2_shift;
int32 output_activation_min;
int32 output_activation_max;
int32 input1_multiplier;
int32 input2_multiplier;
int32 output_multiplier;
int output_shift;
int left_shift;
int32 input1_offset;
int32 input2_offset;
int32 output_offset;
bool pot_scale_int16;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus PrepareGeneralSubOp(TfLiteContext* context,
const TfLiteTensor* input_1,
const TfLiteTensor* input_2,
TfLiteTensor* output, TfLiteSubParams* params,
OpData* op_params) {
TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16);
const auto& input1_quantization_params = input_1->params;
const auto& input2_quantization_params = input_2->params;
const auto& output_quantization_params = output->params;
int32_t integer_type_min = 0;
int32_t integer_type_max = 0;
if (output->type == kTfLiteUInt8) {
integer_type_min = std::numeric_limits<uint8_t>::min();
integer_type_max = std::numeric_limits<uint8_t>::max();
} else if (output->type == kTfLiteInt16) {
integer_type_min = std::numeric_limits<int16_t>::min();
integer_type_max = std::numeric_limits<int16_t>::max();
} else {
integer_type_min = std::numeric_limits<int8_t>::min();
integer_type_max = std::numeric_limits<int8_t>::max();
}
TF_LITE_ENSURE(context,
input1_quantization_params.zero_point >= integer_type_min);
TF_LITE_ENSURE(context,
input1_quantization_params.zero_point <= integer_type_max);
TF_LITE_ENSURE(context,
input2_quantization_params.zero_point >= integer_type_min);
TF_LITE_ENSURE(context,
input2_quantization_params.zero_point <= integer_type_max);
TF_LITE_ENSURE(context,
output_quantization_params.zero_point >= integer_type_min);
TF_LITE_ENSURE(context,
output_quantization_params.zero_point <= integer_type_max);
op_params->input1_offset = -input1_quantization_params.zero_point;
op_params->input2_offset = -input2_quantization_params.zero_point;
op_params->output_offset = output_quantization_params.zero_point;
op_params->left_shift = output->type == kTfLiteInt16 ? 15 : 20;
const double twice_max_input_scale =
2 * std::max(input1_quantization_params.scale,
input2_quantization_params.scale);
const double real_input1_multiplier =
input1_quantization_params.scale / twice_max_input_scale;
const double real_input2_multiplier =
input2_quantization_params.scale / twice_max_input_scale;
const double real_output_multiplier =
twice_max_input_scale /
((1 << op_params->left_shift) * output_quantization_params.scale);
tflite::QuantizeMultiplierSmallerThanOneExp(real_input1_multiplier,
&op_params->input1_multiplier,
&op_params->input1_shift);
tflite::QuantizeMultiplierSmallerThanOneExp(real_input2_multiplier,
&op_params->input2_multiplier,
&op_params->input2_shift);
if (real_output_multiplier > 1) {
tflite::QuantizeMultiplierGreaterThanOne(real_output_multiplier,
&op_params->output_multiplier,
&op_params->output_shift);
} else {
tflite::QuantizeMultiplierSmallerThanOneExp(real_output_multiplier,
&op_params->output_multiplier,
&op_params->output_shift);
}
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &op_params->output_activation_min,
&op_params->output_activation_max));
return kTfLiteOk;
}
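// Worked example with made-up scales (editorial, not from the original
// source): for int8 tensors where both inputs have scale 0.5 and the output
// has scale 0.25, twice_max_input_scale == 1.0, each input multiplier
// encodes the real value 0.5, and the output multiplier encodes
// 1.0 / ((1 << 20) * 0.25). At runtime each input is pre-shifted left by
// left_shift bits so the per-input rescaling keeps its precision, the
// rescaled values are subtracted, and the difference is scaled back down
// into the output grid and offset by output_offset.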
TfLiteStatus PrepareInt16SubOpPOT(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
TfLiteTensor* output, TfLiteSubParams* params,
OpData* data) {
TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
int input1_scale_log2_rounded;
bool input1_scale_is_pot =
CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
TF_LITE_ENSURE(context, input1_scale_is_pot);
int input2_scale_log2_rounded;
bool input2_scale_is_pot =
CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
TF_LITE_ENSURE(context, input2_scale_is_pot);
int output_scale_log2_rounded;
bool output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);
TF_LITE_ENSURE(context, output_scale_is_pot);
data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded;
data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded;
TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0);
TF_LITE_ENSURE(context, data->input1_shift <= 0);
TF_LITE_ENSURE(context, data->input2_shift <= 0);
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
return kTfLiteOk;
}
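// Power-of-two example (editorial): with input scales 2^-12 and 2^-10 and an
// output scale of 2^-10, the shifts become -12 - (-10) == -2 and 0, which
// satisfies the checks above (one shift must be exactly 0, neither may be
// positive). Subtraction then needs only arithmetic right shifts rather than
// fixed-point multiplies.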
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
bool general_scale_int16 = false;
bool input1_scale_is_pot = false;
bool input2_scale_is_pot = false;
bool output_scale_is_pot = false;
int input1_scale_log2_rounded{0};
int input2_scale_log2_rounded{0};
int output_scale_log2_rounded{0};
if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 &&
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
general_scale_int16 = !params || !params->pot_scale_int16;
if (!general_scale_int16) {
input1_scale_is_pot =
CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
input2_scale_is_pot =
CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);
general_scale_int16 =
!input1_scale_is_pot || !input2_scale_is_pot || !output_scale_is_pot;
}
}
data->pot_scale_int16 = !general_scale_int16;
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
general_scale_int16) {
TF_LITE_ENSURE_OK(context, PrepareGeneralSubOp(context, input1, input2,
output, params, data));
} else if (output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(context, PrepareInt16SubOpPOT(context, input1, input2,
output, params, data));
}
return context->ResizeTensor(context, output, output_size);
}
template <KernelType kernel_type, typename data_type>
void EvalSubImpl(TfLiteContext* context, TfLiteNode* node,
TfLiteSubParams* params, const OpData* data,
const TfLiteTensor* input1, const TfLiteTensor* input2,
bool requires_broadcast, TfLiteTensor* output) {
data_type output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
&output_activation_max);
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
switch (kernel_type) {
case kReference:
if (requires_broadcast) {
reference_ops::BroadcastSubSlow(
op_params, GetTensorShape(input1), GetTensorData<data_type>(input1),
GetTensorShape(input2), GetTensorData<data_type>(input2),
GetTensorShape(output), GetTensorData<data_type>(output));
} else {
reference_ops::SubWithActivation(
op_params, GetTensorShape(input1), GetTensorData<data_type>(input1),
GetTensorShape(input2), GetTensorData<data_type>(input2),
GetTensorShape(output), GetTensorData<data_type>(output));
}
break;
case kGenericOptimized:
case kNeonOptimized:
if (requires_broadcast) {
optimized_ops::BroadcastSubSlow(
op_params, GetTensorShape(input1), GetTensorData<data_type>(input1),
GetTensorShape(input2), GetTensorData<data_type>(input2),
GetTensorShape(output), GetTensorData<data_type>(output));
} else {
optimized_ops::SubWithActivation(
op_params, GetTensorShape(input1), GetTensorData<data_type>(input1),
GetTensorShape(input2), GetTensorData<data_type>(input2),
GetTensorShape(output), GetTensorData<data_type>(output));
}
break;
}
}
template <KernelType kernel_type>
void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params,
const OpData* data, const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) {
const bool requires_broadcast = data->requires_broadcast;
switch (output->type) {
case kTfLiteInt32:
EvalSubImpl<kernel_type, int32_t>(context, node, params, data, input1,
input2, requires_broadcast, output);
break;
case kTfLiteFloat32:
EvalSubImpl<kernel_type, float>(context, node, params, data, input1,
input2, requires_broadcast, output);
break;
case kTfLiteInt64:
EvalSubImpl<kernel_type, int64_t>(context, node, params, data, input1,
input2, requires_broadcast, output);
break;
default:
TF_LITE_KERNEL_LOG(context, "output type %s is not supported.",
TfLiteTypeGetName(output->type));
}
}
template <KernelType kernel_type>
void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
TfLiteSubParams* params, const OpData* data,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
tflite::ArithmeticParams op_params;
op_params.left_shift = data->left_shift;
op_params.input1_offset = data->input1_offset;
op_params.input1_multiplier = data->input1_multiplier;
op_params.input1_shift = data->input1_shift;
op_params.input2_offset = data->input2_offset;
op_params.input2_multiplier = data->input2_multiplier;
op_params.input2_shift = data->input2_shift;
op_params.output_offset = data->output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
SetActivationParams(data->output_activation_min, data->output_activation_max,
&op_params);
const bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(input1), GetTensorShape(input2), &op_params);
#define TF_LITE_SUB(type, opname, data_type) \
type::opname(op_params, GetTensorShape(input1), \
GetTensorData<data_type>(input1), GetTensorShape(input2), \
GetTensorData<data_type>(input2), GetTensorShape(output), \
GetTensorData<data_type>(output))
if (output->type == kTfLiteInt8) {
if (need_broadcast) {
TF_LITE_SUB(reference_ops, BroadcastQuantSubSlow, int8_t);
} else {
TF_LITE_SUB(reference_ops, Sub, int8_t);
}
} else if (!data->pot_scale_int16) {
if (kernel_type == kReference) {
if (need_broadcast) {
TF_LITE_SUB(reference_ops, BroadcastQuantSubSlow, int16_t);
} else {
TF_LITE_SUB(reference_ops, Sub, int16_t);
}
} else {
if (need_broadcast) {
TF_LITE_SUB(optimized_integer_ops, BroadcastSubDispatch, int16_t);
} else {
TF_LITE_SUB(optimized_integer_ops, Sub, int16_t);
}
}
} else if (output->type == kTfLiteUInt8) {
if (need_broadcast) {
TF_LITE_SUB(reference_ops, BroadcastQuantSubSlow, uint8_t);
} else {
TF_LITE_SUB(reference_ops, Sub, uint8_t);
}
} else {
if (kernel_type == kReference) {
if (need_broadcast) {
TF_LITE_SUB(reference_ops, BroadcastSub16POTSlow, int16_t);
} else {
TF_LITE_SUB(reference_ops, Sub16, int16_t);
}
} else {
if (need_broadcast) {
TF_LITE_SUB(optimized_ops, BroadcastSub16POTSlow, int16_t);
} else {
TF_LITE_SUB(optimized_ops, Sub16, int16_t);
}
}
}
#undef TF_LITE_SUB
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32 ||
output->type == kTfLiteInt64) {
EvalSub<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
TF_LITE_KERNEL_LOG(
context,
"output type %d is not supported, requires float|uint8|int32 types.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}  // namespace sub
TfLiteRegistration* Register_SUB_REF() {
static TfLiteRegistration r = {
sub::Init,
sub::Free,
sub::Prepare,
sub::Eval<sub::kReference>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_SUB_GENERIC_OPT() {
static TfLiteRegistration r = {
sub::Init,
sub::Free,
sub::Prepare,
sub::Eval<sub::kGenericOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_SUB_NEON_OPT() {
static TfLiteRegistration r = {
sub::Init,
sub::Free,
sub::Prepare,
sub::Eval<sub::kNeonOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_SUB() {
#ifdef USE_NEON
return Register_SUB_NEON_OPT();
#else
return Register_SUB_GENERIC_OPT();
#endif
}
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Sub, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
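// Pattern note (editorial): every test in this file draws random dimensions
// in [2, 5] and drives BinaryElementwiseTester, which is understood to run
// the small SUB model both with and without the XNNPACK delegate and to
// compare the two outputs. The remaining tests differ only in which
// dimensions are broadcast between the two inputs.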
TEST(Sub, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sub.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/sub_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
089abd1e-d29d-43a7-89fe-1428446e1f3b | cpp | tensorflow/tensorflow | densify | tensorflow/lite/kernels/densify.cc | tensorflow/lite/kernels/densify_test.cc | #include "tensorflow/lite/kernels/internal/reference/densify.h"
#include <stddef.h>
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace densify {
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
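// Remembers whether the sparse weights have already been expanded, so Eval
// can be a no-op on subsequent invocations.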
struct OpData {
bool dense_weights_initialized;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->dense_weights_initialized = false;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
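// Validates that the input is a constant sparse tensor and resizes the
// persistent output to the dense shape of the input.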
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type != kTfLiteString);
TF_LITE_ENSURE(context, IsConstantTensor(op_context.input));
TF_LITE_ENSURE(context, op_context.input->sparsity != nullptr);
op_context.output->type = op_context.input->type;
op_context.output->name = "Densify_output";
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
}
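// Expands the sparse input into the persistent dense output once; later
// invocations return immediately.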
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
if (op_data->dense_weights_initialized) {
return kTfLiteOk;
}
switch (op_context.input->type) {
case kTfLiteFloat32:
reference_ops::Densify(op_context.input->sparsity,
GetTensorShape(op_context.input),
GetTensorData<float>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<float>(op_context.output), context);
break;
case kTfLiteFloat16:
reference_ops::Densify(
op_context.input->sparsity, GetTensorShape(op_context.input),
GetTensorData<Eigen::half>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<Eigen::half>(op_context.output), context);
break;
case kTfLiteInt8:
reference_ops::Densify(op_context.input->sparsity,
GetTensorShape(op_context.input),
GetTensorData<int8_t>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<int8_t>(op_context.output), context);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %d not supported.",
op_context.input->type);
return kTfLiteError;
}
op_data->dense_weights_initialized = true;
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DENSIFY() {
static TfLiteRegistration r = {densify::Init, densify::Free, densify::Prepare,
densify::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DENSIFY();
}
}
namespace {
using ::testing::ElementsAreArray;
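// Builds a single-op model whose input is a constant sparse tensor. The tests
// check that the stored input matches the compressed values and that the
// output matches the original dense values.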
template <typename T>
class DensifyOpModel : public SingleOpModel {
public:
DensifyOpModel(const TensorData& input, const std::vector<T>& input_data,
int version = 1) {
input_ = AddConstSparseInput(input, input_data);
output_ = AddOutput({input.type, input.shape});
SetBuiltinOp(BuiltinOperator_DENSIFY, BuiltinOptions_DensifyOptions,
CreateDensifyOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DENSIFY, ops::builtin::Register_DENSIFY(), version);
    BuildInterpreter({input.shape}, /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/false, /*allocate_and_delegate=*/true);
}
std::vector<T> GetInput() { return ExtractVector<T>(input_); }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
private:
int input_;
int output_;
};
TEST(DensifyOpTest, Float) {
std::vector<float> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<float> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_FLOAT32;
input.shape = {3, 4};
input.traversal_order = {0, 1};
input.format = {kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<float> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
TEST(DensifyOpTest, Float3D) {
std::vector<float> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<float> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_FLOAT32;
input.shape = {3, 2, 2};
input.traversal_order = {0, 1, 2};
input.format = {kTfLiteDimDense, kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<float> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
TEST(DensifyOpTest, Int8) {
std::vector<int8_t> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<int8_t> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_INT8;
input.shape = {3, 4};
input.traversal_order = {0, 1};
input.format = {kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<int8_t> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/densify.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/densify_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f811e098-b9eb-4721-a5ef-a33f172b9062 | cpp | tensorflow/tensorflow | basic_rnn | tensorflow/lite/kernels/basic_rnn.cc | tensorflow/lite/kernels/basic_rnn_test.cc | #include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace rnn {
namespace {
struct OpData {
int scratch_tensor_index;
bool compute_row_sums = false;
};
}
constexpr int kInputTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kRecurrentWeightsTensor = 2;
constexpr int kBiasTensor = 3;
constexpr int kHiddenStateTensor = 4;
constexpr int kOutputTensor = 0;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
  context->AddTensors(context, /*tensors_to_add=*/6,
                      &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* input_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTensor, &input_weights));
const TfLiteTensor* recurrent_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights));
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias));
const TfLiteTensor* hidden_state;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kHiddenStateTensor, &hidden_state));
const int batch_size = input->dims->data[0];
const int num_units = input_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input->dims->data[1],
input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, input_weights->dims->data[0], bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, recurrent_weights->dims->data[0],
bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, recurrent_weights->dims->data[1],
bias->dims->data[0]);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input_weights->type,
recurrent_weights->type);
TF_LITE_ENSURE_EQ(context, NumDimensions(hidden_state), 2);
TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[1], num_units);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
output_size_array->data[0] = batch_size;
output_size_array->data[1] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
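  // Hybrid execution (float activations with quantized weights) needs six
  // scratch tensors: quantized input, quantized hidden state, scaling
  // factors, accumulators, zero points, and row sums.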
const bool is_hybrid = IsHybridOp(input, input_weights);
if (is_hybrid) {
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->compute_row_sums = true;
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(6);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 0,
&input_quantized));
input_quantized->type = input_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* hidden_state_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1,
&hidden_state_quantized));
hidden_state_quantized->type = input_weights->type;
hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(hidden_state_quantized->dims,
hidden_state->dims)) {
TfLiteIntArray* hidden_state_quantized_size =
TfLiteIntArrayCopy(hidden_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, hidden_state_quantized,
hidden_state_quantized_size));
}
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[3] = op_data->scratch_tensor_index + 3;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 3, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2);
accum_scratch_size->data[0] = accum_scratch_dims[0];
accum_scratch_size->data[1] = accum_scratch_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch,
accum_scratch_size));
}
node->temporaries->data[4] = op_data->scratch_tensor_index + 4;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &zero_points));
zero_points->type = kTfLiteInt32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
node->temporaries->data[5] = op_data->scratch_tensor_index + 5;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->name = "Rnn_row_sums";
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[2] = {2, num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2);
row_sums_size->data[0] = row_sums_dims[0];
row_sums_size->data[1] = row_sums_dims[1];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
return kTfLiteOk;
}
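// Float path: runs one batched RNN step, roughly
// output = activation(W * x + U * h + b), updating the hidden state in place.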
TfLiteStatus EvalFloat(const TfLiteTensor* input,
const TfLiteTensor* input_weights,
const TfLiteTensor* recurrent_weights,
const TfLiteTensor* bias, const TfLiteRNNParams* params,
TfLiteTensor* hidden_state, TfLiteTensor* output) {
const int batch_size = input->dims->data[0];
const int num_units = input_weights->dims->data[0];
const int input_size = input->dims->data[1];
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
float* hidden_state_ptr_batch = GetTensorData<float>(hidden_state);
const float* input_ptr_batch = GetTensorData<float>(input);
float* output_ptr_batch = GetTensorData<float>(output);
const float* input_weights_ptr = GetTensorData<float>(input_weights);
const float* recurrent_weights_ptr = GetTensorData<float>(recurrent_weights);
const float* bias_ptr = GetTensorData<float>(bias);
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, recurrent_weights_ptr, bias_ptr,
input_size, num_units, batch_size, output_batch_leading_dim,
params->activation, hidden_state_ptr_batch, output_ptr_batch);
return kTfLiteOk;
}
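// Hybrid path: activations are quantized to int8 on the fly, the matrix
// products run in the quantized domain, and results are scaled back to float
// using per-batch scaling factors.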
TfLiteStatus EvalHybrid(const TfLiteTensor* input,
const TfLiteTensor* input_weights,
const TfLiteTensor* recurrent_weights,
const TfLiteTensor* bias, const TfLiteRNNParams* params,
TfLiteTensor* input_scratch,
TfLiteTensor* hidden_state_scratch,
TfLiteTensor* scaling_factors,
TfLiteTensor* hidden_state, TfLiteTensor* output,
TfLiteTensor* zero_points, TfLiteTensor* accum_scratch,
TfLiteTensor* row_sums, bool* compute_row_sums) {
const int batch_size = input->dims->data[0];
const int num_units = input_weights->dims->data[0];
const int input_size = input->dims->data[1];
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
float* hidden_state_ptr_batch = GetTensorData<float>(hidden_state);
const float* input_ptr_batch = GetTensorData<float>(input);
float* output_ptr_batch = GetTensorData<float>(output);
const int8_t* input_weights_ptr = GetTensorData<int8_t>(input_weights);
const int8_t* recurrent_weights_ptr =
GetTensorData<int8_t>(recurrent_weights);
const float* bias_ptr = GetTensorData<float>(bias);
float input_weights_scale = input_weights->params.scale;
float recurrent_weights_scale = recurrent_weights->params.scale;
int8_t* quantized_input_ptr = GetTensorData<int8_t>(input_scratch);
int8_t* quantized_hidden_state_ptr =
GetTensorData<int8_t>(hidden_state_scratch);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* accum_scratch_ptr = GetTensorData<int32_t>(accum_scratch);
int32_t* zero_points_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, input_weights_scale,
recurrent_weights_ptr, recurrent_weights_scale, bias_ptr, input_size,
num_units, batch_size, output_batch_leading_dim, params->activation,
quantized_input_ptr, quantized_hidden_state_ptr, scaling_factors_ptr,
hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr, accum_scratch_ptr,
row_sums_ptr, compute_row_sums);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteRNNParams*>(node->builtin_data);
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* input_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTensor, &input_weights));
const TfLiteTensor* recurrent_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights));
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias));
TfLiteTensor* hidden_state =
GetVariableInput(context, node, kHiddenStateTensor);
TF_LITE_ENSURE(context, hidden_state != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input_weights->type) {
case kTfLiteFloat32:
return EvalFloat(input, input_weights, recurrent_weights, bias, params,
hidden_state, output);
case kTfLiteUInt8:
case kTfLiteInt8: {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &input_quantized));
TfLiteTensor* hidden_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &hidden_state_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 2, &scaling_factors));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 3, &accum_scratch));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 4, &zero_points));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums));
return EvalHybrid(input, input_weights, recurrent_weights, bias, params,
input_quantized, hidden_state_quantized,
scaling_factors, hidden_state, output, zero_points,
accum_scratch, row_sums, &op_data->compute_row_sums);
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input_weights->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_RNN() {
static TfLiteRegistration r = {rnn::Init, rnn::Free, rnn::Prepare, rnn::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
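// Black-box reference data for a batch-2, input-8, unit-16 RNN: rnn_input
// holds the per-step inputs and rnn_golden_output the expected activations.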
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
static float rnn_golden_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
static std::initializer_list<float> rnn_weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
static std::initializer_list<float> rnn_recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
static std::initializer_list<float> rnn_bias = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
class RNNOpModel : public SingleOpModel {
public:
RNNOpModel(int batches, int units, int size,
const TensorType& weights = TensorType_FLOAT32,
const TensorType& recurrent_weights = TensorType_FLOAT32,
bool asymmetric_quantize_inputs = false)
: batches_(batches), units_(units), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
CreateRNNOptions(builder_, ActivationFunctionType_RELU,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({{batches_, input_size_},
{units_, input_size_},
{units_, units_},
{units_},
{batches_, units_}});
}
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetWeights(std::initializer_list<float> f) {
PopulateTensor(weights_, f);
}
void SetRecurrentWeights(std::initializer_list<float> f) {
PopulateTensor(recurrent_weights_, f);
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_;
int recurrent_weights_;
int bias_;
int hidden_state_;
int output_;
int batches_;
int units_;
int input_size_;
};
class HybridRNNOpModel : public RNNOpModel {
public:
HybridRNNOpModel(int batches, int units, int size, TensorType tensor_type,
bool asymmetric_quantize_inputs)
: RNNOpModel(batches, units, size, tensor_type, tensor_type,
asymmetric_quantize_inputs) {
tensor_type_ = tensor_type;
}
TensorType tensor_type_;
void SetWeights(int weights_idx, const std::vector<float>& f) {
if (tensor_type_ == TensorType_UINT8) {
SymmetricQuantizeAndPopulate(weights_idx, f);
} else {
SignedSymmetricQuantizeAndPopulate(weights_idx, f);
}
}
void SetWeights(std::initializer_list<float> f) { SetWeights(weights_, f); }
void SetRecurrentWeights(std::initializer_list<float> f) {
SetWeights(recurrent_weights_, f);
}
};
TEST(RnnOpTest, BlackBoxTest) {
RNNOpModel rnn(2, 16, 8);
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
}
}
class HybridRnnOpTest : public ::testing::TestWithParam<bool> {};
TEST_P(HybridRnnOpTest, BlackBoxTestUint8) {
HybridRNNOpModel rnn(2, 16, 8, TensorType_UINT8, GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
    EXPECT_THAT(rnn.GetOutput(),
                ElementsAreArray(
                    ArrayFloatNear(expected, /*max_abs_error=*/0.0104)));
}
}
TEST_P(HybridRnnOpTest, BlackBoxTestInt8) {
HybridRNNOpModel rnn(2, 16, 8, TensorType_INT8, GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
    EXPECT_THAT(rnn.GetOutput(),
                ElementsAreArray(
                    ArrayFloatNear(expected, /*max_abs_error=*/0.0104)));
}
}
INSTANTIATE_TEST_SUITE_P(HybridRnnOpTest, HybridRnnOpTest,
::testing::ValuesIn({false, true}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/basic_rnn.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/basic_rnn_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
defc3c9f-2423-4e72-b81a-20362e9dce10 | cpp | tensorflow/tensorflow | stablehlo_scatter | tensorflow/lite/kernels/stablehlo_scatter.cc | tensorflow/lite/kernels/stablehlo_scatter_test.cc | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/tensor_slice_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_scatter {
namespace {
constexpr int kInputsTensor = 0;
constexpr int kScatterIndicesTensor = 1;
constexpr int kUpdatesTensor = 2;
constexpr int kOutputTensor = 0;
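// The update computation is not an attribute of the op; it is inferred from
// the single builtin op contained in the scatter region subgraph.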
enum class ComputationType {
kUpdate,
kAdd,
kMultiply,
kMaximum,
kMinimum,
kOther
};
struct OpData {
ComputationType computation_type;
};
using DimVector = std::vector<int64_t>;
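// Returns the dimensions of the updates tensor that are *not* listed in
// update_window_dims, i.e. the "update scatter dims" of the StableHLO
// scatter specification.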
static DimVector GetUpdateScatterDims(int64_t updates_rank,
const int64_t* update_window_dims,
int num_update_window_dims) {
DimVector result;
for (int64_t dim = 0; dim < updates_rank; ++dim) {
if (!ArrayContains(update_window_dims, num_update_window_dims, dim)) {
result.push_back(dim);
}
}
return result;
}
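// Builds a new index by selecting the components of `index` at the
// positions listed in `dims`, preserving their order.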
template <typename IndexType>
static Index<IndexType> GatherIndex(const Index<IndexType>& index,
const DimVector& dims) {
Index<IndexType> result;
for (auto dim : dims) {
result.push_back(index[dim]);
}
return result;
}
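// Returns true iff `index` has the same rank as `shape` and every component
// is within the corresponding dimension extent. Out-of-bounds updates are
// silently skipped by the scatter loop further below.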
template <typename IndexType>
static bool IsInBounds(Index<IndexType> index, RuntimeShape shape) {
if (index.size() != shape.DimensionsCount()) {
return false;
}
for (int dim = 0; dim < shape.DimensionsCount(); ++dim) {
if (index[dim] >= shape.Dims(dim)) {
return false;
}
}
return true;
}
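// Maps the builtin code of the single op inside the update-computation
// subgraph to the reduction applied at each scattered location.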
static ComputationType OpCodeToComputationType(int op_code) {
switch (op_code) {
case kTfLiteBuiltinStablehloAdd:
return ComputationType::kAdd;
case kTfLiteBuiltinStablehloMultiply:
return ComputationType::kMultiply;
case kTfLiteBuiltinStablehloMaximum:
return ComputationType::kMaximum;
case kTfLiteBuiltinStablehloMinimum:
return ComputationType::kMinimum;
default:
return ComputationType::kOther;
}
}
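// Inspects the update-computation subgraph: an empty execution plan means a
// plain overwrite (kUpdate); otherwise exactly one supported binary op is
// expected, and anything else is rejected.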
static TfLiteStatus GetComputationType(const Subgraph* computation_subgraph,
ComputationType* computation_type,
TfLiteContext* context) {
if (computation_subgraph->execution_plan().empty()) {
*computation_type = ComputationType::kUpdate;
return kTfLiteOk;
}
if (computation_subgraph->execution_plan().size() > 1) {
TF_LITE_KERNEL_LOG(context,
"Only one kernel allowed withing the stablehlo region. "
"(%zu) kernels found.\n",
computation_subgraph->execution_plan().size());
return kTfLiteError;
}
const TfLiteRegistration* kernel =
&(computation_subgraph
->node_and_registration(computation_subgraph->execution_plan()[0])
->second);
*computation_type = OpCodeToComputationType(kernel->builtin_code);
if (*computation_type == ComputationType::kOther) {
TF_LITE_KERNEL_LOG(
context,
"Only update, Add, Multiply, Maximum and Minimum operations are "
"currently supported for stablehlo.scatter.");
return kTfLiteError;
}
return kTfLiteOk;
}
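// Combines the current output value with the update value according to
// `computation_type` and writes the result back into `tensor` at `index`.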
template <typename DataType, typename IndexType>
static TfLiteStatus ApplyComputation(TfLiteTensor* tensor,
Index<IndexType> index,
DataType input_value,
DataType update_value,
ComputationType computation_type,
TfLiteContext* context) {
DataType* tensor_data = GetTensorData<DataType>(tensor);
DataType result;
if (computation_type == ComputationType::kUpdate) {
result = update_value;
} else if (computation_type == ComputationType::kAdd) {
result = input_value + update_value;
} else if (computation_type == ComputationType::kMultiply) {
result = input_value * update_value;
} else if (computation_type == ComputationType::kMaximum) {
result = std::max(input_value, update_value);
} else if (computation_type == ComputationType::kMinimum) {
result = std::min(input_value, update_value);
} else {
TF_LITE_KERNEL_LOG(context,
"Provided kernel in the stablehlo scatter region is not "
"yet supported.");
return kTfLiteError;
}
tensor_data[TensorIndexToFlat(index.data(), index.size(),
GetTensorShape(tensor))] = result;
return kTfLiteOk;
}
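// Core scatter loop. For every index into `updates` it derives the target
// index in the operand: the scatter dims select a start index from
// `scatter_indices`, the window dims give the offset within the window, and
// the two are added. Indices falling outside the operand are skipped, per
// the StableHLO scatter semantics implemented here.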
template <typename IndexType, typename DataType>
TfLiteStatus EvalWithTypes(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteStablehloScatterParams* data =
reinterpret_cast<TfLiteStablehloScatterParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &input));
const TfLiteTensor* scatter_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kScatterIndicesTensor,
&scatter_indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kUpdatesTensor, &updates));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
memcpy(output->data.data, input->data.data, input->bytes);
RuntimeShape input_shape = GetTensorShape(input);
int input_rank = input_shape.DimensionsCount();
const DataType* output_data = GetTensorData<DataType>(output);
RuntimeShape scatter_indices_shape = GetTensorShape(scatter_indices);
RuntimeShape updates_shape = GetTensorShape(updates);
int64_t updates_rank = updates_shape.DimensionsCount();
Index<IndexType> update_index = Index<IndexType>(updates_rank, 0);
const DataType* updates_data = GetTensorData<DataType>(updates);
DimVector update_scatter_dims = GetUpdateScatterDims(
updates_rank, data->update_window_dims, data->num_update_window_dims);
std::vector<int64_t> update_window_dims_vec(
data->update_window_dims,
data->update_window_dims + data->num_update_window_dims);
do {
Index<IndexType> update_scatter_index =
GatherIndex(update_index, update_scatter_dims);
Index<IndexType> start_index =
ReadIndexVector(scatter_indices, scatter_indices_shape,
update_scatter_index, data->index_vector_dim);
Index<IndexType> full_start_index;
TF_LITE_ENSURE_STATUS(ScatterIndex(
start_index, data->scatter_dims_to_operand_dims,
data->num_scatter_dims_to_operand_dims, input_rank, &full_start_index));
Index<IndexType> window_index =
GatherIndex(update_index, update_window_dims_vec);
Index<IndexType> full_window_index;
TF_LITE_ENSURE_STATUS(ExpandDims(window_index, data->inserted_window_dims,
data->num_inserted_window_dims,
&full_window_index));
Index<IndexType> result_index =
AddIndices(full_start_index, full_window_index);
if (!IsInBounds(result_index, input_shape)) {
continue;
}
DataType input_value = output_data[TensorIndexToFlat(
result_index.data(), input_rank, input_shape)];
DataType update_value = updates_data[TensorIndexToFlat(
update_index.data(), updates_rank, updates_shape)];
TF_LITE_ENSURE_STATUS(ApplyComputation(output, result_index, input_value,
update_value,
op_data->computation_type, context));
} while (
NextIndex(updates_rank, updates_shape.DimsData(), update_index.data()));
return TfLiteStatus::kTfLiteOk;
}
template <typename IndexType>
TfLiteStatus EvalWithIndexType(TfLiteContext* context, TfLiteNode* node,
TfLiteType index_type, TfLiteType data_type) {
switch (data_type) {
case kTfLiteFloat16:
return EvalWithTypes<IndexType, Eigen::half>(context, node);
case kTfLiteFloat32:
return EvalWithTypes<IndexType, float>(context, node);
case kTfLiteFloat64:
return EvalWithTypes<IndexType, double>(context, node);
case kTfLiteInt8:
return EvalWithTypes<IndexType, int8_t>(context, node);
case kTfLiteInt16:
return EvalWithTypes<IndexType, int16_t>(context, node);
case kTfLiteInt32:
return EvalWithTypes<IndexType, int32_t>(context, node);
case kTfLiteInt64:
return EvalWithTypes<IndexType, int64_t>(context, node);
case kTfLiteUInt8:
return EvalWithTypes<IndexType, uint8_t>(context, node);
case kTfLiteUInt16:
return EvalWithTypes<IndexType, uint16_t>(context, node);
case kTfLiteUInt32:
return EvalWithTypes<IndexType, uint32_t>(context, node);
case kTfLiteUInt64:
return EvalWithTypes<IndexType, uint64_t>(context, node);
default:
TF_LITE_KERNEL_LOG(
context, "(Index Type: %s, Data Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type), TfLiteTypeGetName(data_type));
return TfLiteStatus::kTfLiteError;
}
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  // Allocate the per-node state as OpData, the type that Prepare and Eval
  // cast node->user_data to; the computation type is resolved in Prepare.
  return new OpData{ComputationType::kOther};
}
void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}
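// Prepare validates the signature, sizes the output like the input, and
// resolves the computation type from the referenced subgraph once, so Eval
// does not have to inspect it on every invocation.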
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
const TfLiteStablehloScatterParams* data =
reinterpret_cast<TfLiteStablehloScatterParams*>(node->builtin_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto* subgraphs = this_subgraph->GetSubgraphs();
if (data->update_computation_subgraph_index >= subgraphs->size()) {
TF_LITE_KERNEL_LOG(context,
"Computation subgraph not found for stablehlo.scatter.");
return TfLiteStatus::kTfLiteError;
}
Subgraph* computation_subgraph =
(*subgraphs)[data->update_computation_subgraph_index].get();
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_STATUS(GetComputationType(
computation_subgraph, &op_data->computation_type, context));
return TfLiteStatus::kTfLiteOk;
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &input));
const TfLiteTensor* scatter_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kScatterIndicesTensor,
&scatter_indices));
TfLiteType index_type = scatter_indices->type;
TfLiteType data_type = input->type;
if (index_type == kTfLiteInt32) {
return EvalWithIndexType<int32_t>(context, node, index_type, data_type);
} else if (index_type == kTfLiteInt64) {
return EvalWithIndexType<int64_t>(context, node, index_type, data_type);
} else {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type));
return TfLiteStatus::kTfLiteError;
}
}
}
TfLiteRegistration* Register_STABLEHLO_SCATTER() {
static TfLiteRegistration r = {
stablehlo_scatter::Init, stablehlo_scatter::Free,
stablehlo_scatter::Prepare, stablehlo_scatter::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
enum class StablehloScatterOpType { kAdd, kMul, kMax, kMin, kUpdate };
class StablehloScatterOpModel : public SingleOpModel {
public:
StablehloScatterOpModel(const TensorData& input, const TensorData& indices,
const TensorData& updates,
const TfLiteStablehloScatterParams& params,
StablehloScatterOpType op_type) {
input_ = AddInput(input);
indices_ = AddInput(indices);
updates_ = AddInput(updates);
output_ = AddOutput(input.type);
SetBuiltinOp(
BuiltinOperator_STABLEHLO_SCATTER,
BuiltinOptions2_StablehloScatterOptions,
CreateStablehloScatterOptions(
builder_, params.indices_are_sorted,
builder_.CreateVector(std::vector(
params.update_window_dims,
params.update_window_dims + params.num_update_window_dims)),
builder_.CreateVector(std::vector(
params.inserted_window_dims,
params.inserted_window_dims + params.num_inserted_window_dims)),
builder_.CreateVector(
std::vector(params.scatter_dims_to_operand_dims,
params.scatter_dims_to_operand_dims +
params.num_scatter_dims_to_operand_dims)),
params.index_vector_dim, params.unique_indices, 1)
.Union());
BuildInterpreter({GetShape(input_), GetShape(indices_), GetShape(updates_)},
-1, false,
false, false,
false);
int* dummy = nullptr;
AddSubgraphs(1, dummy);
if (op_type == StablehloScatterOpType::kAdd) {
subgraph_builder_.BuildStablehloAddSubgraph(interpreter_->subgraph(1));
} else if (op_type == StablehloScatterOpType::kMul) {
subgraph_builder_.BuildStablehloMulSubgraph(interpreter_->subgraph(1));
} else if (op_type == StablehloScatterOpType::kMax) {
subgraph_builder_.BuildStablehloMaximumSubgraph(
interpreter_->subgraph(1));
} else if (op_type == StablehloScatterOpType::kMin) {
subgraph_builder_.BuildStablehloMinimumSubgraph(
interpreter_->subgraph(1));
} else if (op_type == StablehloScatterOpType::kUpdate) {
subgraph_builder_.BuildOutputIsSecondInputSubgraph(
interpreter_->subgraph(1));
}
AllocateAndDelegate(true);
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename T>
void SetIndices(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
void SetUpdates(std::initializer_list<T> data) {
PopulateTensor<T>(updates_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
Subgraph* subgraph_;
int input_;
int indices_;
int updates_;
int output_;
subgraph_test_util::SubgraphBuilder subgraph_builder_;
};
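// The brace-initialized TfLiteStablehloScatterParams below follow the field
// order of the struct in builtin_op_data.h, assumed here to be:
// indices_are_sorted, update_window_dims, num_update_window_dims,
// inserted_window_dims, num_inserted_window_dims,
// scatter_dims_to_operand_dims, num_scatter_dims_to_operand_dims,
// index_vector_dim, unique_indices, update_computation_subgraph_index.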
TEST(StablehloScatterOpTest, PerformsAddition) {
StablehloScatterOpType op_type = StablehloScatterOpType::kAdd;
TfLiteStablehloScatterParams params = {
false,
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
false,
1
};
StablehloScatterOpModel model(
{TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
{TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
model.SetUpdates<float>(
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 7, 8, 9, 10, 7, 8,
11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 21, 22, 23, 24};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, PerformsMultiplication) {
StablehloScatterOpType op_type = StablehloScatterOpType::kMul;
TfLiteStablehloScatterParams params = {
false,
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
false,
1
};
StablehloScatterOpModel model(
{TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
{TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
model.SetUpdates<float>(
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 12, 16, 20, 24, 7, 8,
18, 20, 22, 24, 26, 28, 30, 32,
34, 36, 38, 40, 21, 22, 23, 24};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, PerformsMaximum) {
StablehloScatterOpType op_type = StablehloScatterOpType::kMax;
TfLiteStablehloScatterParams params = {
false,
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
false,
1
};
StablehloScatterOpModel model(
{TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
{TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
model.SetUpdates<float>(
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, PerformsMinimum) {
StablehloScatterOpType op_type = StablehloScatterOpType::kMin;
TfLiteStablehloScatterParams params = {
false,
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
false,
1
};
StablehloScatterOpModel model(
{TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
{TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
model.SetUpdates<float>(
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 2, 2, 2, 2, 7, 8, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 21, 22, 23, 24};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, PerformsUpdate) {
StablehloScatterOpType op_type = StablehloScatterOpType::kUpdate;
TfLiteStablehloScatterParams params = {
false,
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
false,
1
};
StablehloScatterOpModel model(
{TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
{TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
model.SetUpdates<float>(
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 2, 2, 2, 2, 7, 8, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 21, 22, 23, 24};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_scatter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_scatter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e17e7a4e-c44e-407c-9a22-e2ea6b473e56 | cpp | tensorflow/tensorflow | atan2 | tensorflow/lite/kernels/atan2.cc | tensorflow/lite/kernels/atan2_test.cc | #include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace atan2 {
TfLiteStatus EnsureSameShape(
    TfLiteContext* context,
    const TfLiteTensor* a, const TfLiteTensor* b) {
  TF_LITE_ENSURE_EQ(context,
                    tflite::NumDimensions(a),
                    tflite::NumDimensions(b));
  // Matching rank alone is not enough: also require matching extents so the
  // elementwise loop in Atan2() cannot read past the end of either input.
  for (int i = 0; i < tflite::NumDimensions(a); ++i) {
    TF_LITE_ENSURE_EQ(context, a->dims->data[i], b->dims->data[i]);
  }
  return TfLiteStatus::kTfLiteOk;
}
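// Validates that both inputs share shape and float type, and sizes the
// output to match; the kernel itself is purely elementwise.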
TfLiteStatus Atan2Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
const TfLiteTensor* input_y = tflite::GetInput(context, node, 0);
const TfLiteTensor* input_x = tflite::GetInput(context, node, 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_OK(context, EnsureSameShape(context, input_y, input_x));
TF_LITE_ENSURE_TYPES_EQ(context, input_y->type, input_x->type);
TF_LITE_ENSURE_TYPES_EQ(context, input_y->type, output->type);
TF_LITE_ENSURE(context,
input_y->type == kTfLiteFloat32 ||
input_y->type == kTfLiteFloat64);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input_y->dims);
return context->ResizeTensor(context, output, output_shape);
}
template <typename Float>
TfLiteStatus Atan2(const TfLiteTensor* input_y,
const TfLiteTensor* input_x,
TfLiteTensor* output) {
const Float* data_y = tflite::GetTensorData<Float>(input_y);
const Float* data_x = tflite::GetTensorData<Float>(input_x);
Float* data_output = tflite::GetTensorData<Float>(output);
const int64_t num_elements = NumElements(input_y);
for (int64_t i = 0; i < num_elements; ++i) {
data_output[i] = std::atan2(data_y[i], data_x[i]);
}
return TfLiteStatus::kTfLiteOk;
}
TfLiteStatus Atan2Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_y = tflite::GetInput(context, node, 0);
const TfLiteTensor* input_x = tflite::GetInput(context, node, 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(context, Atan2<float>(input_y, input_x, output));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(context, Atan2<double>(input_y, input_x, output));
break;
default: {
TF_LITE_KERNEL_LOG(
context,
"Unsupported datatype for atan2 output: %s",
TfLiteTypeGetName(output->type));
return TfLiteStatus::kTfLiteError;
}
}
return TfLiteStatus::kTfLiteOk;
}
}
TfLiteRegistration* Register_ATAN2() {
static TfLiteRegistration r = {
nullptr, nullptr, atan2::Atan2Prepare, atan2::Atan2Eval};
return &r;
}
}
}
} | #include <cmath>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
class Atan2Model : public tflite::SingleOpModel {
public:
Atan2Model(tflite::TensorData y,
tflite::TensorData x,
tflite::TensorData output) {
y_ = AddInput(y);
x_ = AddInput(x);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ATAN2, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(y_), GetShape(x_)});
}
template <typename T>
std::vector<T> GetOutput(
const std::vector<T>& y,
const std::vector<T>& x) {
PopulateTensor<T>(y_, y);
PopulateTensor<T>(x_, x);
Invoke();
return ExtractVector<T>(output_);
}
private:
int y_;
int x_;
int output_;
};
template <typename Float>
class Atan2Test : public ::testing::Test {
public:
using FloatType = Float;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(Atan2Test, TestTypes);
TYPED_TEST(Atan2Test, TestScalar) {
using Float = typename TestFixture::FloatType;
tflite::TensorData y = {GetTTEnum<Float>(), {}};
tflite::TensorData x = {GetTTEnum<Float>(), {}};
tflite::TensorData output = {GetTTEnum<Float>(), {}};
Atan2Model m(y, x, output);
auto got = m.GetOutput<Float>({0.0}, {0.0});
ASSERT_EQ(got.size(), 1);
EXPECT_FLOAT_EQ(got[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({1.0}, {0.0})[0], M_PI/2);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({0.0}, {1.0})[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({-1.0}, {0.0})[0], -M_PI/2);
}
TYPED_TEST(Atan2Test, TestBatch) {
using Float = typename TestFixture::FloatType;
tflite::TensorData y = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData x = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData output = {GetTTEnum<Float>(), {4, 2, 1}};
Atan2Model m(y, x, output);
std::vector<Float> y_data = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8};
std::vector<Float> x_data = {0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1};
auto got = m.GetOutput<Float>(y_data, x_data);
ASSERT_EQ(got.size(), 8);
for (int i = 0; i < 8; ++i) {
EXPECT_FLOAT_EQ(got[i], std::atan2(y_data[i], x_data[i]));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/atan2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/atan2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c9ed0c99-fb9b-460c-b0cb-e6aefe4b7ca8 | cpp | tensorflow/tensorflow | range | tensorflow/lite/kernels/range.cc | tensorflow/lite/kernels/range_test.cc | #include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <functional>
#include <type_traits>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace range {
namespace {
constexpr int kStartTensor = 0;
constexpr int kLimitTensor = 1;
constexpr int kDeltaTensor = 2;
constexpr int kOutputTensor = 0;
struct OpData {
bool noop;
};
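// Computes the number of elements the range will produce, rejecting a zero
// delta or a delta whose sign cannot reach `limit` from `start`. Integral
// types use ceiling division, e.g. start=2, limit=9, delta=2 yields
// (7 + 2 - 1) / 2 = 4 elements: 2, 4, 6, 8.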
template <typename T>
TfLiteStatus GetSize(TfLiteContext* context, T start, T limit, T delta,
int* size) {
TF_LITE_ENSURE(context, !std::equal_to<T>()(delta, 0));
TF_LITE_ENSURE(
context, (start >= limit && delta < 0) || (start <= limit && delta > 0));
*size =
(std::is_integral<T>::value
? ((std::abs(limit - start) + std::abs(delta) - 1) / std::abs(delta))
: std::ceil(std::abs((limit - start) / delta)));
return kTfLiteOk;
}
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* start,
const TfLiteTensor* limit, const TfLiteTensor* delta,
TfLiteTensor* output) {
int size = 0;
switch (start->type) {
case kTfLiteInt32: {
TF_LITE_ENSURE_OK(context,
GetSize(context, *GetTensorData<int32_t>(start),
*GetTensorData<int32_t>(limit),
*GetTensorData<int32_t>(delta), &size));
break;
}
case kTfLiteInt64: {
TF_LITE_ENSURE_OK(context,
GetSize(context, *GetTensorData<int64_t>(start),
*GetTensorData<int64_t>(limit),
*GetTensorData<int64_t>(delta), &size));
break;
}
case kTfLiteFloat32: {
TF_LITE_ENSURE_OK(context, GetSize(context, *GetTensorData<float>(start),
*GetTensorData<float>(limit),
*GetTensorData<float>(delta), &size));
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Unknown data type: %d", start->type);
return kTfLiteError;
}
}
TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(1);
output_shape_array->data[0] = size;
return context->ResizeTensor(context, output, output_shape_array);
}
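// Fills the already-sized output with start, start + delta, start + 2*delta,
// and so on; the element count was fixed by ResizeOutput.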
template <typename T>
void CalculateRange(const TfLiteTensor* start, const TfLiteTensor* delta,
TfLiteTensor* output) {
const T start_value = *GetTensorData<T>(start);
const T delta_value = *GetTensorData<T>(delta);
T* output_data = GetTensorData<T>(output);
const int num_elements = NumElements(output);
T value = start_value;
for (int i = 0; i < num_elements; ++i) {
output_data[i] = value;
value += delta_value;
}
}
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* start,
const TfLiteTensor* delta, TfLiteTensor* output) {
switch (output->type) {
case kTfLiteInt32: {
CalculateRange<int32_t>(start, delta, output);
break;
}
case kTfLiteFloat32: {
CalculateRange<float>(start, delta, output);
break;
}
case kTfLiteInt64: {
CalculateRange<int64_t>(start, delta, output);
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
}
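// When start, limit and delta are all constant, the output is computed once
// here and marked persistent read-only, making Eval a no-op; otherwise the
// output stays dynamic and is resized at Eval time.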
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->noop = false;
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));
TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0);
const auto dtype = start->type;
if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32 &&
dtype != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "Unknown index output data type: %s",
TfLiteTypeGetName(dtype));
return kTfLiteError;
}
TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype);
TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = dtype;
if (IsConstantOrPersistentTensor(start) &&
IsConstantOrPersistentTensor(limit) &&
IsConstantOrPersistentTensor(delta)) {
SetTensorToPersistentRo(output);
TF_LITE_ENSURE_OK(context,
ResizeOutput(context, start, limit, delta, output));
op_data->noop = true;
return EvalImpl(context, start, delta, output);
}
SetTensorToDynamic(output);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
if (op_data->noop) {
return kTfLiteOk;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutput(context, start, limit, delta, output));
}
return EvalImpl(context, start, delta, output);
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
}
}
TfLiteRegistration* Register_RANGE() {
static TfLiteRegistration r = {range::Init, range::Free, range::Prepare,
range::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
template <typename T>
class RangeOpModel : public SingleOpModel {
public:
explicit RangeOpModel(const TensorType& dtype) {
start_ = AddInput(dtype);
limit_ = AddInput(dtype);
delta_ = AddInput(dtype);
output_ = AddOutput(dtype);
SetBuiltinOp(BuiltinOperator_RANGE, BuiltinOptions_RangeOptions,
CreateRangeOptions(builder_).Union());
BuildInterpreter({GetShape(start_), GetShape(limit_), GetShape(delta_)});
}
explicit RangeOpModel(const TensorType& dtype, const std::vector<T>& start,
const std::vector<T>& limit,
const std::vector<T>& delta) {
start_ = AddConstInput(dtype, start);
limit_ = AddConstInput(dtype, limit);
delta_ = AddConstInput(dtype, delta);
output_ = AddOutput(dtype);
SetBuiltinOp(BuiltinOperator_RANGE, BuiltinOptions_RangeOptions,
CreateRangeOptions(builder_).Union());
BuildInterpreter({GetShape(start_), GetShape(limit_), GetShape(delta_)});
}
int start() { return start_; }
int limit() { return limit_; }
int delta() { return delta_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int start_;
int limit_;
int delta_;
int output_;
};
TEST(RangeOpModel, Simple) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {0});
model.PopulateTensor<int32_t>(model.limit(), {4});
model.PopulateTensor<int32_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, SimpleConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {0}, {4}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, DeltaGreaterThanOne) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {2});
model.PopulateTensor<int32_t>(model.limit(), {9});
model.PopulateTensor<int32_t>(model.delta(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, DeltaGreaterThanOneConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {2}, {9}, {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, NegativeDelta) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {10});
model.PopulateTensor<int32_t>(model.limit(), {3});
model.PopulateTensor<int32_t>(model.delta(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, NegativeDeltaConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {10}, {3}, {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, FloatSimple) {
RangeOpModel<float> model(TensorType_FLOAT32);
model.PopulateTensor<float>(model.start(), {0});
model.PopulateTensor<float>(model.limit(), {4});
model.PopulateTensor<float>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, FloatSimpleConst) {
RangeOpModel<float> model(TensorType_FLOAT32, {0}, {4}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, FloatDeltaGreaterThanOne) {
RangeOpModel<float> model(TensorType_FLOAT32);
model.PopulateTensor<float>(model.start(), {2});
model.PopulateTensor<float>(model.limit(), {9});
model.PopulateTensor<float>(model.delta(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, FloatDeltaGreaterThanOneConst) {
RangeOpModel<float> model(TensorType_FLOAT32, {2}, {9}, {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, FloatNegativeDelta) {
RangeOpModel<float> model(TensorType_FLOAT32);
model.PopulateTensor<float>(model.start(), {10});
model.PopulateTensor<float>(model.limit(), {3});
model.PopulateTensor<float>(model.delta(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, FloatNegativeDeltaConst) {
RangeOpModel<float> model(TensorType_FLOAT32, {10}, {3}, {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, EmptyOutput) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {0});
model.PopulateTensor<int32_t>(model.limit(), {0});
model.PopulateTensor<int32_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
TEST(RangeOpModel, EmptyOutputConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {0}, {0}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
TEST(RangeOpModel, Int64Simple) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {0});
model.PopulateTensor<int64_t>(model.limit(), {4});
model.PopulateTensor<int64_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, Int64SimpleConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {0}, {4}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, Int64DeltaGreaterThanOne) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {2});
model.PopulateTensor<int64_t>(model.limit(), {9});
model.PopulateTensor<int64_t>(model.delta(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, Int64DeltaGreaterThanOneConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {2}, {9}, {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, Int64NegativeDelta) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {10});
model.PopulateTensor<int64_t>(model.limit(), {3});
model.PopulateTensor<int64_t>(model.delta(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, Int64NegativeDeltaConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {10}, {3}, {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, Int64EmptyOutput) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {0});
model.PopulateTensor<int64_t>(model.limit(), {0});
model.PopulateTensor<int64_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
TEST(RangeOpModel, Int64EmptyOutputConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {0}, {0}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/range.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f9d15818-4525-489c-998c-818e362a0e14 | cpp | tensorflow/tensorflow | reshape | tensorflow/lite/delegates/gpu/gl/kernels/reshape.cc | tensorflow/lite/delegates/xnnpack/reshape_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/reshape.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
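// GLSL shader generator for reshape: since both layouts are dense over
// (H, W, C), each output element is located by its flat position and read
// back from the equivalent flat position of the input.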
class Reshape : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes[0][1] * ctx.input_shapes[0][2] *
ctx.input_shapes[0][3] !=
ctx.output_shapes[0][1] * ctx.output_shapes[0][2] *
ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Number of elements in input & output tensors don't match.");
}
const auto& attr = std::any_cast<const ReshapeAttributes&>(ctx.op_attr);
if (attr.new_shape.h != ctx.output_shapes[0][1] ||
attr.new_shape.w != ctx.output_shapes[0][2] ||
attr.new_shape.c != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Dimensions for output does not match new_shape attribute");
}
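    // The shader maps each output (x, y, channel) to the flat element offset
    // p, then decodes p into source (x, y, depth-slice, channel-in-slice);
    // channels are stored in groups of four per slice.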
std::string code = R"(
int input_ch_w = $input_channels$ * $input_data_0_w$;
int output_ch_w = $output_channels$ * $output_data_0_w$;
for (int i = 0; i < 4; ++i) {
int dst_channel = gid.z * 4 + i;
if (dst_channel >= $output_channels$) {
continue;
}
int p = dst_channel + $output_channels$ * gid.x + output_ch_w * gid.y;
int src_y = p / input_ch_w;
int src_x = (p % input_ch_w) / $input_channels$;
int src_z = (p % input_ch_w) % $input_channels$;
int src_layer = src_z / 4;
int src_channel = src_z % 4;
value_0[i] = $input_data_0[src_x, src_y, src_layer]$[src_channel];
}
)";
*generated_code = {
{
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"input_channels", static_cast<int>(ctx.input_shapes[0][3])},
{"output_data_0_w", static_cast<int>(ctx.output_shapes[0][2])},
{"output_channels", static_cast<int>(ctx.output_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewReshapeNodeShader() {
return std::make_unique<Reshape>();
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/reshape_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Reshape, 4DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 4DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(false)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 3DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 3DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(false)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 2DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{{shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 2DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{{shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(false)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 1DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape({shape_rng()});
ReshapeTester()
.InputShape(shape)
.OutputShape(shape)
.OutputShapeAsInput(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 1DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape({shape_rng()});
ReshapeTester()
.InputShape(shape)
.OutputShape(shape)
.OutputShapeAsInput(false)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, 0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
ReshapeTester()
.InputShape(std::vector<int32_t>())
.OutputShape(std::vector<int32_t>())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Reshape, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/reshape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/reshape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e4cffad6-7c59-4630-b97b-acf78e9c302a | cpp | tensorflow/tensorflow | stablehlo_multiply | tensorflow/lite/kernels/stablehlo_multiply.cc | tensorflow/lite/kernels/stablehlo_multiply_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
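// The multiply kernel reuses the shared stablehlo elementwise Prepare/Eval,
// instantiated for kMul; no per-op state is needed, hence the null
// init/free entries in the registration.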
TfLiteRegistration* Register_STABLEHLO_MULTIPLY() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kMul>};
return &r;
}
} | #include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
class MultiplyOpModel : public SingleOpModel {
public:
MultiplyOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_STABLEHLO_MULTIPLY, BuiltinOptions_NONE, 0);
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(StablehloElementwise, MultiplyWorks) {
MultiplyOpModel model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {1.2, 2.5, -1.2, 1});
model.PopulateTensor<float>(model.input2(), {0.1, 3, 2, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {0.12, 7.5, -2.4, 0.5};
std::vector<float> actual_values = model.GetOutput();
ASSERT_EQ(actual_values.size(), expected_values.size());
for (int idx = 0; idx < expected_values.size(); ++idx) {
ASSERT_NEAR(actual_values[idx], expected_values[idx], 1e-6);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_multiply.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_multiply_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3d39c28-8d42-4581-a701-ed09abead87c | cpp | tensorflow/tensorflow | zeros_like | tensorflow/lite/kernels/zeros_like.cc | tensorflow/lite/kernels/zeros_like_test.cc | #include <stdint.h>
#include <string.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace zeros_like {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
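// Prepare mirrors the input's type and shape onto the output; Eval then
// simply zero-fills the output buffer with memset for the supported types.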
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const int num_elements = NumElements(input);
switch (input->type) {
case kTfLiteInt64:
memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t));
break;
case kTfLiteInt32:
memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t));
break;
case kTfLiteFloat32:
memset(GetTensorData<float>(output), 0, num_elements * sizeof(float));
break;
default:
TF_LITE_KERNEL_LOG(context,
"ZerosLike only currently supports int64, int32, "
"and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ZEROS_LIKE() {
static TfLiteRegistration r = {nullptr, nullptr,
zeros_like::Prepare, zeros_like::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Pointwise;
class ZerosLikeOpModel : public SingleOpModel {
public:
explicit ZerosLikeOpModel(const TensorData& input) {
input_ = AddInput(input);
output_ = AddOutput(input);
SetBuiltinOp(BuiltinOperator_ZEROS_LIKE, BuiltinOptions_ZerosLikeOptions,
CreateZerosLikeOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
int output() { return output_; }
protected:
int input_;
int output_;
};
TEST(ZerosLikeOpModel, ZerosLikeFloat) {
ZerosLikeOpModel m({TensorType_FLOAT32, {2, 3}});
m.PopulateTensor<float>(m.input(), {-2.0, -1.0, 0.0, 1.0, 2.0, 3.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
Pointwise(FloatingPointEq(), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 3}));
}
TEST(ZerosLikeOpModel, ZerosLikeInt32) {
ZerosLikeOpModel m({TensorType_INT32, {1, 2, 2, 1}});
m.PopulateTensor<int32_t>(m.input(), {-2, -1, 0, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<int32_t>(m.output()),
ElementsAreArray({0, 0, 0, 0}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 2, 2, 1}));
}
TEST(ZerosLikeOpModel, ZerosLikeInt64) {
ZerosLikeOpModel m({TensorType_INT64, {1, 2, 2, 1}});
m.PopulateTensor<int64_t>(m.input(), {-2, -1, 0, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<int64_t>(m.output()),
ElementsAreArray({0, 0, 0, 0}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 2, 2, 1}));
}
TEST(ZerosLikeOpModel, InvalidTypeTest) {
ZerosLikeOpModel m_uint8({TensorType_UINT8, {1, 1}});
ASSERT_NE(m_uint8.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
ZerosLikeOpModel m_int16({TensorType_INT16, {1, 1}});
ASSERT_NE(m_int16.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
ZerosLikeOpModel m_complex({TensorType_COMPLEX64, {1, 1}});
ASSERT_NE(m_complex.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
ZerosLikeOpModel m_int8({TensorType_INT8, {1, 1}});
ASSERT_NE(m_int8.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/zeros_like.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/zeros_like_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa61fef8-ae7b-4e7f-b284-21e5c94ec56b | cpp | tensorflow/tensorflow | where | tensorflow/lite/kernels/where.cc | tensorflow/lite/kernels/where_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace where {
constexpr int kInputConditionTensor = 0;
constexpr int kOutputTensor = 0;
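// Sizes the output to [true_count, rank(cond)]: one int64 coordinate row per
// nonzero element of the condition. For example, a {2, 2} condition holding
// {true, false, true, true} yields a {3, 2} output with rows
// {0, 0}, {1, 0}, {1, 1}.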
template <typename T>
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* cond_tensor,
TfLiteTensor* output_tensor) {
const RuntimeShape& cond_shape = GetTensorShape(cond_tensor);
const int size = cond_shape.FlatSize();
const int cond_rank = cond_shape.DimensionsCount();
const T* cond_data = GetTensorData<T>(cond_tensor);
int true_count = 0;
for (int i = 0; i < size; ++i) {
if (cond_data[i] != T(0)) {
true_count++;
}
}
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(2);
output_dims->data[0] = true_count;
output_dims->data[1] = cond_rank;
return context->ResizeTensor(context, output_tensor, output_dims);
}
template <typename T>
TfLiteStatus PrepareOutput(TfLiteContext* context,
const TfLiteTensor* cond_tensor,
TfLiteTensor* output) {
output->type = kTfLiteInt64;
if (!IsConstantOrPersistentTensor(cond_tensor)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor<T>(context, cond_tensor, output);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* cond_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputConditionTensor,
&cond_tensor));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (cond_tensor->type) {
case kTfLiteBool:
return PrepareOutput<bool>(context, cond_tensor, output);
case kTfLiteFloat32:
return PrepareOutput<float>(context, cond_tensor, output);
case kTfLiteInt64:
return PrepareOutput<int64_t>(context, cond_tensor, output);
case kTfLiteInt32:
return PrepareOutput<int32_t>(context, cond_tensor, output);
case kTfLiteInt8:
return PrepareOutput<int8_t>(context, cond_tensor, output);
case kTfLiteUInt8:
return PrepareOutput<uint8_t>(context, cond_tensor, output);
case kTfLiteUInt32:
return PrepareOutput<uint32_t>(context, cond_tensor, output);
default:
TF_LITE_KERNEL_LOG(context,
"Condition tensor has unsupported type: '%s'.",
TfLiteTypeGetName(cond_tensor->type));
return kTfLiteError;
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* cond_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputConditionTensor,
&cond_tensor));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
switch (cond_tensor->type) {
case kTfLiteBool:
TF_LITE_ENSURE_OK(
context, ResizeOutputTensor<bool>(context, cond_tensor, output));
break;
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(
context, ResizeOutputTensor<float>(context, cond_tensor, output));
break;
case kTfLiteInt64:
TF_LITE_ENSURE_OK(
context, ResizeOutputTensor<int64_t>(context, cond_tensor, output));
break;
case kTfLiteInt32:
TF_LITE_ENSURE_OK(
context, ResizeOutputTensor<int32_t>(context, cond_tensor, output));
break;
case kTfLiteInt8:
TF_LITE_ENSURE_OK(
context, ResizeOutputTensor<int8_t>(context, cond_tensor, output));
break;
case kTfLiteUInt8:
TF_LITE_ENSURE_OK(
context, ResizeOutputTensor<uint8_t>(context, cond_tensor, output));
break;
case kTfLiteUInt32:
TF_LITE_ENSURE_OK(context, ResizeOutputTensor<uint32_t>(
context, cond_tensor, output));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Condition tensor has unsupported type: '%s'.",
TfLiteTypeGetName(cond_tensor->type));
return kTfLiteError;
}
}
TfLiteIntArray* dims = cond_tensor->dims;
if (dims->size == 0) {
TF_LITE_KERNEL_LOG(context, "Where op requires condition w/ rank > 0");
return kTfLiteError;
}
switch (cond_tensor->type) {
case kTfLiteBool:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<bool>(cond_tensor),
GetTensorData<int64_t>(output));
break;
case kTfLiteFloat32:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<float>(cond_tensor),
GetTensorData<int64_t>(output));
break;
case kTfLiteInt64:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<int64_t>(cond_tensor),
GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<int32_t>(cond_tensor),
GetTensorData<int64_t>(output));
break;
case kTfLiteInt8:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<int8_t>(cond_tensor),
GetTensorData<int64_t>(output));
break;
case kTfLiteUInt8:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<uint8_t>(cond_tensor),
GetTensorData<int64_t>(output));
break;
case kTfLiteUInt32:
reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
GetTensorData<uint32_t>(cond_tensor),
GetTensorData<int64_t>(output));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Condition tensor has unsupported type: '%s'.",
TfLiteTypeGetName(cond_tensor->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}  // namespace where
TfLiteRegistration* Register_WHERE() {
static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
where::Prepare, where::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Test;
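// Wraps a single WHERE op: one condition input tensor and one int64 output
// tensor holding the coordinates of the true elements.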
class BaseWhereOpModel : public SingleOpModel {
public:
BaseWhereOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_WHERE, BuiltinOptions_WhereOptions,
CreateWhereOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
protected:
int input_;
int output_;
};
class IntegerWhereOpModel : public BaseWhereOpModel {
public:
using BaseWhereOpModel::BaseWhereOpModel;
std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
};
template <typename T1>
class ConstInputWhereOpModel : public SingleOpModel {
public:
ConstInputWhereOpModel(T1 constant_values, const TensorData& output) {
input_ = AddConstInput(GetTensorType<T1>(), {constant_values}, {});
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_WHERE, BuiltinOptions_WhereOptions,
CreateWhereOptions(builder_).Union());
BuildInterpreter({{}});
}
int input() { return input_; }
std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
protected:
int input_;
int output_;
};
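// Maps a C++ element type to the corresponding TensorType for the typed
// tests below.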
template <typename T>
TensorType GetTfLiteType();
template <>
TensorType GetTfLiteType<bool>() {
return TensorType_BOOL;
}
template <>
TensorType GetTfLiteType<float>() {
return TensorType_FLOAT32;
}
template <>
TensorType GetTfLiteType<int8_t>() {
return TensorType_INT8;
}
template <>
TensorType GetTfLiteType<uint8_t>() {
return TensorType_UINT8;
}
template <>
TensorType GetTfLiteType<int32_t>() {
return TensorType_INT32;
}
template <>
TensorType GetTfLiteType<uint32_t>() {
return TensorType_UINT32;
}
template <>
TensorType GetTfLiteType<int64_t>() {
return TensorType_INT64;
}
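// Converts a boolean pattern into condition data of the tested element type:
// true maps to T(1) and false to T(0).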
template <typename T>
std::vector<T> GetCompatibleData(const std::initializer_list<bool>& data) {
std::vector<T> result;
for (auto item : data)
if (item)
result.push_back(T(1));
else
result.push_back(T(0));
return result;
}
template <typename T>
class WhereOpTest : public Test {
public:
using List = std::list<T>;
static T shared_;
T value_;
};
using MyTypes =
::testing::Types<bool, float, int32_t, uint32_t, int64_t, int8_t, uint8_t>;
TYPED_TEST_SUITE(WhereOpTest, MyTypes);
TYPED_TEST(WhereOpTest, ScalarValueFail) {
ConstInputWhereOpModel<bool> m(false, {TensorType_INT64, {}});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TYPED_TEST(WhereOpTest, SelectFromVectorNoResult) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({false, false, false}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput().size(), 0);
}
TYPED_TEST(WhereOpTest, SelectFromVector) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({true, false, true}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 2}));
}
TYPED_TEST(WhereOpTest, SelectFromMatrixNoResult) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 3}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({false, false, false,
false, false, false,
false, false, false}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_EQ(m.GetOutput().size(), 0);
}
TYPED_TEST(WhereOpTest, SelectFromMatrix1) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 1}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({true, false, true}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0,
2, 0}));
}
TYPED_TEST(WhereOpTest, SelectFromMatrix2) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 3}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({true, true, false,
true, false, false,
true, false, true}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0,
0, 1,
1, 0,
2, 0,
2, 2}));
}
TYPED_TEST(WhereOpTest, SelectFromMatrix3) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 5}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(),
GetCompatibleData<TypeParam>({true, false, false, true, true,
false, true, true, false, false,
true, false, true, false, false}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0,
0, 3,
0, 4,
1, 1,
1, 2,
2, 0,
2, 2}));
}
TYPED_TEST(WhereOpTest, SelectFromRank3TensorNoResult) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 2, 2}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({false, false, false, false,
false, false, false, false}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_EQ(m.GetOutput().size(), 0);
}
TYPED_TEST(WhereOpTest, SelectFromRank3Tensor1) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 1, 3}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({true, false, true,
false, false, true}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0,
0, 0, 2,
1, 0, 2}));
}
TYPED_TEST(WhereOpTest, SelectFromRank3Tensor2) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 2, 2}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(), GetCompatibleData<TypeParam>({true, true, false, true,
false, false, true, true}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0,
0, 0, 1,
0, 1, 1,
1, 1, 0,
1, 1, 1}));
}
TYPED_TEST(WhereOpTest, SelectFromRank3Tensor3) {
IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 3, 2}},
{TensorType_INT64, {}});
m.PopulateTensor<TypeParam>(
m.input(),
GetCompatibleData<TypeParam>({true, true, false, true, false, false,
false, false, true, false, true, true}));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0,
0, 0, 1,
0, 1, 1,
1, 1, 0,
1, 2, 0,
1, 2, 1}));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/where.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/where_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ebea19d-f62a-4d53-8079-fcaa3d63901f | cpp | tensorflow/tensorflow | bidirectional_sequence_lstm | tensorflow/lite/kernels/bidirectional_sequence_lstm.cc | tensorflow/lite/kernels/bidirectional_sequence_lstm_test.cc | #include <math.h>
#include <algorithm>
#include <cstddef>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/lstm_eval.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bidirectional_sequence_lstm {
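// Input tensor layout: tensor 0 is the sequence input; tensors 1-17 hold the
// forward LSTM weights and biases, tensors 18-34 the backward ones, tensors
// 35-38 the forward/backward activation and cell state variables, tensor 39
// an optional auxiliary input, and tensors 40-47 the auxiliary input weights.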
constexpr int kInputTensor = 0;
constexpr int kFwInputToInputWeightsTensor = 1;
constexpr int kFwInputToForgetWeightsTensor = 2;
constexpr int kFwInputToCellWeightsTensor = 3;
constexpr int kFwInputToOutputWeightsTensor = 4;
constexpr int kFwRecurrentToInputWeightsTensor = 5;
constexpr int kFwRecurrentToForgetWeightsTensor = 6;
constexpr int kFwRecurrentToCellWeightsTensor = 7;
constexpr int kFwRecurrentToOutputWeightsTensor = 8;
constexpr int kFwCellToInputWeightsTensor = 9;
constexpr int kFwCellToForgetWeightsTensor = 10;
constexpr int kFwCellToOutputWeightsTensor = 11;
constexpr int kFwInputGateBiasTensor = 12;
constexpr int kFwForgetGateBiasTensor = 13;
constexpr int kFwCellGateBiasTensor = 14;
constexpr int kFwOutputGateBiasTensor = 15;
constexpr int kFwProjectionWeightsTensor = 16;
constexpr int kFwProjectionBiasTensor = 17;
constexpr int kBwInputToInputWeightsTensor = 18;
constexpr int kBwInputToForgetWeightsTensor = 19;
constexpr int kBwInputToCellWeightsTensor = 20;
constexpr int kBwInputToOutputWeightsTensor = 21;
constexpr int kBwRecurrentToInputWeightsTensor = 22;
constexpr int kBwRecurrentToForgetWeightsTensor = 23;
constexpr int kBwRecurrentToCellWeightsTensor = 24;
constexpr int kBwRecurrentToOutputWeightsTensor = 25;
constexpr int kBwCellToInputWeightsTensor = 26;
constexpr int kBwCellToForgetWeightsTensor = 27;
constexpr int kBwCellToOutputWeightsTensor = 28;
constexpr int kBwInputGateBiasTensor = 29;
constexpr int kBwForgetGateBiasTensor = 30;
constexpr int kBwCellGateBiasTensor = 31;
constexpr int kBwOutputGateBiasTensor = 32;
constexpr int kBwProjectionWeightsTensor = 33;
constexpr int kBwProjectionBiasTensor = 34;
constexpr int kFwInputActivationStateTensor = 35;
constexpr int kFwInputCellStateTensor = 36;
constexpr int kBwInputActivationStateTensor = 37;
constexpr int kBwInputCellStateTensor = 38;
constexpr int kAuxInputTensor = 39;
constexpr int kFwAuxInputToInputWeightsTensor = 40;
constexpr int kFwAuxInputToForgetWeightsTensor = 41;
constexpr int kFwAuxInputToCellWeightsTensor = 42;
constexpr int kFwAuxInputToOutputWeightsTensor = 43;
constexpr int kBwAuxInputToInputWeightsTensor = 44;
constexpr int kBwAuxInputToForgetWeightsTensor = 45;
constexpr int kBwAuxInputToCellWeightsTensor = 46;
constexpr int kBwAuxInputToOutputWeightsTensor = 47;
constexpr int kFwOutputTensor = 0;
constexpr int kBwOutputTensor = 1;
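// Temporary tensors. The two scratch buffers are always allocated; the
// remaining temporaries are only needed by the hybrid path, where float
// inputs are quantized on the fly to match the quantized weights.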
enum TemporaryTensor {
kFwScratchBuffer = 0,
kBwScratchBuffer = 1,
kInputQuantized = 2,
kFwActivationStateQuantized = 3,
kBwActivationStateQuantized = 4,
kFwCellStateQuantized = 5,
kBwCellStateQuantized = 6,
kInputScalingFactors = 7,
kAuxInputScalingFactors = 8,
kOutputStateScalingFactors = 9,
kProductScalingFactors = 10,
kRecoveredCellWeights = 11,
kAccumScratchBuffer = 12,
kInputZeroPoints = 13,
kAuxInputZeroPoints = 14,
kOutputStateZeroPoints = 15,
kFwRowSums = 16,
kBwRowSums = 17,
kAuxInputQuantized = 18,
kNumTemporaryTensors = 19,
};
struct OpData {
int scratch_tensor_index;
bool compute_fw_row_sums = false;
bool compute_bw_row_sums = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, kNumTemporaryTensors,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
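// Checks the shapes and types of one direction's LSTM tensors. The tensor
// indices are passed in so the same checks can be applied to both the
// forward and the backward weights.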
TfLiteStatus CheckLstmTensorDimensionsAndTypes(
TfLiteContext* context, TfLiteNode* node, int n_input, int n_output,
int n_cell, int input_to_input_weights_tensor,
int input_to_forget_weights_tensor, int input_to_cell_weights_tensor,
int input_to_output_weights_tensor, int recurrent_to_input_weights_tensor,
int recurrent_to_forget_weights_tensor,
int recurrent_to_cell_weights_tensor,
int recurrent_to_output_weights_tensor, int cell_to_input_weights_tensor,
int cell_to_forget_weights_tensor, int cell_to_output_weights_tensor,
int input_gate_bias_tensor, int forget_gate_bias_tensor,
int cell_gate_bias_tensor, int output_gate_bias_tensor,
int projection_weights_tensor, int projection_bias_tensor) {
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
node->builtin_data);
TF_LITE_ENSURE(context, params->cell_clip >= 0);
TF_LITE_ENSURE(context, params->proj_clip >= 0);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, input_to_forget_weights_tensor,
&input_to_forget_weights));
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
TF_LITE_ENSURE(context, (input_to_forget_weights->type == kTfLiteFloat32) ||
(input_to_forget_weights->type == kTfLiteInt8) ||
(input_to_forget_weights->type == kTfLiteUInt8));
const TfLiteTensor* input_to_input_weights =
GetOptionalInputTensor(context, node, input_to_input_weights_tensor);
if (input_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
TF_LITE_ENSURE_TYPES_EQ(context, input_to_input_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, input_to_cell_weights_tensor,
&input_to_cell_weights));
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[1], n_input);
TF_LITE_ENSURE_TYPES_EQ(context, input_to_cell_weights->type,
input_to_forget_weights->type);
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, input_to_output_weights_tensor,
&input_to_output_weights));
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
TF_LITE_ENSURE_TYPES_EQ(context, input_to_output_weights->type,
input_to_forget_weights->type);
const TfLiteTensor* recurrent_to_input_weights =
GetOptionalInputTensor(context, node, recurrent_to_input_weights_tensor);
if (recurrent_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[0],
n_cell);
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_input_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, recurrent_to_forget_weights_tensor,
&recurrent_to_forget_weights));
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
n_cell);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_forget_weights->type,
input_to_forget_weights->type);
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, recurrent_to_cell_weights_tensor,
&recurrent_to_cell_weights));
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_cell_weights->type,
input_to_forget_weights->type);
const bool cifg_weights_all_or_none =
((input_to_input_weights != nullptr) &&
(recurrent_to_input_weights != nullptr)) ||
((input_to_input_weights == nullptr) &&
(recurrent_to_input_weights == nullptr));
TF_LITE_ENSURE(context, cifg_weights_all_or_none == true);
const TfLiteTensor* cell_to_input_weights =
GetOptionalInputTensor(context, node, cell_to_input_weights_tensor);
if (cell_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, cell_to_input_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* cell_to_forget_weights =
GetOptionalInputTensor(context, node, cell_to_forget_weights_tensor);
if (cell_to_forget_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, cell_to_forget_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* cell_to_output_weights =
GetOptionalInputTensor(context, node, cell_to_output_weights_tensor);
if (cell_to_output_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, cell_to_output_weights->type,
input_to_forget_weights->type);
}
const bool use_cifg = (input_to_input_weights == nullptr);
const bool peephole_weights_all_or_none =
((cell_to_input_weights != nullptr || use_cifg) &&
(cell_to_forget_weights != nullptr) &&
(cell_to_output_weights != nullptr)) ||
((cell_to_input_weights == nullptr) &&
(cell_to_forget_weights == nullptr) &&
(cell_to_output_weights == nullptr));
TF_LITE_ENSURE(context, peephole_weights_all_or_none == true);
const TfLiteTensor* input_gate_bias =
GetOptionalInputTensor(context, node, input_gate_bias_tensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_gate_bias, nullptr);
} else {
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* forget_gate_bias;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, forget_gate_bias_tensor, &forget_gate_bias));
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteFloat32);
const TfLiteTensor* cell_gate_bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, cell_gate_bias_tensor,
&cell_gate_bias));
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, cell_gate_bias->type, kTfLiteFloat32);
const TfLiteTensor* output_gate_bias;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, output_gate_bias_tensor, &output_gate_bias));
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteFloat32);
const TfLiteTensor* projection_weights =
GetOptionalInputTensor(context, node, projection_weights_tensor);
if (projection_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[0], n_output);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[1], n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, projection_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, projection_bias_tensor);
if (projection_bias != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, projection_bias->dims->data[0], n_output);
TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteFloat32);
}
const bool projection_tensors_consistent =
((projection_weights != nullptr) || (projection_bias == nullptr));
TF_LITE_ENSURE(context, projection_tensors_consistent == true);
return kTfLiteOk;
}
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell) {
TF_LITE_ENSURE_OK(
context,
CheckLstmTensorDimensionsAndTypes(
context, node, n_input, n_output, n_cell,
kFwInputToInputWeightsTensor, kFwInputToForgetWeightsTensor,
kFwInputToCellWeightsTensor, kFwInputToOutputWeightsTensor,
kFwRecurrentToInputWeightsTensor, kFwRecurrentToForgetWeightsTensor,
kFwRecurrentToCellWeightsTensor, kFwRecurrentToOutputWeightsTensor,
kFwCellToInputWeightsTensor, kFwCellToForgetWeightsTensor,
kFwCellToOutputWeightsTensor, kFwInputGateBiasTensor,
kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
kFwProjectionBiasTensor));
TF_LITE_ENSURE_OK(
context,
CheckLstmTensorDimensionsAndTypes(
context, node, n_input, n_output, n_cell,
kBwInputToInputWeightsTensor, kBwInputToForgetWeightsTensor,
kBwInputToCellWeightsTensor, kBwInputToOutputWeightsTensor,
kBwRecurrentToInputWeightsTensor, kBwRecurrentToForgetWeightsTensor,
kBwRecurrentToCellWeightsTensor, kBwRecurrentToOutputWeightsTensor,
kBwCellToInputWeightsTensor, kBwCellToForgetWeightsTensor,
kBwCellToOutputWeightsTensor, kBwInputGateBiasTensor,
kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
kBwProjectionBiasTensor));
return kTfLiteOk;
}
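// Checks the input tensors, resizes the outputs based on the input sizes,
// and allocates the scratch buffers (plus quantization temporaries for the
// hybrid path).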
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
node->builtin_data);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 48);
TF_LITE_ENSURE_EQ(context, node->outputs->size,
params->merge_outputs ? 1 : 2);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, input->dims->size, 3);
const bool time_major = params->time_major;
const int max_time = time_major ? input->dims->data[0] : input->dims->data[1];
const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0];
const int n_input = input->dims->data[2];
const TfLiteTensor* fw_input_to_output_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwInputToOutputWeightsTensor,
&fw_input_to_output_weights));
const int n_fw_cell = fw_input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, fw_input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, fw_input_to_output_weights->dims->data[1],
n_input);
const TfLiteTensor* bw_input_to_output_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwInputToOutputWeightsTensor,
&bw_input_to_output_weights));
const int n_bw_cell = bw_input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->dims->data[1],
n_input);
TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->type,
fw_input_to_output_weights->type);
const TfLiteTensor* fw_recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kFwRecurrentToOutputWeightsTensor,
&fw_recurrent_to_output_weights));
TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->dims->data[0],
n_fw_cell);
TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->type,
fw_input_to_output_weights->type);
const int n_fw_output = fw_recurrent_to_output_weights->dims->data[1];
const TfLiteTensor* bw_recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kBwRecurrentToOutputWeightsTensor,
&bw_recurrent_to_output_weights));
TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->dims->data[0],
n_bw_cell);
TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->type,
fw_input_to_output_weights->type);
const int n_bw_output = bw_recurrent_to_output_weights->dims->data[1];
TF_LITE_ENSURE_OK(
context, CheckInputTensorDimensions(context, node, n_input, n_fw_output,
n_fw_cell));
const TfLiteTensor* aux_input =
GetOptionalInputTensor(context, node, kAuxInputTensor);
const TfLiteTensor* fw_aux_input_to_input_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToInputWeightsTensor);
const TfLiteTensor* fw_aux_input_to_forget_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToForgetWeightsTensor);
const TfLiteTensor* fw_aux_input_to_cell_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToCellWeightsTensor);
const TfLiteTensor* fw_aux_input_to_output_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToOutputWeightsTensor);
const TfLiteTensor* bw_aux_input_to_input_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToInputWeightsTensor);
const TfLiteTensor* bw_aux_input_to_forget_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToForgetWeightsTensor);
const TfLiteTensor* bw_aux_input_to_cell_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToCellWeightsTensor);
const TfLiteTensor* bw_aux_input_to_output_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToOutputWeightsTensor);
const bool aux_inputs_weights_all_or_none =
((fw_aux_input_to_cell_weights != nullptr) &&
(fw_aux_input_to_forget_weights != nullptr) &&
(fw_aux_input_to_output_weights != nullptr) &&
(bw_aux_input_to_cell_weights != nullptr) &&
(bw_aux_input_to_forget_weights != nullptr) &&
(bw_aux_input_to_output_weights != nullptr)) ||
((fw_aux_input_to_cell_weights == nullptr) &&
(fw_aux_input_to_forget_weights == nullptr) &&
(fw_aux_input_to_output_weights == nullptr) &&
(bw_aux_input_to_cell_weights == nullptr) &&
(bw_aux_input_to_forget_weights == nullptr) &&
(bw_aux_input_to_output_weights == nullptr));
TF_LITE_ENSURE(context, aux_inputs_weights_all_or_none);
const bool has_aux_input = (fw_aux_input_to_forget_weights != nullptr);
if (has_aux_input) {
TF_LITE_ASSERT_EQ(aux_input->dims->data[0], input->dims->data[0]);
TF_LITE_ASSERT_EQ(aux_input->dims->data[1], input->dims->data[1]);
}
TfLiteTensor* fw_output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFwOutputTensor, &fw_output));
TfLiteTensor* fw_activation_state =
GetVariableInput(context, node, kFwInputActivationStateTensor);
TF_LITE_ENSURE(context, fw_activation_state != nullptr);
TfLiteTensor* fw_cell_state =
GetVariableInput(context, node, kFwInputCellStateTensor);
TF_LITE_ENSURE(context, fw_cell_state != nullptr);
TF_LITE_ENSURE_EQ(context, NumElements(fw_activation_state),
n_batch * n_fw_output);
TF_LITE_ENSURE_EQ(context, NumElements(fw_cell_state), n_batch * n_fw_cell);
TfLiteIntArray* fw_output_size = TfLiteIntArrayCreate(3);
fw_output_size->data[0] = time_major ? max_time : n_batch;
fw_output_size->data[1] = time_major ? n_batch : max_time;
fw_output_size->data[2] =
params->merge_outputs ? n_bw_output + n_fw_output : n_fw_output;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, fw_output, fw_output_size));
const bool is_hybrid_op = IsHybridOp(input, fw_input_to_output_weights);
TfLiteIntArrayFree(node->temporaries);
if (is_hybrid_op) {
node->temporaries = TfLiteIntArrayCreate(
has_aux_input ? kNumTemporaryTensors : kNumTemporaryTensors - 1);
} else {
node->temporaries = TfLiteIntArrayCreate(2);
}
node->temporaries->data[kFwScratchBuffer] =
op_data->scratch_tensor_index + kFwScratchBuffer;
TfLiteTensor* fw_scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kFwScratchBuffer,
&fw_scratch_buffer));
fw_scratch_buffer->type = input->type;
fw_scratch_buffer->allocation_type = kTfLiteArenaRw;
const TfLiteTensor* fw_input_to_input_weights =
GetOptionalInputTensor(context, node, kFwInputToInputWeightsTensor);
const bool fw_use_cifg = (fw_input_to_input_weights == nullptr);
if (has_aux_input && !fw_use_cifg) {
TF_LITE_ENSURE_EQ(context, fw_aux_input_to_input_weights->dims->data[0],
fw_input_to_input_weights->dims->data[0]);
}
TfLiteIntArray* fw_scratch_buffer_size = TfLiteIntArrayCreate(2);
fw_scratch_buffer_size->data[0] = n_batch;
if (fw_use_cifg) {
fw_scratch_buffer_size->data[1] = n_fw_cell * 4 + 16;
} else {
fw_scratch_buffer_size->data[1] = n_fw_cell * 5 + 16;
}
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_scratch_buffer,
fw_scratch_buffer_size));
TF_LITE_ENSURE_OK(
context, CheckInputTensorDimensions(context, node, n_input, n_bw_output,
n_bw_cell));
TfLiteTensor* bw_activation_state =
GetVariableInput(context, node, kBwInputActivationStateTensor);
TF_LITE_ENSURE(context, bw_activation_state != nullptr);
TfLiteTensor* bw_cell_state =
GetVariableInput(context, node, kBwInputCellStateTensor);
TF_LITE_ENSURE(context, bw_cell_state != nullptr);
if (!params->merge_outputs) {
TfLiteTensor* bw_output;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kBwOutputTensor, &bw_output));
TfLiteIntArray* bw_output_size = TfLiteIntArrayCreate(3);
bw_output_size->data[0] = time_major ? max_time : n_batch;
bw_output_size->data[1] = time_major ? n_batch : max_time;
bw_output_size->data[2] = n_bw_output;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, bw_output, bw_output_size));
}
TF_LITE_ENSURE_EQ(context, NumElements(bw_activation_state),
n_batch * n_bw_output);
TF_LITE_ENSURE_EQ(context, NumElements(bw_cell_state), n_batch * n_bw_cell);
node->temporaries->data[kBwScratchBuffer] =
op_data->scratch_tensor_index + kBwScratchBuffer;
TfLiteTensor* bw_scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kBwScratchBuffer,
&bw_scratch_buffer));
bw_scratch_buffer->type = input->type;
bw_scratch_buffer->allocation_type = kTfLiteArenaRw;
const TfLiteTensor* bw_input_to_input_weights =
GetOptionalInputTensor(context, node, kBwInputToInputWeightsTensor);
const bool bw_use_cifg = (bw_input_to_input_weights == nullptr);
if (has_aux_input && !bw_use_cifg) {
TF_LITE_ENSURE_EQ(context, bw_aux_input_to_input_weights->dims->data[0],
bw_input_to_input_weights->dims->data[0]);
}
TfLiteIntArray* bw_scratch_buffer_size = TfLiteIntArrayCreate(2);
bw_scratch_buffer_size->data[0] = n_batch;
if (bw_use_cifg) {
bw_scratch_buffer_size->data[1] = n_bw_cell * 4;
} else {
bw_scratch_buffer_size->data[1] = n_bw_cell * 5;
}
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_scratch_buffer,
bw_scratch_buffer_size));
if (is_hybrid_op) {
op_data->compute_fw_row_sums = true;
op_data->compute_bw_row_sums = true;
node->temporaries->data[kInputQuantized] =
op_data->scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kInputQuantized,
&input_quantized));
input_quantized->type = fw_input_to_output_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[kFwActivationStateQuantized] =
op_data->scratch_tensor_index + kFwActivationStateQuantized;
TfLiteTensor* fw_activation_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFwActivationStateQuantized,
&fw_activation_state_quantized));
fw_activation_state_quantized->type = fw_input_to_output_weights->type;
fw_activation_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(fw_activation_state_quantized->dims,
fw_activation_state->dims)) {
TfLiteIntArray* fw_activation_state_quantized_size =
TfLiteIntArrayCopy(fw_activation_state->dims);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, fw_activation_state_quantized,
fw_activation_state_quantized_size));
}
node->temporaries->data[kBwActivationStateQuantized] =
op_data->scratch_tensor_index + kBwActivationStateQuantized;
TfLiteTensor* bw_activation_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kBwActivationStateQuantized,
&bw_activation_state_quantized));
bw_activation_state_quantized->type = fw_input_to_output_weights->type;
bw_activation_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(bw_activation_state_quantized->dims,
bw_activation_state->dims)) {
TfLiteIntArray* bw_activation_state_quantized_size =
TfLiteIntArrayCopy(bw_activation_state->dims);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, bw_activation_state_quantized,
bw_activation_state_quantized_size));
}
node->temporaries->data[kFwCellStateQuantized] =
op_data->scratch_tensor_index + kFwCellStateQuantized;
TfLiteTensor* fw_cell_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFwCellStateQuantized,
&fw_cell_state_quantized));
fw_cell_state_quantized->type = fw_input_to_output_weights->type;
fw_cell_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(fw_cell_state_quantized->dims,
fw_cell_state->dims)) {
TfLiteIntArray* fw_cell_state_quantized_size =
TfLiteIntArrayCopy(fw_cell_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, fw_cell_state_quantized,
fw_cell_state_quantized_size));
}
node->temporaries->data[kBwCellStateQuantized] =
op_data->scratch_tensor_index + kBwCellStateQuantized;
TfLiteTensor* bw_cell_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kBwCellStateQuantized,
&bw_cell_state_quantized));
bw_cell_state_quantized->type = fw_input_to_output_weights->type;
bw_cell_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(bw_cell_state_quantized->dims,
bw_cell_state->dims)) {
TfLiteIntArray* bw_cell_state_quantized_size =
TfLiteIntArrayCopy(bw_cell_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, bw_cell_state_quantized,
bw_cell_state_quantized_size));
}
node->temporaries->data[kInputScalingFactors] =
op_data->scratch_tensor_index + kInputScalingFactors;
TfLiteTensor* input_sf;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kInputScalingFactors, &input_sf));
input_sf->type = kTfLiteFloat32;
input_sf->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {n_batch};
if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1);
input_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_sf, input_sf_size));
}
node->temporaries->data[kAuxInputScalingFactors] =
op_data->scratch_tensor_index + kAuxInputScalingFactors;
TfLiteTensor* aux_input_sf;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kAuxInputScalingFactors,
&aux_input_sf));
aux_input_sf->type = kTfLiteFloat32;
aux_input_sf->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(aux_input_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* aux_input_sf_size = TfLiteIntArrayCreate(1);
aux_input_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, aux_input_sf,
aux_input_sf_size));
}
node->temporaries->data[kOutputStateScalingFactors] =
op_data->scratch_tensor_index + kOutputStateScalingFactors;
TfLiteTensor* output_state_sf;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kOutputStateScalingFactors,
&output_state_sf));
output_state_sf->type = kTfLiteFloat32;
output_state_sf->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1);
output_state_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf,
output_state_sf_size));
}
node->temporaries->data[kProductScalingFactors] =
op_data->scratch_tensor_index + kProductScalingFactors;
TfLiteTensor* prod_scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kProductScalingFactors,
&prod_scaling_factors));
prod_scaling_factors->type = kTfLiteFloat32;
prod_scaling_factors->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1,
scaling_dims)) {
TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1);
prod_scaling_factors_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, prod_scaling_factors,
prod_scaling_factors_size));
}
node->temporaries->data[kRecoveredCellWeights] =
op_data->scratch_tensor_index + kRecoveredCellWeights;
TfLiteTensor* recovered_cell_weights;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRecoveredCellWeights,
&recovered_cell_weights));
recovered_cell_weights->type = kTfLiteFloat32;
recovered_cell_weights->allocation_type = kTfLiteArenaRw;
int recovered_cell_dims[1] = {n_fw_cell};
if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1,
recovered_cell_dims)) {
TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1);
recovered_cell_weights_size->data[0] = n_fw_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, recovered_cell_weights,
recovered_cell_weights_size));
}
node->temporaries->data[kAccumScratchBuffer] =
op_data->scratch_tensor_index + kAccumScratchBuffer;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kAccumScratchBuffer, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int n_cell = std::max(n_fw_cell, n_bw_cell);
if (has_aux_input) {
n_cell = std::max(n_cell, fw_aux_input_to_output_weights->dims->data[0]);
n_cell = std::max(n_cell, bw_aux_input_to_output_weights->dims->data[0]);
}
int accum_scratch_dims[2] = {n_cell, n_batch};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = n_cell;
accum_size->data[1] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[kInputZeroPoints] =
op_data->scratch_tensor_index + kInputZeroPoints;
TfLiteTensor* input_zp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kInputZeroPoints, &input_zp));
input_zp->type = kTfLiteFloat32;
input_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1);
input_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_zp, input_zp_size));
}
node->temporaries->data[kAuxInputZeroPoints] =
op_data->scratch_tensor_index + kAuxInputZeroPoints;
TfLiteTensor* aux_input_zp;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kAuxInputZeroPoints, &aux_input_zp));
aux_input_zp->type = kTfLiteFloat32;
aux_input_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(aux_input_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* aux_input_zp_size = TfLiteIntArrayCreate(1);
aux_input_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, aux_input_zp,
aux_input_zp_size));
}
node->temporaries->data[kOutputStateZeroPoints] =
op_data->scratch_tensor_index + kOutputStateZeroPoints;
TfLiteTensor* output_state_zp;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kOutputStateZeroPoints,
&output_state_zp));
output_state_zp->type = kTfLiteFloat32;
output_state_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1);
output_state_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp,
output_state_zp_size));
}
int fw_row_sums_rows = fw_use_cifg ? 6 : 8;
if (has_aux_input) {
fw_row_sums_rows += fw_use_cifg ? 3 : 4;
}
const TfLiteTensor* fw_projection_weights =
GetOptionalInputTensor(context, node, kFwProjectionWeightsTensor);
if (fw_projection_weights != nullptr) {
fw_row_sums_rows += ceil(static_cast<float>(n_fw_output) / n_fw_cell);
}
node->temporaries->data[kFwRowSums] =
op_data->scratch_tensor_index + kFwRowSums;
TfLiteTensor* fw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFwRowSums, &fw_row_sums));
fw_row_sums->type = kTfLiteInt32;
fw_row_sums->allocation_type = kTfLiteArenaRwPersistent;
int fw_row_sums_dims[2] = {fw_row_sums_rows, n_fw_cell};
if (!TfLiteIntArrayEqualsArray(fw_row_sums->dims, 2, fw_row_sums_dims)) {
TfLiteIntArray* fw_hybrid_scratch_size = TfLiteIntArrayCreate(2);
fw_hybrid_scratch_size->data[0] = fw_row_sums_dims[0];
fw_hybrid_scratch_size->data[1] = fw_row_sums_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_row_sums,
fw_hybrid_scratch_size));
}
int bw_row_sums_rows = bw_use_cifg ? 6 : 8;
if (has_aux_input) {
bw_row_sums_rows += bw_use_cifg ? 3 : 4;
}
const TfLiteTensor* bw_projection_weights =
GetOptionalInputTensor(context, node, kBwProjectionWeightsTensor);
if (bw_projection_weights != nullptr) {
bw_row_sums_rows += ceil(static_cast<float>(n_bw_output) / n_bw_cell);
}
node->temporaries->data[kBwRowSums] =
op_data->scratch_tensor_index + kBwRowSums;
TfLiteTensor* bw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kBwRowSums, &bw_row_sums));
bw_row_sums->type = kTfLiteInt32;
bw_row_sums->allocation_type = kTfLiteArenaRwPersistent;
int bw_row_sums_dims[2] = {bw_row_sums_rows, n_bw_cell};
if (!TfLiteIntArrayEqualsArray(bw_row_sums->dims, 2, bw_row_sums_dims)) {
TfLiteIntArray* bw_row_sums_size = TfLiteIntArrayCreate(2);
bw_row_sums_size->data[0] = bw_row_sums_dims[0];
bw_row_sums_size->data[1] = bw_row_sums_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_row_sums,
bw_row_sums_size));
}
if (has_aux_input) {
node->temporaries->data[kAuxInputQuantized] =
op_data->scratch_tensor_index + kAuxInputQuantized;
TfLiteTensor* aux_input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kAuxInputQuantized,
&aux_input_quantized));
aux_input_quantized->type = fw_input_to_output_weights->type;
aux_input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(aux_input_quantized->dims, aux_input->dims)) {
TfLiteIntArray* aux_input_quantized_size =
TfLiteIntArrayCopy(aux_input->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, aux_input_quantized,
aux_input_quantized_size));
}
}
}
return kTfLiteOk;
}
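// Runs the forward and backward LSTM passes over the sequence and writes
// their results to the output tensor(s), dispatching on the weight type
// (float, or hybrid uint8/int8).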
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
node->builtin_data);
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fw_input_to_input_weights =
GetOptionalInputTensor(context, node, kFwInputToInputWeightsTensor);
const TfLiteTensor* fw_input_to_forget_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwInputToForgetWeightsTensor,
&fw_input_to_forget_weights));
const TfLiteTensor* fw_input_to_cell_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwInputToCellWeightsTensor,
&fw_input_to_cell_weights));
const TfLiteTensor* fw_input_to_output_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwInputToOutputWeightsTensor,
&fw_input_to_output_weights));
const TfLiteTensor* fw_recurrent_to_input_weights =
GetOptionalInputTensor(context, node, kFwRecurrentToInputWeightsTensor);
const TfLiteTensor* fw_recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kFwRecurrentToForgetWeightsTensor,
&fw_recurrent_to_forget_weights));
const TfLiteTensor* fw_recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwRecurrentToCellWeightsTensor,
&fw_recurrent_to_cell_weights));
const TfLiteTensor* fw_recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kFwRecurrentToOutputWeightsTensor,
&fw_recurrent_to_output_weights));
const TfLiteTensor* fw_cell_to_input_weights =
GetOptionalInputTensor(context, node, kFwCellToInputWeightsTensor);
const TfLiteTensor* fw_cell_to_forget_weights =
GetOptionalInputTensor(context, node, kFwCellToForgetWeightsTensor);
const TfLiteTensor* fw_cell_to_output_weights =
GetOptionalInputTensor(context, node, kFwCellToOutputWeightsTensor);
const TfLiteTensor* fw_input_gate_bias =
GetOptionalInputTensor(context, node, kFwInputGateBiasTensor);
const TfLiteTensor* fw_forget_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwForgetGateBiasTensor,
&fw_forget_gate_bias));
const TfLiteTensor* fw_cell_gate_bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwCellGateBiasTensor,
&fw_cell_gate_bias));
const TfLiteTensor* fw_output_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwOutputGateBiasTensor,
&fw_output_gate_bias));
const TfLiteTensor* fw_projection_weights =
GetOptionalInputTensor(context, node, kFwProjectionWeightsTensor);
const TfLiteTensor* fw_projection_bias =
GetOptionalInputTensor(context, node, kFwProjectionBiasTensor);
TfLiteTensor* fw_activation_state =
GetVariableInput(context, node, kFwInputActivationStateTensor);
TFLITE_DCHECK(fw_activation_state != nullptr);
TfLiteTensor* fw_cell_state =
GetVariableInput(context, node, kFwInputCellStateTensor);
TFLITE_DCHECK(fw_cell_state != nullptr);
TfLiteTensor* fw_output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFwOutputTensor, &fw_output));
const TfLiteTensor* bw_input_to_input_weights =
GetOptionalInputTensor(context, node, kBwInputToInputWeightsTensor);
const TfLiteTensor* bw_input_to_forget_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwInputToForgetWeightsTensor,
&bw_input_to_forget_weights));
const TfLiteTensor* bw_input_to_cell_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwInputToCellWeightsTensor,
&bw_input_to_cell_weights));
const TfLiteTensor* bw_input_to_output_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwInputToOutputWeightsTensor,
&bw_input_to_output_weights));
const TfLiteTensor* bw_recurrent_to_input_weights =
GetOptionalInputTensor(context, node, kBwRecurrentToInputWeightsTensor);
const TfLiteTensor* bw_recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kBwRecurrentToForgetWeightsTensor,
&bw_recurrent_to_forget_weights));
const TfLiteTensor* bw_recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwRecurrentToCellWeightsTensor,
&bw_recurrent_to_cell_weights));
const TfLiteTensor* bw_recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kBwRecurrentToOutputWeightsTensor,
&bw_recurrent_to_output_weights));
const TfLiteTensor* bw_cell_to_input_weights =
GetOptionalInputTensor(context, node, kBwCellToInputWeightsTensor);
const TfLiteTensor* bw_cell_to_forget_weights =
GetOptionalInputTensor(context, node, kBwCellToForgetWeightsTensor);
const TfLiteTensor* bw_cell_to_output_weights =
GetOptionalInputTensor(context, node, kBwCellToOutputWeightsTensor);
const TfLiteTensor* bw_input_gate_bias =
GetOptionalInputTensor(context, node, kBwInputGateBiasTensor);
const TfLiteTensor* bw_forget_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwForgetGateBiasTensor,
&bw_forget_gate_bias));
const TfLiteTensor* bw_cell_gate_bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwCellGateBiasTensor,
&bw_cell_gate_bias));
const TfLiteTensor* bw_output_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwOutputGateBiasTensor,
&bw_output_gate_bias));
const TfLiteTensor* bw_projection_weights =
GetOptionalInputTensor(context, node, kBwProjectionWeightsTensor);
const TfLiteTensor* bw_projection_bias =
GetOptionalInputTensor(context, node, kBwProjectionBiasTensor);
TfLiteTensor* bw_activation_state =
GetVariableInput(context, node, kBwInputActivationStateTensor);
TFLITE_DCHECK(bw_activation_state != nullptr);
TfLiteTensor* bw_cell_state =
GetVariableInput(context, node, kBwInputCellStateTensor);
TFLITE_DCHECK(bw_cell_state != nullptr);
TfLiteTensor* bw_output = params->merge_outputs
? nullptr
: GetOutput(context, node, kBwOutputTensor);
TfLiteTensor* fw_scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kFwScratchBuffer,
&fw_scratch_buffer));
TfLiteTensor* bw_scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kBwScratchBuffer,
&bw_scratch_buffer));
const TfLiteTensor* aux_input =
GetOptionalInputTensor(context, node, kAuxInputTensor);
const TfLiteTensor* fw_aux_input_to_input_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToInputWeightsTensor);
const TfLiteTensor* fw_aux_input_to_forget_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToForgetWeightsTensor);
const TfLiteTensor* fw_aux_input_to_cell_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToCellWeightsTensor);
const TfLiteTensor* fw_aux_input_to_output_weights =
GetOptionalInputTensor(context, node, kFwAuxInputToOutputWeightsTensor);
const TfLiteTensor* bw_aux_input_to_input_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToInputWeightsTensor);
const TfLiteTensor* bw_aux_input_to_forget_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToForgetWeightsTensor);
const TfLiteTensor* bw_aux_input_to_cell_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToCellWeightsTensor);
const TfLiteTensor* bw_aux_input_to_output_weights =
GetOptionalInputTensor(context, node, kBwAuxInputToOutputWeightsTensor);
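// The op runs in one of two modes. In stacking mode, aux input weights are
// provided and both directions consume the aux input as an additional input.
// In cross-linking (non-stacking) mode, an aux input is provided without aux
// weights, and the backward layer consumes the aux input as its primary
// input instead of the sequence input.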
const bool has_previous_bw_output = (aux_input != nullptr);
const bool use_aux_input = (fw_aux_input_to_forget_weights != nullptr);
TfLiteLSTMParams lstm_params = {params->activation, params->cell_clip,
params->proj_clip, kTfLiteLSTMFullKernel,
params->asymmetric_quantize_inputs};
const int bw_output_offset =
params->merge_outputs ? fw_recurrent_to_output_weights->dims->data[1] : 0;
const auto actual_bw_output = params->merge_outputs ? fw_output : bw_output;
const bool time_major = params->time_major;
const bool non_stacking_mode = !use_aux_input && has_previous_bw_output;
const TfLiteTensor* bw_input = non_stacking_mode ? aux_input : input;
const TfLiteTensor* real_aux_input = non_stacking_mode ? nullptr : aux_input;
switch (fw_input_to_output_weights->type) {
case kTfLiteFloat32: {
TfLiteStatus fw_pass_status = lstm_eval::EvalFloat(
input, fw_input_to_input_weights, fw_input_to_forget_weights,
fw_input_to_cell_weights, fw_input_to_output_weights,
fw_recurrent_to_input_weights, fw_recurrent_to_forget_weights,
fw_recurrent_to_cell_weights, fw_recurrent_to_output_weights,
fw_cell_to_input_weights, fw_cell_to_forget_weights,
fw_cell_to_output_weights,
/*input_layer_norm_coefficients=*/nullptr,
/*forget_layer_norm_coefficients=*/nullptr,
/*cell_layer_norm_coefficients=*/nullptr,
/*output_layer_norm_coefficients=*/nullptr, real_aux_input,
fw_aux_input_to_input_weights, fw_aux_input_to_forget_weights,
fw_aux_input_to_cell_weights, fw_aux_input_to_output_weights,
fw_input_gate_bias, fw_forget_gate_bias, fw_cell_gate_bias,
fw_output_gate_bias, fw_projection_weights, fw_projection_bias,
&lstm_params,
/*forward_sequence=*/true, time_major, /*output_offset=*/0,
fw_scratch_buffer, fw_activation_state, fw_cell_state, fw_output,
/*recurrent_to_input_is_diag=*/false,
/*recurrent_to_forget_is_diag=*/false,
/*recurrent_to_cell_is_diag=*/false,
/*recurrent_to_output_is_diag=*/false,
CpuBackendContext::GetFromContext(context));
TF_LITE_ENSURE_OK(context, fw_pass_status);
TfLiteStatus bw_pass_status = lstm_eval::EvalFloat(
bw_input, bw_input_to_input_weights, bw_input_to_forget_weights,
bw_input_to_cell_weights, bw_input_to_output_weights,
bw_recurrent_to_input_weights, bw_recurrent_to_forget_weights,
bw_recurrent_to_cell_weights, bw_recurrent_to_output_weights,
bw_cell_to_input_weights, bw_cell_to_forget_weights,
bw_cell_to_output_weights,
/*input_layer_norm_coefficients=*/nullptr,
/*forget_layer_norm_coefficients=*/nullptr,
/*cell_layer_norm_coefficients=*/nullptr,
/*output_layer_norm_coefficients=*/nullptr, real_aux_input,
bw_aux_input_to_input_weights, bw_aux_input_to_forget_weights,
bw_aux_input_to_cell_weights, bw_aux_input_to_output_weights,
bw_input_gate_bias, bw_forget_gate_bias, bw_cell_gate_bias,
bw_output_gate_bias, bw_projection_weights, bw_projection_bias,
&lstm_params,
/*forward_sequence=*/false, time_major, bw_output_offset,
bw_scratch_buffer, bw_activation_state, bw_cell_state,
actual_bw_output,
/*recurrent_to_input_is_diag=*/false,
/*recurrent_to_forget_is_diag=*/false,
/*recurrent_to_cell_is_diag=*/false,
/*recurrent_to_output_is_diag=*/false,
CpuBackendContext::GetFromContext(context));
TF_LITE_ENSURE_OK(context, bw_pass_status);
return kTfLiteOk;
}
case kTfLiteUInt8:
case kTfLiteInt8: {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kInputQuantized, &input_quantized));
TfLiteTensor* fw_activation_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFwActivationStateQuantized,
&fw_activation_state_quantized));
TfLiteTensor* bw_activation_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kBwActivationStateQuantized,
&bw_activation_state_quantized));
TfLiteTensor* fw_cell_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFwCellStateQuantized,
&fw_cell_state_quantized));
TfLiteTensor* bw_cell_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kBwCellStateQuantized,
&bw_cell_state_quantized));
TfLiteTensor* prod_scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kProductScalingFactors,
&prod_scaling_factors));
TfLiteTensor* recovered_cell_weights;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRecoveredCellWeights,
&recovered_cell_weights));
TfLiteTensor* aux_input_quantized =
use_aux_input ? GetTemporary(context, node, kAuxInputQuantized)
: nullptr;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kAccumScratchBuffer, &accum_scratch));
TfLiteTensor* fw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFwRowSums, &fw_row_sums));
TfLiteTensor* bw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kBwRowSums, &bw_row_sums));
const int fw_row_sums_size = fw_row_sums->dims->data[0];
const int bw_row_sums_size = bw_row_sums->dims->data[0];
TfLiteStatus fw_pass_status = lstm_eval::EvalHybrid(
input, fw_input_to_input_weights,
nullptr, fw_input_to_forget_weights,
nullptr, fw_input_to_cell_weights,
nullptr, fw_input_to_output_weights,
nullptr,
fw_recurrent_to_input_weights,
nullptr,
fw_recurrent_to_forget_weights,
nullptr,
fw_recurrent_to_cell_weights,
nullptr,
fw_recurrent_to_output_weights,
nullptr,
fw_cell_to_input_weights, fw_cell_to_forget_weights,
fw_cell_to_output_weights,
nullptr,
nullptr,
nullptr,
nullptr, real_aux_input,
fw_aux_input_to_input_weights, fw_aux_input_to_forget_weights,
fw_aux_input_to_cell_weights, fw_aux_input_to_output_weights,
fw_input_gate_bias, fw_forget_gate_bias, fw_cell_gate_bias,
fw_output_gate_bias, fw_projection_weights,
nullptr, fw_projection_bias,
&lstm_params,
true, time_major, 0,
fw_scratch_buffer, GetTemporary(context, node, kInputScalingFactors),
GetTemporary(context, node, kAuxInputScalingFactors),
GetTemporary(context, node, kOutputStateScalingFactors),
prod_scaling_factors, recovered_cell_weights, input_quantized,
aux_input_quantized, fw_activation_state_quantized,
fw_cell_state_quantized, fw_activation_state, fw_cell_state,
accum_scratch, fw_output,
GetTemporary(context, node, kInputZeroPoints),
GetTemporary(context, node, kAuxInputZeroPoints),
GetTemporary(context, node, kOutputStateZeroPoints), fw_row_sums,
fw_row_sums_size, &op_data->compute_fw_row_sums,
          /*recurrent_to_input_is_diag=*/false,
          /*recurrent_to_forget_is_diag=*/false,
          /*recurrent_to_cell_is_diag=*/false,
          /*recurrent_to_output_is_diag=*/false,
CpuBackendContext::GetFromContext(context));
TF_LITE_ENSURE_OK(context, fw_pass_status);
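      // Backward pass walks the sequence in reverse; when outputs are merged,
      // bw_output_offset shifts the writes past the forward half of each
      // output row (actual_bw_output then aliases the forward output tensor).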
TfLiteStatus bw_pass_status = lstm_eval::EvalHybrid(
          bw_input, bw_input_to_input_weights,
          /*input_to_input_weights_ledger=*/nullptr, bw_input_to_forget_weights,
          /*input_to_forget_weights_ledger=*/nullptr, bw_input_to_cell_weights,
          /*input_to_cell_weights_ledger=*/nullptr, bw_input_to_output_weights,
          /*input_to_output_weights_ledger=*/nullptr,
          bw_recurrent_to_input_weights,
          /*recurrent_to_input_weights_ledger=*/nullptr,
          bw_recurrent_to_forget_weights,
          /*recurrent_to_forget_weights_ledger=*/nullptr,
          bw_recurrent_to_cell_weights,
          /*recurrent_to_cell_weights_ledger=*/nullptr,
          bw_recurrent_to_output_weights,
          /*recurrent_to_output_weights_ledger=*/nullptr,
bw_cell_to_input_weights, bw_cell_to_forget_weights,
bw_cell_to_output_weights,
          /*input_layer_norm_coefficients=*/nullptr,
          /*forget_layer_norm_coefficients=*/nullptr,
          /*cell_layer_norm_coefficients=*/nullptr,
          /*output_layer_norm_coefficients=*/nullptr, real_aux_input,
bw_aux_input_to_input_weights, bw_aux_input_to_forget_weights,
bw_aux_input_to_cell_weights, bw_aux_input_to_output_weights,
bw_input_gate_bias, bw_forget_gate_bias, bw_cell_gate_bias,
bw_output_gate_bias, bw_projection_weights,
          /*projection_weights_ledger=*/nullptr, bw_projection_bias,
&lstm_params,
          /*forward_sequence=*/false, time_major, bw_output_offset,
bw_scratch_buffer, GetTemporary(context, node, kInputScalingFactors),
GetTemporary(context, node, kAuxInputScalingFactors),
GetTemporary(context, node, kOutputStateScalingFactors),
prod_scaling_factors, recovered_cell_weights, input_quantized,
aux_input_quantized, bw_activation_state_quantized,
bw_cell_state_quantized, bw_activation_state, bw_cell_state,
accum_scratch, actual_bw_output,
GetTemporary(context, node, kInputZeroPoints),
GetTemporary(context, node, kAuxInputZeroPoints),
GetTemporary(context, node, kOutputStateZeroPoints), bw_row_sums,
bw_row_sums_size, &op_data->compute_bw_row_sums,
          /*recurrent_to_input_is_diag=*/false,
          /*recurrent_to_forget_is_diag=*/false,
          /*recurrent_to_cell_is_diag=*/false,
          /*recurrent_to_output_is_diag=*/false,
CpuBackendContext::GetFromContext(context));
TF_LITE_ENSURE_OK(context, bw_pass_status);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s is not currently supported.",
TfLiteTypeGetName(fw_input_to_output_weights->type));
return kTfLiteError;
}
}
}  // namespace bidirectional_sequence_lstm
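// Registration hook looked up by the builtin op resolver.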
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_LSTM() {
static TfLiteRegistration r = {
bidirectional_sequence_lstm::Init, bidirectional_sequence_lstm::Free,
bidirectional_sequence_lstm::Prepare, bidirectional_sequence_lstm::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
}  // namespace tflite
|
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
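// Test harness that wires up all 48 inputs of BIDIRECTIONAL_SEQUENCE_LSTM
// (forward and backward cells plus optional CIFG, peephole, projection and
// auxiliary-input variants) so each test only populates weights and data.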
class BidirectionalLSTMOpModel : public SingleOpModel {
public:
BidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output,
int sequence_length, bool use_cifg,
bool use_peephole, bool use_projection_weights,
bool use_projection_bias, bool merge_outputs,
bool use_aux_input, float cell_clip, float proj_clip,
bool quantize_weights, bool time_major,
const std::vector<std::vector<int>>& input_shapes,
bool asymmetric_quantize_inputs = false)
: n_batch_(n_batch),
n_input_(n_input),
n_fw_cell_(n_cell),
n_bw_cell_(n_cell),
n_fw_output_(n_output),
n_bw_output_(n_output),
sequence_length_(sequence_length),
quantize_weights_(quantize_weights) {
input_ = AddInput(TensorType_FLOAT32);
const auto weight_type =
quantize_weights_ ? TensorType_UINT8 : TensorType_FLOAT32;
if (use_cifg) {
fw_input_to_input_weights_ = AddNullInput();
} else {
fw_input_to_input_weights_ = AddInput(weight_type);
}
fw_input_to_forget_weights_ = AddInput(weight_type);
fw_input_to_cell_weights_ = AddInput(weight_type);
fw_input_to_output_weights_ = AddInput(weight_type);
if (use_cifg) {
fw_recurrent_to_input_weights_ = AddNullInput();
} else {
fw_recurrent_to_input_weights_ = AddInput(weight_type);
}
fw_recurrent_to_forget_weights_ = AddInput(weight_type);
fw_recurrent_to_cell_weights_ = AddInput(weight_type);
fw_recurrent_to_output_weights_ = AddInput(weight_type);
if (use_peephole) {
if (use_cifg) {
fw_cell_to_input_weights_ = AddNullInput();
} else {
fw_cell_to_input_weights_ = AddInput(weight_type);
}
fw_cell_to_forget_weights_ = AddInput(weight_type);
fw_cell_to_output_weights_ = AddInput(weight_type);
} else {
fw_cell_to_input_weights_ = AddNullInput();
fw_cell_to_forget_weights_ = AddNullInput();
fw_cell_to_output_weights_ = AddNullInput();
}
if (use_cifg) {
fw_input_gate_bias_ = AddNullInput();
} else {
fw_input_gate_bias_ = AddInput(TensorType_FLOAT32);
}
fw_forget_gate_bias_ = AddInput(TensorType_FLOAT32);
fw_cell_gate_bias_ = AddInput(TensorType_FLOAT32);
fw_output_gate_bias_ = AddInput(TensorType_FLOAT32);
if (use_projection_weights) {
fw_projection_weights_ = AddInput(TensorType_FLOAT32);
if (use_projection_bias) {
fw_projection_bias_ = AddInput(TensorType_FLOAT32);
} else {
fw_projection_bias_ = AddNullInput();
}
} else {
fw_projection_weights_ = AddNullInput();
fw_projection_bias_ = AddNullInput();
}
if (use_cifg) {
bw_input_to_input_weights_ = AddNullInput();
} else {
bw_input_to_input_weights_ = AddInput(weight_type);
}
bw_input_to_forget_weights_ = AddInput(weight_type);
bw_input_to_cell_weights_ = AddInput(weight_type);
bw_input_to_output_weights_ = AddInput(weight_type);
if (use_cifg) {
bw_recurrent_to_input_weights_ = AddNullInput();
} else {
bw_recurrent_to_input_weights_ = AddInput(weight_type);
}
bw_recurrent_to_forget_weights_ = AddInput(weight_type);
bw_recurrent_to_cell_weights_ = AddInput(weight_type);
bw_recurrent_to_output_weights_ = AddInput(weight_type);
if (use_peephole) {
if (use_cifg) {
bw_cell_to_input_weights_ = AddNullInput();
} else {
bw_cell_to_input_weights_ = AddInput(weight_type);
}
bw_cell_to_forget_weights_ = AddInput(weight_type);
bw_cell_to_output_weights_ = AddInput(weight_type);
} else {
bw_cell_to_input_weights_ = AddNullInput();
bw_cell_to_forget_weights_ = AddNullInput();
bw_cell_to_output_weights_ = AddNullInput();
}
if (use_cifg) {
bw_input_gate_bias_ = AddNullInput();
} else {
bw_input_gate_bias_ = AddInput(TensorType_FLOAT32);
}
bw_forget_gate_bias_ = AddInput(TensorType_FLOAT32);
bw_cell_gate_bias_ = AddInput(TensorType_FLOAT32);
bw_output_gate_bias_ = AddInput(TensorType_FLOAT32);
if (use_projection_weights) {
bw_projection_weights_ = AddInput(weight_type);
if (use_projection_bias) {
bw_projection_bias_ = AddInput(TensorType_FLOAT32);
} else {
bw_projection_bias_ = AddNullInput();
}
} else {
bw_projection_weights_ = AddNullInput();
bw_projection_bias_ = AddNullInput();
}
fw_input_activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_fw_output_ * n_batch_}});
fw_input_cell_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_fw_cell_ * n_batch_}});
bw_input_activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_bw_output_ * n_batch_}});
bw_input_cell_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_bw_cell_ * n_batch_}});
fw_output_ = AddOutput(TensorType_FLOAT32);
if (!merge_outputs) {
bw_output_ = AddOutput(TensorType_FLOAT32);
}
if (use_aux_input) {
aux_input_ = AddInput(TensorType_FLOAT32);
fw_aux_input_to_input_weights_ = AddInput(weight_type);
fw_aux_input_to_forget_weights_ = AddInput(weight_type);
fw_aux_input_to_cell_weights_ = AddInput(weight_type);
fw_aux_input_to_output_weights_ = AddInput(weight_type);
bw_aux_input_to_input_weights_ = AddInput(weight_type);
bw_aux_input_to_forget_weights_ = AddInput(weight_type);
bw_aux_input_to_cell_weights_ = AddInput(weight_type);
bw_aux_input_to_output_weights_ = AddInput(weight_type);
} else {
aux_input_ = AddNullInput();
fw_aux_input_to_input_weights_ = AddNullInput();
fw_aux_input_to_forget_weights_ = AddNullInput();
fw_aux_input_to_cell_weights_ = AddNullInput();
fw_aux_input_to_output_weights_ = AddNullInput();
bw_aux_input_to_input_weights_ = AddNullInput();
bw_aux_input_to_forget_weights_ = AddNullInput();
bw_aux_input_to_cell_weights_ = AddNullInput();
bw_aux_input_to_output_weights_ = AddNullInput();
}
SetBuiltinOp(
BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
BuiltinOptions_BidirectionalSequenceLSTMOptions,
CreateBidirectionalSequenceLSTMOptions(
builder_, ActivationFunctionType_TANH, cell_clip, proj_clip,
merge_outputs, time_major, asymmetric_quantize_inputs)
.Union());
BuildInterpreter(input_shapes);
}
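  // Weight setters route through this so hybrid runs exercise symmetric
  // quantization; bias setters below always populate plain float tensors.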
void PopulateWeightTensor(int tensor_id, const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(tensor_id, f);
} else {
PopulateTensor(tensor_id, f);
}
}
void SetInputToInputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_input_to_input_weights_, f);
PopulateWeightTensor(bw_input_to_input_weights_, f);
}
void SetInputToForgetWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_input_to_forget_weights_, f);
PopulateWeightTensor(bw_input_to_forget_weights_, f);
}
void SetInputToCellWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_input_to_cell_weights_, f);
PopulateWeightTensor(bw_input_to_cell_weights_, f);
}
void SetInputToOutputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_input_to_output_weights_, f);
PopulateWeightTensor(bw_input_to_output_weights_, f);
}
void SetRecurrentToInputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_recurrent_to_input_weights_, f);
PopulateWeightTensor(bw_recurrent_to_input_weights_, f);
}
void SetRecurrentToForgetWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_recurrent_to_forget_weights_, f);
PopulateWeightTensor(bw_recurrent_to_forget_weights_, f);
}
void SetRecurrentToCellWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_recurrent_to_cell_weights_, f);
PopulateWeightTensor(bw_recurrent_to_cell_weights_, f);
}
void SetRecurrentToOutputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_recurrent_to_output_weights_, f);
PopulateWeightTensor(bw_recurrent_to_output_weights_, f);
}
void SetCellToInputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_cell_to_input_weights_, f);
PopulateWeightTensor(bw_cell_to_input_weights_, f);
}
void SetCellToForgetWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_cell_to_forget_weights_, f);
PopulateWeightTensor(bw_cell_to_forget_weights_, f);
}
void SetCellToOutputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_cell_to_output_weights_, f);
PopulateWeightTensor(bw_cell_to_output_weights_, f);
}
void SetInputGateBias(const std::vector<float>& f) {
PopulateTensor(fw_input_gate_bias_, f);
PopulateTensor(bw_input_gate_bias_, f);
}
void SetForgetGateBias(const std::vector<float>& f) {
PopulateTensor(fw_forget_gate_bias_, f);
PopulateTensor(bw_forget_gate_bias_, f);
}
void SetCellBias(const std::vector<float>& f) {
PopulateTensor(fw_cell_gate_bias_, f);
PopulateTensor(bw_cell_gate_bias_, f);
}
void SetOutputGateBias(const std::vector<float>& f) {
PopulateTensor(fw_output_gate_bias_, f);
PopulateTensor(bw_output_gate_bias_, f);
}
void SetProjectionWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_projection_weights_, f);
PopulateWeightTensor(bw_projection_weights_, f);
}
void SetProjectionBias(const std::vector<float>& f) {
PopulateTensor(fw_projection_bias_, f);
PopulateTensor(bw_projection_bias_, f);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
void SetAuxInput(int offset, float* begin, float* end) {
PopulateTensor(aux_input_, offset, begin, end);
}
void SetAuxInputToInputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_aux_input_to_input_weights_, f);
PopulateWeightTensor(bw_aux_input_to_input_weights_, f);
}
void SetAuxInputToForgetWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_aux_input_to_forget_weights_, f);
PopulateWeightTensor(bw_aux_input_to_forget_weights_, f);
}
void SetAuxInputToCellWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_aux_input_to_cell_weights_, f);
PopulateWeightTensor(bw_aux_input_to_cell_weights_, f);
}
void SetAuxInputToOutputWeights(const std::vector<float>& f) {
PopulateWeightTensor(fw_aux_input_to_output_weights_, f);
PopulateWeightTensor(bw_aux_input_to_output_weights_, f);
}
std::vector<float> GetFwOutput() { return ExtractVector<float>(fw_output_); }
std::vector<float> GetBwOutput() { return ExtractVector<float>(bw_output_); }
int num_inputs() { return n_input_; }
int num_fw_outputs() { return n_fw_output_; }
int num_bw_outputs() { return n_bw_output_; }
int num_fw_cells() { return n_fw_cell_; }
int num_bw_cells() { return n_bw_cell_; }
int num_batches() { return n_batch_; }
int sequence_length() { return sequence_length_; }
private:
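  // Tensor indices, in the op's input order.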
int input_;
int fw_input_to_input_weights_;
int fw_input_to_forget_weights_;
int fw_input_to_cell_weights_;
int fw_input_to_output_weights_;
int fw_recurrent_to_input_weights_;
int fw_recurrent_to_forget_weights_;
int fw_recurrent_to_cell_weights_;
int fw_recurrent_to_output_weights_;
int fw_cell_to_input_weights_;
int fw_cell_to_forget_weights_;
int fw_cell_to_output_weights_;
int fw_input_gate_bias_;
int fw_forget_gate_bias_;
int fw_cell_gate_bias_;
int fw_output_gate_bias_;
int fw_projection_weights_;
int fw_projection_bias_;
int bw_input_to_input_weights_;
int bw_input_to_forget_weights_;
int bw_input_to_cell_weights_;
int bw_input_to_output_weights_;
int bw_recurrent_to_input_weights_;
int bw_recurrent_to_forget_weights_;
int bw_recurrent_to_cell_weights_;
int bw_recurrent_to_output_weights_;
int bw_cell_to_input_weights_;
int bw_cell_to_forget_weights_;
int bw_cell_to_output_weights_;
int bw_input_gate_bias_;
int bw_forget_gate_bias_;
int bw_cell_gate_bias_;
int bw_output_gate_bias_;
int bw_projection_weights_;
int bw_projection_bias_;
int fw_input_activation_state_;
int fw_input_cell_state_;
int bw_input_activation_state_;
int bw_input_cell_state_;
int fw_output_;
int bw_output_;
int aux_input_;
int fw_aux_input_to_input_weights_;
int fw_aux_input_to_forget_weights_;
int fw_aux_input_to_cell_weights_;
int fw_aux_input_to_output_weights_;
int bw_aux_input_to_input_weights_;
int bw_aux_input_to_forget_weights_;
int bw_aux_input_to_cell_weights_;
int bw_aux_input_to_output_weights_;
int n_batch_;
int n_input_;
int n_fw_cell_;
int n_bw_cell_;
int n_fw_output_;
int n_bw_output_;
int sequence_length_;
bool quantize_weights_;
};
class LSTMOpTest
: public ::testing::TestWithParam<::testing::tuple<bool, bool>> {};
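// Each test runs over (quantize_weights, asymmetric_quantize_inputs) pairs.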
INSTANTIATE_TEST_SUITE_P(QuantizationOrNot, LSTMOpTest,
::testing::Combine(
::testing::Bool(),
::testing::Bool()));
TEST_P(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClipping) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false,
      /*use_peephole=*/false, /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*merge_outputs=*/false,
      /*use_aux_input=*/false, /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, quantize_weights, /*time_major=*/true,
      {
          {sequence_length, n_batch, n_input},  // input tensor

          // Forward cell
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          // Backward cell (mirrors the forward cell)
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          {n_batch, n_output},  // fw_activation_state tensor
          {n_batch, n_cell},    // fw_cell_state tensor
          {n_batch, n_output},  // bw_activation_state tensor
          {n_batch, n_cell},    // bw_cell_state tensor

          {sequence_length, n_batch, 0},  // aux_input tensor
          {0},                            // fw_aux_input_to_input tensor
          {0},                            // fw_aux_input_to_forget tensor
          {0},                            // fw_aux_input_to_cell tensor
          {0},                            // fw_aux_input_to_output tensor
          {0},                            // bw_aux_input_to_input tensor
          {0},                            // bw_aux_input_to_forget tensor
          {0},                            // bw_aux_input_to_cell tensor
          {0},                            // bw_aux_input_to_output tensor
      },
asymmetric_quantize_inputs);
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
lstm.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
lstm.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
lstm.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077, -0.1556896,
0.19487578});
lstm.SetInputGateBias({0., 0., 0., 0.});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToInputWeights(
{-0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
-0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
-0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296});
lstm.SetRecurrentToCellWeights(
{-0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
-0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064});
lstm.SetRecurrentToForgetWeights(
{-0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
-0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004});
lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136});
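  // Inputs and golden outputs are time-major: sequence_length x n_batch x
  // n_input (resp. n_output), flattened.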
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
static float lstm_fw_golden_output[] = {
-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792};
static float lstm_bw_golden_output[] = {
-0.0806187, 0.139077, 0.400476, -0.197842, -0.0332076, 0.123838,
0.309777, -0.17621, -0.0490733, 0.0739237, 0.067706, -0.0208124};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
float* fw_golden_start = lstm_fw_golden_output;
float* fw_golden_end =
fw_golden_start + lstm.num_fw_outputs() * lstm.sequence_length();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), fw_golden_start, fw_golden_end);
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(
ArrayFloatNear(fw_expected, quantize_weights ? 1e-2 : 1e-5)));
float* bw_golden_start = lstm_bw_golden_output;
float* bw_golden_end =
bw_golden_start + lstm.num_bw_outputs() * lstm.sequence_length();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), bw_golden_start, bw_golden_end);
EXPECT_THAT(lstm.GetBwOutput(),
ElementsAreArray(
ArrayFloatNear(bw_expected, quantize_weights ? 1e-2 : 1e-5)));
}
TEST_P(LSTMOpTest, BlackBoxTestMergedOutput) {
const int n_batch = 2;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false,
      /*use_peephole=*/false, /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*merge_outputs=*/true,
      /*use_aux_input=*/false, /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, quantize_weights, /*time_major=*/true,
      {
          {sequence_length, n_batch, n_input},  // input tensor

          // Forward cell
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          // Backward cell (mirrors the forward cell)
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          {n_batch, n_output},  // fw_activation_state tensor
          {n_batch, n_cell},    // fw_cell_state tensor
          {n_batch, n_output},  // bw_activation_state tensor
          {n_batch, n_cell},    // bw_cell_state tensor

          {sequence_length, n_batch, 0},  // aux_input tensor
          {0},                            // fw_aux_input_to_input tensor
          {0},                            // fw_aux_input_to_forget tensor
          {0},                            // fw_aux_input_to_cell tensor
          {0},                            // fw_aux_input_to_output tensor
          {0},                            // bw_aux_input_to_input tensor
          {0},                            // bw_aux_input_to_forget tensor
          {0},                            // bw_aux_input_to_cell tensor
          {0},                            // bw_aux_input_to_output tensor
      },
asymmetric_quantize_inputs);
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
lstm.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
lstm.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
lstm.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077, -0.1556896,
0.19487578});
lstm.SetInputGateBias({0., 0., 0., 0.});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToInputWeights(
{-0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
-0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
-0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296});
lstm.SetRecurrentToCellWeights(
{-0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
-0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064});
lstm.SetRecurrentToForgetWeights(
{-0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
-0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004});
lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136});
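  // n_batch == 2 with identical rows, so each timestep's goldens repeat twice.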
static float lstm_input[] = {2., 3., 2., 3., 3., 4., 3., 4., 1., 1., 1., 1.};
static float lstm_fw_golden_output[] = {
-0.02973187, 0.1229473, 0.20885126, -0.15358765, -0.02973187,
0.1229473, 0.20885126, -0.15358765, -0.03716109, 0.12507336,
0.41193449, -0.20860538, -0.03716109, 0.12507336, 0.41193449,
-0.20860538, -0.15053082, 0.09120187, 0.24278517, -0.12222792,
-0.15053082, 0.09120187, 0.24278517, -0.12222792};
static float lstm_bw_golden_output[] = {
-0.0806187, 0.139077, 0.400476, -0.197842, -0.0806187, 0.139077,
0.400476, -0.197842, -0.0332076, 0.123838, 0.309777, -0.17621,
-0.0332076, 0.123838, 0.309777, -0.17621, -0.0490733, 0.0739237,
0.067706, -0.0208124, -0.0490733, 0.0739237, 0.067706, -0.0208124};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.num_batches() *
lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
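  // With merge_outputs the op concatenates fw and bw results along the last
  // dimension, so interleave the two goldens per (timestep, batch) row.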
std::vector<float> merged_expected;
for (int k = 0; k < lstm.sequence_length() * lstm.num_batches(); k++) {
merged_expected.insert(
merged_expected.end(),
lstm_fw_golden_output + k * lstm.num_fw_outputs(),
lstm_fw_golden_output + (k + 1) * lstm.num_fw_outputs());
merged_expected.insert(
merged_expected.end(),
lstm_bw_golden_output + k * lstm.num_bw_outputs(),
lstm_bw_golden_output + (k + 1) * lstm.num_bw_outputs());
}
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(merged_expected,
quantize_weights ? 1e-2 : 1e-5)));
}
TEST(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClippingReverse) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
BidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false,
      /*use_peephole=*/false, /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*merge_outputs=*/false,
      /*use_aux_input=*/false, /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, /*quantize_weights=*/false, /*time_major=*/true,
      {
          {sequence_length, n_batch, n_input},  // input tensor

          // Forward cell
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          // Backward cell (mirrors the forward cell)
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},  // cell_to_input_weight tensor
          {0},  // cell_to_forget_weight tensor
          {0},  // cell_to_output_weight tensor

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          {n_batch, n_output},  // fw_activation_state tensor
          {n_batch, n_cell},    // fw_cell_state tensor
          {n_batch, n_output},  // bw_activation_state tensor
          {n_batch, n_cell},    // bw_cell_state tensor

          {sequence_length, n_batch, 0},  // aux_input tensor
          {0},                            // fw_aux_input_to_input tensor
          {0},                            // fw_aux_input_to_forget tensor
          {0},                            // fw_aux_input_to_cell tensor
          {0},                            // fw_aux_input_to_output tensor
          {0},                            // bw_aux_input_to_input tensor
          {0},                            // bw_aux_input_to_forget tensor
          {0},                            // bw_aux_input_to_cell tensor
          {0},                            // bw_aux_input_to_output tensor
      });
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
lstm.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
lstm.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
lstm.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077, -0.1556896,
0.19487578});
lstm.SetInputGateBias({0., 0., 0., 0.});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToInputWeights(
{-0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
-0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
-0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296});
lstm.SetRecurrentToCellWeights(
{-0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
-0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064});
lstm.SetRecurrentToForgetWeights(
{-0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
-0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004});
lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136});
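  // The input sequence is fed time-reversed, so the backward pass sees the
  // original ordering: its output should match the (re-reversed) forward
  // goldens, and the forward output should match the backward goldens.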
static float lstm_input_reversed[] = {1., 1., 3., 4., 2., 3.};
static float lstm_fw_golden_output[] = {
-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792};
static float lstm_bw_golden_output[] = {
-0.0806187, 0.139077, 0.400476, -0.197842, -0.0332076, 0.123838,
0.309777, -0.17621, -0.0490733, 0.0739237, 0.067706, -0.0208124};
float* batch0_start = lstm_input_reversed;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int s = 0; s < lstm.sequence_length(); s++) {
float* fw_golden_start = lstm_fw_golden_output + s * lstm.num_fw_outputs();
float* fw_golden_end = fw_golden_start + lstm.num_fw_outputs();
fw_expected.insert(fw_expected.begin(), fw_golden_start, fw_golden_end);
}
EXPECT_THAT(lstm.GetBwOutput(),
ElementsAreArray(ArrayFloatNear(fw_expected)));
std::vector<float> bw_expected;
for (int s = 0; s < lstm.sequence_length(); s++) {
float* bw_golden_start = lstm_bw_golden_output + s * lstm.num_bw_outputs();
float* bw_golden_end = bw_golden_start + lstm.num_bw_outputs();
bw_expected.insert(bw_expected.begin(), bw_golden_start, bw_golden_end);
}
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(LSTMOpTest, BlackBoxTestWithCifgWithPeepholeNoProjectionNoClipping) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
BidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/true,
      /*use_peephole=*/true, /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*merge_outputs=*/false,
      /*use_aux_input=*/false, /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, /*quantize_weights=*/false, /*time_major=*/true,
      {
          {sequence_length, n_batch, n_input},  // input tensor

          // Forward cell (CIFG: no input-gate weights)
          {0, 0},             // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {0, 0},              // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},       // cell_to_input_weight tensor
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)

          {0},       // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          // Backward cell (mirrors the forward cell)
          {0, 0},             // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {0, 0},              // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},       // cell_to_input_weight tensor
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)

          {0},       // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          {n_batch, n_output},  // fw_activation_state tensor
          {n_batch, n_cell},    // fw_cell_state tensor
          {n_batch, n_output},  // bw_activation_state tensor
          {n_batch, n_cell},    // bw_cell_state tensor

          {sequence_length, n_batch, 0},  // aux_input tensor
          {0},                            // fw_aux_input_to_input tensor
          {0},                            // fw_aux_input_to_forget tensor
          {0},                            // fw_aux_input_to_cell tensor
          {0},                            // fw_aux_input_to_output tensor
          {0},                            // bw_aux_input_to_input tensor
          {0},                            // bw_aux_input_to_forget tensor
          {0},                            // bw_aux_input_to_cell tensor
          {0},                            // bw_aux_input_to_output tensor
      });
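  // With CIFG the input gate is derived from the forget gate, so no
  // input-gate weights or bias are populated below.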
lstm.SetInputToCellWeights({-0.49770179, -0.27711356, -0.09624726, 0.05100781,
0.04717243, 0.48944736, -0.38535351,
-0.17212132});
lstm.SetInputToForgetWeights({-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698, 0.24407166,
0.33826375});
lstm.SetInputToOutputWeights({0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToCellWeights(
{0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711,
0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004,
0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288,
0.21193194});
lstm.SetRecurrentToForgetWeights(
{-0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827,
0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795,
-0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349});
lstm.SetRecurrentToOutputWeights(
{0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908,
-0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987});
lstm.SetCellToForgetWeights(
{0.47485286, -0.51955009, -0.24458408, 0.31544167});
lstm.SetCellToOutputWeights(
{-0.17135078, 0.82760304, 0.85573703, -0.77109635});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
static float lstm_fw_golden_output[] = {
-0.36444446, -0.00352185, 0.12886585, -0.05163646,
-0.42312205, -0.01218222, 0.24201041, -0.08124574,
-0.358325, -0.04621704, 0.21641694, -0.06471302};
static float lstm_bw_golden_output[] = {
-0.401685, -0.0232794, 0.288642, -0.123074, -0.42915, -0.00871577,
0.20912, -0.103567, -0.166398, -0.00486649, 0.0697471, -0.0537578};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
float* fw_golden_start = lstm_fw_golden_output;
float* fw_golden_end =
fw_golden_start + lstm.num_fw_outputs() * lstm.sequence_length();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), fw_golden_start, fw_golden_end);
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(fw_expected)));
float* bw_golden_start = lstm_bw_golden_output;
float* bw_golden_end =
bw_golden_start + lstm.num_bw_outputs() * lstm.sequence_length();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), bw_golden_start, bw_golden_end);
EXPECT_THAT(lstm.GetBwOutput(),
ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(LSTMOpTest,
BlackBoxTestWithCifgWithPeepholeNoProjectionNoClippingReversed) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
BidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/true,
      /*use_peephole=*/true, /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*merge_outputs=*/false,
      /*use_aux_input=*/false, /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, /*quantize_weights=*/false, /*time_major=*/true,
      {
          {sequence_length, n_batch, n_input},  // input tensor

          // Forward cell (CIFG: no input-gate weights)
          {0, 0},             // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {0, 0},              // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},       // cell_to_input_weight tensor
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)

          {0},       // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          // Backward cell (mirrors the forward cell)
          {0, 0},             // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {0, 0},              // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {0},       // cell_to_input_weight tensor
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)

          {0},       // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {0, 0},  // projection_weight tensor
          {0},     // projection_bias tensor

          {n_batch, n_output},  // fw_activation_state tensor
          {n_batch, n_cell},    // fw_cell_state tensor
          {n_batch, n_output},  // bw_activation_state tensor
          {n_batch, n_cell},    // bw_cell_state tensor

          {sequence_length, n_batch, 0},  // aux_input tensor
          {0},                            // fw_aux_input_to_input tensor
          {0},                            // fw_aux_input_to_forget tensor
          {0},                            // fw_aux_input_to_cell tensor
          {0},                            // fw_aux_input_to_output tensor
          {0},                            // bw_aux_input_to_input tensor
          {0},                            // bw_aux_input_to_forget tensor
          {0},                            // bw_aux_input_to_cell tensor
          {0},                            // bw_aux_input_to_output tensor
      });
lstm.SetInputToCellWeights({-0.49770179, -0.27711356, -0.09624726, 0.05100781,
0.04717243, 0.48944736, -0.38535351,
-0.17212132});
lstm.SetInputToForgetWeights({-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698, 0.24407166,
0.33826375});
lstm.SetInputToOutputWeights({0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToCellWeights(
{0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711,
0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004,
0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288,
0.21193194});
lstm.SetRecurrentToForgetWeights(
{-0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827,
0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795,
-0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349});
lstm.SetRecurrentToOutputWeights(
{0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908,
-0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987});
lstm.SetCellToForgetWeights(
{0.47485286, -0.51955009, -0.24458408, 0.31544167});
lstm.SetCellToOutputWeights(
{-0.17135078, 0.82760304, 0.85573703, -0.77109635});
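  // Same CIFG + peephole network as above, but driven with a time-reversed
  // input, so the fw/bw expectations are swapped and re-reversed below.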
static float lstm_input_reversed[] = {1., 1., 3., 4., 2., 3.};
static float lstm_fw_golden_output[] = {
-0.36444446, -0.00352185, 0.12886585, -0.05163646,
-0.42312205, -0.01218222, 0.24201041, -0.08124574,
-0.358325, -0.04621704, 0.21641694, -0.06471302};
static float lstm_bw_golden_output[] = {
-0.401685, -0.0232794, 0.288642, -0.123074, -0.42915, -0.00871577,
0.20912, -0.103567, -0.166398, -0.00486649, 0.0697471, -0.0537578};
float* batch0_start = lstm_input_reversed;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int s = 0; s < lstm.sequence_length(); s++) {
float* fw_golden_start = lstm_fw_golden_output + s * lstm.num_fw_outputs();
float* fw_golden_end = fw_golden_start + lstm.num_fw_outputs();
fw_expected.insert(fw_expected.begin(), fw_golden_start, fw_golden_end);
}
EXPECT_THAT(lstm.GetBwOutput(),
ElementsAreArray(ArrayFloatNear(fw_expected)));
std::vector<float> bw_expected;
for (int s = 0; s < lstm.sequence_length(); s++) {
float* bw_golden_start = lstm_bw_golden_output + s * lstm.num_bw_outputs();
float* bw_golden_end = bw_golden_start + lstm.num_bw_outputs();
bw_expected.insert(bw_expected.begin(), bw_golden_start, bw_golden_end);
}
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(LSTMOpTest, BlackBoxTestWithPeepholeWithProjectionNoClipping) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
BidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false,
      /*use_peephole=*/true, /*use_projection_weights=*/true,
      /*use_projection_bias=*/false, /*merge_outputs=*/false,
      /*use_aux_input=*/false, /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, /*quantize_weights=*/false, /*time_major=*/true,
      {
          {sequence_length, n_batch, n_input},  // input tensor

          // Forward cell
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {n_cell},  // cell_to_input_weight tensor (peephole)
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {n_output, n_cell},  // projection_weight tensor
          {0},                 // projection_bias tensor

          // Backward cell (mirrors the forward cell)
          {n_cell, n_input},  // input_to_input_weight tensor
          {n_cell, n_input},  // input_to_forget_weight tensor
          {n_cell, n_input},  // input_to_cell_weight tensor
          {n_cell, n_input},  // input_to_output_weight tensor

          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor

          {n_cell},  // cell_to_input_weight tensor (peephole)
          {n_cell},  // cell_to_forget_weight tensor (peephole)
          {n_cell},  // cell_to_output_weight tensor (peephole)

          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor

          {n_output, n_cell},  // projection_weight tensor
          {0},                 // projection_bias tensor

          {n_batch, n_output},  // fw_activation_state tensor
          {n_batch, n_cell},    // fw_cell_state tensor
          {n_batch, n_output},  // bw_activation_state tensor
          {n_batch, n_cell},    // bw_cell_state tensor

          {sequence_length, n_batch, 0},  // aux_input tensor
          {0},                            // fw_aux_input_to_input tensor
          {0},                            // fw_aux_input_to_forget tensor
          {0},                            // fw_aux_input_to_cell tensor
          {0},                            // fw_aux_input_to_output tensor
          {0},                            // bw_aux_input_to_input tensor
          {0},                            // bw_aux_input_to_forget tensor
          {0},                            // bw_aux_input_to_cell tensor
          {0},                            // bw_aux_input_to_output tensor
      });
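  // Full LSTM variant: peephole connections plus a projection layer mapping
  // n_cell = 20 down to n_output = 16.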
lstm.SetInputToInputWeights(
{0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677});
lstm.SetInputToForgetWeights(
{-0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
-0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
-0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
-0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
-0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
-0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
-0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
-0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
-0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496});
lstm.SetInputToCellWeights(
{-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042});
lstm.SetInputToOutputWeights(
{-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956});
lstm.SetInputGateBias(
{0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
-0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
-0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196});
lstm.SetForgetGateBias({0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739});
lstm.SetCellBias({-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027});
lstm.SetOutputGateBias(
{0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
-0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877});
lstm.SetRecurrentToInputWeights(
{-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447});
lstm.SetRecurrentToForgetWeights(
{-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027});
lstm.SetRecurrentToCellWeights(
{-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404});
lstm.SetRecurrentToOutputWeights({
0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
-0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
-0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
-0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
-0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
-0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
-0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
-0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
-0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
-0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
-0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
-0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
-0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
-0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
-0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
-0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
-0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
-0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
-0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
-0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
-0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
-0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
-0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
-0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
-0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
-0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
-0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621,
});
lstm.SetCellToInputWeights(
{0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175});
lstm.SetCellToForgetWeights(
{-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355});
lstm.SetCellToOutputWeights(
{0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733});
lstm.SetProjectionWeights(
{-0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
-0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
-0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
-0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
-0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
-0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
-0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
-0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
-0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
-0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
-0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
-0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
-0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
-0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
-0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
-0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
-0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
-0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
-0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
-0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
-0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
-0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
-0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656});
static float lstm_input[][20] = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058, 0.596268, 0.998386,
0.568695, 0.864524, 0.571277, 0.073204, 0.296072, 0.743333, 0.069199,
0.045348, 0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181, 0.642421, 0.524260,
0.134799, 0.003639, 0.162482, 0.640394, 0.930399, 0.050782, 0.432485,
0.988078, 0.082922, 0.563329, 0.865614, 0.333232, 0.259916}};
static float lstm_fw_golden_output[][64] = {
{
-0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
-0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
-0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
-0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
-0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
-0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
-0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169},
{
-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
-0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
-0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
-0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
-0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
-0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009}};
static float lstm_combined_golden_output[][64] = {
{-0.022014, 0.073544, -0.002235, 0.040068, -0.037136, -0.052788,
0.075325, -0.029378, 0.024298, -0.07733, -0.030674, -0.060229,
0.040599, 0.011608, 0.042005, 0.045977, -0.039225, 0.076294,
0.000735, 0.032852, -0.069869, -0.053312, 0.073527, -0.028136,
0.021585, -0.102679, -0.004327, -0.043304, 0.072861, 0.027077,
0.034558, 0.068292, -0.036292, 0.069832, -0.003032, 0.053829,
-0.043821, -0.072713, 0.085029, -0.040374, 0.020014, -0.104521,
-0.034504, -0.059759, 0.062569, 0.025652, 0.049306, 0.061189,
-0.025146, 0.079643, -0.005188, 0.033080, -0.048079, -0.048082,
0.069369, -0.028900, 0.024572, -0.077547, -0.022517, -0.054477,
0.038857, 0.013336, 0.043234, 0.044788},
{-0.039186, 0.070792, -0.005913, 0.02642, -0.068274, -0.05022,
0.061444, -0.031241, 0.014996, -0.094544, -0.004146, -0.03464,
0.058981, 0.026097, 0.039781, 0.058408, -0.031887, 0.069252,
0.00576, 0.054062, -0.042801, -0.059974, 0.085272, -0.034453,
0.026097, -0.0959, -0.031164, -0.058699, 0.06839, 0.020512,
0.044727, 0.063609, -0.039863, 0.084819, -0.003909, 0.028666,
-0.075677, -0.045125, 0.070379, -0.033895, 0.022111, -0.097184,
-0.004921, -0.040851, 0.062316, 0.017435, 0.041437, 0.064568,
-0.039656, 0.060726, -0.003402, 0.036854, -0.056503, -0.058554,
0.068588, -0.034879, 0.01352, -0.09962, -0.01434, -0.039505,
0.065133, 0.024321, 0.038473, 0.062438}};
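// Feed the input time-major: at each time step i, batch 0's n_input values
// are written first, immediately followed by batch 1's.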
for (int i = 0; i < lstm.sequence_length(); i++) {
float* batch0_start = lstm_input[0] + i * lstm.num_inputs();
float* batch0_end = batch0_start + lstm.num_inputs();
lstm.SetInput(2 * i * lstm.num_inputs(), batch0_start, batch0_end);
float* batch1_start = lstm_input[1] + i * lstm.num_inputs();
float* batch1_end = batch1_start + lstm.num_inputs();
lstm.SetInput((2 * i + 1) * lstm.num_inputs(), batch1_start, batch1_end);
}
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
std::vector<float> expected;
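// The golden data is stored per batch, so interleave the two batches per
// time step to match the time-major forward output.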
for (int i = 0; i < lstm.sequence_length(); i++) {
float* golden_start_batch0 =
lstm_fw_golden_output[0] + i * lstm.num_fw_outputs();
float* golden_end_batch0 = golden_start_batch0 + lstm.num_fw_outputs();
float* golden_start_batch1 =
lstm_fw_golden_output[1] + i * lstm.num_fw_outputs();
float* golden_end_batch1 = golden_start_batch1 + lstm.num_fw_outputs();
expected.insert(expected.end(), golden_start_batch0, golden_end_batch0);
expected.insert(expected.end(), golden_start_batch1, golden_end_batch1);
}
EXPECT_THAT(lstm.GetFwOutput(), ElementsAreArray(ArrayFloatNear(expected)));
expected.clear();
for (int i = 0; i < lstm.sequence_length(); i++) {
float* golden_start_batch0 =
lstm_combined_golden_output[0] + i * lstm.num_fw_outputs();
float* golden_end_batch0 = golden_start_batch0 + lstm.num_fw_outputs();
float* golden_start_batch1 =
lstm_combined_golden_output[1] + i * lstm.num_fw_outputs();
float* golden_end_batch1 = golden_start_batch1 + lstm.num_fw_outputs();
expected.insert(expected.end(), golden_start_batch0, golden_end_batch0);
expected.insert(expected.end(), golden_start_batch1, golden_end_batch1);
}
std::vector<float> combined;
for (int i = 0; i < lstm.GetFwOutput().size(); ++i) {
combined.push_back(lstm.GetFwOutput()[i] + lstm.GetBwOutput()[i]);
}
EXPECT_THAT(combined, ElementsAreArray(ArrayFloatNear(expected)));
}
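// Same network weights and golden data as the test above, but with a
// batch-major ({n_batch, sequence_length, n_input}) layout, so each batch's
// whole sequence is contiguous in memory.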
TEST(LSTMOpTest, BlackBoxTestWithPeepholeWithProjectionNoClippingBatchMajor) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
BidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length, false,
true, true,
false, false,
false, 0.0,
0.0, false, false,
{
{n_batch, sequence_length, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
{n_batch, n_output},
{n_batch, n_cell},
{n_batch, sequence_length, 0},
{0},
{0},
{0},
{0},
{0},
{0},
{0},
{0},
});
lstm.SetInputToInputWeights(
{0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677});
lstm.SetInputToForgetWeights(
{-0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
-0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
-0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
-0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
-0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
-0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
-0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
-0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
-0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496});
lstm.SetInputToCellWeights(
{-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042});
lstm.SetInputToOutputWeights(
{-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956});
lstm.SetInputGateBias(
{0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
-0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
-0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196});
lstm.SetForgetGateBias({0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739});
lstm.SetCellBias({-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027});
lstm.SetOutputGateBias(
{0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
-0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877});
lstm.SetRecurrentToInputWeights(
{-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447});
lstm.SetRecurrentToForgetWeights(
{-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027});
lstm.SetRecurrentToCellWeights(
{-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404});
lstm.SetRecurrentToOutputWeights({
0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
-0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
-0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
-0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
-0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
-0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
-0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
-0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
-0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
-0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
-0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
-0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
-0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
-0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
-0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
-0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
-0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
-0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
-0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
-0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
-0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
-0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
-0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
-0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
-0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
-0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
-0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621,
});
lstm.SetCellToInputWeights(
{0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175});
lstm.SetCellToForgetWeights(
{-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355});
lstm.SetCellToOutputWeights(
{0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733});
lstm.SetProjectionWeights(
{-0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
-0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
-0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
-0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
-0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
-0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
-0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
-0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
-0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
-0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
-0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
-0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
-0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
-0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
-0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
-0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
-0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
-0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
-0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
-0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
-0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
-0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
-0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656});
static float lstm_input[][20] = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058, 0.596268, 0.998386,
0.568695, 0.864524, 0.571277, 0.073204, 0.296072, 0.743333, 0.069199,
0.045348, 0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181, 0.642421, 0.524260,
0.134799, 0.003639, 0.162482, 0.640394, 0.930399, 0.050782, 0.432485,
0.988078, 0.082922, 0.563329, 0.865614, 0.333232, 0.259916}};
static float lstm_fw_golden_output[][64] = {
{
-0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
-0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
-0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
-0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
-0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
-0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
-0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169},
{
-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
-0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
-0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
-0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
-0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
-0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009}};
static float lstm_combined_golden_output[][64] = {
{-0.022014, 0.073544, -0.002235, 0.040068, -0.037136, -0.052788,
0.075325, -0.029378, 0.024298, -0.07733, -0.030674, -0.060229,
0.040599, 0.011608, 0.042005, 0.045977, -0.039225, 0.076294,
0.000735, 0.032852, -0.069869, -0.053312, 0.073527, -0.028136,
0.021585, -0.102679, -0.004327, -0.043304, 0.072861, 0.027077,
0.034558, 0.068292, -0.036292, 0.069832, -0.003032, 0.053829,
-0.043821, -0.072713, 0.085029, -0.040374, 0.020014, -0.104521,
-0.034504, -0.059759, 0.062569, 0.025652, 0.049306, 0.061189,
-0.025146, 0.079643, -0.005188, 0.033080, -0.048079, -0.048082,
0.069369, -0.028900, 0.024572, -0.077547, -0.022517, -0.054477,
0.038857, 0.013336, 0.043234, 0.044788},
{-0.039186, 0.070792, -0.005913, 0.02642, -0.068274, -0.05022,
0.061444, -0.031241, 0.014996, -0.094544, -0.004146, -0.03464,
0.058981, 0.026097, 0.039781, 0.058408, -0.031887, 0.069252,
0.00576, 0.054062, -0.042801, -0.059974, 0.085272, -0.034453,
0.026097, -0.0959, -0.031164, -0.058699, 0.06839, 0.020512,
0.044727, 0.063609, -0.039863, 0.084819, -0.003909, 0.028666,
-0.075677, -0.045125, 0.070379, -0.033895, 0.022111, -0.097184,
-0.004921, -0.040851, 0.062316, 0.017435, 0.041437, 0.064568,
-0.039656, 0.060726, -0.003402, 0.036854, -0.056503, -0.058554,
0.068588, -0.034879, 0.01352, -0.09962, -0.01434, -0.039505,
0.065133, 0.024321, 0.038473, 0.062438}};
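// In batch-major mode each batch's full input sequence is contiguous, so a
// single SetInput call per batch suffices.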
const int input_sequence_size = lstm.sequence_length() * lstm.num_inputs();
EXPECT_EQ(input_sequence_size, 20);
float* batch0_start = lstm_input[0];
float* batch0_end = batch0_start + input_sequence_size;
lstm.SetInput(0, batch0_start, batch0_end);
float* batch1_start = lstm_input[1];
float* batch1_end = batch1_start + input_sequence_size;
lstm.SetInput(input_sequence_size, batch1_start, batch1_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
const int output_sequence_size =
lstm.sequence_length() * lstm.num_fw_outputs();
EXPECT_EQ(output_sequence_size, 64);
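// The golden outputs are likewise contiguous per batch.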
std::vector<float> expected;
const float* golden_start_batch0 = lstm_fw_golden_output[0];
const float* golden_end_batch0 = golden_start_batch0 + output_sequence_size;
expected.insert(expected.end(), golden_start_batch0, golden_end_batch0);
const float* golden_start_batch1 = lstm_fw_golden_output[1];
const float* golden_end_batch1 = golden_start_batch1 + output_sequence_size;
expected.insert(expected.end(), golden_start_batch1, golden_end_batch1);
EXPECT_THAT(lstm.GetFwOutput(), ElementsAreArray(ArrayFloatNear(expected)));
expected.clear();
golden_start_batch0 = lstm_combined_golden_output[0];
golden_end_batch0 = golden_start_batch0 + output_sequence_size;
expected.insert(expected.end(), golden_start_batch0, golden_end_batch0);
golden_start_batch1 = lstm_combined_golden_output[1];
golden_end_batch1 = golden_start_batch1 + output_sequence_size;
expected.insert(expected.end(), golden_start_batch1, golden_end_batch1);
std::vector<float> combined;
for (int i = 0; i < lstm.GetFwOutput().size(); ++i) {
combined.push_back(lstm.GetFwOutput()[i] + lstm.GetBwOutput()[i]);
}
EXPECT_THAT(combined, ElementsAreArray(ArrayFloatNear(expected)));
}
TEST_P(LSTMOpTest, BlackBoxTestWithAuxInputZeroAuxWeight) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length, false,
false, false,
false, false,
true, 0.0,
0.0, quantize_weights, true,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
{n_batch, n_output},
{n_batch, n_cell},
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
},
asymmetric_quantize_inputs);
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
lstm.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
lstm.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
lstm.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077, -0.1556896,
0.19487578});
lstm.SetInputGateBias({0., 0., 0., 0.});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToInputWeights(
{-0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
-0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
-0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296});
lstm.SetRecurrentToCellWeights(
{-0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
-0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064});
lstm.SetRecurrentToForgetWeights(
{-0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
-0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004});
lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
static float lstm_fw_golden_output[] = {
-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792};
static float lstm_bw_golden_output[] = {
-0.0806187, 0.139077, 0.400476, -0.197842, -0.0332076, 0.123838,
0.309777, -0.17621, -0.0490733, 0.0739237, 0.067706, -0.0208124};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
lstm.SetAuxInput(0, batch0_start, batch0_end);
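// All-zero aux-input weights make the auxiliary input a no-op, so the
// golden outputs above are unaffected by feeding it.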
std::vector<float> dummy_weights(n_cell * n_input, 0.0f);
lstm.SetAuxInputToInputWeights(dummy_weights);
lstm.SetAuxInputToForgetWeights(dummy_weights);
lstm.SetAuxInputToCellWeights(dummy_weights);
lstm.SetAuxInputToOutputWeights(dummy_weights);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
float* fw_golden_start = lstm_fw_golden_output;
float* fw_golden_end =
fw_golden_start + lstm.num_fw_outputs() * lstm.sequence_length();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), fw_golden_start, fw_golden_end);
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(
ArrayFloatNear(fw_expected, quantize_weights ? 1e-2 : 1e-5)));
float* bw_golden_start = lstm_bw_golden_output;
float* bw_golden_end =
bw_golden_start + lstm.num_bw_outputs() * lstm.sequence_length();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), bw_golden_start, bw_golden_end);
EXPECT_THAT(lstm.GetBwOutput(),
ElementsAreArray(
ArrayFloatNear(bw_expected, quantize_weights ? 1e-2 : 1e-5)));
}
TEST_P(LSTMOpTest, BlackBoxTestWithAuxInput) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length, false,
false, false,
false, false,
true, 0.0,
0.0, quantize_weights, true,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
{n_batch, n_output},
{n_batch, n_cell},
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
},
asymmetric_quantize_inputs);
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
lstm.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
lstm.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
lstm.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077, -0.1556896,
0.19487578});
lstm.SetInputGateBias({0., 0., 0., 0.});
lstm.SetCellBias({0., 0., 0., 0.});
lstm.SetForgetGateBias({1., 1., 1., 1.});
lstm.SetOutputGateBias({0., 0., 0., 0.});
lstm.SetRecurrentToInputWeights(
{-0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
-0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
-0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296});
lstm.SetRecurrentToCellWeights(
{-0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
-0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064});
lstm.SetRecurrentToForgetWeights(
{-0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
-0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004});
lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
static float lstm_fw_golden_output[] = {
0.153335, 0.542754, 0.708602, 0.742855, 0.247581, 0.835739,
0.947797, 0.958177, 0.410892, 0.672268, 0.761909, 0.829133};
static float lstm_bw_golden_output[] = {
0.342275, 0.883431, 0.955930, 0.975621, 0.204939, 0.806858,
0.914849, 0.934871, 0.123236, 0.373087, 0.465377, 0.517630};
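// Unlike the zero-weight variant above, non-zero aux weights let the aux
// input (fed the same values as the main input below) contribute to every
// gate, which the golden data reflects.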
lstm.SetAuxInputToInputWeights({0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
lstm.SetAuxInputToForgetWeights({0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1.0});
lstm.SetAuxInputToCellWeights({0.5, 0.6, 0.7, 0.8, 0.5, 0.6, 0.7, 0.8});
lstm.SetAuxInputToOutputWeights({0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
lstm.SetAuxInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
float* fw_golden_start = lstm_fw_golden_output;
float* fw_golden_end =
fw_golden_start + lstm.num_fw_outputs() * lstm.sequence_length();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), fw_golden_start, fw_golden_end);
EXPECT_THAT(lstm.GetFwOutput(),
ElementsAreArray(
ArrayFloatNear(fw_expected, quantize_weights ? 1e-2 : 1e-5)));
float* bw_golden_start = lstm_bw_golden_output;
float* bw_golden_end =
bw_golden_start + lstm.num_bw_outputs() * lstm.sequence_length();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), bw_golden_start, bw_golden_end);
EXPECT_THAT(lstm.GetBwOutput(),
ElementsAreArray(
ArrayFloatNear(bw_expected, quantize_weights ? 1e-2 : 1e-5)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bidirectional_sequence_lstm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bidirectional_sequence_lstm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c8c878a-fc1d-4763-a675-7908c10fe949 | cpp | tensorflow/tensorflow | unique | tensorflow/lite/kernels/unique.cc | tensorflow/lite/kernels/unique_test.cc | #include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unique {
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void Free(TfLiteContext* context, void* buffer) {}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
static const int kOutputUniqueTensor = 0;
static const int kOutputIndexTensor = 1;
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output_unique_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputUniqueTensor,
&output_unique_tensor));
TfLiteTensor* output_index_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputIndexTensor,
&output_index_tensor));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
TfLiteIntArray* output_index_shape = TfLiteIntArrayCopy(input->dims);
SetTensorToDynamic(output_unique_tensor);
return context->ResizeTensor(context, output_index_tensor,
output_index_shape);
}
namespace {
template <typename T, typename I>
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
TfLiteNode* node) {
std::map<T, int> unique_values;
TfLiteTensor* output_indexes;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &output_indexes));
std::vector<T> output_values;
I* indexes = GetTensorData<I>(output_indexes);
const T* data = GetTensorData<T>(input);
const int num_elements = NumElements(input);
  // Single pass over the input: the first occurrence of each value is appended
  // to output_values, and every element records the index of its unique value.
  // std::map keeps the lookup at O(log n) per element.
  for (int i = 0; i < num_elements; ++i) {
const auto element_it = unique_values.find(data[i]);
if (element_it != unique_values.end()) {
indexes[i] = element_it->second;
} else {
const int unique_index = unique_values.size();
unique_values[data[i]] = unique_index;
indexes[i] = unique_index;
output_values.push_back(data[i]);
}
}
TfLiteTensor* unique_output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &unique_output));
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
TfLiteIntArrayCreate(NumDimensions(input)), TfLiteIntArrayFree);
shape->data[0] = unique_values.size();
TF_LITE_ENSURE_STATUS(
context->ResizeTensor(context, unique_output, shape.release()));
T* output_unique_values = GetTensorData<T>(unique_output);
  for (size_t i = 0; i < output_values.size(); ++i) {
output_unique_values[i] = output_values[i];
}
return kTfLiteOk;
}
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteUniqueParams*>(node->builtin_data);
if (params == nullptr) {
TF_LITE_KERNEL_LOG(context, "Null params passed");
return kTfLiteError;
}
switch (params->index_out_type) {
case kTfLiteInt32:
return EvalImpl<T, int32_t>(context, input, node);
case kTfLiteInt64:
return EvalImpl<T, int64_t>(context, input, node);
default:
TF_LITE_KERNEL_LOG(
context,
"Unique index output array can only be Int32 or In64, requested: %s",
TfLiteTypeGetName(params->index_out_type));
}
return kTfLiteError;
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output_index_tensor;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 1, &output_index_tensor));
TF_LITE_ENSURE_EQ(context, NumElements(output_index_tensor),
NumElements(input));
switch (input->type) {
case kTfLiteInt8:
TF_LITE_ENSURE_STATUS(EvalImpl<int8_t>(context, input, node));
break;
case kTfLiteInt16:
TF_LITE_ENSURE_STATUS(EvalImpl<int16_t>(context, input, node));
break;
case kTfLiteInt32:
TF_LITE_ENSURE_STATUS(EvalImpl<int32_t>(context, input, node));
break;
case kTfLiteInt64:
TF_LITE_ENSURE_STATUS(EvalImpl<int64_t>(context, input, node));
break;
case kTfLiteFloat32:
TF_LITE_ENSURE_STATUS(EvalImpl<float>(context, input, node));
break;
case kTfLiteUInt8:
TF_LITE_ENSURE_STATUS(EvalImpl<uint8_t>(context, input, node));
break;
default:
TF_LITE_KERNEL_LOG(context, "Currently Unique doesn't support type: %s",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_UNIQUE() {
static TfLiteRegistration r = {unique::Init, unique::Free, unique::Prepare,
unique::Eval};
return &r;
}
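// Usage sketch (illustrative, not part of the original file): the registration
// above is normally reached through an op resolver. MutableOpResolver and
// BuiltinOperator_UNIQUE are standard TfLite symbols; the model loading and
// interpreter construction around them are assumed and elided here.
//
//   tflite::MutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_UNIQUE,
//                       tflite::ops::builtin::Register_UNIQUE());
//   // resolver is then passed to tflite::InterpreterBuilder(model, resolver).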
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <typename T, typename I>
class UniqueOpModel : public SingleOpModel {
public:
UniqueOpModel(const TensorData& input, TensorType input_type,
TensorType index_out_type) {
input_id_ = AddInput(input);
output_id_ = AddOutput(input_type);
output_index_id_ = AddOutput(index_out_type);
SetBuiltinOp(BuiltinOperator_UNIQUE, BuiltinOptions_UniqueOptions,
CreateUniqueOptions(builder_, index_out_type).Union());
BuildInterpreter({GetShape(input_id_)});
}
int input_tensor_id() { return input_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
std::vector<I> GetIndexesOutput() {
return ExtractVector<I>(output_index_id_);
}
protected:
int input_id_;
int output_id_;
int output_index_id_;
};
TEST(UniqueOpModelTest, OneElement) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {1}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(), {5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_THAT(model.GetIndexesOutput(), ElementsAreArray({0}));
}
TEST(UniqueOpModelTest, MultipleElements_AllUnique) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {8}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(),
{5, 2, 3, 51, 6, 72, 7, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5, 2, 3, 51, 6, 72, 7, 8}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7}));
}
TEST(UniqueOpModelTest, MultipleElements_AllDuplicates) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {7}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(), {5, 5, 5, 5, 5, 5, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 0}));
}
TEST(UniqueOpModelTest, MultipleElements_SomeDuplicates) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {7}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(), {2, 3, 5, 7, 2, 7, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({2, 3, 5, 7}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 3, 1}));
}
TEST(UniqueOpModelTest, MultipleElements_RepeatedDuplicates) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {6}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(),
{-1, -1, -2, -2, -3, -3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({-1, -2, -3}));
EXPECT_THAT(model.GetIndexesOutput(), ElementsAreArray({0, 0, 1, 1, 2, 2}));
}
TEST(UniqueOpModelTest, MultipleElements_SomeDuplicates_IndexInt64) {
UniqueOpModel<float, int64_t> model({TensorType_FLOAT32, {7}},
TensorType_FLOAT32, TensorType_INT64);
model.PopulateTensor<float>(model.input_tensor_id(), {2, 3, 5, 7, 2, 7, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({2, 3, 5, 7}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 3, 1}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unique.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unique_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b4fdb4f-feb5-418d-8789-f60f2726fc08 | cpp | tensorflow/tensorflow | depth_to_space | tensorflow/lite/kernels/depth_to_space.cc | tensorflow/lite/delegates/xnnpack/depth_to_space_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace depth_to_space {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
auto data_type = output->type;
TF_LITE_ENSURE(context,
data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 ||
data_type == kTfLiteInt8 || data_type == kTfLiteInt32 ||
data_type == kTfLiteInt64);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const int block_size = params->block_size;
TF_LITE_ENSURE(context, block_size > 0);
const int input_height = input->dims->data[1];
const int input_width = input->dims->data[2];
const int input_channels = input->dims->data[3];
  // output_channels uses truncating integer division, so the channel
  // ENSURE_EQ below doubles as a check that input_channels is a multiple of
  // block_size * block_size; the height/width checks are trivially satisfied.
  int output_height = input_height * block_size;
  int output_width = input_width * block_size;
  int output_channels = input_channels / block_size / block_size;
TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size);
TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size);
TF_LITE_ENSURE_EQ(context, input_channels,
output_channels * block_size * block_size);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = output_height;
output_size->data[2] = output_width;
output_size->data[3] = output_channels;
return context->ResizeTensor(context, output, output_size);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
#define TF_LITE_DEPTH_TO_SPACE(type, scalar) \
tflite::DepthToSpaceParams op_params; \
op_params.block_size = params->block_size; \
type::DepthToSpace(op_params, GetTensorShape(input), \
GetTensorData<scalar>(input), GetTensorShape(output), \
GetTensorData<scalar>(output))
switch (input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, float);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, float);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, uint8_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, uint8_t);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, int8_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, int8_t);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, int32_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, int32_t);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, int64_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, int64_t);
}
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
#undef TF_LITE_DEPTH_TO_SPACE
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DEPTH_TO_SPACE_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, depth_to_space::Prepare,
depth_to_space::Eval<depth_to_space::kReference>};
return &r;
}
TfLiteRegistration* Register_DEPTH_TO_SPACE_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, depth_to_space::Prepare,
depth_to_space::Eval<depth_to_space::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_DEPTH_TO_SPACE() {
return Register_DEPTH_TO_SPACE_GENERIC_OPT();
}
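// Worked shape example (illustrative, not part of the original file): with
// block_size = 2, an input of shape [1, 2, 2, 8] rearranges depth into space,
// multiplying H and W by block_size and dividing the channel count by
// block_size * block_size:
//
//   in:  [1, 2, 2, 8],  block_size = 2
//   out: [1, 2*2, 2*2, 8/(2*2)] = [1, 4, 4, 2]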
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/depth_to_space_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
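// Editorial sketch (hypothetical helper, not in the original file): the tests
// below all create an XNNPACK delegate and draw dimensions from small uniform
// ranges via std::bind. The same draw can be written without std::bind:
//
//   std::mt19937 rng{std::random_device{}()};
//   auto draw = [&rng](int32_t lo, int32_t hi) {
//     return std::uniform_int_distribution<int32_t>(lo, hi)(rng);
//   };
//   // e.g. draw(2, 4) for batch size, draw(2, 3) for block size.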
TEST(DepthToSpace, SinglePixel) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(1)
.InputWidth(1)
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, SingleRow) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto width_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(1)
.InputWidth(width_rng())
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, SingleColumn) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto height_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(height_rng())
.InputWidth(1)
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, FullImage) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/depth_to_space.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/depth_to_space_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6faa1ee0-6de7-43eb-8611-02cba56c2d4f | cpp | tensorflow/tensorflow | tensor_slice_util | tensorflow/lite/kernels/tensor_slice_util.cc | tensorflow/lite/kernels/tensor_slice_util_test.cc | #include "tensorflow/lite/kernels/tensor_slice_util.h"
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
namespace tflite {
namespace ops {
namespace builtin {
template <typename IndexType>
Index<IndexType> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<IndexType>& other_indices,
int64_t dim_to_read) {
Index<IndexType> index;
index.reserve(tensor_shape.DimensionsCount());
  // Build a full index into indices_tensor: dimensions other than dim_to_read
  // are copied from other_indices, which is one element shorter, so shift
  // realigns it once dim_to_read has been passed.
  int shift = 0;
for (int64_t dim = 0; dim < tensor_shape.DimensionsCount(); ++dim) {
if (dim == dim_to_read) {
index.push_back(0);
shift = 1;
} else {
index.push_back(other_indices[dim - shift]);
}
}
int64_t index_vector_size = tensor_shape.Dims(dim_to_read);
Index<IndexType> result;
result.reserve(index_vector_size);
for (IndexType index_vector_idx = 0; index_vector_idx < index_vector_size;
++index_vector_idx) {
index[dim_to_read] = index_vector_idx;
IndexType flat_index = TensorIndexToFlat(
index.data(), tensor_shape.DimensionsCount(), tensor_shape);
const IndexType* tensor_data = GetTensorData<IndexType>(indices_tensor);
result.push_back(tensor_data[flat_index]);
}
return result;
}
template Index<int32_t> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<int32_t>& other_indices,
int64_t dim_to_read);
template Index<int64_t> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<int64_t>& other_indices,
int64_t dim_to_read);
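// The explicit instantiations above keep the template definition out of the
// header; int32_t and int64_t are the only index types TFLite kernels use.
// Semantics sketch (assumed example mirroring the unit test below, not
// original code): for an int64 tensor of shape {2, 3, 2} holding
// {0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9},
// ReadIndexVector(tensor, shape, {1, 1}, /*dim_to_read=*/1) sweeps dimension 1
// while pinning the other dimensions to {1, 1}, visiting flat indices
// 7, 9, 11 and returning {1, 0, 9}.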
}
}
} | #include "tensorflow/lite/kernels/tensor_slice_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
using ::testing::ElementsAreArray;
TEST(TensorSliceUtil, ArrayContains) {
std::vector<int64_t> array = {1, 2, 3};
EXPECT_TRUE(ArrayContains(array.data(), array.size(), 2));
EXPECT_FALSE(ArrayContains(array.data(), array.size(), 0));
}
TEST(TensorSliceUtil, ArrayContainsWorkOnEmptyArray) {
std::vector<int64_t> array = {};
EXPECT_FALSE(ArrayContains(array.data(), 0, 2));
}
TEST(TensorSliceUtil, ScatterIndexHandlesNullPtr) {
Index<int64_t> index = {3, 5};
std::vector<int64_t> scatter_dims = {1, 0};
Index<int64_t>* result = nullptr;
TfLiteStatus status =
ScatterIndex(index, scatter_dims.data(), scatter_dims.size(), 3, result);
EXPECT_THAT(status, kTfLiteError);
}
TEST(TensorSliceUtil, ScatterIndexHandlesOutOfBoundIndices) {
Index<int64_t> index = {3, 5};
std::vector<int64_t> scatter_dims = {4, 0};
Index<int64_t> result;
TfLiteStatus status =
ScatterIndex(index, scatter_dims.data(), scatter_dims.size(), 3, &result);
EXPECT_THAT(status, kTfLiteError);
}
TEST(TensorSliceUtil, ScatterIndex) {
Index<int64_t> index = {3, 5};
std::vector<int64_t> scatter_dims = {1, 0};
Index<int64_t> result;
ScatterIndex(index, scatter_dims.data(), scatter_dims.size(), 3, &result);
EXPECT_THAT(result, ElementsAreArray({5, 3, 0}));
}
TEST(TensorSliceUtil, TensorIndexToFlatWorksForScalars) {
Index<int64_t> index = {0};
RuntimeShape shape(0);
EXPECT_EQ(TensorIndexToFlat(index.data(), index.size(), shape), 0);
}
TEST(TensorSliceUtil, TensorIndexToFlat) {
Index<int64_t> index = {2, 4};
RuntimeShape shape({3, 5});
EXPECT_EQ(TensorIndexToFlat(index.data(), index.size(), shape), 14);
}
TEST(TensorSliceUtil, AddIndices) {
Index<int64_t> index1 = {1, 2, 3};
Index<int64_t> index2 = {2, 7, 5};
EXPECT_THAT(AddIndices(index1, index2), ElementsAreArray({3, 9, 8}));
}
TEST(TensorSliceUtil, ExpandDimsHandlesEmptyIndex) {
Index<int64_t> index = {};
std::vector<int64_t> avoided_dims = {0, 1};
Index<int64_t> result;
ExpandDims(index, avoided_dims.data(), avoided_dims.size(), &result);
EXPECT_THAT(result, ElementsAreArray({0, 0}));
}
TEST(TensorSliceUtil, ExpandDims) {
Index<int64_t> index = {2, 4};
std::vector<int64_t> avoided_dims = {0, 2};
Index<int64_t> result;
ExpandDims(index, avoided_dims.data(), avoided_dims.size(), &result);
EXPECT_THAT(result, ElementsAreArray({0, 2, 0, 4}));
}
TEST(TensorSliceUtil, ReadIndexVector) {
TfLiteTensor tensor;
tensor.type = kTfLiteInt64;
std::vector<int64_t> tensor_data = {0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9};
TfLitePtrUnion ptr_union;
ptr_union.i64 = tensor_data.data();
tensor.data = ptr_union;
RuntimeShape shape = {2, 3, 2};
Index<int64_t> other_indices = {1, 1};
int64_t dim_to_read = 1;
EXPECT_THAT(ReadIndexVector(&tensor, shape, other_indices, dim_to_read),
ElementsAreArray({1, 0, 9}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/tensor_slice_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/tensor_slice_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |