Dataset columns: ID (string, 36 chars), Language (1 distinct value), Repository Name (13 distinct values), File Name (2-48 chars), File Path in Repository (11-111 chars), File Path for Unit Test (13-116 chars), Code (0-278k chars), Unit Test - (Ground Truth) (78-663k chars), Code Url (91-198 chars), Test Code Url (93-203 chars), Commit Hash (13 distinct values).

ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
---|---|---|---|---|---|---|---|---|---|---|
5bd54841-8a92-4042-9218-e8e29fc3ef54 | cpp | tensorflow/tensorflow | generate_testspec | tensorflow/lite/testing/generate_testspec.cc | tensorflow/lite/testing/generate_testspec_test.cc | #include "tensorflow/lite/testing/generate_testspec.h"
#include <iostream>
#include <random>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/testing/test_runner.h"
#include "tensorflow/lite/testing/tf_driver.h"
#include "tensorflow/lite/testing/tflite_driver.h"
namespace tflite {
namespace testing {
namespace {
template <typename T, typename RandomEngine, typename RandomDistribution>
void GenerateCsv(const string& name, const std::vector<int>& shape,
RandomEngine* engine, RandomDistribution distribution,
std::pair<string, string>* out) {
std::vector<T> data =
GenerateRandomTensor<T>(shape, [&]() { return distribution(*engine); });
*out = std::make_pair(name, Join(data.data(), data.size(), ","));
}
template <typename RandomEngine>
std::vector<std::pair<string, string>> GenerateInputValues(
RandomEngine* engine, const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape) {
std::vector<std::pair<string, string>> input_values;
input_values.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
tensorflow::DataType type;
CHECK(DataTypeFromString(input_layer_type[i], &type));
auto shape = Split<int>(input_layer_shape[i], ",");
const auto& name = input_layer[i];
switch (type) {
case tensorflow::DT_FLOAT:
GenerateCsv<float>(name, shape, engine,
std::uniform_real_distribution<float>(-0.5, 0.5),
&input_values[i]);
break;
case tensorflow::DT_UINT8:
GenerateCsv<uint8_t>(name, shape, engine,
std::uniform_int_distribution<uint32_t>(0, 255),
&input_values[i]);
break;
case tensorflow::DT_INT32:
GenerateCsv<int32_t>(name, shape, engine,
std::uniform_int_distribution<int32_t>(-100, 100),
&input_values[i]);
break;
case tensorflow::DT_INT64:
GenerateCsv<int64_t>(name, shape, engine,
std::uniform_int_distribution<int64_t>(-100, 100),
&input_values[i]);
break;
case tensorflow::DT_BOOL:
GenerateCsv<int>(name, shape, engine,
std::uniform_int_distribution<int>(0, 1),
&input_values[i]);
break;
default:
fprintf(stderr, "Unsupported type %d (%s) when generating testspec.\n",
type, input_layer_type[i].c_str());
input_values.clear();
return input_values;
}
}
return input_values;
}
bool GenerateTestSpecFromRunner(std::iostream& stream, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer,
TestRunner* runner) {
auto input_size = input_layer.size();
if (input_layer_shape.size() != input_size ||
input_layer_type.size() != input_size) {
fprintf(stderr,
"Input size not match. Expected %lu, got %lu input types, %lu "
"input shapes.\n",
input_size, input_layer_type.size(), input_layer_shape.size());
return false;
}
stream << "reshape {\n";
for (int i = 0; i < input_size; i++) {
const auto& name = input_layer[i];
const auto& shape = input_layer_shape[i];
stream << " input { key: \"" << name << "\" value: \"" << shape
<< "\" }\n";
}
stream << "}\n";
std::mt19937 random_engine;
for (int i = 0; i < num_invocations; ++i) {
auto input_values = GenerateInputValues(
&random_engine, input_layer, input_layer_type, input_layer_shape);
if (input_values.empty()) {
std::cerr << "Unable to generate input values for the TensorFlow model. "
"Make sure the correct values are defined for "
"input_layer, input_layer_type, and input_layer_shape."
<< std::endl;
return false;
}
runner->Invoke(input_values);
if (!runner->IsValid()) {
std::cerr << runner->GetErrorMessage() << std::endl;
return false;
}
stream << "invoke {\n";
for (const auto& entry : input_values) {
stream << " input { key: \"" << entry.first << "\" value: \""
<< entry.second << "\" }\n";
}
for (const auto& name : output_layer) {
stream << " output { key: \"" << name << "\" value: \""
<< runner->ReadOutput(name) << "\" }\n";
if (!runner->IsValid()) {
std::cerr << runner->GetErrorMessage() << std::endl;
return false;
}
}
stream << "}\n";
}
return true;
}
}
bool GenerateTestSpecFromTensorflowModel(
std::iostream& stream, const string& tensorflow_model_path,
const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer) {
CHECK_EQ(input_layer.size(), input_layer_type.size());
CHECK_EQ(input_layer.size(), input_layer_shape.size());
TfDriver runner(input_layer, input_layer_type, input_layer_shape,
output_layer);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.LoadModel(tensorflow_model_path);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
stream << "load_model: " << tflite_model_path << "\n";
return GenerateTestSpecFromRunner(stream, num_invocations, input_layer,
input_layer_type, input_layer_shape,
output_layer, &runner);
}
bool GenerateTestSpecFromTFLiteModel(
std::iostream& stream, const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer) {
TfLiteDriver runner;
runner.LoadModel(tflite_model_path);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.AllocateTensors();
return GenerateTestSpecFromRunner(stream, num_invocations, input_layer,
input_layer_type, input_layer_shape,
output_layer, &runner);
}
}
} | #include "tensorflow/lite/testing/generate_testspec.h"
#include <random>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(GenerateRandomTensor, FloatValue) {
std::mt19937 random_engine;
auto random_func = [&]() {
return std::uniform_real_distribution<float>(-0.5, 0.5)(random_engine);
};
std::set<float> values;
float sum_x_square = 0.0f;
float sum_x = 0.0f;
for (int i = 0; i < 100; i++) {
const auto& data = GenerateRandomTensor<float>({1, 3, 4}, random_func);
for (float value : data) {
values.insert(value);
sum_x_square += value * value;
sum_x += value;
}
}
EXPECT_GT(values.size(), 200);
int num = 1 * 3 * 4 * 100;
float stddev = sum_x_square / num - (sum_x / num) * (sum_x / num);
float minstddev = 1.0f / 12 / 2;
EXPECT_GT(stddev, minstddev);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/generate_testspec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/generate_testspec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
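A minimal usage sketch for the generate_testspec row above; the model path, layer names, types, and shapes are hypothetical, while the function signature and the emitted `reshape`/`invoke` blocks follow from `GenerateTestSpecFromTFLiteModel` and `GenerateTestSpecFromRunner` as shown in the row.

```cpp
#include <sstream>
#include <string>
#include <vector>

#include "tensorflow/lite/testing/generate_testspec.h"

int main() {
  // Hypothetical model and layer metadata; a real run needs a .tflite file
  // whose input/output tensor names match these.
  std::stringstream spec;
  bool ok = tflite::testing::GenerateTestSpecFromTFLiteModel(
      spec, "/tmp/multi_add.tflite", /*num_invocations=*/1,
      /*input_layer=*/{"a"}, /*input_layer_type=*/{"float"},
      /*input_layer_shape=*/{"1,2,2,1"}, /*output_layer=*/{"x"});
  // On success, `spec` holds a "reshape { input { key: ... value: ... } }"
  // block followed by one "invoke { input {...} output {...} }" block per
  // invocation, filled with randomly generated CSV values.
  return ok ? 0 : 1;
}
```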
a5d35579-b3e4-43f1-ab9f-4b67dee50526 | cpp | tensorflow/tensorflow | tflite_driver | tensorflow/lite/testing/tflite_driver.cc | tensorflow/lite/testing/tflite_driver_test.cc | #include "tensorflow/lite/testing/tflite_driver.h"
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "Eigen/Core"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/testing/result_expectations.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#if !defined(__APPLE__)
#include "tensorflow/lite/delegates/flex/delegate.h"
#endif
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/gradient/gradient_ops.h"
#include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/register_ref.h"
#include "tensorflow/lite/kernels/test_delegate_providers.h"
#include "tensorflow/lite/signature_runner.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace testing {
namespace {
const double kRelativeThreshold = 1e-2f;
const double kAbsoluteThreshold = 1e-4f;
const char kDefaultSignatureKey[] = "serving_default";
const int kQuantizationErrorMultiplier = 4;
template <typename T>
void SetTensorData(const std::vector<T>& values, void* data) {
T* input_ptr = static_cast<T*>(data);
std::copy(values.begin(), values.end(), input_ptr);
}
bool InterpretAsQuantized(const TfLiteTensor& tensor) {
if (tensor.quantization.type == kTfLiteNoQuantization) return false;
if (tensor.type == kTfLiteUInt8) return false;
if (tensor.quantization.params != nullptr) {
auto* quantization =
reinterpret_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
if (quantization->scale != nullptr && quantization->scale->size == 1 &&
quantization->zero_point != nullptr &&
quantization->zero_point->size == 1) {
return true;
}
}
return false;
}
}
bool TfLiteDriver::InitTestDelegateProviders(int* argc, const char** argv) {
return tflite::KernelTestDelegateProviders::Get()->InitFromCmdlineArgs(argc,
argv);
}
TfLiteDriver::TfLiteDriver(DelegateType delegate_type, bool reference_kernel)
: delegate_(nullptr, nullptr),
relative_threshold_(kRelativeThreshold),
absolute_threshold_(kAbsoluteThreshold),
quantization_error_multiplier_(kQuantizationErrorMultiplier) {
if (reference_kernel) {
resolver_ = std::make_unique<ops::builtin::BuiltinRefOpResolver>();
} else {
resolver_ = std::make_unique<
ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
ops::builtin::BuiltinOpResolver* builtin_op_resolver_ =
reinterpret_cast<ops::builtin::BuiltinOpResolver*>(resolver_.get());
builtin_op_resolver_->AddCustom("IRFFT2D",
tflite::ops::custom::Register_IRFFT2D());
builtin_op_resolver_->AddCustom(
"AvgPool3D", tflite::ops::custom::Register_AVG_POOL_3D());
builtin_op_resolver_->AddCustom(
"MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
builtin_op_resolver_->AddCustom("Roll",
tflite::ops::custom::Register_ROLL());
tflite::ops::custom::AddGradientOps(builtin_op_resolver_);
tflite::ops::custom::AddParseExampleOp(builtin_op_resolver_);
tflite::ops::custom::AddPerceptionOps(builtin_op_resolver_);
}
switch (delegate_type) {
case DelegateType::kNone:
break;
case DelegateType::kNnapi:
delegate_ = evaluation::CreateNNAPIDelegate();
break;
case DelegateType::kGpu:
delegate_ = evaluation::CreateGPUDelegate();
break;
case DelegateType::kFlex:
#if !defined(__APPLE__)
delegate_ = FlexDelegate::Create();
#endif
break;
}
}
TfLiteDriver::~TfLiteDriver() {
for (auto t : tensors_to_deallocate_) {
DeallocateStringTensor(t.second);
}
}
void TfLiteDriver::AllocateTensors() {
if (must_allocate_tensors_) {
if (interpreter_->AllocateTensors() != kTfLiteOk) {
Invalidate("Failed to allocate tensors");
return;
}
ResetLSTMStateTensors();
must_allocate_tensors_ = false;
}
}
void TfLiteDriver::LoadModel(const std::string& bin_file_path,
const std::string& signature) {
if (!IsValid()) return;
model_ = FlatBufferModel::BuildFromFile(GetFullPath(bin_file_path).c_str());
if (!model_) {
Invalidate("Failed to mmap model " + bin_file_path);
return;
}
InterpreterBuilder(*model_, *resolver_)(&interpreter_);
if (!interpreter_) {
Invalidate("Failed build interpreter");
return;
}
if (delegate_) {
if (interpreter_->ModifyGraphWithDelegate(delegate_.get()) != kTfLiteOk) {
Invalidate("Unable to the build graph using the delegate");
return;
}
} else {
auto* delegate_providers = tflite::KernelTestDelegateProviders::Get();
for (auto& one : delegate_providers->CreateAllDelegates()) {
if (interpreter_->ModifyGraphWithDelegate(std::move(one.delegate)) !=
kTfLiteOk) {
Invalidate(
"Unable to the build graph using the delegate initialized from "
"tflite::KernelTestDelegateProviders");
return;
}
}
}
must_allocate_tensors_ = true;
signature_runner_ = interpreter_->GetSignatureRunner(signature.c_str());
if (signature_runner_) {
signature_inputs_ = interpreter_->signature_inputs(signature.c_str());
signature_outputs_ = interpreter_->signature_outputs(signature.c_str());
} else {
Invalidate("Unable to the fetch signature runner.");
}
}
void TfLiteDriver::LoadModel(const std::string& bin_file_path) {
LoadModel(bin_file_path, kDefaultSignatureKey);
}
void TfLiteDriver::ReshapeTensor(const std::string& name,
const std::string& csv_values) {
if (!IsValid()) return;
if (signature_runner_->ResizeInputTensor(
name.c_str(), testing::Split<int>(csv_values, ",")) != kTfLiteOk) {
Invalidate("Failed to resize input tensor " + name);
return;
}
must_allocate_tensors_ = true;
}
void TfLiteDriver::ResetTensor(const std::string& name) {
if (!IsValid()) return;
auto* tensor = signature_runner_->input_tensor(name.c_str());
memset(tensor->data.raw, 0, tensor->bytes);
}
void TfLiteDriver::Invoke(
const std::vector<std::pair<std::string, std::string>>& inputs) {
if (!IsValid()) return;
for (const auto& input : inputs) {
SetInput(input.first, input.second);
}
if (signature_runner_->Invoke() != kTfLiteOk) {
Invalidate("Failed to invoke interpreter");
}
}
std::string TfLiteDriver::ReadOutput(const std::string& name) {
if (!IsValid()) return "";
return TensorValueToCsvString(signature_runner_->output_tensor(name.c_str()));
}
bool TfLiteDriver::CheckResults(
const std::vector<std::pair<std::string, std::string>>& expected_outputs,
const std::vector<std::pair<std::string, std::string>>&
expected_output_shapes) {
if (!IsValid()) return false;
bool success = true;
for (const auto& output : expected_outputs) {
SetExpectation(output.first, output.second);
}
for (const auto& shape : expected_output_shapes) {
SetShapeExpectation(shape.first, shape.second);
}
for (const auto& p : expected_output_) {
int id = p.first;
auto* tensor = interpreter_->tensor(id);
if (!p.second->Check(false, *tensor)) {
std::cerr << "TfLiteDriver: There were errors in invocation '"
<< GetInvocationId() << "', validating output tensor '" << id
<< "':" << std::endl;
p.second->Check(true, *tensor);
success = false;
SetOverallSuccess(false);
}
}
for (const auto& p : expected_output_shape_) {
int id = p.first;
auto* tensor = interpreter_->tensor(id);
if (!p.second->CheckShape(false, *tensor)) {
std::cerr << "TfLiteDriver: There were errors in invocation '"
<< GetInvocationId()
<< "', validating the shape of output tensor '" << id
<< "':" << std::endl;
p.second->CheckShape(true, *tensor);
success = false;
SetOverallSuccess(false);
}
}
expected_output_.clear();
return success;
}
std::vector<std::string> TfLiteDriver::GetOutputNames() {
if (!IsValid()) return {};
std::vector<std::string> names;
for (const auto* name : signature_runner_->output_names()) {
names.push_back(name);
}
return names;
}
void TfLiteDriver::SetInput(const std::string& name,
const std::string& csv_values) {
auto id = signature_inputs_[name];
auto* tensor = signature_runner_->input_tensor(name.c_str());
switch (tensor->type) {
case kTfLiteFloat64: {
const auto& values = testing::Split<double>(csv_values, ",");
if (!CheckSizes<double>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteFloat32: {
const auto& values = testing::Split<float>(csv_values, ",");
if (!CheckSizes<float>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt32: {
const auto& values = testing::Split<int32_t>(csv_values, ",");
if (!CheckSizes<int32_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt32: {
const auto& values = testing::Split<uint32_t>(csv_values, ",");
if (!CheckSizes<uint32_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt64: {
const auto& values = testing::Split<int64_t>(csv_values, ",");
if (!CheckSizes<int64_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt64: {
const auto& values = testing::Split<uint64_t>(csv_values, ",");
if (!CheckSizes<uint64_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt8: {
const auto& values = testing::Split<uint8_t>(csv_values, ",");
if (!CheckSizes<uint8_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt8: {
const auto& values = testing::Split<int8_t>(csv_values, ",");
if (!CheckSizes<int8_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt16: {
const auto& values = testing::Split<int16_t>(csv_values, ",");
if (!CheckSizes<int16_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt16: {
const auto& values = testing::Split<uint16_t>(csv_values, ",");
if (!CheckSizes<uint16_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteBool: {
const auto& values = testing::Split<bool>(csv_values, ",");
if (!CheckSizes<bool>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteString: {
std::string s = absl::HexStringToBytes(csv_values);
DeallocateStringTensor(tensors_to_deallocate_[id]);
AllocateStringTensor(id, s.size(), tensor);
memcpy(tensor->data.raw, s.data(), s.size());
break;
}
case kTfLiteComplex64: {
const auto& values = testing::Split<std::complex<float>>(csv_values, ",");
if (!CheckSizes<std::complex<float>>(tensor->bytes, values.size()))
return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteComplex128: {
const auto& values =
testing::Split<std::complex<double>>(csv_values, ",");
if (!CheckSizes<std::complex<double>>(tensor->bytes, values.size()))
return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteFloat16: {
const auto& values = testing::Split<Eigen::half>(csv_values, ",");
for (auto k : values) {
TFLITE_LOG(INFO) << "input" << k;
}
if (!CheckSizes<Eigen::half>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
TfLiteTypeGetName(tensor->type),
" in TfLiteDriver::SetInput"));
return;
}
}
void TfLiteDriver::SetThreshold(double relative_threshold,
double absolute_threshold) {
relative_threshold_ = relative_threshold;
absolute_threshold_ = absolute_threshold;
}
void TfLiteDriver::SetQuantizationErrorMultiplier(
int quantization_error_multiplier) {
quantization_error_multiplier_ = quantization_error_multiplier;
}
void TfLiteDriver::SetExpectation(const std::string& name,
const std::string& csv_values) {
auto id = signature_outputs_[name];
auto* tensor = signature_runner_->output_tensor(name.c_str());
if (expected_output_.count(id) != 0) {
Invalidate(absl::StrCat("Overridden expectation for tensor '", id, "'"));
}
expected_output_[id] = std::make_unique<DataExpectation>(
relative_threshold_, absolute_threshold_, quantization_error_multiplier_);
if (InterpretAsQuantized(*tensor)) {
expected_output_[id]->SetData<float>(csv_values);
return;
}
switch (tensor->type) {
case kTfLiteFloat32:
expected_output_[id]->SetData<float>(csv_values);
break;
case kTfLiteInt32:
expected_output_[id]->SetData<int32_t>(csv_values);
break;
case kTfLiteUInt32:
expected_output_[id]->SetData<uint32_t>(csv_values);
break;
case kTfLiteInt64:
expected_output_[id]->SetData<int64_t>(csv_values);
break;
case kTfLiteUInt64:
expected_output_[id]->SetData<uint64_t>(csv_values);
break;
case kTfLiteUInt8:
expected_output_[id]->SetData<uint8_t>(csv_values);
break;
case kTfLiteInt8:
expected_output_[id]->SetData<int8_t>(csv_values);
break;
case kTfLiteUInt16:
expected_output_[id]->SetData<uint16_t>(csv_values);
break;
case kTfLiteInt16:
expected_output_[id]->SetData<int16_t>(csv_values);
break;
case kTfLiteBool:
expected_output_[id]->SetData<bool>(csv_values);
break;
case kTfLiteString:
expected_output_[id]->SetData<std::string>(csv_values);
break;
case kTfLiteFloat64:
expected_output_[id]->SetData<double>(csv_values);
break;
case kTfLiteComplex64:
expected_output_[id]->SetData<std::complex<float>>(csv_values);
break;
case kTfLiteComplex128:
expected_output_[id]->SetData<std::complex<double>>(csv_values);
break;
case kTfLiteFloat16:
expected_output_[id]->SetData<Eigen::half>(csv_values);
break;
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
TfLiteTypeGetName(tensor->type),
" in TfLiteDriver::SetExpectation"));
return;
}
}
void TfLiteDriver::SetShapeExpectation(const std::string& name,
const std::string& csv_values) {
auto id = signature_outputs_[name];
if (expected_output_shape_.count(id) != 0) {
Invalidate(
absl::StrCat("Overridden shape expectation for tensor '", id, "'"));
}
expected_output_shape_[id] = std::make_unique<ShapeExpectation>(csv_values);
}
void TfLiteDriver::ResetLSTMStateTensors() {
interpreter_->ResetVariableTensors();
}
std::string TfLiteDriver::TensorValueToCsvString(const TfLiteTensor* tensor) {
int num_elements = 1;
for (int i = 0; i < tensor->dims->size; ++i) {
num_elements *= tensor->dims->data[i];
}
switch (tensor->type) {
case kTfLiteFloat32:
return JoinDefault(tensor->data.f, num_elements, ",");
case kTfLiteInt32:
return JoinDefault(tensor->data.i32, num_elements, ",");
case kTfLiteUInt32:
return JoinDefault(tensor->data.u32, num_elements, ",");
case kTfLiteInt64:
return JoinDefault(tensor->data.i64, num_elements, ",");
case kTfLiteUInt64:
return JoinDefault(tensor->data.u64, num_elements, ",");
case kTfLiteUInt8:
return Join(tensor->data.uint8, num_elements, ",");
case kTfLiteInt8:
return Join(tensor->data.int8, num_elements, ",");
case kTfLiteUInt16:
return Join(tensor->data.ui16, num_elements, ",");
case kTfLiteInt16:
return Join(tensor->data.i16, num_elements, ",");
case kTfLiteBool:
return JoinDefault(tensor->data.b, num_elements, ",");
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
TfLiteTypeGetName(tensor->type),
" in TfLiteDriver::ReadOutput"));
return "";
}
}
}
} | #include "tensorflow/lite/testing/tflite_driver.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/test_runner.h"
namespace tflite {
namespace testing {
namespace {
using ::testing::ElementsAre;
TEST(TfliteDriverTest, SimpleTest) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver);
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/multi_add.bin", "serving_default");
ASSERT_TRUE(runner->IsValid());
ASSERT_THAT(runner->GetOutputNames(), ElementsAre("x", "y"));
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_EQ(runner->ReadOutput("x"), "0.101,0.202,0.303,0.404");
ASSERT_EQ(runner->ReadOutput("y"), "0.011,0.022,0.033,0.044");
ASSERT_TRUE(runner->CheckResults(
{{"x", "0.101,0.202,0.303,0.404"}, {"y", "0.011,0.022,0.033,0.044"}},
{}));
}
TEST(TfliteDriverTest, SingleAddOpTest) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver(
TfLiteDriver::DelegateType::kNone,
true));
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/multi_add.bin");
ASSERT_TRUE(runner->IsValid());
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_TRUE(runner->CheckResults(
{{"x", "0.101,0.202,0.303,0.404"}, {"y", "0.011,0.022,0.033,0.044"}},
{}));
EXPECT_EQ(runner->ReadOutput("x"), "0.101,0.202,0.303,0.404");
EXPECT_EQ(runner->ReadOutput("y"), "0.011,0.022,0.033,0.044");
}
TEST(TfliteDriverTest, AddOpWithNaNTest) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver(
TfLiteDriver::DelegateType::kNone,
true));
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/multi_add.bin");
ASSERT_TRUE(runner->IsValid());
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,nan,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,nan"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_TRUE(runner->CheckResults(
{{"x", "0.101,nan,0.303,0.404"}, {"y", "0.011,0.022,0.033,nan"}},
{}));
EXPECT_EQ(runner->ReadOutput("x"), "0.101,nan,0.303,0.404");
EXPECT_EQ(runner->ReadOutput("y"), "0.011,0.022,0.033,nan");
}
TEST(TfliteDriverTest, AddQuantizedInt8Test) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver());
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/add_quantized_int8.bin");
ASSERT_TRUE(runner->IsValid());
runner->ReshapeTensor("a", "1,2,2,1");
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->Invoke({{"a", "1,1,1,1"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_TRUE(runner->CheckResults({{"x", "0.0117,0.0117,0.0117,0.0117"}}, {}));
EXPECT_EQ(runner->ReadOutput("x"), "3,3,3,3");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tflite_driver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tflite_driver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
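A sketch extending the SimpleTest above to use the second argument of `CheckResults`, which the tests in this row always leave empty; the expected output shapes are an assumption based on the element-wise adds in `multi_add.bin`.

```cpp
#include "tensorflow/lite/testing/tflite_driver.h"

int main() {
  tflite::testing::TfLiteDriver driver;
  driver.SetModelBaseDir("tensorflow/lite");
  driver.LoadModel("testdata/multi_add.bin");
  for (const auto& name : {"a", "b", "c", "d"}) {
    driver.ReshapeTensor(name, "1,2,2,1");
  }
  driver.AllocateTensors();
  driver.ResetTensor("c");
  driver.Invoke({{"a", "0.1,0.2,0.3,0.4"},
                 {"b", "0.001,0.002,0.003,0.004"},
                 {"d", "0.01,0.02,0.03,0.04"}});
  // Second argument: expected output shapes (assumed to stay 1,2,2,1 since the
  // model only applies element-wise adds to the reshaped inputs).
  bool ok = driver.CheckResults(
      {{"x", "0.101,0.202,0.303,0.404"}, {"y", "0.011,0.022,0.033,0.044"}},
      {{"x", "1,2,2,1"}, {"y", "1,2,2,1"}});
  return driver.IsValid() && ok ? 0 : 1;
}
```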
2f2dd5f2-a8cd-4856-b362-5cee75f05ca3 | cpp | tensorflow/tensorflow | message | tensorflow/lite/testing/message.cc | tensorflow/lite/testing/message_test.cc | #include "tensorflow/lite/testing/message.h"
#include <stack>
#include <string>
#include "tensorflow/lite/testing/tokenize.h"
namespace tflite {
namespace testing {
class MessageStack : public TokenProcessor {
public:
explicit MessageStack(Message* first_node) {
nodes_.push(first_node);
valid_ = true;
}
void ConsumeToken(std::string* token) override {
if (!valid_) return;
Message* current_node = nodes_.top();
if (*token == "{") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
nodes_.push(current_node ? current_node->AddChild(previous_token_)
: nullptr);
previous_token_.clear();
} else if (*token == "}") {
if (nodes_.size() == 1 || !previous_token_.empty()) {
valid_ = false;
return;
}
if (current_node) {
current_node->Finish();
}
nodes_.pop();
} else if (*token == ":") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
} else {
if (previous_token_.empty()) {
previous_token_.swap(*token);
} else {
if (current_node) {
current_node->SetField(previous_token_, *token);
}
previous_token_.clear();
}
}
}
bool valid() const { return valid_; }
private:
std::stack<Message*> nodes_;
std::string previous_token_;
bool valid_;
};
bool Message::Read(std::istream* input, Message* message) {
MessageStack stack(message);
Tokenize(input, &stack);
return stack.valid();
}
}
} | #include "tensorflow/lite/testing/message.h"
#include <map>
#include <string>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
class TestMessage : public Message {
public:
TestMessage() {}
explicit TestMessage(const std::string& text_to_parse) {
std::stringstream ss(text_to_parse);
finished_ = Message::Read(&ss, this);
}
void SetField(const std::string& name, const std::string& value) override {
fields_[name] = value;
}
Message* AddChild(const std::string& name) override {
TestMessage* m = new TestMessage;
m->name_ = name;
return Store(m);
}
void Finish() override { finished_ = true; }
int NumChildren() const { return Children().size(); }
const TestMessage* GetChild(int i) const {
return dynamic_cast<TestMessage*>(Children()[i].get());
}
int NumFields() const { return fields_.size(); }
const std::string& GetField(const std::string& key) const {
return fields_.at(key);
}
const std::string& name() const { return name_; }
bool finished() const { return finished_; }
protected:
std::string name_;
std::map<std::string, std::string> fields_;
bool finished_ = false;
};
TEST(MessageTest, Simple) {
TestMessage message("x{a:1 b:2} y{} z{c:3} d:4");
ASSERT_TRUE(message.finished());
ASSERT_EQ(message.NumFields(), 1);
EXPECT_EQ(message.GetField("d"), "4");
ASSERT_EQ(message.NumChildren(), 3);
auto* x = message.GetChild(0);
EXPECT_EQ(x->name(), "x");
ASSERT_EQ(x->NumFields(), 2);
EXPECT_EQ(x->GetField("a"), "1");
EXPECT_EQ(x->GetField("b"), "2");
auto* y = message.GetChild(1);
EXPECT_EQ(y->name(), "y");
ASSERT_EQ(y->NumFields(), 0);
auto* z = message.GetChild(2);
EXPECT_EQ(z->name(), "z");
ASSERT_EQ(z->NumFields(), 1);
EXPECT_EQ(z->GetField("c"), "3");
}
TEST(MessageTest, Unnamed) {
TestMessage message("x{c:3} {} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, TooManyBraces) {
TestMessage message("x{c:3} } y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, LeftoverToken) {
TestMessage message("x{c:3} z{test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingKey) {
TestMessage message("x{c:3} z{:test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingValue) {
TestMessage message("x{c:3} z{test:} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/message.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/message_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc4bf973-daf3-4afb-ac93-1887b4320c70 | cpp | tensorflow/tensorflow | tf_driver | tensorflow/lite/testing/tf_driver.cc | tensorflow/lite/testing/tf_driver_test.cc | #include "tensorflow/lite/testing/tf_driver.h"
#include <fstream>
#include <iostream>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
tensorflow::Tensor CreateTensor(const tensorflow::DataType type,
const std::vector<int64_t>& dim) {
tensorflow::TensorShape shape{absl::Span<const int64_t>{
reinterpret_cast<const int64_t*>(dim.data()), dim.size()}};
return {type, shape};
}
template <typename T>
int FillTensorWithData(tensorflow::Tensor* tensor,
const string& values_as_string) {
const auto& values = testing::Split<T>(values_as_string, ",");
if (values.size() == tensor->NumElements()) {
auto data = tensor->flat<T>();
for (int i = 0; i < values.size(); i++) {
data(i) = values[i];
}
}
return values.size();
}
int FillTensorWithTfLiteHexString(tensorflow::Tensor* tensor,
const string& values_as_string) {
string s = absl::HexStringToBytes(values_as_string);
int num_strings = values_as_string.empty() ? 0 : GetStringCount(s.data());
if (num_strings == tensor->NumElements()) {
auto data = tensor->flat<tensorflow::tstring>();
for (size_t i = 0; i < num_strings; ++i) {
auto ref = GetString(s.data(), i);
data(i).assign(ref.str, ref.len);
}
}
return num_strings;
}
template <typename T>
void FillTensorWithZeros(tensorflow::Tensor* tensor) {
auto data = tensor->flat<T>();
for (int i = 0; i < tensor->NumElements(); i++) {
data(i) = 0;
}
}
template <typename T>
string TensorDataToCsvString(const tensorflow::Tensor& tensor) {
const auto& data = tensor.flat<T>();
return Join(data.data(), data.size(), ",");
}
string TensorDataToTfLiteHexString(const tensorflow::Tensor& tensor) {
DynamicBuffer dynamic_buffer;
auto data = tensor.flat<tensorflow::tstring>();
for (int i = 0; i < tensor.NumElements(); ++i) {
dynamic_buffer.AddString(data(i).data(), data(i).size());
}
char* char_buffer = nullptr;
size_t size = dynamic_buffer.WriteToBuffer(&char_buffer);
string s = absl::BytesToHexString({char_buffer, size});
free(char_buffer);
return s;
}
}
TfDriver::TfDriver(const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer)
: input_names_(input_layer), output_names_(output_layer) {
CHECK_EQ(input_layer.size(), input_layer_type.size());
CHECK_EQ(input_layer.size(), input_layer_shape.size());
input_ids_.resize(input_layer.size());
input_tensors_.reserve(input_layer.size());
input_types_.resize(input_layer.size());
input_shapes_.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
input_ids_[i] = i;
input_tensors_[input_layer[i]] = {};
CHECK(DataTypeFromString(input_layer_type[i], &input_types_[i]));
input_shapes_[i] = Split<int64_t>(input_layer_shape[i], ",");
input_name_to_id_[input_layer[i]] = i;
}
output_ids_.resize(output_layer.size());
output_tensors_.reserve(output_layer.size());
for (int i = 0; i < output_layer.size(); i++) {
output_ids_[i] = i;
output_name_to_id_[output_layer[i]] = i;
}
}
void TfDriver::LoadModel(const string& bin_file_path) {
if (!IsValid()) return;
std::ifstream model(bin_file_path);
if (model.fail()) {
Invalidate("Failed to find the model " + bin_file_path);
return;
}
tensorflow::GraphDef graphdef;
if (!graphdef.ParseFromIstream(&model)) {
Invalidate("Failed to parse tensorflow graphdef");
return;
}
tensorflow::SessionOptions options;
session_.reset(tensorflow::NewSession(options));
auto status = session_->Create(graphdef);
if (!status.ok()) {
Invalidate(absl::StrCat("Failed to create session. ", status.message()));
}
}
void TfDriver::ReshapeTensor(const string& name, const string& csv_values) {
if (!IsValid()) return;
int id = input_name_to_id_[name];
input_shapes_[id] = Split<int64_t>(csv_values, ",");
input_tensors_[input_names_[id]] =
CreateTensor(input_types_[id], input_shapes_[id]);
ResetTensor(name);
}
void TfDriver::ResetTensor(const std::string& name) {
if (!IsValid()) return;
int id = input_name_to_id_[name];
auto tensor = input_tensors_[input_names_[id]];
switch (input_types_[id]) {
case tensorflow::DT_FLOAT: {
FillTensorWithZeros<float>(&tensor);
break;
}
case tensorflow::DT_INT32: {
FillTensorWithZeros<int32_t>(&tensor);
break;
}
default:
Invalidate(absl::StrCat("Unsupported tensor type ", input_types_[id],
tensorflow::DataType_Name(input_types_[id]),
" in ResetInput"));
return;
}
}
string TfDriver::ReadOutput(const string& name) {
if (!IsValid()) return "";
return ReadOutput(output_tensors_[output_name_to_id_[name]]);
}
void TfDriver::Invoke(const std::vector<std::pair<string, string>>& inputs) {
if (!IsValid()) return;
for (const auto& input : inputs) {
auto id = input_name_to_id_[input.first];
auto tensor = CreateTensor(input_types_[id], input_shapes_[id]);
SetInput(input.second, &tensor);
input_tensors_[input_names_[id]] = tensor;
}
auto status = session_->Run({input_tensors_.begin(), input_tensors_.end()},
output_names_, {}, &output_tensors_);
if (!status.ok()) {
Invalidate(
absl::StrCat("TensorFlow failed to run graph:", status.message()));
}
}
void TfDriver::SetInput(const string& values_as_string,
tensorflow::Tensor* tensor) {
int num_values_available = 0;
switch (tensor->dtype()) {
case tensorflow::DT_FLOAT:
num_values_available =
FillTensorWithData<float>(tensor, values_as_string);
break;
case tensorflow::DT_INT32:
num_values_available =
FillTensorWithData<int32_t>(tensor, values_as_string);
break;
case tensorflow::DT_UINT32:
num_values_available =
FillTensorWithData<uint32_t>(tensor, values_as_string);
break;
case tensorflow::DT_UINT8:
num_values_available =
FillTensorWithData<uint8_t>(tensor, values_as_string);
break;
case tensorflow::DT_STRING:
num_values_available =
FillTensorWithTfLiteHexString(tensor, values_as_string);
break;
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
tensorflow::DataType_Name(tensor->dtype()),
" in SetInput"));
return;
}
if (tensor->NumElements() != num_values_available) {
Invalidate(absl::StrCat("Needed ", tensor->NumElements(),
" values for input tensor, but was given ",
num_values_available, " instead."));
}
}
string TfDriver::ReadOutput(const tensorflow::Tensor& tensor) {
switch (tensor.dtype()) {
case tensorflow::DT_FLOAT:
return TensorDataToCsvString<float>(tensor);
case tensorflow::DT_INT32:
return TensorDataToCsvString<int32_t>(tensor);
case tensorflow::DT_UINT32:
return TensorDataToCsvString<uint32_t>(tensor);
case tensorflow::DT_INT64:
return TensorDataToCsvString<int64_t>(tensor);
case tensorflow::DT_UINT8:
return TensorDataToCsvString<uint8_t>(tensor);
case tensorflow::DT_STRING:
return TensorDataToTfLiteHexString(tensor);
case tensorflow::DT_BOOL:
return TensorDataToCsvString<bool>(tensor);
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
tensorflow::DataType_Name(tensor.dtype()),
" in ReadOutput"));
return "";
}
}
}
} | #include "tensorflow/lite/testing/tf_driver.h"
#include <algorithm>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace testing {
namespace {
class TestDriver : public TfDriver {
public:
TestDriver() : TfDriver({}, {}, {}, {}) {}
string WriteAndReadBack(tensorflow::DataType type,
const std::vector<int64_t>& shape,
const string& values) {
tensorflow::Tensor t = {
type,
tensorflow::TensorShape{absl::Span<const int64_t>{
reinterpret_cast<const int64_t*>(shape.data()), shape.size()}}};
SetInput(values, &t);
return ReadOutput(t);
}
};
TEST(TfDriverTest, ReadingAndWritingValues) {
TestDriver driver;
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_FLOAT, {1, 2, 2},
"0.10,0.20,0.30,0.40"),
"0.100000001,0.200000003,0.300000012,0.400000006");
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_INT32, {1, 2, 2},
"10,40,100,-100"),
"10,40,100,-100");
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_UINT8, {1, 2, 2},
"48,49,121, 122"),
"0,1,y,z");
}
TEST(TfDriverTest, ReadingAndWritingValuesStrings) {
TestDriver driver;
auto set_buffer = [](const std::vector<string>& values, string* buffer) {
DynamicBuffer dynamic_buffer;
for (const string& s : values) {
dynamic_buffer.AddString(s.data(), s.size());
}
char* char_b = nullptr;
int size = dynamic_buffer.WriteToBuffer(&char_b);
*buffer = absl::BytesToHexString(absl::string_view(char_b, size));
free(char_b);
};
string buffer;
set_buffer({"", "", "", ""}, &buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer),
buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, ""),
buffer);
set_buffer({"AB", "ABC", "X", "YZ"}, &buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer),
buffer);
}
TEST(TfDriverTest, SimpleTest) {
std::unique_ptr<TfDriver> runner(
new TfDriver({"a", "b", "c", "d"}, {"float", "float", "float", "float"},
{"1,8,8,3", "1,8,8,3", "1,8,8,3", "1,8,8,3"}, {"x", "y"}));
runner->LoadModel("tensorflow/lite/testdata/multi_add.pb");
EXPECT_TRUE(runner->IsValid()) << runner->GetErrorMessage();
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_EQ(runner->ReadOutput("x"),
"0.101000004,0.202000007,0.303000003,0.404000014");
ASSERT_EQ(runner->ReadOutput("y"),
"0.0109999999,0.0219999999,0.0329999998,0.0439999998");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tf_driver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tf_driver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4b147009-6a3e-4072-8cc5-d9ee8044f788 | cpp | tensorflow/tensorflow | diff_analyzer | tensorflow/lite/testing/kernel_test/diff_analyzer.cc | tensorflow/lite/testing/kernel_test/diff_analyzer_test.cc | #include "tensorflow/lite/testing/kernel_test/diff_analyzer.h"
#include <algorithm>
#include <cmath>
#include <fstream>
#include <string>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
float CalculateNormalizedMaxDiff(const std::vector<float>& base,
const std::vector<float>& test) {
float diff = 0;
float base_max = 1e-6;
for (int i = 0; i < base.size(); i++) {
diff = std::max(diff, std::abs(base[i] - test[i]));
base_max = std::max(base_max, base[i]);
}
return diff / base_max;
}
float CalculateNormalizedL2Norm(const std::vector<float>& base,
const std::vector<float>& test) {
float l2_error = 0;
float base_max = 1e-6;
for (int i = 0; i < base.size(); i++) {
float diff = base[i] - test[i];
l2_error += diff * diff;
base_max = std::max(base_max, base[i]);
}
l2_error /= base.size();
return std::sqrt(l2_error) / base_max;
}
TfLiteStatus Populate(const string& filename,
std::unordered_map<string, std::vector<float>>* tensors) {
if (filename.empty()) {
fprintf(stderr, "Empty input file name.");
return kTfLiteError;
}
std::ifstream file(filename);
string content;
while (std::getline(file, content, '\n')) {
auto parts = Split<string>(content, ":");
if (parts.size() != 2) {
fprintf(stderr, "Expected <name>:<value>, got %s", content.c_str());
return kTfLiteError;
}
tensors->insert(std::make_pair(parts[0], Split<float>(parts[1], ",")));
}
file.close();
return kTfLiteOk;
}
}
TfLiteStatus DiffAnalyzer::ReadFiles(const string& base, const string& test) {
TF_LITE_ENSURE_STATUS(Populate(base, &base_tensors_));
TF_LITE_ENSURE_STATUS(Populate(test, &test_tensors_));
if (base_tensors_.size() != test_tensors_.size()) {
fprintf(stderr, "Golden and test tensor dimensions don't match.");
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus DiffAnalyzer::WriteReport(const string& filename) {
if (filename.empty()) {
fprintf(stderr, "Empty output file name.");
return kTfLiteError;
}
std::ofstream output_file;
output_file.open(filename, std::fstream::out | std::fstream::trunc);
if (!output_file) {
fprintf(stderr, "Failed to open output file %s.", filename.c_str());
return kTfLiteError;
}
output_file << "Normalized L2 Error"
<< ","
<< "Normalized Max Diff"
<< "\n";
for (const auto& item : base_tensors_) {
const auto& name = item.first;
if (!test_tensors_.count(name)) {
fprintf(stderr, "Missing tensor %s in test tensors.", name.c_str());
continue;
}
float l2_error =
CalculateNormalizedL2Norm(base_tensors_[name], test_tensors_[name]);
float max_diff =
CalculateNormalizedMaxDiff(base_tensors_[name], test_tensors_[name]);
output_file << name << ":" << l2_error << "," << max_diff << "\n";
}
output_file.close();
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/testing/kernel_test/diff_analyzer.h"
#include <fstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/io/path.h"
namespace tflite {
namespace testing {
namespace {
TEST(DiffAnalyzerTest, ZeroDiff) {
DiffAnalyzer diff_analyzer;
string filename =
"tensorflow/lite/testing/kernel_test/testdata/test_input.csv";
ASSERT_EQ(diff_analyzer.ReadFiles(filename, filename), kTfLiteOk);
string output_file =
tensorflow::io::JoinPath(::testing::TempDir(), "diff_report.csv");
ASSERT_EQ(diff_analyzer.WriteReport(output_file), kTfLiteOk);
std::string content;
std::ifstream file(output_file);
std::getline(file, content);
std::getline(file, content);
ASSERT_EQ(content, "a:0,0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/kernel_test/diff_analyzer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/kernel_test/diff_analyzer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
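A small sketch of driving the diff analyzer above; the file paths are hypothetical, while the `<name>:<comma-separated floats>` input format and the `<name>:<L2 error>,<max diff>` report lines follow from `Populate` and `WriteReport` in the row.

```cpp
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/testing/kernel_test/diff_analyzer.h"

int main() {
  // Hypothetical paths; every line in both files must look like "x:0.1,0.2,0.3".
  tflite::testing::DiffAnalyzer analyzer;
  if (analyzer.ReadFiles("/tmp/golden.csv", "/tmp/test.csv") != kTfLiteOk) {
    return 1;
  }
  // The report holds a header row plus one "<name>:<l2 error>,<max diff>" line
  // per tensor present in both files.
  return analyzer.WriteReport("/tmp/diff_report.csv") == kTfLiteOk ? 0 : 1;
}
```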
2c06befe-7026-4dfe-9872-70e10918a4b2 | cpp | tensorflow/tensorflow | input_generator | tensorflow/lite/testing/kernel_test/input_generator.cc | tensorflow/lite/testing/kernel_test/input_generator_test.cc | #include "tensorflow/lite/testing/kernel_test/input_generator.h"
#include <cstdio>
#include <fstream>
#include <limits>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
static constexpr char kDefaultServingSignatureDefKey[] = "serving_default";
template <typename T>
std::vector<T> GenerateRandomTensor(TfLiteIntArray* dims,
const std::function<T(int)>& random_func) {
int64_t num_elements = 1;
for (int i = 0; i < dims->size; i++) {
num_elements *= dims->data[i];
}
std::vector<T> result(num_elements);
for (int i = 0; i < num_elements; i++) {
result[i] = random_func(i);
}
return result;
}
template <typename T>
std::vector<T> GenerateUniform(TfLiteIntArray* dims, float min, float max) {
auto random_float = [](float min, float max) {
return min + (max - min) * static_cast<float>(rand()) / RAND_MAX;
};
std::function<T(int)> random_t = [&](int) {
return static_cast<T>(random_float(min, max));
};
std::vector<T> data = GenerateRandomTensor(dims, random_t);
return data;
}
template <typename T>
std::vector<T> GenerateGaussian(TfLiteIntArray* dims, float min, float max) {
auto random_float = [](float min, float max) {
static std::default_random_engine generator;
static std::normal_distribution<double> distribution(0.5, 1.0 / 3);
auto rand_n = distribution(generator);
while (rand_n < 0 || rand_n >= 1) {
rand_n = distribution(generator);
}
return min + (max - min) * static_cast<float>(rand_n);
};
std::function<T(int)> random_t = [&](int) {
return static_cast<T>(random_float(min, max));
};
std::vector<T> data = GenerateRandomTensor(dims, random_t);
return data;
}
}
TfLiteStatus InputGenerator::LoadModel(const string& model_dir) {
return LoadModel(model_dir, kDefaultServingSignatureDefKey);
}
TfLiteStatus InputGenerator::LoadModel(const string& model_dir,
const string& signature) {
model_ = FlatBufferModel::BuildFromFile(model_dir.c_str());
if (!model_) {
fprintf(stderr, "Cannot load model %s", model_dir.c_str());
return kTfLiteError;
}
::tflite::ops::builtin::BuiltinOpResolver builtin_ops;
InterpreterBuilder(*model_, builtin_ops)(&interpreter_);
if (!interpreter_) {
fprintf(stderr, "Failed to build interpreter.");
return kTfLiteError;
}
signature_runner_ = interpreter_->GetSignatureRunner(signature.c_str());
if (!signature_runner_) {
fprintf(stderr, "Failed to get SignatureRunner.\n");
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus InputGenerator::ReadInputsFromFile(const string& filename) {
if (filename.empty()) {
fprintf(stderr, "Empty input file name.");
return kTfLiteError;
}
std::ifstream input_file(filename);
string input;
while (std::getline(input_file, input, '\n')) {
std::vector<string> parts = Split<string>(input, ":");
if (parts.size() != 2) {
fprintf(stderr, "Expected <name>:<value>, got %s", input.c_str());
return kTfLiteError;
}
inputs_.push_back(std::make_pair(parts[0], parts[1]));
}
input_file.close();
return kTfLiteOk;
}
TfLiteStatus InputGenerator::WriteInputsToFile(const string& filename) {
if (filename.empty()) {
fprintf(stderr, "Empty input file name.");
return kTfLiteError;
}
std::ofstream output_file;
output_file.open(filename, std::fstream::out | std::fstream::trunc);
if (!output_file) {
fprintf(stderr, "Failed to open output file %s.", filename.c_str());
return kTfLiteError;
}
for (const auto& input : inputs_) {
output_file << input.first << ":" << input.second << "\n";
}
output_file.close();
return kTfLiteOk;
}
TfLiteStatus InputGenerator::GenerateInput(const string& distribution) {
auto input_tensor_names = signature_runner_->input_names();
for (const char* name : input_tensor_names) {
auto* tensor = signature_runner_->input_tensor(name);
if (distribution == "UNIFORM") {
switch (tensor->type) {
case kTfLiteInt8: {
auto data = GenerateUniform<int8_t>(
tensor->dims, std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteUInt8: {
auto data = GenerateUniform<uint8_t>(
tensor->dims, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteFloat32: {
auto data = GenerateUniform<float>(tensor->dims, -1, 1);
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
default:
fprintf(stderr, "Unsupported input tensor type %s.",
TfLiteTypeGetName(tensor->type));
break;
}
} else if (distribution == "GAUSSIAN") {
switch (tensor->type) {
case kTfLiteInt8: {
auto data = GenerateGaussian<int8_t>(
tensor->dims, std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteUInt8: {
auto data = GenerateGaussian<uint8_t>(
tensor->dims, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteFloat32: {
auto data = GenerateGaussian<float>(tensor->dims, -1, 1);
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
default:
fprintf(stderr, "Unsupported input tensor type %s.",
TfLiteTypeGetName(tensor->type));
break;
}
} else {
fprintf(stderr, "Unsupported distribution %s.", distribution.c_str());
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/testing/kernel_test/input_generator.h"
#include <fstream>
#include <map>
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(InputGeneratorTest, LoadModel) {
InputGenerator input_generator;
ASSERT_EQ(input_generator.LoadModel(
"tensorflow/lite/testdata/multi_add.bin"),
kTfLiteOk);
}
TEST(InputGeneratorTest, ReadWriteSimpleFile) {
InputGenerator input_generator;
ASSERT_EQ(
input_generator.ReadInputsFromFile("tensorflow/lite/testing/"
"kernel_test/testdata/test_input.csv"),
kTfLiteOk);
std::string content = "1";
for (int i = 0; i < 1 * 8 * 8 * 3 - 1; i++) {
content.append(",1");
}
std::vector<std::pair<string, string>> inputs = {{"a", content}};
ASSERT_EQ(input_generator.GetInputs(), inputs);
auto output_filename = ::testing::TempDir() + "/out.csv";
ASSERT_EQ(input_generator.WriteInputsToFile(output_filename), kTfLiteOk);
std::ifstream in(output_filename);
std::string out;
std::getline(in, out, '\n');
std::string expected_out = "a:";
expected_out.append(content);
ASSERT_EQ(out, expected_out);
}
TEST(InputGeneratorTest, GenerateUniformInput) {
InputGenerator input_generator;
ASSERT_EQ(input_generator.LoadModel(
"tensorflow/lite/testdata/multi_add.bin"),
kTfLiteOk);
input_generator.GenerateInput("UNIFORM");
auto inputs = input_generator.GetInputs();
ASSERT_EQ(inputs.size(), 4);
}
TEST(InputGeneratorTest, GenerateGaussianInput) {
InputGenerator input_generator;
ASSERT_EQ(input_generator.LoadModel(
"tensorflow/lite/testdata/multi_add.bin"),
kTfLiteOk);
input_generator.GenerateInput("GAUSSIAN");
auto inputs = input_generator.GetInputs();
ASSERT_EQ(inputs.size(), 4);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/kernel_test/input_generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/kernel_test/input_generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
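A sketch of generating and persisting random inputs with the class above; the model and output paths are hypothetical, and the `<name>:<csv values>` line format matches `WriteInputsToFile` in the row.

```cpp
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/testing/kernel_test/input_generator.h"

int main() {
  // Hypothetical paths; "UNIFORM" and "GAUSSIAN" are the supported distributions.
  tflite::testing::InputGenerator generator;
  if (generator.LoadModel("/tmp/multi_add.bin") != kTfLiteOk) return 1;
  if (generator.GenerateInput("UNIFORM") != kTfLiteOk) return 1;
  // Each written line has the form "<input name>:<comma-separated values>".
  return generator.WriteInputsToFile("/tmp/inputs.csv") == kTfLiteOk ? 0 : 1;
}
```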
db9bf754-48fc-47e9-953c-1b2d185fbf79 | cpp | tensorflow/tensorflow | generator | tensorflow/lite/schema/builtin_ops_header/generator.cc | tensorflow/lite/schema/builtin_ops_header/generator_test.cc | #include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <string>
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace builtin_ops_header {
namespace {
const char* kFileHeader =
R"(
#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_
#define TENSORFLOW_LITE_BUILTIN_OPS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
)";
const char* kFileFooter =
R"(} TfLiteBuiltinOperator;
#ifdef __cplusplus
}
#endif
#endif
)";
}
bool IsValidInputEnumName(const std::string& name) {
const char* begin = name.c_str();
const char* ch = begin;
while (*ch != '\0') {
if (ch != begin) {
if (*ch != '_') {
return false;
}
++ch;
}
bool empty = true;
while (isupper(*ch) || isdigit(*ch)) {
empty = false;
++ch;
}
if (empty) {
return false;
}
}
return true;
}
std::string ConstantizeVariableName(const std::string& name) {
std::string result = "kTfLiteBuiltin";
bool uppercase = true;
for (char input_char : name) {
if (input_char == '_') {
uppercase = true;
} else if (uppercase) {
result += toupper(input_char);
uppercase = false;
} else {
result += tolower(input_char);
}
}
return result;
}
bool GenerateHeader(std::ostream& os) {
auto enum_names = tflite::EnumNamesBuiltinOperator();
for (auto enum_value : EnumValuesBuiltinOperator()) {
auto enum_name = enum_names[enum_value];
if (!IsValidInputEnumName(enum_name)) {
std::cerr << "Invalid input enum name: " << enum_name << std::endl;
return false;
}
}
os << kFileHeader;
for (auto enum_value : EnumValuesBuiltinOperator()) {
auto enum_name = enum_names[enum_value];
os << " ";
os << ConstantizeVariableName(enum_name);
os << " = ";
os << enum_value;
os << ",\n";
}
os << kFileFooter;
return true;
}
}
} | #include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <fstream>
#include <gtest/gtest.h>
namespace {
using tflite::builtin_ops_header::ConstantizeVariableName;
using tflite::builtin_ops_header::IsValidInputEnumName;
TEST(TestIsValidInputEnumName, TestWithValidInputNames) {
EXPECT_TRUE(IsValidInputEnumName("ADD"));
EXPECT_TRUE(IsValidInputEnumName("CONV_2D"));
EXPECT_TRUE(IsValidInputEnumName("L2_POOL_2D"));
}
TEST(TestIsValidInputEnumName, TestWithLeadingUnderscore) {
EXPECT_FALSE(IsValidInputEnumName("_ADD"));
EXPECT_FALSE(IsValidInputEnumName("_CONV_2D"));
}
TEST(TestIsValidInputEnumName, TestWithLowerCase) {
EXPECT_FALSE(IsValidInputEnumName("_AdD"));
EXPECT_FALSE(IsValidInputEnumName("_COnV_2D"));
}
TEST(TestIsValidInputEnumName, TestWithOtherCharacters) {
EXPECT_FALSE(IsValidInputEnumName("_AdD!2D"));
EXPECT_FALSE(IsValidInputEnumName("_COnV?2D"));
}
TEST(TestIsValidInputEnumName, TestWithDoubleUnderscores) {
EXPECT_FALSE(IsValidInputEnumName("ADD__2D"));
EXPECT_FALSE(IsValidInputEnumName("CONV__2D"));
}
TEST(TestConstantizeVariableName, TestWithValidInputNames) {
EXPECT_EQ(ConstantizeVariableName("ADD"), "kTfLiteBuiltinAdd");
EXPECT_EQ(ConstantizeVariableName("CONV_2D"), "kTfLiteBuiltinConv2d");
EXPECT_EQ(ConstantizeVariableName("L2_POOL_2D"), "kTfLiteBuiltinL2Pool2d");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/schema/builtin_ops_header/generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/schema/builtin_ops_header/generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
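A minimal sketch of emitting the builtin-op header with the generator above; the output path is hypothetical, while `GenerateHeader(std::ostream&)` and the `kTfLiteBuiltin...` naming come from the row (the include guard in `kFileHeader` suggests the target is tensorflow/lite/builtin_ops.h).

```cpp
#include <fstream>

#include "tensorflow/lite/schema/builtin_ops_header/generator.h"

int main() {
  // Hypothetical output location.
  std::ofstream os("/tmp/builtin_ops.h");
  // Emits the TfLiteBuiltinOperator enum, with entries such as
  // "kTfLiteBuiltinAdd = 0," derived from the schema's BuiltinOperator values.
  return tflite::builtin_ops_header::GenerateHeader(os) ? 0 : 1;
}
```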
49a0b7e1-3c93-45c7-a6f6-1ca098732f77 | cpp | tensorflow/tensorflow | nnapi_handler | tensorflow/lite/nnapi/nnapi_handler.cc | tensorflow/lite/nnapi/nnapi_handler_test.cc | #include "tensorflow/lite/nnapi/nnapi_handler.h"
#include <cstdio>
#include <cstring>
#include <string>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
const char NnApiHandler::kNnapiReferenceDeviceName[] = "nnapi-reference";
const int NnApiHandler::kNnapiReferenceDevice = 1;
const int NnApiHandler::kNnapiDevice = 2;
char* NnApiHandler::nnapi_device_name_ = nullptr;
int NnApiHandler::nnapi_device_feature_level_;
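// Keeps an unmodified copy of the real NnApi struct, captured on first use,
// so tests can forward calls to the original implementation or restore it via
// NnApiHandler::Reset().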
const NnApi* NnApiPassthroughInstance() {
static const NnApi orig_nnapi_copy = *NnApiImplementation();
return &orig_nnapi_copy;
}
NnApiHandler* NnApiHandler::Instance() {
NnApiPassthroughInstance();
static NnApiHandler handler{const_cast<NnApi*>(NnApiImplementation())};
return &handler;
}
void NnApiHandler::Reset() {
*nnapi_ = *NnApiPassthroughInstance();
}
void NnApiHandler::SetAndroidSdkVersion(int version,
bool set_unsupported_ops_to_null) {
nnapi_->android_sdk_version = version;
nnapi_->nnapi_runtime_feature_level = version;
if (!set_unsupported_ops_to_null) {
return;
}
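  // Null out the entry points introduced in NNAPI 1.2 (API 29) and 1.1
  // (API 28) so callers observe them as unavailable at the simulated SDK
  // level.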
if (version < 29) {
nnapi_->ANeuralNetworks_getDeviceCount = nullptr;
nnapi_->ANeuralNetworks_getDevice = nullptr;
nnapi_->ANeuralNetworksDevice_getName = nullptr;
nnapi_->ANeuralNetworksDevice_getVersion = nullptr;
nnapi_->ANeuralNetworksDevice_getFeatureLevel = nullptr;
nnapi_->ANeuralNetworksDevice_getType = nullptr;
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = nullptr;
nnapi_->ANeuralNetworksCompilation_createForDevices = nullptr;
nnapi_->ANeuralNetworksCompilation_setCaching = nullptr;
nnapi_->ANeuralNetworksExecution_compute = nullptr;
nnapi_->ANeuralNetworksExecution_getOutputOperandRank = nullptr;
nnapi_->ANeuralNetworksExecution_getOutputOperandDimensions = nullptr;
nnapi_->ANeuralNetworksBurst_create = nullptr;
nnapi_->ANeuralNetworksBurst_free = nullptr;
nnapi_->ANeuralNetworksExecution_burstCompute = nullptr;
nnapi_->ANeuralNetworksMemory_createFromAHardwareBuffer = nullptr;
nnapi_->ANeuralNetworksExecution_setMeasureTiming = nullptr;
nnapi_->ANeuralNetworksExecution_getDuration = nullptr;
nnapi_->ANeuralNetworksDevice_getExtensionSupport = nullptr;
nnapi_->ANeuralNetworksModel_getExtensionOperandType = nullptr;
nnapi_->ANeuralNetworksModel_getExtensionOperationType = nullptr;
nnapi_->ANeuralNetworksModel_setOperandExtensionData = nullptr;
}
if (version < 28) {
nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 = nullptr;
}
}
void NnApiHandler::SetDeviceName(const std::string& name) {
delete[] nnapi_device_name_;
nnapi_device_name_ = new char[name.size() + 1];
std::strcpy(nnapi_device_name_, name.c_str());
}
void NnApiHandler::GetDeviceNameReturnsName(const std::string& name) {
NnApiHandler::SetDeviceName(name);
GetDeviceNameReturns<0>();
}
void NnApiHandler::SetNnapiSupportedDevice(const std::string& name,
int feature_level) {
NnApiHandler::SetDeviceName(name);
nnapi_device_feature_level_ = feature_level;
GetDeviceCountReturnsCount<2>();
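  // Stub device enumeration: index 0 resolves to the nnapi-reference device
  // and index 1 to the fake accelerator configured above.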
nnapi_->ANeuralNetworks_getDevice =
[](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
if (devIndex > 1) {
return ANEURALNETWORKS_BAD_DATA;
}
if (devIndex == 1) {
*device =
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice);
} else {
*device = reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice);
}
return ANEURALNETWORKS_NO_ERROR;
};
nnapi_->ANeuralNetworksDevice_getName =
[](const ANeuralNetworksDevice* device, const char** name) -> int {
if (device ==
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice)) {
*name = NnApiHandler::nnapi_device_name_;
return ANEURALNETWORKS_NO_ERROR;
}
if (device == reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice)) {
*name = NnApiHandler::kNnapiReferenceDeviceName;
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_DATA;
};
nnapi_->ANeuralNetworksDevice_getFeatureLevel =
[](const ANeuralNetworksDevice* device, int64_t* featureLevel) -> int {
if (device ==
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice)) {
*featureLevel = NnApiHandler::nnapi_device_feature_level_;
return ANEURALNETWORKS_NO_ERROR;
}
if (device == reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice)) {
*featureLevel = 1000;
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_DATA;
};
}
}
} | #include "tensorflow/lite/nnapi/nnapi_handler.h"
#include <cstdint>
#include <cstdio>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
using testing::Eq;
using testing::Ne;
using testing::NotNull;
void ExpectEquals(const NnApi& left, const NnApi& right);
class NnApiHandlerTest : public ::testing::Test {
protected:
~NnApiHandlerTest() override { NnApiHandler::Instance()->Reset(); }
};
TEST_F(NnApiHandlerTest, ShouldAlterNnApiInstanceBehaviour) {
const NnApi* nnapi = NnApiImplementation();
const auto device_count_stub = [](uint32_t* device_count) -> int {
*device_count = 999;
return ANEURALNETWORKS_NO_ERROR;
};
NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_stub);
ASSERT_THAT(nnapi->ANeuralNetworks_getDeviceCount, NotNull());
uint32_t device_count = 0;
nnapi->ANeuralNetworks_getDeviceCount(&device_count);
EXPECT_THAT(device_count, Eq(999));
}
TEST_F(NnApiHandlerTest, ShouldRestoreNnApiToItsOriginalValueWithReset) {
NnApi nnapi_orig_copy = *NnApiImplementation();
auto device_count_override = [](uint32_t* device_count) -> int {
*device_count = 777;
return ANEURALNETWORKS_NO_ERROR;
};
NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_override);
EXPECT_THAT(nnapi_orig_copy.ANeuralNetworks_getDeviceCount,
Ne(NnApiImplementation()->ANeuralNetworks_getDeviceCount));
NnApiHandler::Instance()->Reset();
ExpectEquals(nnapi_orig_copy, *NnApiImplementation());
}
int (*device_count_ptr)(uint32_t*);
TEST_F(NnApiHandlerTest, ShouldSupportPassthroughCalls) {
const NnApi* nnapi = NnApiImplementation();
device_count_ptr = nnapi->ANeuralNetworks_getDeviceCount;
NnApiHandler::Instance()->StubGetDeviceCountWith(
[](uint32_t* device_count) -> int {
return NnApiPassthroughInstance()->ANeuralNetworks_getDeviceCount ==
device_count_ptr;
});
uint32_t device_count = 0;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount(&device_count), Eq(1));
}
TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI11) {
auto* handler = NnApiHandler::Instance();
handler->SetNnapiSupportedDevice("devvice", 1000);
handler->GetSupportedOperationsForDevicesReturns<1>();
handler->CompilationCreateForDevicesReturns<1>();
handler->ExecutionComputeReturns<1>();
handler->MemoryCreateFromFdReturns<1>();
handler->SetAndroidSdkVersion(28, true);
const NnApi* nnapi = NnApiImplementation();
using ::testing::IsNull;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
}
TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI10) {
auto* handler = NnApiHandler::Instance();
handler->SetNnapiSupportedDevice("devvice", 1000);
handler->GetSupportedOperationsForDevicesReturns<1>();
handler->CompilationCreateForDevicesReturns<1>();
handler->ExecutionComputeReturns<1>();
handler->MemoryCreateFromFdReturns<1>();
handler->SetAndroidSdkVersion(27, true);
const NnApi* nnapi = NnApiImplementation();
using ::testing::IsNull;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
IsNull());
}
void ExpectEquals(const NnApi& left, const NnApi& right) {
#define EXPECT_NNAPI_MEMBER_EQ(name) EXPECT_EQ(left.name, right.name)
EXPECT_NNAPI_MEMBER_EQ(nnapi_exists);
EXPECT_NNAPI_MEMBER_EQ(android_sdk_version);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromFd);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_finish);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperand);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValue);
EXPECT_NNAPI_MEMBER_EQ(
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValueFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperation);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_identifyInputsAndOutputs);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setPreference);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_finish);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInput);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInputFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutput);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutputFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_startCompute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_wait);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_free);
EXPECT_NNAPI_MEMBER_EQ(ASharedMemory_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDeviceCount);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDevice);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getName);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getVersion);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getFeatureLevel);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getType);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_createForDevices);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setCaching);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_compute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandRank);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandDimensions);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_burstCompute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setMeasureTiming);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getDuration);
#undef EXPECT_NNAPI_MEMBER_EQ
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_handler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_handler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f828fe0-405a-42a5-8483-81e379c54f44 | cpp | tensorflow/tensorflow | nnapi_implementation | tensorflow/lite/nnapi/nnapi_implementation.cc | tensorflow/lite/nnapi/nnapi_implementation_test.cc | #include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include <dlfcn.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <string>
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
#define EXPAND_VA_ARGS(...) , ##__VA_ARGS__
#define NNAPI_LOG(format, ...) \
fprintf(stderr, format "\n" EXPAND_VA_ARGS(__VA_ARGS__));
namespace {
#ifdef __ANDROID__
const int kFirstIsolatedUid = 99000;
const int kLastIsolatedUid = 99999;
const int kFirstAppZygoteIsolatedUid = 90000;
const int kLastAppZygoteIsolatedUid = 98999;
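// Isolated (sandboxed) app processes cannot access the NNAPI service on
// Android 13 and below, so LoadNnApi() below reports NNAPI as unavailable for
// them.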
bool IsIsolatedProcess() {
int uid = getuid();
return (uid >= kFirstIsolatedUid && uid <= kLastIsolatedUid) ||
(uid >= kFirstAppZygoteIsolatedUid &&
uid <= kLastAppZygoteIsolatedUid);
}
int32_t GetAndroidSdkVersion() {
const char* sdkProp = "ro.build.version.sdk";
char sdkVersion[PROP_VALUE_MAX];
int length = __system_property_get(sdkProp, sdkVersion);
if (length != 0) {
int32_t result = 0;
for (int i = 0; i < length; ++i) {
int digit = sdkVersion[i] - '0';
if (digit < 0 || digit > 9) {
return 0xffff;
}
result = result * 10 + digit;
}
return result;
}
return 0;
}
#endif
void* LoadFunction(void* handle, const char* name, bool optional) {
if (handle == nullptr) {
return nullptr;
}
void* fn = dlsym(handle, name);
if (fn == nullptr && !optional) {
NNAPI_LOG("nnapi error: unable to open function %s", name);
}
return fn;
}
#ifndef __ANDROID__
int ASharedMemory_create(const char* name, size_t size) {
int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0644);
if (fd < 0) {
return fd;
}
int result = ftruncate(fd, size);
if (result < 0) {
close(fd);
return -1;
}
return fd;
}
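// On non-Android hosts there is no system property to query, so an equivalent
// SDK level is inferred from which NNAPI symbols were resolved.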
uint32_t CalculateAndroidSdkVersion(NnApi const& nnapi) {
bool has_10 = nnapi.ANeuralNetworksMemory_createFromFd != nullptr;
bool has_11 =
nnapi.ANeuralNetworksModel_relaxComputationFloat32toFloat16 != nullptr;
bool has_12 = nnapi.ANeuralNetworks_getDeviceCount != nullptr;
bool has_13 = nnapi.ANeuralNetworksCompilation_setTimeout != nullptr;
bool has_14 = nnapi.ANeuralNetworks_getRuntimeFeatureLevel != nullptr;
uint32_t sdk_version = 0;
if (has_10) {
sdk_version = 27;
}
if (sdk_version == 27 && has_11) {
sdk_version = 28;
}
if (sdk_version == 28 && has_12) {
sdk_version = 29;
}
if (sdk_version == 29 && has_13) {
sdk_version = 30;
}
if (sdk_version == 30 && has_14) {
sdk_version = 31;
}
return sdk_version;
}
#else
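// Resolves ASharedMemory_create from libandroid.so, falling back to
// ashmem_create_region in libcutils.so when libandroid.so cannot be opened.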
ASharedMemory_create_fn getASharedMemory_create() {
void* libandroid = nullptr;
libandroid = dlopen("libandroid.so", RTLD_LAZY | RTLD_LOCAL);
if (libandroid != nullptr) {
return reinterpret_cast<ASharedMemory_create_fn>(
LoadFunction(libandroid, "ASharedMemory_create", false));
}
std::string libandroid_error = dlerror();
void* cutils_handle = dlopen("libcutils.so", RTLD_LAZY | RTLD_LOCAL);
if (cutils_handle != nullptr) {
return reinterpret_cast<ASharedMemory_create_fn>(
LoadFunction(cutils_handle, "ashmem_create_region", false));
}
NNAPI_LOG(
"nnapi error: unable to open both library %s (%s) and library %s "
"(%s)",
"libandroid.so", libandroid_error.c_str(), "libcutils.so", dlerror());
return nullptr;
}
#endif
#define LOAD_FUNCTION(handle, name) \
nnapi.name = reinterpret_cast<name##_fn>( \
LoadFunction(handle, #name, false));
#define LOAD_FUNCTION_OPTIONAL(handle, name) \
nnapi.name = reinterpret_cast<name##_fn>( \
LoadFunction(handle, #name, true));
#define LOAD_FUNCTION_RENAME(handle, name, symbol) \
nnapi.name = reinterpret_cast<name##_fn>( \
LoadFunction(handle, symbol, false));
const NnApi LoadNnApi() {
NnApi nnapi = {};
nnapi.android_sdk_version = 0;
#ifdef __ANDROID__
nnapi.android_sdk_version = GetAndroidSdkVersion();
if (nnapi.android_sdk_version < 27) {
NNAPI_LOG("nnapi error: requires android sdk version to be at least %d",
27);
nnapi.nnapi_exists = false;
return nnapi;
}
if (nnapi.android_sdk_version <= 33 && IsIsolatedProcess()) {
NNAPI_LOG("NNAPI is disabled in an isolated process");
nnapi.nnapi_exists = false;
return nnapi;
}
#endif
void* libneuralnetworks = nullptr;
static const char nnapi_library_name[] = "libneuralnetworks.so";
libneuralnetworks = dlopen(nnapi_library_name, RTLD_LAZY | RTLD_LOCAL);
#ifdef __ANDROID__
if (libneuralnetworks == nullptr) {
const char* error = dlerror();
if (error) {
NNAPI_LOG("%s\n", error);
}
NNAPI_LOG("nnapi error: unable to open library %s", nnapi_library_name);
}
#endif
nnapi.nnapi_exists = libneuralnetworks != nullptr;
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksMemory_createFromFd);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksMemory_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_create);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_finish);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_addOperand);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_setOperandValue);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
LOAD_FUNCTION(libneuralnetworks,
ANeuralNetworksModel_setOperandValueFromMemory);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_addOperation);
LOAD_FUNCTION(libneuralnetworks,
ANeuralNetworksModel_identifyInputsAndOutputs);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_create);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_setPreference);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_finish);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_create);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_setInput);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_setInputFromMemory);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_setOutput);
LOAD_FUNCTION(libneuralnetworks,
ANeuralNetworksExecution_setOutputFromMemory);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_startCompute);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksEvent_wait);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksEvent_free);
#ifdef __ANDROID__
nnapi.ASharedMemory_create = getASharedMemory_create();
#else
if (libneuralnetworks != nullptr) {
nnapi.ASharedMemory_create = ASharedMemory_create;
}
#endif
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_relaxComputationFloat32toFloat16);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworks_getDeviceCount);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworks_getDevice);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getName);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getVersion);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksDevice_getFeatureLevel);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getSupportedOperationsForDevices);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_createForDevices);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_setCaching);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksExecution_compute);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_getOutputOperandRank);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_getOutputOperandDimensions);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksBurst_create);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksBurst_free);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_burstCompute);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemory_createFromAHardwareBuffer);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setMeasureTiming);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_getDuration);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksDevice_getExtensionSupport);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getExtensionOperandType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getExtensionOperationType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_setOperandExtensionData);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_setTimeout);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_setPriority);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setTimeout);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setLoopTimeout);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemoryDesc_create);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemoryDesc_free);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemoryDesc_addInputRole);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemoryDesc_addOutputRole);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemoryDesc_setDimensions);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemoryDesc_finish);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemory_createFromDesc);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemory_copy);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksEvent_createFromSyncFenceFd);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksEvent_getSyncFenceFd);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_startComputeWithDependencies);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworks_getRuntimeFeatureLevel);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_enableInputAndOutputPadding);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setReusable);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_areDynamicTensorsUsed);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_areDynamicTensorsUsed);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
SL_ANeuralNetworksDiagnostic_registerCallbacks);
#ifndef __ANDROID__
if (nnapi.nnapi_exists && nnapi.android_sdk_version == 0) {
nnapi.android_sdk_version = CalculateAndroidSdkVersion(nnapi);
}
#endif
if (nnapi.ANeuralNetworks_getRuntimeFeatureLevel) {
nnapi.nnapi_runtime_feature_level =
nnapi.ANeuralNetworks_getRuntimeFeatureLevel();
} else {
nnapi.nnapi_runtime_feature_level = nnapi.android_sdk_version;
}
return nnapi;
}
}
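// Builds an NnApi struct backed by an NNAPI Support Library driver instead of
// the platform libneuralnetworks.so; ANeuralNetworksCompilation_create and
// ANeuralNetworksExecution_startCompute are deliberately left null.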
std::unique_ptr<const NnApi> CreateNnApiFromSupportLibrary(
const NnApiSLDriverImplFL5* nnapi_support_library_driver) {
auto nnapi = std::make_unique<NnApi>();
nnapi->nnapi_exists = true;
nnapi->android_sdk_version = ANEURALNETWORKS_FEATURE_LEVEL_5;
nnapi->nnapi_runtime_feature_level =
nnapi_support_library_driver->base.implFeatureLevel;
#define ASSIGN_SL_FUNCTION_TO_NNAPI(name) \
nnapi->name = nnapi_support_library_driver->name;
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_createFromFd);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_finish);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_addOperand);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_setOperandValue);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_setOperandValueFromMemory);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_addOperation);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_identifyInputsAndOutputs);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksModel_relaxComputationFloat32toFloat16);
nnapi->ANeuralNetworksCompilation_create = nullptr;
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setPreference);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_finish);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setInput);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setInputFromMemory);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setOutput);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setOutputFromMemory);
nnapi->ANeuralNetworksExecution_startCompute = nullptr;
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_wait);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_free);
#ifdef __ANDROID__
nnapi->ASharedMemory_create = getASharedMemory_create();
#else
nnapi->ASharedMemory_create = ASharedMemory_create;
#endif
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworks_getDeviceCount);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworks_getDevice);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getName);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getVersion);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getFeatureLevel);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getType);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksModel_getSupportedOperationsForDevices);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_createForDevices);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setCaching);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setTimeout);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setPriority);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_compute);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setTimeout);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setLoopTimeout);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_getOutputOperandRank);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksExecution_getOutputOperandDimensions);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksBurst_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksBurst_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_burstCompute);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_createFromAHardwareBuffer);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setMeasureTiming);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_getDuration);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getExtensionSupport);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_getExtensionOperandType);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_getExtensionOperationType);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_setOperandExtensionData);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_addInputRole);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_addOutputRole);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_setDimensions);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_finish);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_createFromDesc);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_copy);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_createFromSyncFenceFd);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_getSyncFenceFd);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksExecution_startComputeWithDependencies);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksExecution_enableInputAndOutputPadding);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setReusable);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworks_getRuntimeFeatureLevel);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_areDynamicTensorsUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_areDynamicTensorsUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(SL_ANeuralNetworksDiagnostic_registerCallbacks);
return nnapi;
}
const NnApi* NnApiImplementation() {
static const NnApi nnapi = LoadNnApi();
return &nnapi;
} | #include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include <gtest/gtest.h>
namespace {
TEST(NnapiLibTest, NnApiImplementation) {
const NnApi* nnapi = NnApiImplementation();
EXPECT_NE(nnapi, nullptr);
#ifdef __ANDROID__
EXPECT_GT(nnapi->android_sdk_version, 0);
  if (nnapi->android_sdk_version < 27) {
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
} else {
EXPECT_TRUE(nnapi->nnapi_exists);
EXPECT_NE(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
if (nnapi->android_sdk_version >= 28) {
EXPECT_NE(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
} else {
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
}
EXPECT_NE(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_NE(nnapi->ASharedMemory_create, nullptr);
}
#else
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->android_sdk_version, 0);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDeviceCount, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDevice, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getName, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getVersion, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getFeatureLevel, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_createForDevices, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setCaching, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_compute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandRank, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_burstCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setMeasureTiming, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getDuration, nullptr);
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_implementation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_implementation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
387b62c0-4dcf-4a70-bea4-c87442267e17 | cpp | tensorflow/tensorflow | subgraph_tensor_profiler | tensorflow/lite/profiling/subgraph_tensor_profiler.cc | tensorflow/lite/profiling/subgraph_tensor_profiler_test.cc | #include "tensorflow/lite/profiling/subgraph_tensor_profiler.h"
#include <cstring>
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite::profiling {
SubgraphTensorProfiler::SubgraphTensorProfiler(const Interpreter& interpreter,
CallbackT callback)
: interpreter_(interpreter), callback_(callback) {
events_.reserve(interpreter.subgraphs_size());
}
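// Only "Invoke" events are recorded; event_metadata2 carries the subgraph
// index. The returned handle is a 1-based index into events_, and 0 means the
// event is ignored.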
uint32_t SubgraphTensorProfiler::BeginEvent(const char* tag,
EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
if (strcmp(tag, "Invoke")) {
return 0;
}
events_.push_back(event_metadata2);
return events_.size();
}
void SubgraphTensorProfiler::EndEvent(uint32_t event_handle) {
if (!event_handle || events_.size() < event_handle) {
return;
}
const Subgraph* subgraph = interpreter_.subgraph(events_[event_handle - 1]);
for (int i = 0; i < subgraph->tensors_size(); ++i) {
callback_(subgraph->tensor(i));
}
}
} | #include "tensorflow/lite/profiling/subgraph_tensor_profiler.h"
#include <functional>
#include <string>
#include <unordered_set>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite::profiling {
namespace {
using ::testing::IsSupersetOf;
using ::testing::Not;
constexpr const char* kIfSubgraphTensorNames[] = {
"if_cond",
"if_input2",
"if_input3",
"if_output1",
};
constexpr const char* kAddSubgraphTensorNames[] = {
"add_input1",
"add_input2",
"add_output1",
};
constexpr const char* kMulSubgraphTensorNames[] = {
"mul_input1",
"mul_input2",
"mul_output1",
};
struct TensorGatherer {
void operator()(const TfLiteTensor* tensor) { tensors.insert(tensor->name); }
std::unordered_set<std::string> tensors;
};
class SubgraphTensorProfilerTest
: public subgraph_test_util::ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
NameTensors();
}
private:
void NameTensors() {
auto set_names = [](Subgraph* subgraph, auto names) {
for (int j = 0; j < subgraph->tensors_size(); ++j) {
subgraph->tensor(j)->name = names[j];
}
};
set_names(interpreter_->subgraph(0), kIfSubgraphTensorNames);
set_names(interpreter_->subgraph(1), kAddSubgraphTensorNames);
set_names(interpreter_->subgraph(2), kMulSubgraphTensorNames);
}
};
TEST_F(SubgraphTensorProfilerTest, TestMulSubgraph) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
interpreter_->AddProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kIfSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kMulSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors,
Not(IsSupersetOf(kAddSubgraphTensorNames)));
}
TEST_F(SubgraphTensorProfilerTest, TestAddSubgraph) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
interpreter_->AddProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kIfSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kAddSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors,
Not(IsSupersetOf(kMulSubgraphTensorNames)));
}
TEST_F(SubgraphTensorProfilerTest, TestBeginEvent) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
const int subgraph_id = 1;
uint32_t valid_event = profiler.BeginEvent(
"Invoke", Profiler::EventType::DEFAULT, 0, subgraph_id);
EXPECT_EQ(valid_event, 1);
uint32_t invalid_event = profiler.BeginEvent(
"NotInvoke", Profiler::EventType::DEFAULT, 0, subgraph_id);
EXPECT_EQ(invalid_event, 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/subgraph_tensor_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/subgraph_tensor_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19a510be-3fd3-4823-a79c-06b9ca26c3ec | cpp | tensorflow/tensorflow | model_runtime_info | tensorflow/lite/profiling/model_runtime_info.cc | tensorflow/lite/profiling/model_runtime_info_test.cc | #include "tensorflow/lite/profiling/model_runtime_info.h"
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <ios>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "google/protobuf/repeated_field.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/proto/model_runtime_info.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace profiling {
namespace {
Edge::DataType GetEdgeDataTypeFromTfLiteType(TfLiteType type) {
if (static_cast<int>(Edge::DataType_MIN) <= static_cast<int>(type) &&
static_cast<int>(type) <= static_cast<int>(Edge::DataType_MAX)) {
return static_cast<Edge::DataType>(type);
}
TFLITE_LOG(ERROR) << "Mapping TfLiteType to Edge::DataType failed: " << type;
return Edge::UNKNOWN_TYPE;
}
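// Copies a TfLiteIntArray into a proto repeated field. A null array is only
// treated as an error when check_for_null is set.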
TfLiteStatus TfliteIntArrayToRepeatedField(
const TfLiteIntArray* array, google::protobuf::RepeatedField<int32_t>* repeated_field,
bool check_for_null = false) {
if (array == nullptr) {
return check_for_null ? kTfLiteError : kTfLiteOk;
}
repeated_field->Reserve(array->size);
for (int i = 0; i < array->size; ++i) {
repeated_field->Add(array->data[i]);
}
return kTfLiteOk;
}
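// Fills an Edge proto (id, name, data type, byte size, shape, allocation
// type) from a TfLiteTensor.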
TfLiteStatus TfliteTensorToEdge(const TfLiteTensor& tensor, int tensor_index,
Edge& edge_proto) {
edge_proto.set_id(tensor_index);
const std::string tensor_name =
tensor.name == nullptr ? "" : std::string(tensor.name);
edge_proto.set_name(tensor_name);
edge_proto.set_data_type(GetEdgeDataTypeFromTfLiteType(tensor.type));
edge_proto.set_size(tensor.bytes);
edge_proto.set_layout_type(Edge::UNKNOWN);
edge_proto.set_allocation_type(AllocTypeName(tensor.allocation_type));
const auto status =
TfliteIntArrayToRepeatedField(tensor.dims, edge_proto.mutable_shape());
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to convert tensor.dims to RepeatedField as it "
"is null for tensor "
<< tensor_name << " with index " << tensor_index;
return status;
}
return kTfLiteOk;
}
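// Fills a Node proto from a TfLiteNode and its registration. Nodes replaced
// by a delegate record the id of the delegate node that absorbed them, while
// delegate nodes record the ids of the original nodes they replaced.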
TfLiteStatus TfliteNodeToNode(const TfLiteNode& node,
const TfLiteRegistration& reg, int node_index,
bool is_node_delegated,
int32_t delegated_to_node_id, Node& node_proto) {
node_proto.set_id(node_index);
if (reg.custom_name != nullptr) {
node_proto.set_name(reg.custom_name);
node_proto.set_type((is_node_delegated ? "Delegate/" : "") +
std::string(reg.custom_name));
} else {
node_proto.set_name(EnumNamesBuiltinOperator()[reg.builtin_code]);
node_proto.set_type(std::to_string(reg.builtin_code));
}
auto status = TfliteIntArrayToRepeatedField(
node.inputs, node_proto.mutable_inputs(), true);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to convert node.inputs to RepeatedField as it "
"is null for node "
<< node_proto.name() << " with index " << node_index;
return status;
}
status = TfliteIntArrayToRepeatedField(
node.outputs, node_proto.mutable_outputs(), true);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR)
<< "Failed to convert node.outputs to RepeatedField as it "
"is null for node "
<< node_proto.name() << " with index " << node_index;
return status;
}
status = TfliteIntArrayToRepeatedField(node.intermediates,
node_proto.mutable_intermediates());
if (status != kTfLiteOk) {
return status;
}
status = TfliteIntArrayToRepeatedField(node.temporaries,
node_proto.mutable_temporaries());
if (status != kTfLiteOk) {
return status;
}
if (is_node_delegated) {
node_proto.set_delegated_to_node_id(delegated_to_node_id);
} else if (node.delegate != nullptr) {
auto delegate_node_details = node_proto.mutable_delegate_node_details();
delegate_node_details->set_delegate_name(reg.custom_name);
auto* delegate_params =
static_cast<TfLiteDelegateParams*>(node.builtin_data);
status = TfliteIntArrayToRepeatedField(
delegate_params->nodes_to_replace,
delegate_node_details->mutable_tflite_node_ids_replaced(),
true);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to convert delegate_params->nodes_to_replace"
" to RepeatedField as it is null for node "
<< node_proto.name() << " with index " << node_index;
return status;
}
}
return kTfLiteOk;
}
}
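// Walks every subgraph of the interpreter, converts its tensors, nodes and
// execution plan into a ModelRuntimeDetails proto, and serializes the proto
// to output_file_path (falling back to logging it if the file cannot be
// opened).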
TfLiteStatus GenerateModelRuntimeInfo(const tflite::Interpreter& interpreter,
absl::string_view output_file_path) {
tflite::profiling::ModelRuntimeDetails model_runtime_details;
const size_t num_subgraphs = interpreter.subgraphs_size();
for (int i = 0; i < num_subgraphs; ++i) {
RuntimeSubgraph* runtime_subgraph = model_runtime_details.add_subgraphs();
runtime_subgraph->set_subgraph_id(i);
runtime_subgraph->set_subgraph_type(RuntimeSubgraph::TFLITE_SUBGRAPH);
const tflite::Subgraph& subgraph = *(interpreter.subgraph(i));
for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
tensor_index++) {
const TfLiteTensor* tensor =
subgraph.tensor(static_cast<int>(tensor_index));
Edge* edge = runtime_subgraph->add_edges();
auto status = TfliteTensorToEdge(*tensor, tensor_index, *edge);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to convert tensor to edge, tensor index: "
<< tensor_index;
return status;
}
}
const SubgraphDelegationMetadata delegation_metadata =
GetNodeDelegationMetadata(subgraph);
for (size_t node_index = 0; node_index < subgraph.nodes_size();
node_index++) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(static_cast<int>(node_index));
const TfLiteNode& node = node_and_reg->first;
const TfLiteRegistration& reg = node_and_reg->second;
Node* runtime_node = runtime_subgraph->add_nodes();
const bool is_node_delegated =
node.delegate == nullptr &&
delegation_metadata.is_node_delegated[node_index];
TfLiteStatus status = TfliteNodeToNode(
node, reg, node_index, is_node_delegated,
is_node_delegated ? delegation_metadata.replaced_by_node[node_index]
: -1,
*runtime_node);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to convert node to runtime node, node "
"index: "
<< node_index;
return status;
}
}
runtime_subgraph->mutable_execution_plan()->Add(
subgraph.execution_plan().begin(), subgraph.execution_plan().end());
}
std::ofstream ofs(std::string(output_file_path),
std::ios::out | std::ios::binary);
if (ofs.good()) {
model_runtime_details.SerializeToOstream(&ofs);
ofs.close();
} else {
TFLITE_LOG(ERROR) << "Failed to open file: " << output_file_path;
TFLITE_LOG(INFO) << model_runtime_details.DebugString();
}
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/profiling/model_runtime_info.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <ios>
#include <iostream>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/profiling/buffered_profiler.h"
#include "tensorflow/lite/profiling/proto/model_runtime_info.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace profiling {
class PadAndConv2DModel : public MultiOpModel {
public:
explicit PadAndConv2DModel(TfLiteDelegate* delegate = nullptr) {
input_ = AddInput({TensorType_FLOAT32, {1, 3, 3, 1}});
int pad_out = AddInnerTensor<float>({TensorType_FLOAT32, {1, 5, 5, 1}});
output_ = AddOutput({TensorType_FLOAT32, {1, 5, 5, 1}});
int padding_in_ =
AddConstInput({TensorType_INT32, {4, 2}}, {0, 0, 1, 1, 1, 1, 0, 0});
int conv_filter_ =
AddConstInput({TensorType_FLOAT32, {1, 2, 2, 1}}, {0, 1, 1, 0});
int conv_bias_ = AddConstInput({TensorType_FLOAT32, {1}}, {3});
AddBuiltinOp(tflite::BuiltinOperator_PAD, tflite::BuiltinOptions_PadOptions,
CreatePadOptions(builder_).Union(), {input_, padding_in_},
{pad_out});
AddBuiltinOp(
tflite::BuiltinOperator_CONV_2D, tflite::BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(builder_, tflite::Padding_SAME, 1, 1).Union(),
{pad_out, conv_filter_, conv_bias_}, {output_});
SetDelegate(delegate);
BuildInterpreter({GetShape(input_)}, -1,
false,
delegate != nullptr,
false);
}
int input() const { return input_; }
int output() const { return output_; }
void SetProfiler(Profiler* profiler) { interpreter_->SetProfiler(profiler); }
Interpreter* interpreter() const { return interpreter_.get(); }
void Initialize(Profiler* profiler) {
if (profiler != nullptr) {
SetProfiler(profiler);
}
AllocateAndDelegate(true);
}
void ResetProfilerAndInvoke(profiling::BufferedProfiler* profiler) {
profiler->Reset();
profiler->StartProfiling();
PopulateTensor(input(),
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f});
ASSERT_EQ(kTfLiteOk, Invoke());
profiler->StopProfiling();
}
private:
int input_;
int output_;
};
bool AreRepeatedIntFieldsEqual(const google::protobuf::RepeatedField<int32_t>& field_1,
const google::protobuf::RepeatedField<int32_t>& field_2) {
return std::equal(field_1.begin(), field_1.end(), field_2.begin(),
field_2.end());
}
bool AreEdgesEqual(const Edge& edge_1, const Edge& edge_2) {
auto proto_to_tuple = [](const Edge& edge) {
return std::make_tuple(edge.id(), edge.name(), edge.data_type(),
edge.size(), edge.layout_type(),
edge.allocation_type());
};
return proto_to_tuple(edge_1) == proto_to_tuple(edge_2) &&
AreRepeatedIntFieldsEqual(edge_1.shape(), edge_2.shape());
}
bool AreNodesEqual(const Node& node_1, const Node& node_2) {
auto proto_to_tuple = [](const Node& node) {
return std::make_tuple(node.id(), node.name(), node.type());
};
return proto_to_tuple(node_1) == proto_to_tuple(node_2) &&
AreRepeatedIntFieldsEqual(node_1.inputs(), node_2.inputs()) &&
AreRepeatedIntFieldsEqual(node_1.outputs(), node_2.outputs()) &&
AreRepeatedIntFieldsEqual(node_1.intermediates(),
node_2.intermediates()) &&
AreRepeatedIntFieldsEqual(node_1.temporaries(), node_2.temporaries());
}
bool AreRuntimeSubgraphsEqual(const RuntimeSubgraph& subgraph_1,
const RuntimeSubgraph& subgraph_2) {
auto proto_to_tuple = [](const RuntimeSubgraph& subgraph) {
return std::make_tuple(subgraph.subgraph_id(), subgraph.subgraph_type(),
subgraph.execution_plan().size(),
subgraph.nodes_size(), subgraph.edges_size());
};
if (proto_to_tuple(subgraph_1) == proto_to_tuple(subgraph_2) &&
AreRepeatedIntFieldsEqual(subgraph_1.execution_plan(),
subgraph_2.execution_plan())) {
for (size_t i = 0; i < subgraph_1.nodes_size(); ++i) {
if (!AreNodesEqual(subgraph_1.nodes(i), subgraph_2.nodes(i))) {
return false;
}
}
for (size_t i = 0; i < subgraph_1.edges_size(); ++i) {
if (!AreEdgesEqual(subgraph_1.edges(i), subgraph_2.edges(i))) {
return false;
}
}
return true;
}
return false;
}
bool AreModelRuntimeDetailsEqual(const ModelRuntimeDetails& model_details_1,
const ModelRuntimeDetails& model_details_2) {
auto proto_to_tuple = [](const ModelRuntimeDetails& model_details) {
return std::make_tuple(model_details.model_name(),
model_details.subgraphs_size());
};
if (proto_to_tuple(model_details_1) == proto_to_tuple(model_details_2)) {
for (size_t i = 0; i < model_details_1.subgraphs_size(); ++i) {
if (!AreRuntimeSubgraphsEqual(model_details_1.subgraphs(i),
model_details_2.subgraphs(i))) {
return false;
}
}
return true;
}
return false;
}
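// Builds the golden ModelRuntimeDetails for PadAndConv2DModel. Without a
// delegate the execution plan holds the two original nodes (PAD, CONV_2D);
// with the XNNPACK delegate both ops are replaced by a single delegate node
// and the original nodes record which delegate node absorbed them.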
ModelRuntimeDetails CreateExpectedModelRuntimeDetails(
bool is_xnnpack_delegate) {
ModelRuntimeDetails expected_model_runtime_details;
RuntimeSubgraph* subgraph = expected_model_runtime_details.add_subgraphs();
subgraph->set_subgraph_id(0);
subgraph->set_subgraph_type(RuntimeSubgraph::TFLITE_SUBGRAPH);
if (is_xnnpack_delegate) {
subgraph->add_execution_plan(2);
} else {
subgraph->add_execution_plan(0);
subgraph->add_execution_plan(1);
}
Node* node = subgraph->add_nodes();
node->set_id(0);
node->set_name("PAD");
node->set_type("34");
node->add_inputs(0);
node->add_inputs(3);
node->add_outputs(1);
Node* node_2 = subgraph->add_nodes();
node_2->set_id(1);
node_2->set_name("CONV_2D");
node_2->set_type("3");
node_2->add_inputs(1);
node_2->add_inputs(4);
node_2->add_inputs(5);
node_2->add_outputs(2);
node_2->add_temporaries(6);
if (is_xnnpack_delegate) {
node->set_delegated_to_node_id(2);
node_2->set_delegated_to_node_id(2);
Node* node_3 = subgraph->add_nodes();
node_3->set_id(2);
node_3->set_name("TfLiteXNNPackDelegate");
node_3->set_type("TfLiteXNNPackDelegate");
node_3->add_inputs(0);
node_3->add_inputs(3);
node_3->add_inputs(4);
node_3->add_inputs(5);
node_3->add_outputs(2);
DelegateNodeDetails* delegate_node_details =
node_3->mutable_delegate_node_details();
delegate_node_details->set_delegate_name("TfLiteXNNPackDelegate");
delegate_node_details->add_tflite_node_ids_replaced(0);
delegate_node_details->add_tflite_node_ids_replaced(1);
}
Edge* edge = subgraph->add_edges();
edge->set_id(0);
edge->set_name("");
edge->set_data_type(Edge::FLOAT32);
edge->set_size(36);
edge->set_layout_type(Edge::UNKNOWN);
edge->add_shape(1);
edge->add_shape(3);
edge->add_shape(3);
edge->add_shape(1);
edge->set_allocation_type("kTfLiteArenaRw");
edge = subgraph->add_edges();
edge->set_id(1);
edge->set_name("");
edge->set_data_type(Edge::FLOAT32);
edge->set_size(100);
edge->set_layout_type(Edge::UNKNOWN);
edge->add_shape(1);
edge->add_shape(5);
edge->add_shape(5);
edge->add_shape(1);
edge->set_allocation_type("kTfLiteArenaRw");
edge = subgraph->add_edges();
edge->set_id(2);
edge->set_name("");
edge->set_data_type(Edge::FLOAT32);
edge->set_size(100);
edge->set_layout_type(Edge::UNKNOWN);
edge->add_shape(1);
edge->add_shape(5);
edge->add_shape(5);
edge->add_shape(1);
edge->set_allocation_type("kTfLiteArenaRw");
edge = subgraph->add_edges();
edge->set_id(3);
edge->set_name("");
edge->set_data_type(Edge::INT32);
edge->set_size(32);
edge->set_layout_type(Edge::UNKNOWN);
edge->add_shape(4);
edge->add_shape(2);
edge->set_allocation_type("kTfLiteMmapRo");
edge = subgraph->add_edges();
edge->set_id(4);
edge->set_name("");
edge->set_data_type(Edge::FLOAT32);
edge->set_size(16);
edge->set_layout_type(Edge::UNKNOWN);
edge->add_shape(1);
edge->add_shape(2);
edge->add_shape(2);
edge->add_shape(1);
edge->set_allocation_type("kTfLiteMmapRo");
edge = subgraph->add_edges();
edge->set_id(5);
edge->set_name("");
edge->set_data_type(Edge::FLOAT32);
edge->set_size(4);
edge->set_layout_type(Edge::UNKNOWN);
edge->add_shape(1);
edge->set_allocation_type("kTfLiteMmapRo");
edge = subgraph->add_edges();
edge->set_id(6);
edge->set_data_type(Edge::FLOAT32);
edge->set_layout_type(Edge::UNKNOWN);
edge->set_allocation_type("kTfLiteArenaRwPersistent");
#if __ANDROID__ && (__aarch64__ || __arm__ || __aarch32__)
edge->set_name("");
edge->set_size(is_xnnpack_delegate ? 0 : 400);
edge->add_shape(1);
edge->add_shape(5);
edge->add_shape(5);
edge->add_shape(4);
edge->set_allocation_type("kTfLiteArenaRw");
#else
edge->set_name("Conv_hwcn_weights");
edge->set_size(is_xnnpack_delegate ? 0 : 16);
edge->add_shape(4);
edge->add_shape(1);
edge->set_allocation_type("kTfLiteArenaRwPersistent");
#endif
return expected_model_runtime_details;
}
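// Both tests below invoke the model, write runtime info to a file via
// GenerateModelRuntimeInfo(), parse it back, and compare it against the
// expected proto constructed above.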
TEST(MODEL_RUNTIME_INFO_TEST, PadAndConv2DNoDelegate) {
auto profiler = std::make_unique<profiling::BufferedProfiler>(1024, false);
PadAndConv2DModel model(nullptr);
model.Initialize(profiler.get());
model.ResetProfilerAndInvoke(profiler.get());
#ifdef __ANDROID__
std::string file_name = "/data/local/tmp/test_file.textproto";
#else
std::string file_name = "/tmp/test_file.textproto";
#endif
auto status = GenerateModelRuntimeInfo(*model.interpreter(), file_name);
ASSERT_TRUE(status == kTfLiteOk);
ModelRuntimeDetails model_runtime_details;
std::ifstream file(file_name, std::ios::binary);
ASSERT_TRUE(file.good());
model_runtime_details.ParseFromIstream(&file);
file.close();
ModelRuntimeDetails expected_model_runtime_details =
CreateExpectedModelRuntimeDetails(false);
ASSERT_TRUE(AreModelRuntimeDetailsEqual(model_runtime_details,
expected_model_runtime_details));
}
TEST(MODEL_RUNTIME_INFO_TEST, PadAndConv2DWithXnnpackDelegate) {
auto profiler = std::make_unique<profiling::BufferedProfiler>(1024, false);
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
PadAndConv2DModel xnnpack_model(xnnpack_delegate.get());
xnnpack_model.Initialize(profiler.get());
xnnpack_model.ResetProfilerAndInvoke(profiler.get());
#ifdef __ANDROID__
std::string file_name = "/data/local/tmp/test_file.textproto";
#else
std::string file_name = "/tmp/test_file.textproto";
#endif
auto status =
GenerateModelRuntimeInfo(*xnnpack_model.interpreter(), file_name);
ASSERT_TRUE(status == kTfLiteOk);
ModelRuntimeDetails model_runtime_details;
std::ifstream file(file_name, std::ios::binary);
ASSERT_TRUE(file.good());
model_runtime_details.ParseFromIstream(&file);
file.close();
ModelRuntimeDetails expected_model_runtime_details =
CreateExpectedModelRuntimeDetails(true);
ASSERT_TRUE(AreModelRuntimeDetailsEqual(model_runtime_details,
expected_model_runtime_details));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/model_runtime_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/model_runtime_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9b79f15a-5ad8-4f05-b7e9-569a601d059c | cpp | tensorflow/tensorflow | profile_summary_formatter | tensorflow/lite/profiling/profile_summary_formatter.cc | tensorflow/lite/profiling/profile_summary_formatter_test.cc | #include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include <fstream>
#include <iomanip>
#include <ios>
#include <map>
#include <memory>
#include <ostream>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace profiling {
std::string ProfileSummaryDefaultFormatter::GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
return GenerateReport("profile", true,
stats_calculator_map, delegate_stats_calculator,
subgraph_name_map);
}
std::string ProfileSummaryDefaultFormatter::GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
return GenerateReport("summary", false,
stats_calculator_map, delegate_stats_calculator,
subgraph_name_map);
}
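// Shared implementation behind GetOutputString() and GetShortSummary(): emits
// one section per subgraph (labelled when the model has more than the primary
// subgraph) plus an optional "Delegate internal" section when delegate events
// were recorded.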
std::string ProfileSummaryDefaultFormatter::GenerateReport(
const std::string& tag, bool include_output_string,
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
std::stringstream stream;
bool has_non_primary_graph =
(stats_calculator_map.size() - stats_calculator_map.count(0)) > 0;
for (const auto& stats_calc : stats_calculator_map) {
auto subgraph_index = stats_calc.first;
auto subgraph_stats = stats_calc.second.get();
std::string subgraph_name = "";
if (subgraph_name_map.find(subgraph_index) != subgraph_name_map.end()) {
subgraph_name = subgraph_name_map.at(subgraph_index);
}
if (has_non_primary_graph) {
if (subgraph_index == 0) {
stream << "Primary graph (name: " << subgraph_name << ") " << tag << ":"
<< std::endl;
} else {
stream << "Subgraph (index: " << subgraph_index
<< ", name: " << subgraph_name << ") " << tag << ":"
<< std::endl;
}
}
if (include_output_string) {
stream << subgraph_stats->GetOutputString();
}
if (subgraph_index != 0) {
stream << "Subgraph (index: " << subgraph_index
<< ", name: " << subgraph_name << ") ";
}
stream << subgraph_stats->GetShortSummary() << std::endl;
}
if (delegate_stats_calculator.num_runs() > 0) {
stream << "Delegate internal: " << std::endl;
if (include_output_string) {
stream << delegate_stats_calculator.GetOutputString();
}
stream << delegate_stats_calculator.GetShortSummary() << std::endl;
}
return stream.str();
}
void ProfileSummaryDefaultFormatter::HandleOutput(
const std::string& init_output, const std::string& run_output,
std::string output_file_path) const {
std::ofstream output_file(output_file_path);
std::ostream* output_stream = nullptr;
if (output_file.good()) {
output_stream = &output_file;
}
if (!init_output.empty()) {
WriteOutput("Profiling Info for Benchmark Initialization:", init_output,
output_stream == nullptr ? &TFLITE_LOG(INFO) : output_stream);
}
if (!run_output.empty()) {
WriteOutput(
"Operator-wise Profiling Info for Regular Benchmark Runs:", run_output,
output_stream == nullptr ? &TFLITE_LOG(INFO) : output_stream);
}
}
tensorflow::StatSummarizerOptions
ProfileSummaryDefaultFormatter::GetStatSummarizerOptions() const {
auto options = tensorflow::StatSummarizerOptions();
options.show_summary = false;
options.show_memory = false;
return options;
}
tensorflow::StatSummarizerOptions
ProfileSummaryCSVFormatter::GetStatSummarizerOptions() const {
auto options = ProfileSummaryDefaultFormatter::GetStatSummarizerOptions();
options.format_as_csv = true;
return options;
}
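// StatsCalculator keys its per-node details by name; this re-orders them by
// run order (max-heap keyed on num_nodes - run_order, so the earliest-run
// node pops first) so the proto output lists ops in execution order.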
std::vector<tensorflow::StatsCalculator::Detail>
ProfileSummaryProtoFormatter::GetDetailsSortedByRunOrder(
const tensorflow::StatsCalculator* stats_calculator) const {
std::vector<tensorflow::StatsCalculator::Detail> details;
std::map<std::string, tensorflow::StatsCalculator::Detail> unsorted_details =
stats_calculator->GetDetails();
std::priority_queue<
std::pair<std::string, const tensorflow::StatsCalculator::Detail*>>
sorted_list;
const int num_nodes = unsorted_details.size();
for (const auto& det : unsorted_details) {
const tensorflow::StatsCalculator::Detail* detail = &(det.second);
std::stringstream stream_for_sort;
stream_for_sort << std::setw(20) << std::right << std::setprecision(10)
<< std::fixed;
stream_for_sort << num_nodes - detail->run_order;
sorted_list.emplace(stream_for_sort.str(), detail);
}
while (!sorted_list.empty()) {
auto entry = sorted_list.top();
sorted_list.pop();
details.push_back(*entry.second);
}
return details;
}
void ProfileSummaryProtoFormatter::GenerateOpProfileDataFromDetail(
const tensorflow::StatsCalculator::Detail* detail,
const tensorflow::StatsCalculator* stats_calculator,
OpProfileData* const op_profile_data) const {
if (detail == nullptr) {
return;
}
op_profile_data->set_node_type(detail->type);
OpProfilingStat* inference_stat =
op_profile_data->mutable_inference_microseconds();
inference_stat->set_first(detail->elapsed_time.first());
inference_stat->set_last(detail->elapsed_time.newest());
inference_stat->set_avg(detail->elapsed_time.avg());
inference_stat->set_stddev(detail->elapsed_time.std_deviation());
inference_stat->set_variance(detail->elapsed_time.variance());
inference_stat->set_min(detail->elapsed_time.min());
inference_stat->set_max(detail->elapsed_time.max());
inference_stat->set_sum(detail->elapsed_time.sum());
inference_stat->set_count(detail->elapsed_time.count());
OpProfilingStat* memory_stat = op_profile_data->mutable_mem_kb();
memory_stat->set_first(detail->mem_used.first() / 1000.0);
memory_stat->set_last(detail->mem_used.newest() / 1000.0);
memory_stat->set_avg(detail->mem_used.avg() / 1000.0);
memory_stat->set_stddev(detail->mem_used.std_deviation() / 1000.0);
memory_stat->set_variance(detail->mem_used.variance() / 1000000.0);
memory_stat->set_min(detail->mem_used.min() / 1000.0);
memory_stat->set_max(detail->mem_used.max() / 1000.0);
memory_stat->set_sum(detail->mem_used.sum() / 1000.0);
memory_stat->set_count(detail->mem_used.count());
op_profile_data->set_times_called(detail->times_called /
stats_calculator->num_runs());
op_profile_data->set_name(detail->name);
op_profile_data->set_run_order(detail->run_order);
}
void ProfileSummaryProtoFormatter::GenerateSubGraphProfilingData(
const tensorflow::StatsCalculator* stats_calculator, int subgraph_index,
const std::map<uint32_t, std::string>& subgraph_name_map,
SubGraphProfilingData* const sub_graph_profiling_data) const {
sub_graph_profiling_data->set_subgraph_index(subgraph_index);
std::string subgraph_name = "";
if (subgraph_name_map.find(subgraph_index) != subgraph_name_map.end()) {
subgraph_name = subgraph_name_map.at(subgraph_index);
}
sub_graph_profiling_data->set_subgraph_name(subgraph_name);
for (tensorflow::StatsCalculator::Detail& detail :
GetDetailsSortedByRunOrder(stats_calculator)) {
OpProfileData* const op_profile_data =
sub_graph_profiling_data->add_per_op_profiles();
GenerateOpProfileDataFromDetail(&detail, stats_calculator, op_profile_data);
}
}
void ProfileSummaryProtoFormatter::GenerateDelegateProfilingData(
const tensorflow::StatsCalculator* stats_calculator,
DelegateProfilingData* const delegate_profiling_data) const {
for (const tensorflow::StatsCalculator::Detail& detail :
GetDetailsSortedByRunOrder(stats_calculator)) {
OpProfileData* const op_profile_data =
delegate_profiling_data->add_per_op_profiles();
GenerateOpProfileDataFromDetail(&detail, stats_calculator, op_profile_data);
}
}
std::string ProfileSummaryProtoFormatter::GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
TFLITE_LOG(ERROR) << "GetShortSummary is not supported for proto formatter.";
return "";
}
std::string ProfileSummaryProtoFormatter::GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
ModelProfilingData model_profiling_data;
for (const auto& stats_calc : stats_calculator_map) {
auto subgraph_index = stats_calc.first;
tensorflow::StatsCalculator* subgraph_stats = stats_calc.second.get();
SubGraphProfilingData* const sub_graph_profiling_data =
model_profiling_data.add_subgraph_profiles();
GenerateSubGraphProfilingData(subgraph_stats, subgraph_index,
subgraph_name_map, sub_graph_profiling_data);
}
if (delegate_stats_calculator.num_runs() > 0) {
DelegateProfilingData* const delegate_profiling_data =
model_profiling_data.add_delegate_profiles();
GenerateDelegateProfilingData(&delegate_stats_calculator,
delegate_profiling_data);
}
return model_profiling_data.SerializeAsString();
}
tensorflow::StatSummarizerOptions
ProfileSummaryProtoFormatter::GetStatSummarizerOptions() const {
auto options = tensorflow::StatSummarizerOptions();
options.show_summary = false;
options.show_memory = false;
return options;
}
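// Packs the init and runtime profiles into a single BenchmarkProfilingData
// proto; writes it in binary form when the output file can be opened and logs
// its DebugString() otherwise.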
void ProfileSummaryProtoFormatter::HandleOutput(
const std::string& init_output, const std::string& run_output,
std::string output_file_path) const {
std::ofstream output_file(output_file_path, std::ios_base::binary);
std::ostream* output_stream = nullptr;
if (output_file.good()) {
output_stream = &output_file;
}
BenchmarkProfilingData benchmark_profiling_data;
if (!init_output.empty()) {
benchmark_profiling_data.mutable_init_profile()->ParseFromString(
init_output);
}
if (!run_output.empty()) {
benchmark_profiling_data.mutable_runtime_profile()->ParseFromString(
run_output);
}
if (output_stream == nullptr) {
TFLITE_LOG(INFO) << benchmark_profiling_data.DebugString();
} else {
benchmark_profiling_data.SerializeToOstream(output_stream);
}
}
}
} | #include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include <cstddef>
#include <fstream>
#include <ios>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
namespace tflite {
namespace profiling {
namespace {
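// Tuple-based equality helpers for the profiling protos; the float stats in
// these tests take exact integer values, so direct comparison is sufficient.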
bool AreOpProfilingStatEqual(const OpProfilingStat& op_profiling_stat_1,
const OpProfilingStat& op_profiling_stat_2) {
auto proto_to_tuple = [](const OpProfilingStat& op_profiling_stat) {
return std::make_tuple(op_profiling_stat.first(), op_profiling_stat.last(),
op_profiling_stat.avg(), op_profiling_stat.stddev(),
op_profiling_stat.variance(),
op_profiling_stat.min(), op_profiling_stat.max(),
op_profiling_stat.sum(), op_profiling_stat.count());
};
return proto_to_tuple(op_profiling_stat_1) ==
proto_to_tuple(op_profiling_stat_2);
}
bool AreOpProfileDataEqual(const OpProfileData& op_profile_data_1,
const OpProfileData& op_profile_data_2) {
auto proto_to_tuple = [](const OpProfileData& op_profile_data) {
return std::make_tuple(op_profile_data.node_type(),
op_profile_data.times_called(),
op_profile_data.name(), op_profile_data.run_order());
};
return (proto_to_tuple(op_profile_data_1) ==
proto_to_tuple(op_profile_data_2)) &&
AreOpProfilingStatEqual(op_profile_data_1.inference_microseconds(),
op_profile_data_2.inference_microseconds()) &&
(AreOpProfilingStatEqual(op_profile_data_1.mem_kb(),
op_profile_data_2.mem_kb()));
}
bool AreSubGraphProfilingDataEqual(
const SubGraphProfilingData& subgraph_profiling_data_1,
const SubGraphProfilingData& subgraph_profiling_data_2) {
auto proto_to_tuple =
[](const SubGraphProfilingData& subgraph_profiling_data) {
return std::make_tuple(
subgraph_profiling_data.subgraph_name(),
subgraph_profiling_data.per_op_profiles().size());
};
if (proto_to_tuple(subgraph_profiling_data_1) ==
proto_to_tuple(subgraph_profiling_data_2)) {
for (size_t i = 0; i < subgraph_profiling_data_1.per_op_profiles().size();
++i) {
auto op_profile_data_1 = subgraph_profiling_data_1.per_op_profiles(i);
auto op_profile_data_2 = subgraph_profiling_data_2.per_op_profiles(i);
if (!AreOpProfileDataEqual(op_profile_data_1, op_profile_data_2)) {
return false;
}
}
return true;
}
return false;
}
bool AreDelegateProfilingDataEqual(
const DelegateProfilingData& delegate_profiling_data_1,
const DelegateProfilingData& delegate_profiling_data_2) {
auto proto_to_tuple =
[](const DelegateProfilingData& delegate_profiling_data) {
return std::make_tuple(
delegate_profiling_data.delegate_name(),
delegate_profiling_data.per_op_profiles().size());
};
if (proto_to_tuple(delegate_profiling_data_1) ==
proto_to_tuple(delegate_profiling_data_2)) {
for (size_t i = 0; i < delegate_profiling_data_1.per_op_profiles().size();
++i) {
auto op_profile_data_1 = delegate_profiling_data_1.per_op_profiles(i);
auto op_profile_data_2 = delegate_profiling_data_2.per_op_profiles(i);
if (!AreOpProfileDataEqual(op_profile_data_1, op_profile_data_2)) {
return false;
}
}
return true;
}
return false;
}
bool AreModelProfilingDataEqual(
const ModelProfilingData& model_profiling_data_1,
const ModelProfilingData& model_profiling_data_2) {
if (model_profiling_data_1.subgraph_profiles().size() !=
model_profiling_data_2.subgraph_profiles().size()) {
return false;
}
for (size_t i = 0; i < model_profiling_data_1.subgraph_profiles().size();
++i) {
auto subgraph_profile_1 = model_profiling_data_1.subgraph_profiles(i);
auto subgraph_profile_2 = model_profiling_data_2.subgraph_profiles(i);
if (!AreSubGraphProfilingDataEqual(subgraph_profile_1,
subgraph_profile_2)) {
return false;
}
}
if (model_profiling_data_1.delegate_profiles().size() !=
model_profiling_data_2.delegate_profiles().size()) {
return false;
}
for (size_t i = 0; i < model_profiling_data_1.delegate_profiles().size();
++i) {
auto delegate_profile_1 = model_profiling_data_1.delegate_profiles(i);
auto delegate_profile_2 = model_profiling_data_2.delegate_profiles(i);
if (!AreDelegateProfilingDataEqual(delegate_profile_1,
delegate_profile_2)) {
return false;
}
}
return true;
}
TEST(SummaryWriterTest, SummaryOptionStdOut) {
ProfileSummaryDefaultFormatter writer;
tensorflow::StatSummarizerOptions options = writer.GetStatSummarizerOptions();
EXPECT_EQ(options.show_summary, false);
EXPECT_EQ(options.show_memory, false);
EXPECT_EQ(options.format_as_csv, false);
}
TEST(SummaryWriterTest, SummaryOptionCSV) {
ProfileSummaryCSVFormatter writer;
tensorflow::StatSummarizerOptions options = writer.GetStatSummarizerOptions();
EXPECT_EQ(options.show_summary, false);
EXPECT_EQ(options.show_memory, false);
EXPECT_EQ(options.format_as_csv, true);
}
TEST(SummaryWriterTest, EmptyOutputString) {
ProfileSummaryDefaultFormatter writer;
std::string output = writer.GetOutputString(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
EXPECT_EQ(output.size(), 0);
}
TEST(SummaryWriterTest, EmptyShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::string output = writer.GetShortSummary(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
EXPECT_EQ(output.size(), 0);
}
TEST(SummaryWriterTest, SingleSubgraphOutputString) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
ASSERT_TRUE(absl::StrContains(output, "Run Order"));
ASSERT_TRUE(absl::StrContains(output, "Top by Computation Time"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Memory Use"));
ASSERT_TRUE(absl::StrContains(output, "Summary by node type"));
ASSERT_TRUE(absl::StrContains(output, "nodes observed"));
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, SingleSubgraphShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetShortSummary(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}});
ASSERT_TRUE(!absl::StrContains(output, "Run Order"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Computation Time"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Memory Use"));
ASSERT_TRUE(!absl::StrContains(output, "Summary by node type"));
ASSERT_TRUE(absl::StrContains(output, "nodes observed"));
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, MultiSubgraphOutputString) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
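// End-to-end check of the proto formatter: feeds node stats for two subgraphs
// into StatsCalculators, parses the serialized ModelProfilingData back, and
// verifies per-op timing, memory, run order, and call counts.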
TEST(SummaryWriterTest, MultiSubgraphOutputStringForProto) {
ProfileSummaryProtoFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string kernel_name_1 = "Kernel 1";
std::string kernel_name_2 = "Kernel 2";
std::string kernel_name_3 = "Kernel 3";
std::string op_name_1 = "Convolution";
std::string op_name_2 = "Reshape";
std::string op_name_3 = "Convolution";
stats_calculator_map[0]->AddNodeStats(kernel_name_1, op_name_1, 1, 10, 10000);
stats_calculator_map[0]->AddNodeStats(kernel_name_1, op_name_1, 1, 20, 20000);
stats_calculator_map[0]->AddNodeStats(kernel_name_2, op_name_2, 2, 15, 10000);
stats_calculator_map[0]->UpdateRunTotalUs(25);
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1]->AddNodeStats(kernel_name_3, op_name_3, 3, 10, 10000);
stats_calculator_map[1]->UpdateRunTotalUs(10);
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ModelProfilingData model_profiling_data;
model_profiling_data.ParseFromString(output);
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
ASSERT_EQ(model_profiling_data.subgraph_profiles().size(), 2);
ASSERT_EQ(model_profiling_data.subgraph_profiles(0).subgraph_name(),
"Primary graph");
ASSERT_EQ(model_profiling_data.subgraph_profiles(0).per_op_profiles().size(),
2);
OpProfileData op_profile_data_1;
op_profile_data_1.set_node_type(op_name_1);
OpProfilingStat* inference_microseconds_stat_1 =
op_profile_data_1.mutable_inference_microseconds();
inference_microseconds_stat_1->set_first(10);
inference_microseconds_stat_1->set_last(20);
inference_microseconds_stat_1->set_max(20);
inference_microseconds_stat_1->set_min(10);
inference_microseconds_stat_1->set_avg(15);
inference_microseconds_stat_1->set_stddev(5);
inference_microseconds_stat_1->set_variance(25);
inference_microseconds_stat_1->set_sum(30);
inference_microseconds_stat_1->set_count(2);
OpProfilingStat* memory_stat_1 = op_profile_data_1.mutable_mem_kb();
memory_stat_1->set_first(10);
memory_stat_1->set_last(20);
memory_stat_1->set_max(20);
memory_stat_1->set_min(10);
memory_stat_1->set_avg(15);
memory_stat_1->set_stddev(5);
memory_stat_1->set_variance(25);
memory_stat_1->set_sum(30);
memory_stat_1->set_count(2);
op_profile_data_1.set_name(kernel_name_1);
op_profile_data_1.set_run_order(1);
op_profile_data_1.set_times_called(2);
EXPECT_TRUE(AreOpProfileDataEqual(
model_profiling_data.subgraph_profiles(0).per_op_profiles(0),
op_profile_data_1));
OpProfileData op_profile_data_2;
op_profile_data_2.set_node_type(op_name_2);
OpProfilingStat* inference_microseconds_stat_2 =
op_profile_data_2.mutable_inference_microseconds();
inference_microseconds_stat_2->set_first(15);
inference_microseconds_stat_2->set_last(15);
inference_microseconds_stat_2->set_max(15);
inference_microseconds_stat_2->set_min(15);
inference_microseconds_stat_2->set_avg(15);
inference_microseconds_stat_2->set_stddev(0);
inference_microseconds_stat_2->set_variance(0);
inference_microseconds_stat_2->set_sum(15);
inference_microseconds_stat_2->set_count(1);
OpProfilingStat* memory_stat_2 = op_profile_data_2.mutable_mem_kb();
memory_stat_2->set_first(10);
memory_stat_2->set_last(10);
memory_stat_2->set_max(10);
memory_stat_2->set_min(10);
memory_stat_2->set_avg(10);
memory_stat_2->set_stddev(0);
memory_stat_2->set_variance(0);
memory_stat_2->set_sum(10);
memory_stat_2->set_count(1);
op_profile_data_2.set_times_called(1);
op_profile_data_2.set_name(kernel_name_2);
op_profile_data_2.set_run_order(2);
EXPECT_TRUE(AreOpProfileDataEqual(
model_profiling_data.subgraph_profiles(0).per_op_profiles(1),
op_profile_data_2));
ASSERT_EQ(model_profiling_data.subgraph_profiles(1).subgraph_name(),
"Subgraph 1");
ASSERT_EQ(model_profiling_data.subgraph_profiles(1).per_op_profiles().size(),
1);
OpProfileData op_profile_data_3;
op_profile_data_3.set_node_type(op_name_3);
OpProfilingStat* inference_microseconds_stat_3 =
op_profile_data_3.mutable_inference_microseconds();
inference_microseconds_stat_3->set_first(10);
inference_microseconds_stat_3->set_last(10);
inference_microseconds_stat_3->set_max(10);
inference_microseconds_stat_3->set_min(10);
inference_microseconds_stat_3->set_avg(10);
inference_microseconds_stat_3->set_stddev(0);
inference_microseconds_stat_3->set_variance(0);
inference_microseconds_stat_3->set_sum(10);
inference_microseconds_stat_3->set_count(1);
OpProfilingStat* memory_stat_3 = op_profile_data_3.mutable_mem_kb();
memory_stat_3->set_first(10);
memory_stat_3->set_last(10);
memory_stat_3->set_max(10);
memory_stat_3->set_min(10);
memory_stat_3->set_avg(10);
memory_stat_3->set_stddev(0);
memory_stat_3->set_variance(0);
memory_stat_3->set_sum(10);
memory_stat_3->set_count(1);
op_profile_data_3.set_times_called(1);
op_profile_data_3.set_name(kernel_name_3);
op_profile_data_3.set_run_order(3);
EXPECT_TRUE(AreOpProfileDataEqual(
model_profiling_data.subgraph_profiles(1).per_op_profiles(0),
op_profile_data_3));
}
TEST(SummaryWriterTest, MultiSubgraphHandleOutputForProto) {
ProfileSummaryProtoFormatter writer;
ModelProfilingData model_profiling_data_run;
SubGraphProfilingData* subgraph_profiling_data =
model_profiling_data_run.add_subgraph_profiles();
subgraph_profiling_data->set_subgraph_name("Primary graph");
OpProfileData* op_profile_data_1 =
subgraph_profiling_data->add_per_op_profiles();
op_profile_data_1->set_node_type("Convolution");
OpProfilingStat* inference_stat_1 =
op_profile_data_1->mutable_inference_microseconds();
inference_stat_1->set_first(10);
inference_stat_1->set_avg(10);
OpProfilingStat* mem_stat_1 = op_profile_data_1->mutable_mem_kb();
mem_stat_1->set_first(10);
mem_stat_1->set_avg(10);
op_profile_data_1->set_times_called(1);
op_profile_data_1->set_name("Kernel 1");
op_profile_data_1->set_run_order(1);
OpProfileData* op_profile_data_2 =
subgraph_profiling_data->add_per_op_profiles();
op_profile_data_2->set_node_type("Reshape");
OpProfilingStat* inference_stat_2 =
op_profile_data_2->mutable_inference_microseconds();
inference_stat_2->set_first(15);
inference_stat_2->set_avg(15);
OpProfilingStat* mem_stat_2 = op_profile_data_2->mutable_mem_kb();
mem_stat_2->set_first(10);
mem_stat_2->set_avg(10);
op_profile_data_2->set_times_called(1);
op_profile_data_2->set_name("Kernel 2");
op_profile_data_2->set_run_order(2);
SubGraphProfilingData* subgraph_profiling_data_1 =
model_profiling_data_run.add_subgraph_profiles();
subgraph_profiling_data_1->set_subgraph_name("Subgraph 1");
OpProfileData* op_profile_data_3 =
subgraph_profiling_data_1->add_per_op_profiles();
op_profile_data_3->set_node_type("Convolution");
OpProfilingStat* inference_stat_3 =
op_profile_data_3->mutable_inference_microseconds();
inference_stat_3->set_first(10);
inference_stat_3->set_avg(10);
OpProfilingStat* mem_stat_3 = op_profile_data_3->mutable_mem_kb();
mem_stat_3->set_first(10);
mem_stat_3->set_avg(10);
op_profile_data_3->set_times_called(1);
op_profile_data_3->set_name("Kernel 3");
op_profile_data_3->set_run_order(3);
DelegateProfilingData* delegate_profiling_data =
model_profiling_data_run.add_delegate_profiles();
OpProfileData* op_profile_data_4 =
delegate_profiling_data->add_per_op_profiles();
op_profile_data_4->set_node_type("Convolution");
OpProfilingStat* inference_stat_4 =
op_profile_data_4->mutable_inference_microseconds();
inference_stat_4->set_first(10);
inference_stat_4->set_avg(10);
OpProfilingStat* mem_stat_4 = op_profile_data_4->mutable_mem_kb();
mem_stat_4->set_first(10);
mem_stat_4->set_avg(10);
op_profile_data_4->set_times_called(1);
op_profile_data_4->set_name("Kernel 4");
op_profile_data_4->set_run_order(4);
ModelProfilingData model_profiling_data_init;
SubGraphProfilingData* subgraph_profiling_data_init =
model_profiling_data_init.add_subgraph_profiles();
subgraph_profiling_data_init->set_subgraph_name("Primary graph");
OpProfileData* op_profile_data_init_1 =
subgraph_profiling_data_init->add_per_op_profiles();
op_profile_data_init_1->set_node_type("Convolution");
OpProfilingStat* inference_stat_init_1 =
op_profile_data_init_1->mutable_inference_microseconds();
inference_stat_init_1->set_first(10);
inference_stat_init_1->set_avg(10);
op_profile_data_init_1->set_times_called(1);
OpProfilingStat* mem_stat_init_1 = op_profile_data_init_1->mutable_mem_kb();
mem_stat_init_1->set_first(10);
mem_stat_init_1->set_avg(10);
op_profile_data_init_1->set_name("ModifyGraphWithDelegate");
op_profile_data_init_1->set_run_order(1);
#ifdef __ANDROID__
std::string file_name = "/data/local/tmp/test_file.proto";
#else
std::string file_name = "/tmp/test_file.proto";
#endif
writer.HandleOutput(model_profiling_data_init.SerializeAsString(),
model_profiling_data_run.SerializeAsString(), file_name);
std::ifstream file(file_name, std::ios::binary);
ASSERT_TRUE(file.good());
BenchmarkProfilingData benchmark_profiling_data;
benchmark_profiling_data.ParseFromIstream(&file);
file.close();
ASSERT_TRUE(benchmark_profiling_data.model_name().empty());
EXPECT_TRUE(AreModelProfilingDataEqual(
benchmark_profiling_data.init_profile(), model_profiling_data_init));
EXPECT_TRUE(AreModelProfilingDataEqual(
benchmark_profiling_data.runtime_profile(), model_profiling_data_run));
}
TEST(SummaryWriterTest, MultiSubgraphShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetShortSummary(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, DelegationOutputString) {
ProfileSummaryDefaultFormatter writer;
auto delegate_stats_calculator =
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions());
delegate_stats_calculator.UpdateRunTotalUs(1);
std::string output = writer.GetOutputString(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
delegate_stats_calculator, {});
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, DelegationShortSummary) {
ProfileSummaryDefaultFormatter writer;
auto delegate_stats_calculator =
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions());
delegate_stats_calculator.UpdateRunTotalUs(1);
std::string output = writer.GetShortSummary(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
delegate_stats_calculator, {});
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(absl::StrContains(output, "Delegate internal"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_summary_formatter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_summary_formatter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fae8d13e-8242-493d-860a-3755cf23bf05 | cpp | tensorflow/tensorflow | profile_summarizer | tensorflow/lite/profiling/profile_summarizer.cc | tensorflow/lite/profiling/profile_summarizer_test.cc | #include "tensorflow/lite/profiling/profile_summarizer.h"
#include <memory>
#include <sstream>
#include <string>
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
namespace tflite {
namespace profiling {
namespace {
struct OperatorDetails {
uint32_t subgraph_index;
uint32_t node_index;
std::string op_description;
std::vector<std::string> inputs;
std::vector<std::string> outputs;
};
std::string GetTensorName(const tflite::Interpreter& interpreter,
int tensor_index) {
const auto tensor = interpreter.tensor(tensor_index);
if (tensor == nullptr || tensor->name == nullptr) {
return "Unknown";
}
return tensor->name;
}
std::vector<std::string> GetTensorNames(const tflite::Interpreter& interpreter,
const TfLiteIntArray* tensor_indices) {
std::vector<std::string> tensors;
tensors.reserve(tensor_indices->size);
for (int i = 0; i < tensor_indices->size; i++) {
tensors.push_back(GetTensorName(interpreter, tensor_indices->data[i]));
}
return tensors;
}
std::string ToString(const std::vector<std::string>& str_vector) {
std::stringstream stream;
stream << "[";
bool first = true;
for (const auto& s : str_vector) {
if (!first) {
stream << ", ";
} else {
first = false;
}
stream << s;
}
stream << "]";
return stream.str();
}
OperatorDetails GetOperatorDetails(const tflite::Interpreter& interpreter,
uint32_t subgraph_index,
uint32_t node_index) {
auto subgraph =
const_cast<tflite::Interpreter&>(interpreter).subgraph(subgraph_index);
auto node_reg = subgraph->node_and_registration(node_index);
auto inputs = node_reg->first.inputs;
auto outputs = node_reg->first.outputs;
const char* profiling_string =
interpreter.OpProfilingString(node_reg->second, &node_reg->first);
OperatorDetails details;
if (profiling_string) {
details.op_description = std::string(profiling_string);
}
details.inputs = GetTensorNames(interpreter, inputs);
details.outputs = GetTensorNames(interpreter, outputs);
return details;
}
}
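// Typical flow (sketch, mirroring profile_summarizer_test.cc): attach a
// profiling::BufferedProfiler to the Interpreter, run Invoke() between
// StartProfiling()/StopProfiling(), then
//   ProfileSummarizer summarizer;
//   summarizer.ProcessProfiles(profiler.GetProfileEvents(), *interpreter);
//   std::string report = summarizer.GetOutputString();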
ProfileSummarizer::ProfileSummarizer(
std::shared_ptr<ProfileSummaryFormatter> summary_formatter)
: summary_formatter_(summary_formatter) {
stats_calculator_map_[0] = std::make_unique<tensorflow::StatsCalculator>(
summary_formatter_->GetStatSummarizerOptions());
delegate_stats_calculator_ = std::make_unique<tensorflow::StatsCalculator>(
summary_formatter_->GetStatSummarizerOptions());
}
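// Buckets each profile event by type: operator invocations go into the
// per-subgraph StatsCalculator under a name built from the op's output
// tensors, delegate-internal events go into delegate_stats_calculator_, and
// other events are recorded together with their memory-usage delta; the
// top-level "Invoke" event is skipped.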
void ProfileSummarizer::ProcessProfiles(
const std::vector<const ProfileEvent*>& profile_stats,
const tflite::Interpreter& interpreter) {
if (profile_stats.empty()) return;
int node_num = 0;
std::map<uint32_t, int64_t> total_us_per_subgraph_map;
int64_t delegate_internal_total_us = 0;
for (auto event : profile_stats) {
const auto subgraph_index = event->extra_event_metadata;
auto stats_calculator = GetStatsCalculator(subgraph_index);
int64_t node_exec_time = event->elapsed_time;
if (event->event_type == Profiler::EventType::OPERATOR_INVOKE_EVENT) {
const auto node_index = event->event_metadata;
const auto op_details =
GetOperatorDetails(interpreter, subgraph_index, node_index);
std::string type_in_stats(event->tag);
if (!op_details.op_description.empty()) {
type_in_stats += "/" + op_details.op_description;
}
const auto node_name = ToString(op_details.outputs);
const auto node_name_in_stats =
node_name + ":" + std::to_string(node_index);
stats_calculator->AddNodeStats(node_name_in_stats, type_in_stats,
                                     node_num, node_exec_time, /*mem_used=*/0);
} else if (event->event_type ==
Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT) {
const std::string node_name(event->tag);
const auto node_name_in_stats =
"Delegate/" + node_name + ":" + std::to_string(event->event_metadata);
delegate_stats_calculator_->AddNodeStats(node_name_in_stats,
"DelegateOpInvoke", node_num,
                                               node_exec_time, /*mem_used=*/0);
} else if (event->event_type ==
Profiler::EventType::DELEGATE_PROFILED_OPERATOR_INVOKE_EVENT) {
const std::string node_name(event->tag);
const std::string type_in_stats(node_name);
const auto node_name_in_stats =
"Delegate/" + node_name + ":" + std::to_string(event->event_metadata);
stats_calculator->AddNodeStats(node_name_in_stats, type_in_stats,
                                     node_num, node_exec_time, /*mem_used=*/0);
} else {
const memory::MemoryUsage node_mem_usage =
event->end_mem_usage - event->begin_mem_usage;
std::string node_name(event->tag);
if (node_name == "Invoke") {
continue;
}
node_name += "/" + std::to_string(event->extra_event_metadata);
stats_calculator->AddNodeStats(node_name, event->tag, node_num,
node_exec_time,
node_mem_usage.mem_footprint_kb * 1000.0);
}
if (event->event_type !=
Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT) {
total_us_per_subgraph_map[subgraph_index] += node_exec_time;
} else {
delegate_internal_total_us += node_exec_time;
}
++node_num;
}
for (auto& total_us_per_subgraph_pair : total_us_per_subgraph_map) {
auto stats_calculator =
GetStatsCalculator(total_us_per_subgraph_pair.first);
stats_calculator->UpdateRunTotalUs(total_us_per_subgraph_pair.second);
}
if (delegate_internal_total_us > 0) {
delegate_stats_calculator_->UpdateRunTotalUs(delegate_internal_total_us);
}
SetSubgraphNameMap(interpreter);
}
tensorflow::StatsCalculator* ProfileSummarizer::GetStatsCalculator(
uint32_t subgraph_index) {
if (stats_calculator_map_.count(subgraph_index) == 0) {
stats_calculator_map_[subgraph_index] =
std::make_unique<tensorflow::StatsCalculator>(
summary_formatter_->GetStatSummarizerOptions());
}
return stats_calculator_map_[subgraph_index].get();
}
}
} | #include "tensorflow/lite/profiling/profile_summarizer.h"
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/profiling/buffered_profiler.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace profiling {
namespace {
const char* kOpName = "SimpleOpEval";
TfLiteStatus SimpleOpEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, /*index=*/0, &output));
int32_t* output_data = output->data.i32;
*output_data = *(input1->data.i32) + *(input2->data.i32);
return kTfLiteOk;
}
const char* SimpleOpProfilingString(const TfLiteContext* context,
const TfLiteNode* node) {
return "Profile";
}
TfLiteRegistration* RegisterSimpleOp() {
static TfLiteRegistration registration = {
      /*init=*/nullptr, /*free=*/nullptr, /*prepare=*/nullptr,
      /*invoke=*/SimpleOpEval, /*profiling_string=*/nullptr,
      /*builtin_code=*/tflite::BuiltinOperator_CUSTOM,
      /*custom_name=*/"SimpleOpEval", /*version=*/1};
return ®istration;
}
TfLiteRegistration* RegisterSimpleOpWithProfilingDetails() {
static TfLiteRegistration registration = {nullptr,
nullptr,
nullptr,
SimpleOpEval,
SimpleOpProfilingString,
tflite::BuiltinOperator_CUSTOM,
kOpName,
1};
return ®istration;
}
class SimpleOpModel : public SingleOpModel {
public:
void Init(const std::function<TfLiteRegistration*()>& registration);
tflite::Interpreter* GetInterpreter() { return interpreter_.get(); }
void SetInputs(int32_t x, int32_t y) {
PopulateTensor(inputs_[0], {x});
PopulateTensor(inputs_[1], {y});
}
int32_t GetOutput() { return ExtractVector<int32_t>(output_)[0]; }
private:
int inputs_[2];
int output_;
};
void SimpleOpModel::Init(
const std::function<TfLiteRegistration*()>& registration) {
inputs_[0] = AddInput({TensorType_INT32, {1}});
inputs_[1] = AddInput({TensorType_INT32, {1}});
output_ = AddOutput({TensorType_INT32, {}});
SetCustomOp(kOpName, {}, registration);
BuildInterpreter({GetShape(inputs_[0]), GetShape(inputs_[1])});
}
TEST(ProfileSummarizerTest, Empty) {
ProfileSummarizer summarizer;
std::string output = summarizer.GetOutputString();
EXPECT_GT(output.size(), 0);
}
TEST(ProfileSummarizerTest, Interpreter) {
BufferedProfiler profiler(1024);
SimpleOpModel m;
m.Init(RegisterSimpleOp);
auto interpreter = m.GetInterpreter();
interpreter->SetProfiler(&profiler);
profiler.StartProfiling();
m.SetInputs(1, 2);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_EQ(m.GetOutput(), 3);
profiler.StopProfiling();
ProfileSummarizer summarizer;
auto events = profiler.GetProfileEvents();
EXPECT_EQ(2, events.size());
summarizer.ProcessProfiles(profiler.GetProfileEvents(), *interpreter);
auto output = summarizer.GetOutputString();
ASSERT_TRUE(output.find("SimpleOpEval") != std::string::npos) << output;
ASSERT_TRUE(output.find("Invoke") == std::string::npos) << output;
}
TEST(ProfileSummarizerTest, InterpreterPlusProfilingDetails) {
BufferedProfiler profiler(1024);
SimpleOpModel m;
m.Init(RegisterSimpleOpWithProfilingDetails);
auto interpreter = m.GetInterpreter();
interpreter->SetProfiler(&profiler);
profiler.StartProfiling();
m.SetInputs(1, 2);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_EQ(m.GetOutput(), 3);
profiler.StopProfiling();
ProfileSummarizer summarizer;
auto events = profiler.GetProfileEvents();
EXPECT_EQ(2, events.size());
summarizer.ProcessProfiles(profiler.GetProfileEvents(), *interpreter);
auto output = summarizer.GetOutputString();
ASSERT_TRUE(output.find("SimpleOpEval/Profile") != std::string::npos)
<< output;
}
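// Control-flow fixture: subgraph 1 is an ADD body, subgraph 2 is a MUL body,
// and the primary graph wraps them in an IF op, so each branch attributes its
// operator events to a different subgraph index.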
class ProfileSummarizerIfOpTest : public subgraph_test_util::ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
}
};
TEST_F(ProfileSummarizerIfOpTest, TestIfTrue) {
BufferedProfiler profiler(1024);
interpreter_->SetProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
profiler.StartProfiling();
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
profiler.StopProfiling();
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, {1, 2}, {6, 9});
auto events = profiler.GetProfileEvents();
EXPECT_EQ(5, events.size());
int event_count_of_subgraph_zero = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 0; });
int event_count_of_subgraph_one = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 1; });
int event_count_of_subgraph_two = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 2; });
EXPECT_EQ(2, event_count_of_subgraph_zero);
EXPECT_EQ(3, event_count_of_subgraph_one);
EXPECT_EQ(0, event_count_of_subgraph_two);
}
TEST_F(ProfileSummarizerIfOpTest, TestIfFalse) {
BufferedProfiler profiler(1024);
interpreter_->SetProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
profiler.StartProfiling();
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
profiler.StopProfiling();
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, {1, 2}, {5, 14});
auto events = profiler.GetProfileEvents();
EXPECT_EQ(5, events.size());
int event_count_of_subgraph_zero = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 0; });
int event_count_of_subgraph_one = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 1; });
int event_count_of_subgraph_two = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 2; });
EXPECT_EQ(2, event_count_of_subgraph_zero);
EXPECT_EQ(0, event_count_of_subgraph_one);
EXPECT_EQ(3, event_count_of_subgraph_two);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_summarizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_summarizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab694cd8-38e1-4516-a33b-db2d3a7779e5 | cpp | tensorflow/tensorflow | memory_info | tensorflow/lite/profiling/memory_info.cc | tensorflow/lite/profiling/memory_info_test.cc | #include "tensorflow/lite/profiling/memory_info.h"
#include <stddef.h>
#include <ostream>
#ifdef __linux__
#include <malloc.h>
#include <sys/resource.h>
#include <sys/time.h>
#elif defined(__APPLE__)
#include <mach/mach.h>
#include <malloc/malloc.h>
#endif
namespace tflite {
namespace profiling {
namespace memory {
const size_t MemoryUsage::kValueNotSet = 0;
bool MemoryUsage::IsSupported() {
#if defined(__linux__) || defined(__APPLE__)
return true;
#endif
return false;
}
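// Linux: peak resident set size comes from getrusage() (ru_maxrss, in kB) and
// heap figures from mallinfo()/mallinfo2(); Apple: physical footprint comes
// from task_info(TASK_VM_INFO) and heap figures from mstats(). Elsewhere all
// fields stay at kValueNotSet.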
MemoryUsage GetMemoryUsage() {
MemoryUsage result;
#ifdef __linux__
rusage res;
if (getrusage(RUSAGE_SELF, &res) == 0) {
result.mem_footprint_kb = res.ru_maxrss;
}
#if defined(__NO_MALLINFO__)
result.total_allocated_bytes = -1;
result.in_use_allocated_bytes = -1;
#elif defined(__GLIBC__) && __GLIBC_MINOR__ >= 33
const auto mem = mallinfo2();
result.total_allocated_bytes = mem.arena;
result.in_use_allocated_bytes = mem.uordblks;
#else
const auto mem = mallinfo();
result.total_allocated_bytes = mem.arena;
result.in_use_allocated_bytes = mem.uordblks;
#endif
#elif defined(__APPLE__)
struct task_vm_info vm_info;
mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
auto status = task_info(mach_task_self(), TASK_VM_INFO,
reinterpret_cast<task_info_t>(&vm_info), &count);
if (status == KERN_SUCCESS) {
result.mem_footprint_kb =
static_cast<int64_t>(vm_info.phys_footprint / 1024.0);
}
struct mstats stats = mstats();
result.total_allocated_bytes = stats.bytes_total;
result.in_use_allocated_bytes = stats.bytes_used;
#endif
return result;
}
void MemoryUsage::AllStatsToStream(std::ostream* stream) const {
*stream << "max resident set size/physical footprint = "
<< mem_footprint_kb / 1000.0 << " MB, total non-mmapped heap size = "
<< total_allocated_bytes / 1000.0 / 1000.0
<< " MB, in-use heap size = "
<< in_use_allocated_bytes / 1000.0 / 1000.0 << " MB";
}
}
}
} | #include "tensorflow/lite/profiling/memory_info.h"
#include <memory>
#include <new>
#include <sstream>
#include <string>
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace memory {
TEST(MemoryUsage, AddAndSub) {
MemoryUsage mem1, mem2;
mem1.mem_footprint_kb = 5;
mem1.total_allocated_bytes = 7000;
mem1.in_use_allocated_bytes = 2000;
mem2.mem_footprint_kb = 3;
mem2.total_allocated_bytes = 7000;
mem2.in_use_allocated_bytes = 4000;
const auto add_mem = mem1 + mem2;
EXPECT_EQ(8, add_mem.mem_footprint_kb);
EXPECT_EQ(14000, add_mem.total_allocated_bytes);
EXPECT_EQ(6000, add_mem.in_use_allocated_bytes);
const auto sub_mem = mem1 - mem2;
EXPECT_EQ(2, sub_mem.mem_footprint_kb);
EXPECT_EQ(0, sub_mem.total_allocated_bytes);
EXPECT_EQ(-2000, sub_mem.in_use_allocated_bytes);
}
TEST(MemoryUsage, GetMemoryUsage) {
MemoryUsage result;
EXPECT_EQ(MemoryUsage::kValueNotSet, result.mem_footprint_kb);
EXPECT_EQ(MemoryUsage::kValueNotSet, result.total_allocated_bytes);
EXPECT_EQ(MemoryUsage::kValueNotSet, result.in_use_allocated_bytes);
#if defined(__linux__) || defined(__APPLE__)
constexpr int size = 10 * 1024 * 1024;
std::unique_ptr<unsigned char[]> byte_array(new unsigned char[size]);
for (int i = 0; i < size; ++i) {
byte_array[i] = i % 256;
}
result = GetMemoryUsage();
for (int i = 0; i < size; ++i) {
EXPECT_EQ(byte_array[i], i % 256);
}
EXPECT_GE(result.mem_footprint_kb, size / 1024);
EXPECT_GE(result.total_allocated_bytes, size);
EXPECT_GE(result.in_use_allocated_bytes, size);
#endif
}
TEST(MemoryUsage, OutputMemoryUsageToStream) {
MemoryUsage memory_usage = GetMemoryUsage();
std::stringstream stream;
stream << memory_usage;
std::string message = stream.str();
EXPECT_STRNE(message.c_str(), "");
}
TEST(MemoryUsage, IsSupported) {
#if defined(__linux__) || defined(__APPLE__)
EXPECT_TRUE(MemoryUsage::IsSupported());
#else
EXPECT_FALSE(MemoryUsage::IsSupported());
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/memory_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/memory_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f0272bc-020c-4677-9fbd-22418cfd65da | cpp | tensorflow/tensorflow | atrace_profiler | tensorflow/lite/profiling/atrace_profiler.cc | tensorflow/lite/profiling/atrace_profiler_test.cc | #include "tensorflow/lite/profiling/atrace_profiler.h"
#include <dlfcn.h>
#include "tensorflow/lite/core/api/profiler.h"
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <string>
#include <type_traits>
namespace tflite {
namespace profiling {
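// Profiler that forwards TFLite profiling events to the Android ATrace API
// (ATrace_beginSection / ATrace_endSection), resolved at runtime from
// libandroid.so, so per-op events appear in systrace/Perfetto captures.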
class ATraceProfiler : public tflite::Profiler {
public:
using FpIsEnabled = std::add_pointer<bool()>::type;
using FpBeginSection = std::add_pointer<void(const char*)>::type;
using FpEndSection = std::add_pointer<void()>::type;
ATraceProfiler() {
handle_ = dlopen("libandroid.so", RTLD_NOW | RTLD_LOCAL);
if (handle_) {
atrace_is_enabled_ =
reinterpret_cast<FpIsEnabled>(dlsym(handle_, "ATrace_isEnabled"));
atrace_begin_section_ = reinterpret_cast<FpBeginSection>(
dlsym(handle_, "ATrace_beginSection"));
atrace_end_section_ =
reinterpret_cast<FpEndSection>(dlsym(handle_, "ATrace_endSection"));
if (!atrace_is_enabled_ || !atrace_begin_section_ ||
!atrace_end_section_) {
dlclose(handle_);
handle_ = nullptr;
}
}
}
~ATraceProfiler() override {
if (handle_) {
dlclose(handle_);
}
}
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
if (handle_ && atrace_is_enabled_()) {
std::string trace_event_tag = tag;
trace_event_tag += "@";
trace_event_tag += std::to_string(event_metadata1) + "/" +
std::to_string(event_metadata2);
atrace_begin_section_(trace_event_tag.c_str());
}
return 0;
}
void EndEvent(uint32_t event_handle) override {
if (handle_) {
atrace_end_section_();
}
}
private:
void* handle_;
FpIsEnabled atrace_is_enabled_;
FpBeginSection atrace_begin_section_;
FpEndSection atrace_end_section_;
};
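// Returns an ATraceProfiler when tracing is enabled: unconditionally when the
// build defines TFLITE_ENABLE_DEFAULT_PROFILER, otherwise only on Android
// when the system property debug.tflite.trace is set to "1"; nullptr in all
// other cases.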
std::unique_ptr<tflite::Profiler> MaybeCreateATraceProfiler() {
#if defined(TFLITE_ENABLE_DEFAULT_PROFILER)
return std::unique_ptr<tflite::Profiler>(new ATraceProfiler());
#else
#if defined(__ANDROID__)
constexpr char kTraceProp[] = "debug.tflite.trace";
char trace_enabled[PROP_VALUE_MAX] = "";
int length = __system_property_get(kTraceProp, trace_enabled);
if (length == 1 && trace_enabled[0] == '1') {
return std::unique_ptr<tflite::Profiler>(new ATraceProfiler());
}
#endif
return nullptr;
#endif
}
}
} | #include "tensorflow/lite/profiling/atrace_profiler.h"
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace {
TEST(ATraceProfilerTest, MaybeCreateATraceProfiler) {
auto initial_state_profiler = MaybeCreateATraceProfiler();
#if !defined(TFLITE_ENABLE_DEFAULT_PROFILER)
EXPECT_EQ(nullptr, initial_state_profiler.get());
#else
EXPECT_NE(nullptr, initial_state_profiler.get());
#endif
#if defined(__ANDROID__)
if (__system_property_set("debug.tflite.trace", "1") == 0) {
auto on_state_profiler = MaybeCreateATraceProfiler();
EXPECT_NE(nullptr, on_state_profiler.get());
}
if (__system_property_set("debug.tflite.trace", "0") == 0) {
auto off_state_profiler = MaybeCreateATraceProfiler();
#if !defined(TFLITE_ENABLE_DEFAULT_PROFILER)
EXPECT_EQ(nullptr, off_state_profiler.get());
#else
EXPECT_NE(nullptr, off_state_profiler.get());
#endif
}
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/atrace_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/atrace_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8fe449ec-1b51-4dea-84dc-1db9f8e42aac | cpp | tensorflow/tensorflow | memory_usage_monitor | tensorflow/lite/profiling/memory_usage_monitor.cc | tensorflow/lite/profiling/memory_usage_monitor_test.cc | #include "tensorflow/lite/profiling/memory_usage_monitor.h"
#include <memory>
#include <utility>
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
constexpr float MemoryUsageMonitor::kInvalidMemUsageMB;
MemoryUsageMonitor::MemoryUsageMonitor(int sampling_interval_ms,
std::unique_ptr<Sampler> sampler)
: sampler_(std::move(sampler)),
is_supported_(false),
sampling_interval_(absl::Milliseconds(sampling_interval_ms)) {
is_supported_ = (sampler_ != nullptr && sampler_->IsSupported());
if (!is_supported_) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Getting memory usage isn't supported on this platform!\n");
return;
}
}
void MemoryUsageMonitor::Start() {
if (!is_supported_) return;
if (check_memory_thd_ != nullptr) {
TFLITE_LOG(TFLITE_LOG_INFO, "Memory monitoring has already started!\n");
return;
}
stop_signal_ = std::make_unique<absl::Notification>();
check_memory_thd_ = std::make_unique<std::thread>(([this]() {
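    // Periodically sample the process memory footprint and track the peak
    // until Stop() notifies this thread to exit.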
while (true) {
const auto mem_info = sampler_->GetMemoryUsage();
if (mem_info.mem_footprint_kb > peak_mem_footprint_kb_) {
peak_mem_footprint_kb_ = mem_info.mem_footprint_kb;
}
if (stop_signal_->HasBeenNotified()) break;
sampler_->SleepFor(sampling_interval_);
}
}));
}
void MemoryUsageMonitor::Stop() {
if (!is_supported_) return;
if (check_memory_thd_ == nullptr) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Memory monitoring hasn't started yet or has stopped!\n");
return;
}
StopInternal();
}
void MemoryUsageMonitor::StopInternal() {
if (check_memory_thd_ == nullptr) return;
stop_signal_->Notify();
if (check_memory_thd_ != nullptr) {
check_memory_thd_->join();
}
stop_signal_.reset(nullptr);
check_memory_thd_.reset(nullptr);
}
}
}
} | #include "tensorflow/lite/profiling/memory_usage_monitor.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
class MemoryUsageNotSupportedSampler : public MemoryUsageMonitor::Sampler {
public:
bool IsSupported() override { return false; }
};
TEST(MemoryUsageMonitor, NotSupported) {
MemoryUsageMonitor monitor1(50, std::unique_ptr<MemoryUsageMonitor::Sampler>(
new MemoryUsageNotSupportedSampler()));
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor1.GetPeakMemUsageInMB());
MemoryUsageMonitor monitor2(50, nullptr);
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor2.GetPeakMemUsageInMB());
}
class MemoryUsageMonitorTest : public ::testing::Test {
protected:
class FakeMemoryUsageSampler : public MemoryUsageMonitor::Sampler {
public:
explicit FakeMemoryUsageSampler(int64_t* num_sleeps)
: sleep_cnt_(num_sleeps) {}
bool IsSupported() override { return true; }
MemoryUsage GetMemoryUsage() override {
MemoryUsage result;
result.mem_footprint_kb = 5 * ((*sleep_cnt_) + 1) * 1024;
return result;
}
void SleepFor(const absl::Duration& duration) override {
(*sleep_cnt_)++;
absl::SleepFor(duration);
}
private:
int64_t* const sleep_cnt_ = nullptr;
};
void SetUp() override {
monitor_ = std::make_unique<MemoryUsageMonitor>(
50, std::unique_ptr<MemoryUsageMonitor::Sampler>(
new FakeMemoryUsageSampler(&num_sleeps_)));
}
int64_t num_sleeps_ = 0;
std::unique_ptr<MemoryUsageMonitor> monitor_ = nullptr;
};
TEST_F(MemoryUsageMonitorTest, StartAndStop) {
monitor_->Start();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, NoStartAndStop) {
monitor_->Stop();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StartAndNoStop) {
monitor_->Start();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StopFirst) {
monitor_->Stop();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
monitor_->Start();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, MultiStartAndStops) {
monitor_->Start();
monitor_->Start();
monitor_->Stop();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StartStopPairs) {
monitor_->Start();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
monitor_->Start();
absl::SleepFor(absl::Milliseconds(100));
monitor_->Stop();
EXPECT_GE(num_sleeps_, 1);
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/memory_usage_monitor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/memory_usage_monitor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9341881-a6be-430e-8b9b-db2749301f2e | cpp | tensorflow/tensorflow | profile_buffer | tensorflow/lite/profiling/profile_buffer.cc | tensorflow/lite/profiling/profile_buffer_test.cc | #include "tensorflow/lite/profiling/profile_buffer.h"
#include <utility>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace profiling {
uint32_t ProfileBuffer::BeginEvent(const char* tag,
ProfileEvent::EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
if (!enabled_) {
return kInvalidEventHandle;
}
uint64_t timestamp = time::NowMicros();
const auto next_index = GetNextEntryIndex();
if (next_index.second) {
return next_index.first;
}
const int index = next_index.first;
event_buffer_[index].tag = tag;
event_buffer_[index].event_type = event_type;
event_buffer_[index].event_metadata = event_metadata1;
event_buffer_[index].extra_event_metadata = event_metadata2;
event_buffer_[index].begin_timestamp_us = timestamp;
event_buffer_[index].elapsed_time = 0;
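  // Memory usage is only snapshotted for non-operator events; per-operator
  // invoke events skip it.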
if (event_type != Profiler::EventType::OPERATOR_INVOKE_EVENT) {
event_buffer_[index].begin_mem_usage = memory::GetMemoryUsage();
}
current_index_++;
return index;
}
void ProfileBuffer::EndEvent(uint32_t event_handle,
const int64_t* event_metadata1,
const int64_t* event_metadata2) {
if (!enabled_ || event_handle == kInvalidEventHandle ||
event_handle > current_index_) {
return;
}
const uint32_t max_size = event_buffer_.size();
if (current_index_ > (max_size + event_handle)) {
return;
}
int event_index = event_handle % max_size;
event_buffer_[event_index].elapsed_time =
time::NowMicros() - event_buffer_[event_index].begin_timestamp_us;
if (event_buffer_[event_index].event_type !=
Profiler::EventType::OPERATOR_INVOKE_EVENT) {
event_buffer_[event_index].end_mem_usage = memory::GetMemoryUsage();
}
if (event_metadata1) {
event_buffer_[event_index].event_metadata = *event_metadata1;
}
if (event_metadata2) {
event_buffer_[event_index].extra_event_metadata = *event_metadata2;
}
}
const struct ProfileEvent* ProfileBuffer::At(size_t index) const {
size_t size = Size();
if (index >= size) {
return nullptr;
}
const uint32_t max_size = event_buffer_.size();
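  // The underlying storage is a ring buffer: once it has wrapped around,
  // logical index 0 maps to the oldest surviving entry rather than slot 0.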
uint32_t start =
(current_index_ > max_size) ? current_index_ % max_size : max_size;
index = (index + start) % max_size;
return &event_buffer_[index];
}
void ProfileBuffer::AddEvent(const char* tag,
ProfileEvent::EventType event_type,
uint64_t elapsed_time, int64_t event_metadata1,
int64_t event_metadata2) {
if (!enabled_) {
return;
}
const auto next_index = GetNextEntryIndex();
if (next_index.second) {
return;
}
const int index = next_index.first;
event_buffer_[index].tag = tag;
event_buffer_[index].event_type = event_type;
event_buffer_[index].event_metadata = event_metadata1;
event_buffer_[index].extra_event_metadata = event_metadata2;
event_buffer_[index].begin_timestamp_us = 0;
event_buffer_[index].elapsed_time = elapsed_time;
current_index_++;
}
std::pair<int, bool> ProfileBuffer::GetNextEntryIndex() {
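  // Returns the slot for the next event and whether the event must be dropped
  // because the buffer is full and dynamic expansion is disabled.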
int index = current_index_ % event_buffer_.size();
if (current_index_ == 0 || index != 0) {
return std::make_pair(index, false);
}
if (!allow_dynamic_expansion_) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"Warning: Dropping ProfileBuffer event.");
return std::make_pair(current_index_, true);
} else {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"Warning: Doubling internal profiling buffer.");
event_buffer_.resize(current_index_ * 2);
return std::make_pair(current_index_, false);
}
}
}
} | #include "tensorflow/lite/profiling/profile_buffer.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace {
std::vector<const ProfileEvent*> GetProfileEvents(const ProfileBuffer& buffer) {
std::vector<const ProfileEvent*> events;
for (size_t i = 0; i < buffer.Size(); i++) {
events.push_back(buffer.At(i));
}
return events;
}
TEST(ProfileBufferTest, Empty) {
  ProfileBuffer buffer(/*max_size*/ 0, /*enabled*/ true);
EXPECT_EQ(0, buffer.Size());
}
TEST(ProfileBufferTest, AddEvent) {
  ProfileBuffer buffer(/*max_size*/ 10, /*enabled*/ true);
EXPECT_EQ(0, buffer.Size());
auto event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
EXPECT_GE(event_handle, 0);
EXPECT_EQ(1, buffer.Size());
auto event = GetProfileEvents(buffer)[0];
EXPECT_EQ(event->tag, "hello");
EXPECT_GT(event->begin_timestamp_us, 0);
EXPECT_EQ(event->event_type, ProfileEvent::EventType::DEFAULT);
EXPECT_EQ(event->event_metadata, 42);
buffer.EndEvent(event_handle);
EXPECT_EQ(1, buffer.Size());
EXPECT_GE(event->elapsed_time, 0);
}
TEST(ProfileBufferTest, EndEventWithMetadata) {
  ProfileBuffer buffer(/*max_size*/ 10, /*enabled*/ true);
EXPECT_EQ(0, buffer.Size());
auto event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
const int64_t kEventMetadata1 = 18;
const int64_t kEventMetadata2 = 36;
buffer.EndEvent(event_handle, &kEventMetadata1, &kEventMetadata2);
EXPECT_GE(event_handle, 0);
EXPECT_EQ(1, buffer.Size());
auto event = GetProfileEvents(buffer)[0];
EXPECT_EQ(event->tag, "hello");
EXPECT_GT(event->begin_timestamp_us, 0);
EXPECT_EQ(event->event_type, ProfileEvent::EventType::DEFAULT);
EXPECT_EQ(event->event_metadata, kEventMetadata1);
EXPECT_EQ(event->extra_event_metadata, kEventMetadata2);
EXPECT_EQ(1, buffer.Size());
EXPECT_GE(event->elapsed_time, 0);
}
TEST(ProfileBufferTest, OverFlow) {
const int max_size = 4;
ProfileBuffer buffer{max_size, true};
std::vector<std::string> eventNames = {"first", "second", "third", "fourth"};
for (int i = 0; i < 2 * max_size; i++) {
buffer.BeginEvent(eventNames[i % 4].c_str(),
ProfileEvent::EventType::DEFAULT, i, 0);
size_t expected_size = std::min(i + 1, max_size);
EXPECT_EQ(expected_size, buffer.Size());
}
EXPECT_EQ(max_size, buffer.Size());
for (size_t j = 0; j < buffer.Size(); ++j) {
auto event = buffer.At(j);
EXPECT_EQ(eventNames[j % 4], event->tag);
EXPECT_EQ(ProfileEvent::EventType::DEFAULT, event->event_type);
EXPECT_EQ(j, event->event_metadata);
}
}
TEST(ProfileBufferTest, DynamicIncrease) {
const int max_initial_size = 4;
  ProfileBuffer buffer{/*max_size*/ max_initial_size, /*enabled*/ true,
                       /*allow_dynamic_expansion*/ true};
std::vector<std::string> eventNames = {"first", "second", "third", "fourth"};
for (int i = 0; i < 2 * max_initial_size; i++) {
buffer.BeginEvent(eventNames[i % 4].c_str(),
ProfileEvent::EventType::DEFAULT, i, 0);
const size_t expected_size = i + 1;
EXPECT_EQ(expected_size, buffer.Size());
}
EXPECT_EQ(2 * max_initial_size, buffer.Size());
for (size_t j = 0; j < buffer.Size(); ++j) {
auto event = buffer.At(j);
EXPECT_EQ(eventNames[j % 4], event->tag);
EXPECT_EQ(ProfileEvent::EventType::DEFAULT, event->event_type);
EXPECT_EQ(j, event->event_metadata);
}
}
TEST(ProfileBufferTest, Enable) {
  ProfileBuffer buffer(/*max_size*/ 10, /*enabled*/ false);
EXPECT_EQ(0, buffer.Size());
auto event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
EXPECT_EQ(kInvalidEventHandle, event_handle);
EXPECT_EQ(0, buffer.Size());
buffer.SetEnabled(true);
event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
EXPECT_GE(event_handle, 0);
EXPECT_EQ(1, buffer.Size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67244fbc-8978-44b7-bdd5-90b0b63a64ce | cpp | tensorflow/tensorflow | root_profiler | tensorflow/lite/profiling/root_profiler.cc | tensorflow/lite/profiling/root_profiler_test.cc | #include "tensorflow/lite/profiling/root_profiler.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite {
namespace profiling {
void RootProfiler::AddProfiler(Profiler* profiler) {
if (profiler == nullptr) return;
profilers_.push_back(profiler);
}
void RootProfiler::AddProfiler(std::unique_ptr<Profiler>&& profiler) {
if (profiler == nullptr) return;
owned_profilers_.emplace_back(std::move(profiler));
profilers_.push_back(owned_profilers_.back().get());
}
uint32_t RootProfiler::BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
if (profilers_.size() == 1) {
return profilers_[0]->BeginEvent(tag, event_type, event_metadata1,
event_metadata2);
}
auto id = next_event_id_++;
std::vector<uint32_t> event_ids;
event_ids.reserve(profilers_.size());
for (auto* profiler : profilers_) {
event_ids.push_back(profiler->BeginEvent(tag, event_type, event_metadata1,
event_metadata2));
}
events_.emplace(id, std::move(event_ids));
return id;
}
void RootProfiler::EndEvent(uint32_t event_handle, int64_t event_metadata1,
int64_t event_metadata2) {
if (profilers_.size() == 1) {
return profilers_[0]->EndEvent(event_handle, event_metadata1,
event_metadata2);
}
if (const auto it = events_.find(event_handle); it != events_.end()) {
const auto& event_ids = it->second;
for (auto idx = 0; idx < event_ids.size(); idx++) {
profilers_[idx]->EndEvent(event_ids[idx], event_metadata1,
event_metadata2);
}
events_.erase(it);
}
}
void RootProfiler::EndEvent(uint32_t event_handle) {
if (profilers_.size() == 1) {
return profilers_[0]->EndEvent(event_handle);
}
if (const auto it = events_.find(event_handle); it != events_.end()) {
const auto& event_ids = it->second;
for (auto idx = 0; idx < event_ids.size(); idx++) {
profilers_[idx]->EndEvent(event_ids[idx]);
}
events_.erase(it);
}
}
void RootProfiler::AddEvent(const char* tag, EventType event_type,
uint64_t metric, int64_t event_metadata1,
int64_t event_metadata2) {
for (auto* profiler : profilers_) {
profiler->AddEvent(tag, event_type, metric, event_metadata1,
event_metadata2);
}
}
void RootProfiler::AddEventWithData(const char* tag, EventType event_type,
const void* data) {
for (auto* profiler : profilers_) {
profiler->AddEventWithData(tag, event_type, data);
}
}
void RootProfiler::RemoveChildProfilers() {
owned_profilers_.clear();
profilers_.clear();
events_.clear();
}
}
} | #include "tensorflow/lite/profiling/root_profiler.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/profiler.h"
using ::testing::_;
using ::testing::StrictMock;
namespace tflite {
namespace profiling {
namespace {
constexpr char kTag[] = "tag";
class MockProfiler : public Profiler {
public:
MOCK_METHOD(uint32_t, BeginEvent,
(const char* tag, EventType event_type, int64_t event_metadata1,
int64_t event_metadata2),
(override));
MOCK_METHOD(void, EndEvent, (uint32_t event_handle), (override));
MOCK_METHOD(void, EndEvent,
(uint32_t event_handle, int64_t event_metadata1,
int64_t event_metadata2),
(override));
MOCK_METHOD(void, AddEvent,
(const char* tag, EventType event_type, uint64_t metric,
int64_t event_metadata1, int64_t event_metadata2),
(override));
MOCK_METHOD(void, AddEventWithData,
(const char* tag, EventType event_type, const void* data),
(override));
};
using MockProfilerT = StrictMock<MockProfiler>;
TEST(RootProfilerTest, ChildProfilerTest) {
auto mock_profiler = std::make_unique<MockProfilerT>();
auto* mock = mock_profiler.get();
RootProfiler root;
root.AddProfiler(mock_profiler.get());
ON_CALL(*mock, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(42));
EXPECT_CALL(*mock, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock, EndEvent(42, 3, 4));
EXPECT_CALL(*mock, AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
5, 6, 7));
EXPECT_CALL(*mock, AddEventWithData(kTag, Profiler::EventType::DEFAULT, _));
auto begin = root.BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2);
root.EndEvent(begin, 3, 4);
root.AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT, 5, 6, 7);
root.AddEventWithData(kTag, Profiler::EventType::DEFAULT, nullptr);
}
TEST(RootProfilerTest, OwnedProfilerTest) {
auto mock_profiler = std::make_unique<MockProfilerT>();
auto* mock = mock_profiler.get();
RootProfiler root;
root.AddProfiler(std::move(mock_profiler));
ON_CALL(*mock, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(42));
EXPECT_CALL(*mock, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock, EndEvent(42));
EXPECT_CALL(*mock, AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
3, 4, 5));
auto begin = root.BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2);
root.EndEvent(begin);
root.AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT, 3, 4, 5);
}
TEST(RootProfilerTest, MultipleProfilerTest) {
auto mock_profiler0 = std::make_unique<MockProfilerT>();
auto* mock0 = mock_profiler0.get();
auto mock_profiler1 = std::make_unique<MockProfilerT>();
auto* mock1 = mock_profiler1.get();
RootProfiler root;
root.AddProfiler(std::move(mock_profiler0));
root.AddProfiler(std::move(mock_profiler1));
ON_CALL(*mock0, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(42));
ON_CALL(*mock1, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(24));
EXPECT_CALL(*mock0, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock0, EndEvent(42));
EXPECT_CALL(*mock1, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock1, EndEvent(24));
auto begin = root.BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2);
root.EndEvent(begin);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/root_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/root_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f22e62fc-1f6e-4485-be68-31adc5cae6ff | cpp | tensorflow/tensorflow | time | tensorflow/lite/profiling/time.cc | tensorflow/lite/profiling/time_test.cc | #include "tensorflow/lite/profiling/time.h"
#if defined(_MSC_VER)
#include <chrono>
#include <thread>
#else
#include <sys/time.h>
#include <time.h>
#endif
namespace tflite {
namespace profiling {
namespace time {
#if defined(_MSC_VER)
uint64_t NowMicros() {
return static_cast<uint64_t>(
std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count());
}
void SleepForMicros(uint64_t micros) {
std::this_thread::sleep_for(std::chrono::microseconds(micros));
}
#else
uint64_t NowMicros() {
#if defined(__APPLE__)
return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW) / 1e3;
#else
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<uint64_t>(ts.tv_sec) * 1e6 +
static_cast<uint64_t>(ts.tv_nsec) / 1e3;
#endif
}
void SleepForMicros(uint64_t micros) {
timespec sleep_time;
sleep_time.tv_sec = micros / 1e6;
micros -= sleep_time.tv_sec * 1e6;
sleep_time.tv_nsec = micros * 1e3;
nanosleep(&sleep_time, nullptr);
}
#endif
}
}
} | #include "tensorflow/lite/profiling/time.h"
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace time {
TEST(TimeTest, NowMicros) {
auto now0 = NowMicros();
EXPECT_GT(now0, 0);
auto now1 = NowMicros();
EXPECT_GE(now1, now0);
}
TEST(TimeTest, SleepForMicros) {
SleepForMicros(0);
auto now0 = NowMicros();
SleepForMicros(50);
auto now1 = NowMicros();
EXPECT_GE(now1, now0 + 50);
now0 = NowMicros();
SleepForMicros(1e6 + 50);
now1 = NowMicros();
EXPECT_GE(now1, now0 + 1e6 + 50);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/time.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/time_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
573359dd-8239-415a-9913-bdd322cdacc7 | cpp | tensorflow/tensorflow | telemetry | tensorflow/lite/delegates/telemetry.cc | tensorflow/lite/delegates/telemetry_test.cc | #include "tensorflow/lite/delegates/telemetry.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
TfLiteStatus ReportDelegateSettings(TfLiteContext* context,
TfLiteDelegate* delegate,
const TFLiteSettings& settings) {
auto* profiler = reinterpret_cast<Profiler*>(context->profiler);
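  // The delegate pointer and the settings flatbuffer are forwarded as opaque
  // event metadata on the runtime instrumentation event.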
const int64_t event_metadata1 = reinterpret_cast<int64_t>(delegate);
const int64_t event_metadata2 = reinterpret_cast<int64_t>(&settings);
TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(profiler, kDelegateSettingsTag,
event_metadata1, event_metadata2);
return kTfLiteOk;
}
TfLiteStatus ReportDelegateStatus(TfLiteContext* context,
TfLiteDelegate* delegate,
const DelegateStatus& status) {
auto* profiler = reinterpret_cast<Profiler*>(context->profiler);
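  // The packed status (source + code) goes into event_metadata1; kTfLiteOk is
  // recorded as event_metadata2.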
TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(profiler, kDelegateStatusTag,
status.full_status(),
static_cast<int64_t>(kTfLiteOk));
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/delegates/telemetry.h"
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace delegates {
namespace {
constexpr int32_t kDummyCode = 2;
constexpr bool kDummyGpuPrecisionLossAllowed = true;
constexpr tflite::Delegate kDummyDelegate = tflite::Delegate_GPU;
constexpr DelegateStatusSource kDummySource =
DelegateStatusSource::TFLITE_NNAPI;
TEST(TelemetryTest, StatusConversion) {
DelegateStatus status(kDummySource, kDummyCode);
int64_t serialized_int = status.full_status();
DelegateStatus deserialized_status(serialized_int);
EXPECT_EQ(kDummyCode, deserialized_status.code());
EXPECT_EQ(kDummySource, deserialized_status.source());
EXPECT_EQ(serialized_int, deserialized_status.full_status());
}
class DelegateProfiler : public Profiler {
public:
DelegateProfiler() {}
~DelegateProfiler() override = default;
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
int event_handle = -1;
if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateSettingsTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
EXPECT_NE(event_metadata1, 0);
auto* delegate = reinterpret_cast<TfLiteDelegate*>(event_metadata1);
EXPECT_EQ(delegate->flags, kTfLiteDelegateFlagsNone);
EXPECT_NE(event_metadata2, 0);
auto* settings = reinterpret_cast<TFLiteSettings*>(event_metadata2);
EXPECT_EQ(settings->delegate(), kDummyDelegate);
EXPECT_EQ(settings->gpu_settings()->is_precision_loss_allowed(),
kDummyGpuPrecisionLossAllowed);
} else if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateStatusTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
EXPECT_EQ(event_metadata2, static_cast<int64_t>(kTfLiteOk));
DelegateStatus reported_status(event_metadata1);
EXPECT_EQ(reported_status.source(), kDummySource);
EXPECT_EQ(reported_status.code(), kDummyCode);
}
EXPECT_NE(-1, event_handle);
return event_handle;
}
void EndEvent(uint32_t event_handle) override {
EXPECT_EQ(event_handle, event_buffer_.size());
}
int NumRecordedEvents() { return event_buffer_.size(); }
private:
std::vector<profiling::ProfileEvent> event_buffer_;
};
TEST(TelemetryTest, DelegateStatusReport) {
DelegateProfiler profiler;
TfLiteDelegate delegate = TfLiteDelegateCreate();
TfLiteContext context;
context.profiler = &profiler;
DelegateStatus status(kDummySource, kDummyCode);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 2);
}
TEST(TelemetryTest, DelegateSettingsReport) {
DelegateProfiler profiler;
TfLiteDelegate delegate = TfLiteDelegateCreate();
TfLiteContext context;
context.profiler = &profiler;
flatbuffers::FlatBufferBuilder flatbuffer_builder;
flatbuffers::Offset<tflite::GPUSettings> gpu_settings =
tflite::CreateGPUSettings(
          flatbuffer_builder,
          /*is_precision_loss_allowed=*/kDummyGpuPrecisionLossAllowed);
auto* tflite_settings_ptr = flatbuffers::GetTemporaryPointer(
flatbuffer_builder,
CreateTFLiteSettings(flatbuffer_builder, kDummyDelegate,
                           /*nnapi_settings=*/0,
                           /*gpu_settings=*/gpu_settings));
EXPECT_EQ(ReportDelegateSettings(&context, &delegate, *tflite_settings_ptr),
kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 1);
DelegateStatus status(kDummySource, kDummyCode);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/telemetry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/telemetry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7efae547-41ee-469e-b08b-82b375f6c277 | cpp | tensorflow/tensorflow | label_image | tensorflow/lite/examples/label_image/label_image.cc | tensorflow/lite/examples/label_image/label_image_test.cc | #include "tensorflow/lite/examples/label_image/label_image.h"
#include <fcntl.h>
#include <getopt.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
#include "tensorflow/lite/examples/label_image/log.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace label_image {
double get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr;
using ProvidedDelegateList = tflite::tools::ProvidedDelegateList;
class DelegateProviders {
public:
DelegateProviders() : delegate_list_util_(¶ms_) {
delegate_list_util_.AddAllDelegateParams();
delegate_list_util_.AppendCmdlineFlags(flags_);
params_.RemoveParam("help");
delegate_list_util_.RemoveCmdlineFlag(flags_, "help");
}
bool InitFromCmdlineArgs(int* argc, const char** argv) {
return Flags::Parse(argc, argv, flags_);
}
void MergeSettingsIntoParams(const Settings& s) {
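    // Map the simple command-line Settings onto delegate-provider parameters,
    // warning when the corresponding delegate provider is not linked in.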
if (s.gl_backend) {
if (!params_.HasParam("use_gpu")) {
LOG(WARN) << "GPU delegate execution provider isn't linked or GPU "
"delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_gpu", true);
if (params_.HasParam("gpu_inference_for_sustained_speed")) {
params_.Set<bool>("gpu_inference_for_sustained_speed", true);
}
params_.Set<bool>("gpu_precision_loss_allowed", s.allow_fp16);
}
}
if (s.accel) {
if (!params_.HasParam("use_nnapi")) {
LOG(WARN) << "NNAPI delegate execution provider isn't linked or NNAPI "
"delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_nnapi", true);
params_.Set<bool>("nnapi_allow_fp16", s.allow_fp16);
}
}
if (s.hexagon_delegate) {
if (!params_.HasParam("use_hexagon")) {
LOG(WARN) << "Hexagon delegate execution provider isn't linked or "
"Hexagon delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_hexagon", true);
params_.Set<bool>("hexagon_profiling", s.profiling);
}
}
if (s.xnnpack_delegate) {
if (!params_.HasParam("use_xnnpack")) {
LOG(WARN) << "XNNPACK delegate execution provider isn't linked or "
"XNNPACK delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_xnnpack", true);
params_.Set<int32_t>("num_threads", s.number_of_threads);
}
}
}
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates()
const {
return delegate_list_util_.CreateAllRankedDelegates();
}
std::string GetHelpMessage(const std::string& cmdline) const {
return Flags::Usage(cmdline, flags_);
}
private:
tflite::tools::ToolParams params_;
ProvidedDelegateList delegate_list_util_;
std::vector<tflite::Flag> flags_;
};
TfLiteStatus ReadLabelsFile(const string& file_name,
std::vector<string>* result,
size_t* found_label_count) {
std::ifstream file(file_name);
if (!file) {
LOG(ERROR) << "Labels file " << file_name << " not found";
return kTfLiteError;
}
result->clear();
string line;
while (std::getline(file, line)) {
result->push_back(line);
}
*found_label_count = result->size();
const int padding = 16;
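  // Pad with empty strings so the number of labels is a multiple of 16.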
while (result->size() % padding) {
result->emplace_back();
}
return kTfLiteOk;
}
void PrintProfilingInfo(const profiling::ProfileEvent* e,
uint32_t subgraph_index, uint32_t op_index,
TfLiteRegistration registration) {
LOG(INFO) << std::fixed << std::setw(10) << std::setprecision(3)
<< (e->elapsed_time) / 1000.0 << ", Subgraph " << std::setw(3)
<< std::setprecision(3) << subgraph_index << ", Node "
<< std::setw(3) << std::setprecision(3) << op_index << ", OpCode "
<< std::setw(3) << std::setprecision(3) << registration.builtin_code
<< ", "
<< EnumNameBuiltinOperator(
static_cast<BuiltinOperator>(registration.builtin_code));
}
void RunInference(Settings* settings,
const DelegateProviders& delegate_providers) {
  if (settings->model_name.empty()) {
LOG(ERROR) << "no model file name";
exit(-1);
}
std::unique_ptr<tflite::FlatBufferModel> model;
std::unique_ptr<tflite::Interpreter> interpreter;
model = tflite::FlatBufferModel::BuildFromFile(settings->model_name.c_str());
if (!model) {
LOG(ERROR) << "Failed to mmap model " << settings->model_name;
exit(-1);
}
settings->model = model.get();
LOG(INFO) << "Loaded model " << settings->model_name;
model->error_reporter();
LOG(INFO) << "resolved reporter";
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
if (!interpreter) {
LOG(ERROR) << "Failed to construct interpreter";
exit(-1);
}
interpreter->SetAllowFp16PrecisionForFp32(settings->allow_fp16);
if (settings->verbose) {
LOG(INFO) << "tensors size: " << interpreter->tensors_size();
LOG(INFO) << "nodes size: " << interpreter->nodes_size();
LOG(INFO) << "inputs: " << interpreter->inputs().size();
LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0);
int t_size = interpreter->tensors_size();
for (int i = 0; i < t_size; i++) {
if (interpreter->tensor(i)->name)
LOG(INFO) << i << ": " << interpreter->tensor(i)->name << ", "
<< interpreter->tensor(i)->bytes << ", "
<< interpreter->tensor(i)->type << ", "
<< interpreter->tensor(i)->params.scale << ", "
<< interpreter->tensor(i)->params.zero_point;
}
}
if (settings->number_of_threads != -1) {
interpreter->SetNumThreads(settings->number_of_threads);
}
int image_width = 224;
int image_height = 224;
int image_channels = 3;
std::vector<uint8_t> in = read_bmp(settings->input_bmp_name, &image_width,
&image_height, &image_channels, settings);
int input = interpreter->inputs()[0];
if (settings->verbose) LOG(INFO) << "input: " << input;
const std::vector<int> inputs = interpreter->inputs();
const std::vector<int> outputs = interpreter->outputs();
if (settings->verbose) {
LOG(INFO) << "number of inputs: " << inputs.size();
LOG(INFO) << "number of outputs: " << outputs.size();
}
auto profiler = std::make_unique<profiling::Profiler>(
settings->max_profiling_buffer_entries);
interpreter->SetProfiler(profiler.get());
auto delegates = delegate_providers.CreateAllDelegates();
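  // Apply every delegate created from the command-line flags; failing to
  // apply any of them is fatal.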
for (auto& delegate : delegates) {
const auto delegate_name = delegate.provider->GetName();
if (interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate)) !=
kTfLiteOk) {
LOG(ERROR) << "Failed to apply " << delegate_name << " delegate.";
exit(-1);
} else {
LOG(INFO) << "Applied " << delegate_name << " delegate.";
}
}
if (interpreter->AllocateTensors() != kTfLiteOk) {
LOG(ERROR) << "Failed to allocate tensors!";
exit(-1);
}
if (settings->verbose) PrintInterpreterState(interpreter.get());
TfLiteIntArray* dims = interpreter->tensor(input)->dims;
int wanted_height = dims->data[1];
int wanted_width = dims->data[2];
int wanted_channels = dims->data[3];
settings->input_type = interpreter->tensor(input)->type;
switch (settings->input_type) {
case kTfLiteFloat32:
resize<float>(interpreter->typed_tensor<float>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteInt8:
resize<int8_t>(interpreter->typed_tensor<int8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteUInt8:
resize<uint8_t>(interpreter->typed_tensor<uint8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
default:
LOG(ERROR) << "cannot handle input type "
<< interpreter->tensor(input)->type << " yet";
exit(-1);
}
if (settings->profiling) profiler->StartProfiling();
for (int i = 0; i < settings->number_of_warmup_runs; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
struct timeval start_time, stop_time;
gettimeofday(&start_time, nullptr);
for (int i = 0; i < settings->loop_count; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
gettimeofday(&stop_time, nullptr);
LOG(INFO) << "invoked";
LOG(INFO) << "average time: "
<< (get_us(stop_time) - get_us(start_time)) /
(settings->loop_count * 1000)
<< " ms";
if (settings->profiling) {
profiler->StopProfiling();
auto profile_events = profiler->GetProfileEvents();
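    // Each event stores the subgraph index in extra_event_metadata and the
    // node index in event_metadata; use both to look up the op registration.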
for (int i = 0; i < profile_events.size(); i++) {
auto subgraph_index = profile_events[i]->extra_event_metadata;
auto op_index = profile_events[i]->event_metadata;
const auto subgraph = interpreter->subgraph(subgraph_index);
const auto node_and_registration =
subgraph->node_and_registration(op_index);
const TfLiteRegistration registration = node_and_registration->second;
PrintProfilingInfo(profile_events[i], subgraph_index, op_index,
registration);
}
}
const float threshold = 0.001f;
std::vector<std::pair<float, int>> top_results;
int output = interpreter->outputs()[0];
TfLiteIntArray* output_dims = interpreter->tensor(output)->dims;
auto output_size = output_dims->data[output_dims->size - 1];
switch (interpreter->tensor(output)->type) {
case kTfLiteFloat32:
get_top_n<float>(interpreter->typed_output_tensor<float>(0), output_size,
settings->number_of_results, threshold, &top_results,
settings->input_type);
break;
case kTfLiteInt8:
get_top_n<int8_t>(interpreter->typed_output_tensor<int8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
case kTfLiteUInt8:
get_top_n<uint8_t>(interpreter->typed_output_tensor<uint8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
default:
LOG(ERROR) << "cannot handle output type "
<< interpreter->tensor(output)->type << " yet";
exit(-1);
}
std::vector<string> labels;
size_t label_count;
if (ReadLabelsFile(settings->labels_file_name, &labels, &label_count) !=
kTfLiteOk)
exit(-1);
for (const auto& result : top_results) {
const float confidence = result.first;
const int index = result.second;
LOG(INFO) << confidence << ": " << index << " " << labels[index];
}
interpreter.reset();
}
void display_usage(const DelegateProviders& delegate_providers) {
LOG(INFO)
<< "\n"
<< delegate_providers.GetHelpMessage("label_image")
<< "\t--accelerated, -a: [0|1] use Android NNAPI or not\n"
<< "\t--allow_fp16, -f: [0|1], allow running fp32 models with fp16 or "
"not\n"
<< "\t--count, -c: loop interpreter->Invoke() for certain times\n"
<< "\t--gl_backend, -g: [0|1]: use GL GPU Delegate on Android\n"
<< "\t--hexagon_delegate, -j: [0|1]: use Hexagon Delegate on Android\n"
<< "\t--input_mean, -b: input mean\n"
<< "\t--input_std, -s: input standard deviation\n"
<< "\t--image, -i: image_name.bmp\n"
<< "\t--labels, -l: labels for the model\n"
<< "\t--tflite_model, -m: model_name.tflite\n"
<< "\t--profiling, -p: [0|1], profiling or not\n"
<< "\t--num_results, -r: number of results to show\n"
<< "\t--threads, -t: number of threads\n"
<< "\t--verbose, -v: [0|1] print more information\n"
<< "\t--warmup_runs, -w: number of warmup runs\n"
<< "\t--xnnpack_delegate, -x [0:1]: xnnpack delegate\n"
<< "\t--help, -h: Print this help message\n";
}
int Main(int argc, char** argv) {
DelegateProviders delegate_providers;
bool parse_result = delegate_providers.InitFromCmdlineArgs(
&argc, const_cast<const char**>(argv));
if (!parse_result) {
display_usage(delegate_providers);
return EXIT_FAILURE;
}
Settings s;
int c;
while (true) {
static struct option long_options[] = {
{"accelerated", required_argument, nullptr, 'a'},
{"allow_fp16", required_argument, nullptr, 'f'},
{"count", required_argument, nullptr, 'c'},
{"verbose", required_argument, nullptr, 'v'},
{"image", required_argument, nullptr, 'i'},
{"labels", required_argument, nullptr, 'l'},
{"tflite_model", required_argument, nullptr, 'm'},
{"profiling", required_argument, nullptr, 'p'},
{"threads", required_argument, nullptr, 't'},
{"input_mean", required_argument, nullptr, 'b'},
{"input_std", required_argument, nullptr, 's'},
{"num_results", required_argument, nullptr, 'r'},
{"max_profiling_buffer_entries", required_argument, nullptr, 'e'},
{"warmup_runs", required_argument, nullptr, 'w'},
{"gl_backend", required_argument, nullptr, 'g'},
{"hexagon_delegate", required_argument, nullptr, 'j'},
{"xnnpack_delegate", required_argument, nullptr, 'x'},
{"help", no_argument, nullptr, 'h'},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
c = getopt_long(argc, argv, "a:b:c:d:e:f:g:i:j:l:m:p:r:s:t:v:w:x:h",
long_options, &option_index);
if (c == -1) break;
switch (c) {
case 'a':
s.accel = strtol(optarg, nullptr, 10);
break;
case 'b':
s.input_mean = strtod(optarg, nullptr);
break;
case 'c':
s.loop_count =
strtol(optarg, nullptr, 10);
break;
case 'e':
s.max_profiling_buffer_entries =
strtol(optarg, nullptr, 10);
break;
case 'f':
s.allow_fp16 =
strtol(optarg, nullptr, 10);
break;
case 'g':
s.gl_backend =
strtol(optarg, nullptr, 10);
break;
case 'i':
s.input_bmp_name = optarg;
break;
case 'j':
        s.hexagon_delegate = strtol(optarg, nullptr, 10);
break;
case 'l':
s.labels_file_name = optarg;
break;
case 'm':
s.model_name = optarg;
break;
case 'p':
s.profiling =
strtol(optarg, nullptr, 10);
break;
case 'r':
s.number_of_results =
strtol(optarg, nullptr, 10);
break;
case 's':
s.input_std = strtod(optarg, nullptr);
break;
case 't':
s.number_of_threads = strtol(
optarg, nullptr, 10);
break;
case 'v':
s.verbose =
strtol(optarg, nullptr, 10);
break;
case 'w':
s.number_of_warmup_runs =
strtol(optarg, nullptr, 10);
break;
case 'x':
s.xnnpack_delegate =
strtol(optarg, nullptr, 10);
break;
case 'h':
case '?':
display_usage(delegate_providers);
exit(-1);
default:
exit(-1);
}
}
delegate_providers.MergeSettingsIntoParams(s);
RunInference(&s, delegate_providers);
return 0;
}
}
}
int main(int argc, char** argv) {
return tflite::label_image::Main(argc, argv);
} | #include "tensorflow/lite/examples/label_image/label_image.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
namespace tflite {
namespace label_image {
TEST(LabelImageTest, GraceHopper) {
  std::string grace_hopper_file =
"tensorflow/lite/examples/label_image/testdata/"
"grace_hopper.bmp";
int height, width, channels;
Settings s;
s.input_type = kTfLiteUInt8;
std::vector<uint8_t> input =
      read_bmp(grace_hopper_file, &width, &height, &channels, &s);
ASSERT_EQ(height, 606);
ASSERT_EQ(width, 517);
ASSERT_EQ(channels, 3);
std::vector<uint8_t> output(606 * 517 * 3);
resize<uint8_t>(output.data(), input.data(), 606, 517, 3, 214, 214, 3, &s);
ASSERT_EQ(output[0], 0x15);
ASSERT_EQ(output[214 * 214 * 3 - 1], 0x11);
}
TEST(LabelImageTest, GetTopN) {
uint8_t in[] = {1, 1, 2, 2, 4, 4, 16, 32, 128, 64};
std::vector<std::pair<float, int>> top_results;
get_top_n<uint8_t>(in, 10, 5, 0.025, &top_results, kTfLiteUInt8);
ASSERT_EQ(top_results.size(), 4);
ASSERT_EQ(top_results[0].second, 8);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/examples/label_image/label_image.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/examples/label_image/label_image_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ed64317-64de-4f9b-a9f5-58e30cb2b26b | cpp | tensorflow/tensorflow | serialization | tensorflow/lite/delegates/gpu/gl/serialization.cc | tensorflow/lite/delegates/gpu/gl/serialization_test.cc | #include "tensorflow/lite/delegates/gpu/gl/serialization.h"
#include <string>
#include <utility>
#include <variant>
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
using flatbuffers::Offset;
using flatbuffers::Vector;
namespace {
struct ParameterValueGetter {
Offset<void> operator()(int32_t value) {
auto offset = builder->CreateVector(std::vector<int32_t>{value});
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const int2& value) {
auto offset = builder->CreateVector(std::vector<int32_t>{value.x, value.y});
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const int4& value) {
auto offset = builder->CreateVector(
std::vector<int32_t>{value.x, value.y, value.z, value.w});
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const std::vector<int2>& value) {
std::vector<int32_t> d(value.size() * 2);
for (size_t i = 0; i < value.size(); ++i) {
d[i * 2] = value[i].x;
d[i * 2 + 1] = value[i].y;
}
auto offset = builder->CreateVector(d);
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(uint32_t value) {
auto offset = builder->CreateVector(std::vector<uint32_t>{value});
data::DataUint32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const uint4& value) {
auto offset = builder->CreateVector(
std::vector<uint32_t>{value.x, value.y, value.z, value.w});
data::DataUint32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(float value) {
auto offset = builder->CreateVector(std::vector<float>{value});
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const float2& value) {
auto offset = builder->CreateVector(std::vector<float>{value.x, value.y});
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const float4& value) {
auto offset = builder->CreateVector(
std::vector<float>{value.x, value.y, value.z, value.w});
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const std::vector<float4>& value) {
std::vector<float> d(value.size() * 4);
for (size_t i = 0; i < value.size(); ++i) {
d[i * 4] = value[i].x;
d[i * 4 + 1] = value[i].y;
d[i * 4 + 2] = value[i].z;
d[i * 4 + 3] = value[i].w;
}
auto offset = builder->CreateVector(d);
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
::flatbuffers::FlatBufferBuilder* builder;
};
struct DataVariantTypeGetter {
data::DataVariant operator()(int32_t) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(const int2&) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(const int4&) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(const std::vector<int2>&) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(uint32_t) const {
return data::DataVariant::DataUint32;
}
data::DataVariant operator()(const uint4&) const {
return data::DataVariant::DataUint32;
}
data::DataVariant operator()(float) const {
return data::DataVariant::DataFloat;
}
data::DataVariant operator()(const float2&) const {
return data::DataVariant::DataFloat;
}
data::DataVariant operator()(const float4&) const {
return data::DataVariant::DataFloat;
}
data::DataVariant operator()(const std::vector<float4>&) const {
return data::DataVariant::DataFloat;
}
};
struct ParameterTypeGetter {
data::ParameterType operator()(int32_t) const {
return data::ParameterType::INT32;
}
data::ParameterType operator()(const int2&) const {
return data::ParameterType::INT32;
}
data::ParameterType operator()(const int4&) const {
return data::ParameterType::INT32;
}
data::ParameterType operator()(const std::vector<int2>&) const {
return data::ParameterType::INT32_2;
}
data::ParameterType operator()(uint32_t) const {
return data::ParameterType::UINT32;
}
data::ParameterType operator()(const uint4&) const {
return data::ParameterType::UINT32;
}
data::ParameterType operator()(float) const {
return data::ParameterType::FLOAT32;
}
data::ParameterType operator()(const float2&) const {
return data::ParameterType::FLOAT32;
}
data::ParameterType operator()(const float4&) const {
return data::ParameterType::FLOAT32;
}
data::ParameterType operator()(const std::vector<float4>&) const {
return data::ParameterType::FLOAT32;
}
};
data::DataType ToFB(DataType type) {
switch (type) {
case DataType::INT16:
return data::DataType::INT16;
case DataType::INT32:
return data::DataType::INT32;
case DataType::FLOAT16:
return data::DataType::FLOAT16;
case DataType::FLOAT32:
return data::DataType::FLOAT32;
default:
return data::DataType::UNKNOWN;
}
}
data::ObjectType ToFB(ObjectType type) {
switch (type) {
case ObjectType::TEXTURE:
return data::ObjectType::TEXTURE;
case ObjectType::BUFFER:
return data::ObjectType::BUFFER;
default:
return data::ObjectType::UNKNOWN;
}
}
struct ObjectSizeGetter {
Offset<void> operator()(const uint3& shape) {
data::Uint3Builder shape_builder(*builder);
shape_builder.add_x(shape.x);
shape_builder.add_y(shape.y);
shape_builder.add_z(shape.z);
return shape_builder.Finish().Union();
}
Offset<void> operator()(const uint2& shape) {
data::Uint2Builder shape_builder(*builder);
shape_builder.add_x(shape.x);
shape_builder.add_y(shape.y);
return shape_builder.Finish().Union();
}
Offset<void> operator()(uint32_t shape) {
data::Uint1Builder shape_builder(*builder);
shape_builder.add_x(shape);
return shape_builder.Finish().Union();
}
::flatbuffers::FlatBufferBuilder* builder;
};
struct ObjectSizeTypeGetter {
data::ObjectSize operator()(const uint3&) const {
return data::ObjectSize::Uint3;
}
data::ObjectSize operator()(const uint2&) const {
return data::ObjectSize::Uint2;
}
data::ObjectSize operator()(const uint32_t) const {
return data::ObjectSize::Uint1;
}
};
struct ObjectGetter {
Offset<void> operator()(const ObjectData& data) {
auto fb_data = builder->CreateVector(data);
data::ObjectDataBuilder data_builder(*builder);
data_builder.add_data(fb_data);
return data_builder.Finish().Union();
}
Offset<void> operator()(ObjectRef ref) {
data::ObjectRefBuilder ref_builder(*builder);
ref_builder.add_global_id(ref);
return ref_builder.Finish().Union();
}
::flatbuffers::FlatBufferBuilder* builder;
};
struct ObjectTypeGetter {
data::ObjectVariant operator()(const ObjectData&) const {
return data::ObjectVariant::ObjectData;
}
data::ObjectVariant operator()(const ObjectRef&) const {
return data::ObjectVariant::ObjectRef;
}
};
data::AccessType ToFB(AccessType type) {
switch (type) {
case AccessType::READ:
return data::AccessType::READ;
case AccessType::WRITE:
return data::AccessType::WRITE;
case AccessType::READ_WRITE:
return data::AccessType::READ_WRITE;
}
}
Offset<data::Uint3> Encode(const uint3& v,
::flatbuffers::FlatBufferBuilder* builder) {
data::Uint3Builder uint3_builder(*builder);
uint3_builder.add_x(v.x);
uint3_builder.add_y(v.y);
uint3_builder.add_z(v.z);
return uint3_builder.Finish();
}
Offset<data::Parameters> Encode(const CompiledModelOptions& options,
::flatbuffers::FlatBufferBuilder* builder) {
data::ParametersBuilder params_builder(*builder);
params_builder.add_dynamic_batch(options.dynamic_batch);
return params_builder.Finish();
}
}
void SerializedCompiledModelBuilder::AddShader(const std::string& shader_src) {
shaders_.push_back(builder_.CreateString(shader_src));
}
void SerializedCompiledModelBuilder::AddProgram(
const std::vector<Variable>& parameters, const std::vector<Object>& objects,
const uint3& workgroup_size, const uint3& num_workgroups,
size_t shader_index) {
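  // Serialize the workgroup geometry, uniform parameters, and objects, and
  // record which previously added shader this program uses.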
Offset<data::Uint3> fb_workgroups = Encode(num_workgroups, &builder_);
Offset<data::Uint3> fb_workgroup_size = Encode(workgroup_size, &builder_);
Offset<Vector<Offset<data::UniformParameter>>> fb_params;
{
std::vector<Offset<data::UniformParameter>> offsets;
for (const Variable& param : parameters) {
auto name = builder_.CreateString(param.name);
auto data = std::visit(ParameterValueGetter{&builder_}, param.value);
data::UniformParameterBuilder builder(builder_);
builder.add_name(name);
builder.add_data_type(std::visit(DataVariantTypeGetter{}, param.value));
builder.add_data(data);
builder.add_type(std::visit(ParameterTypeGetter{}, param.value));
offsets.push_back(builder.Finish());
}
fb_params = builder_.CreateVector(offsets);
}
Offset<Vector<Offset<data::Object>>> fb_objects;
{
std::vector<Offset<data::Object>> offsets;
for (const Object& object : objects) {
auto object_variant = std::visit(ObjectGetter{&builder_}, object.object);
auto size = std::visit(ObjectSizeGetter{&builder_}, object.size);
data::ObjectBuilder builder(builder_);
builder.add_access(ToFB(object.access));
builder.add_binding(object.binding);
builder.add_type(ToFB(object.object_type));
builder.add_data_type(ToFB(object.data_type));
builder.add_size_type(std::visit(ObjectSizeTypeGetter{}, object.size));
builder.add_size(size);
builder.add_object_type(std::visit(ObjectTypeGetter{}, object.object));
builder.add_object(object_variant);
offsets.push_back(builder.Finish());
}
fb_objects = builder_.CreateVector(offsets);
}
data::ProgramBuilder program_builder(builder_);
program_builder.add_number_workgroups(fb_workgroups);
program_builder.add_workgroup_size(fb_workgroup_size);
program_builder.add_parameters(fb_params);
program_builder.add_objects(fb_objects);
program_builder.add_shader_index(shader_index);
programs_.push_back(program_builder.Finish());
}
absl::Span<const uint8_t> SerializedCompiledModelBuilder::Finalize(
const CompiledModelOptions& options) {
auto shaders = builder_.CreateVector(shaders_);
auto programs = builder_.CreateVector(programs_);
auto parameters = Encode(options, &builder_);
data::CompiledModelBuilder model_builder(builder_);
model_builder.add_shaders(shaders);
model_builder.add_programs(programs);
model_builder.add_parameters(parameters);
data::FinishCompiledModelBuffer(builder_, model_builder.Finish());
return absl::MakeConstSpan(builder_.GetBufferPointer(), builder_.GetSize());
}
namespace {
absl::Status ParseParameter(const data::UniformParameter& fb_parameter,
Variable* parameter) {
parameter->name = fb_parameter.name()->str();
switch (fb_parameter.type()) {
case data::ParameterType::INT32: {
auto* ptr = fb_parameter.data_as_DataInt32();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
switch (ptr->data()->size()) {
case 1:
parameter->value = (*ptr->data())[0];
break;
case 2:
parameter->value = int2((*ptr->data())[0], (*ptr->data())[1]);
break;
case 4:
parameter->value = int4((*ptr->data())[0], (*ptr->data())[1],
(*ptr->data())[2], (*ptr->data())[3]);
break;
default:
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
break;
}
case data::ParameterType::UINT32: {
auto* ptr = fb_parameter.data_as_DataUint32();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
switch (ptr->data()->size()) {
case 1:
parameter->value = (*ptr->data())[0];
break;
case 4:
parameter->value = uint4((*ptr->data())[0], (*ptr->data())[1],
(*ptr->data())[2], (*ptr->data())[3]);
break;
default:
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
break;
}
case data::ParameterType::FLOAT32: {
auto* ptr = fb_parameter.data_as_DataFloat();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
switch (ptr->data()->size()) {
case 1:
parameter->value = (*ptr->data())[0];
break;
case 2:
parameter->value = float2((*ptr->data())[0], (*ptr->data())[1]);
break;
case 4:
parameter->value = float4((*ptr->data())[0], (*ptr->data())[1],
(*ptr->data())[2], (*ptr->data())[3]);
break;
default:
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
break;
}
case data::ParameterType::INT32_2: {
auto* ptr = fb_parameter.data_as_DataInt32();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
if (ptr->data()->size() % 2 != 0) {
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
std::vector<int2> values(ptr->data()->size() / 2);
for (int i = 0; i < values.size(); ++i) {
values[i] = int2((*ptr->data())[i * 2], (*ptr->data())[i * 2 + 1]);
}
parameter->value = values;
break;
}
}
return absl::OkStatus();
}
DataType ToEnum(data::DataType type) {
switch (type) {
case data::DataType::INT16:
return DataType::INT16;
case data::DataType::INT32:
return DataType::INT32;
case data::DataType::FLOAT16:
return DataType::FLOAT16;
case data::DataType::FLOAT32:
return DataType::FLOAT32;
default:
return DataType::UNKNOWN;
}
}
ObjectType ToEnum(data::ObjectType type) {
switch (type) {
case data::ObjectType::TEXTURE:
return ObjectType::TEXTURE;
case data::ObjectType::BUFFER:
return ObjectType::BUFFER;
default:
return ObjectType::UNKNOWN;
}
}
AccessType ToEnum(data::AccessType type) {
switch (type) {
case data::AccessType::READ:
return AccessType::READ;
case data::AccessType::WRITE:
return AccessType::WRITE;
case data::AccessType::READ_WRITE:
return AccessType::READ_WRITE;
}
}
absl::Status ParseObject(const data::Object& fb_object, Object* object) {
object->access = ToEnum(fb_object.access());
object->binding = fb_object.binding();
object->object_type = ToEnum(fb_object.type());
object->data_type = ToEnum(fb_object.data_type());
switch (fb_object.size_type()) {
case data::ObjectSize::Uint3: {
auto* size = fb_object.size_as_Uint3();
object->size = uint3(size->x(), size->y(), size->z());
break;
}
case data::ObjectSize::Uint2: {
auto* size = fb_object.size_as_Uint2();
object->size = uint2(size->x(), size->y());
break;
}
case data::ObjectSize::Uint1: {
auto* size = fb_object.size_as_Uint1();
object->size = size->x();
break;
}
case data::ObjectSize::NONE:
return absl::InvalidArgumentError("Texture size is not set");
}
switch (fb_object.object_type()) {
case data::ObjectVariant::ObjectData: {
auto* fb_data = fb_object.object_as_ObjectData();
object->object = std::vector<uint8_t>(
fb_data->data()->data(),
fb_data->data()->data() + fb_data->data()->size());
break;
}
case data::ObjectVariant::ObjectRef: {
auto* fb_ref = fb_object.object_as_ObjectRef();
object->object = fb_ref->global_id();
break;
}
case data::ObjectVariant::NONE: {
return absl::InvalidArgumentError("Object is not set");
}
}
return absl::OkStatus();
}
CompiledModelOptions ParseParameters(const data::Parameters& fb_parameters) {
CompiledModelOptions options;
options.dynamic_batch = fb_parameters.dynamic_batch();
return options;
}
}
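// Verifies the FlatBuffer and replays its contents: every shader, every
// program (parameters, objects, workgroup configuration, shader index) and
// finally the compiled-model options are forwarded to the handler.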
absl::Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
DeserializationHandler* handler) {
flatbuffers::Verifier verifier(serialized.data(), serialized.size());
if (!data::VerifyCompiledModelBuffer(verifier)) {
return absl::InvalidArgumentError("Serialized model is corrupted.");
}
auto model = data::GetCompiledModel(serialized.data());
for (auto shader : *model->shaders()) {
RETURN_IF_ERROR(
handler->OnShader(absl::MakeSpan(shader->c_str(), shader->size())));
}
std::vector<Variable> parameters;
std::vector<Object> objects;
for (auto program : *model->programs()) {
parameters.clear();
objects.clear();
for (auto fb_parameter : *program->parameters()) {
Variable parameter;
RETURN_IF_ERROR(ParseParameter(*fb_parameter, ¶meter));
parameters.push_back(std::move(parameter));
}
for (auto fb_object : *program->objects()) {
Object object;
RETURN_IF_ERROR(ParseObject(*fb_object, &object));
objects.push_back(std::move(object));
}
uint3 workgroup_size(program->workgroup_size()->x(),
program->workgroup_size()->y(),
program->workgroup_size()->z());
uint3 num_workgroups(program->number_workgroups()->x(),
program->number_workgroups()->y(),
program->number_workgroups()->z());
RETURN_IF_ERROR(handler->OnProgram(parameters, objects, workgroup_size,
num_workgroups,
program->shader_index()));
}
handler->OnOptions(ParseParameters(*model->parameters()));
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/serialization.h"
#include <stddef.h>
#include <sys/types.h>
#include <cstdint>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
struct ProgramDesc {
std::vector<Variable> parameters;
std::vector<Object> objects;
uint3 workgroup_size;
uint3 num_workgroups;
size_t shader_index;
};
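// Records everything reported by the deserializer so the test can compare it
// against the data that was originally serialized.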
struct Handler : public DeserializationHandler {
absl::Status OnShader(absl::Span<const char> shader_src) final {
shaders.push_back(std::string(shader_src.data(), shader_src.size()));
return absl::OkStatus();
}
absl::Status OnProgram(const std::vector<Variable>& parameters,
const std::vector<Object>& objects,
const uint3& workgroup_size,
const uint3& num_workgroups,
size_t shader_index) final {
programs.push_back(
{parameters, objects, workgroup_size, num_workgroups, shader_index});
return absl::OkStatus();
}
void OnOptions(const CompiledModelOptions& o) final { options = o; }
std::vector<std::string> shaders;
std::vector<ProgramDesc> programs;
CompiledModelOptions options;
};
struct ParameterComparator {
bool operator()(int32_t value) const {
return value == std::get<int32_t>(a.value);
}
bool operator()(const int2& value) const {
auto v = std::get<int2>(a.value);
return value.x == v.x && value.y == v.y;
}
bool operator()(const int4& value) const {
auto v = std::get<int4>(a.value);
return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w;
}
bool operator()(const std::vector<int2>& value) const {
auto v = std::get<std::vector<int2>>(a.value);
if (v.size() != value.size()) {
return false;
}
for (int i = 0; i < v.size(); ++i) {
if (v[i].x != value[i].x || v[i].y != value[i].y) {
return false;
}
}
return true;
}
bool operator()(uint32_t value) const {
return value == std::get<uint32_t>(a.value);
}
bool operator()(const uint4& value) const {
auto v = std::get<uint4>(a.value);
return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w;
}
bool operator()(float value) const {
return value == std::get<float>(a.value);
}
bool operator()(float2 value) const {
auto v = std::get<float2>(a.value);
return value.x == v.x && value.y == v.y;
}
bool operator()(const float4& value) const {
auto v = std::get<float4>(a.value);
return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w;
}
bool operator()(const std::vector<float4>& value) const {
auto v = std::get<std::vector<float4>>(a.value);
if (v.size() != value.size()) {
return false;
}
for (int i = 0; i < v.size(); ++i) {
      if (v[i].x != value[i].x || v[i].y != value[i].y ||
          v[i].z != value[i].z || v[i].w != value[i].w) {
return false;
}
}
return true;
}
Variable a;
};
bool Eq(const Variable& a, const Variable& b) {
return a.name == b.name && std::visit(ParameterComparator{a}, b.value);
}
struct ObjectComparator {
bool operator()(const ObjectData& data) const {
return std::get<ObjectData>(a.object) == data;
}
bool operator()(const ObjectRef& ref) const {
return std::get<ObjectRef>(a.object) == ref;
}
Object a;
};
bool Eq(const Object& a, const Object& b) {
return a.access == b.access && a.binding == b.binding &&
std::visit(ObjectComparator{a}, b.object);
}
TEST(Smoke, Read) {
std::string shader1 = "A";
std::string shader2 = "B";
SerializedCompiledModelBuilder builder;
builder.AddShader(shader1);
builder.AddShader(shader2);
std::vector<Variable> parameters;
parameters.push_back({"1", int32_t(1)});
parameters.push_back({"2", int2(1, 2)});
parameters.push_back({"3", int4(1, 2, 3, 4)});
parameters.push_back({"4", uint32_t(10)});
parameters.push_back({"5", uint4(10, 20, 30, 40)});
parameters.push_back({"6", -2.0f});
parameters.push_back({"7", float2(1, -1)});
parameters.push_back({"8", float4(1, -1, 2, -2)});
parameters.push_back(
{"9", std::vector<int2>{int2(1, 2), int2(3, 4), int2(5, 6)}});
std::vector<Object> objects;
objects.push_back(MakeReadonlyBuffer(std::vector<float>{1, 2, 3, 4}));
objects.push_back(Object{AccessType::WRITE, DataType::FLOAT32,
ObjectType::TEXTURE, 5, uint3(1, 2, 3), 100u});
objects.push_back(Object{AccessType::READ_WRITE, DataType::INT8,
ObjectType::BUFFER, 6, uint2(2, 1),
std::vector<uint8_t>{7, 9}});
uint3 num_workgroups(10, 20, 30);
uint3 workgroup_size(1, 2, 3);
builder.AddProgram(parameters, objects, workgroup_size, num_workgroups, 1);
Handler handler;
CompiledModelOptions options;
options.dynamic_batch = true;
ASSERT_TRUE(
DeserializeCompiledModel(builder.Finalize(options), &handler).ok());
EXPECT_EQ(num_workgroups.data_, handler.programs[0].num_workgroups.data_);
EXPECT_EQ(workgroup_size.data_, handler.programs[0].workgroup_size.data_);
EXPECT_THAT(handler.shaders, ::testing::ElementsAre(shader1, shader2));
EXPECT_EQ(handler.programs[0].parameters.size(), parameters.size());
for (int i = 0; i < parameters.size(); ++i) {
EXPECT_TRUE(Eq(parameters[i], handler.programs[0].parameters[i])) << i;
}
EXPECT_EQ(handler.programs[0].objects.size(), objects.size());
for (int i = 0; i < objects.size(); ++i) {
EXPECT_TRUE(Eq(objects[i], handler.programs[0].objects[i])) << i;
}
EXPECT_TRUE(handler.options.dynamic_batch);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/serialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/serialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dfe0675-3405-4e60-9aaa-06befc7c3acc | cpp | tensorflow/tensorflow | interpreter_utils | tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc | tensorflow/lite/delegates/interpreter_utils_test.cc | #include "tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h"
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace gpu {
namespace testing {
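// Builds an interpreter for the model, optionally applies the delegate, feeds
// the float32 inputs, invokes, and repacks every output tensor as a BHWC
// TensorFloat32.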
absl::Status InterpreterInvokeWithOpResolver(
const ::tflite::Model* model, TfLiteDelegate* delegate,
const OpResolver& op_resolver, const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs) {
auto interpreter = std::make_unique<Interpreter>();
if (InterpreterBuilder(model, op_resolver)(&interpreter) != kTfLiteOk) {
return absl::InternalError("Unable to create TfLite InterpreterBuilder");
}
if (delegate && interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
return absl::InternalError(
"Unable to modify TfLite graph with the delegate");
}
interpreter->SetNumThreads(1);
if (interpreter->AllocateTensors() != kTfLiteOk) {
return absl::InternalError("Unable to allocate TfLite tensors");
}
for (int i = 0; i < inputs.size(); ++i) {
if (interpreter->tensor(interpreter->inputs()[i])->type != kTfLiteFloat32) {
return absl::InternalError("input data_type is not float32");
}
float* tflite_data =
interpreter->typed_tensor<float>(interpreter->inputs()[i]);
if (inputs[i].data.size() * sizeof(float) >
interpreter->tensor(interpreter->inputs()[i])->bytes) {
return absl::InternalError("too big input data");
}
std::memcpy(tflite_data, inputs[i].data.data(),
inputs[i].data.size() * sizeof(float));
}
if (interpreter->Invoke() != kTfLiteOk) {
return absl::InternalError("Unable to invoke TfLite interpreter");
}
if (!outputs || !outputs->empty()) {
return absl::InternalError("Invalid outputs pointer");
}
outputs->reserve(interpreter->outputs().size());
for (auto t : interpreter->outputs()) {
const TfLiteTensor* out_tensor = interpreter->tensor(t);
TensorFloat32 bhwc;
bhwc.id = t;
if (out_tensor->dims->data[0] != 1) {
return absl::InternalError("Batch dimension is expected to be 1");
}
bhwc.shape.b = out_tensor->dims->data[0];
switch (out_tensor->dims->size) {
case 2:
bhwc.shape.h = 1;
bhwc.shape.w = 1;
bhwc.shape.c = out_tensor->dims->data[1];
break;
case 3:
bhwc.shape.h = 1;
bhwc.shape.w = out_tensor->dims->data[1];
bhwc.shape.c = out_tensor->dims->data[2];
break;
case 4:
bhwc.shape.h = out_tensor->dims->data[1];
bhwc.shape.w = out_tensor->dims->data[2];
bhwc.shape.c = out_tensor->dims->data[3];
break;
default:
return absl::InternalError("Unsupported dimensions size " +
std::to_string(out_tensor->dims->size));
}
bhwc.data = std::vector<float>(
out_tensor->data.f,
out_tensor->data.f + out_tensor->bytes / sizeof(float));
outputs->push_back(bhwc);
}
return absl::OkStatus();
}
absl::Status InterpreterInvoke(const ::tflite::Model* model,
TfLiteDelegate* delegate,
const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs) {
ops::builtin::BuiltinOpResolver builtin_op_resolver;
return InterpreterInvokeWithOpResolver(model, delegate, builtin_op_resolver,
inputs, outputs);
}
}
}
} | #include "tensorflow/lite/delegates/interpreter_utils.h"
#include <string.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/delegate_test_util.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace delegates {
using test_utils::SimpleDelegate;
using test_utils::TestDelegate;
using test_utils::TestFP16Delegation;
namespace {
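// These tests check InterpreterUtils::InvokeWithCPUFallback: when a delegate
// kernel fails at invoke time, execution should fall back to the undelegated
// graph and still produce the expected outputs.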
TEST_F(TestDelegate, DelegateNodeInvokeFailureFallback) {
delegate_ = std::unique_ptr<SimpleDelegate>(new SimpleDelegate(
      {0, 1, 2}, kTfLiteDelegateFlagsNone, false,
      0, true));
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 1);
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
constexpr int kOutputTensorIndex = 3;
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
TfLiteTensor* tensor = interpreter_->tensor(kOutputTensorIndex);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_F(TestDelegate, TestFallbackWithMultipleDelegates) {
delegate_ = std::unique_ptr<SimpleDelegate>(
new SimpleDelegate({0}, kTfLiteDelegateFlagsAllowDynamicTensors));
delegate2_ = std::unique_ptr<SimpleDelegate>(new SimpleDelegate(
      {1, 2}, kTfLiteDelegateFlagsNone, false,
      0, true));
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate2_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 2);
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
constexpr int kOutputTensorIndex = 2;
TfLiteTensor* tensor = interpreter_->tensor(kOutputTensorIndex);
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
EXPECT_EQ(interpreter_->execution_plan().size(), 3);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_P(TestFP16Delegation, DelegateInvokeWithCPUFallback) {
delegate_ = std::make_unique<FP16Delegate>(
GetParam(), false,
true);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
std::vector<float> input = {3.0f};
std::vector<float> expected_output = {16.0f};
const int input_tensor_idx = interpreter_->inputs()[0];
const int output_tensor_idx = interpreter_->outputs()[0];
memcpy(interpreter_->typed_tensor<float>(input_tensor_idx), input.data(),
sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
TfLiteTensor* output_tensor = interpreter_->tensor(output_tensor_idx);
for (int i = 0; i < 1; ++i) {
EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
}
ASSERT_EQ(interpreter_->execution_plan().size(), 8);
VerifyInvoke();
}
INSTANTIATE_TEST_SUITE_P(TestFP16Delegation, TestFP16Delegation,
::testing::Values(1, 2));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/interpreter_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5130d7d4-9f23-4265-be36-863bc3775da5 | cpp | tensorflow/tensorflow | api | tensorflow/lite/delegates/gpu/cl/api.cc | tensorflow/core/api_def/api_test.cc | #include "tensorflow/lite/delegates/gpu/cl/api.h"
#include <utility>
#ifndef CL_DELEGATE_NO_GL
#define CL_DELEGATE_ALLOW_GL
#endif
#include <algorithm>
#include <cstring>
#include <memory>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_command_queue.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_errors.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_event.h"
#include "tensorflow/lite/delegates/gpu/cl/environment.h"
#include "tensorflow/lite/delegates/gpu/cl/inference_context.h"
#include "tensorflow/lite/delegates/gpu/cl/kernels/converter.h"
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor_type_util.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/tflite_profile.h"
#ifdef CL_DELEGATE_ALLOW_GL
#include <EGL/eglext.h>
#include "tensorflow/lite/delegates/gpu/cl/egl_sync.h"
#include "tensorflow/lite/delegates/gpu/cl/gl_interop.h"
#endif
namespace tflite {
namespace gpu {
namespace cl {
namespace {
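// A tensor "tie" binds the user-visible (external) tensor object to the tensor
// used internally by the inference context. NoopTensorTie covers the case
// where both definitions already match, so no conversion or copy is needed.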
class NoopTensorTie : public TensorTie {
public:
NoopTensorTie(const TensorTieDef& def, TensorObject obj)
: TensorTie(def), obj_(obj) {}
static bool IsSupported(const TensorTieDef& def) {
return def.external_def == def.internal_def;
}
absl::Status SetExternalObject(TensorObject obj) final {
if (!def().external_def.object_def.user_provided) {
return absl::InvalidArgumentError("Tensor object is readonly.");
}
if (!IsValid(def().external_def, obj)) {
return absl::InvalidArgumentError("Given object is not valid");
}
obj_ = obj;
return absl::OkStatus();
}
TensorObject GetExternalObject() final { return obj_; }
absl::Status CopyToExternalObject() final { return absl::OkStatus(); }
absl::Status CopyFromExternalObject() final { return absl::OkStatus(); }
private:
TensorObject obj_;
};
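// Converts between the external and internal objects with one converter per
// direction, and allocates the external object itself when the user did not
// provide one.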
class DefaultTensorTie : public TensorTie {
public:
DefaultTensorTie(const TensorTieDef& def, TensorObject internal_obj)
: TensorTie(def), internal_obj_(internal_obj) {}
static bool IsSupported(
const TensorTieDef& def,
const TensorObjectConverterBuilder& converter_builder) {
auto object_type = def.external_def.object_def.object_type;
#ifdef CL_DELEGATE_ALLOW_GL
if (def.external_def.object_def.user_provided &&
GlClBufferCopier::IsSupported(def.external_def.object_def,
def.internal_def.object_def)) {
return true;
}
#endif
return (object_type == ObjectType::OPENCL_BUFFER ||
object_type == ObjectType::OPENCL_TEXTURE ||
object_type == ObjectType::CPU_MEMORY) &&
converter_builder.IsSupported(def.internal_def, def.external_def) &&
converter_builder.IsSupported(def.external_def, def.internal_def);
}
static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
Environment* env, std::unique_ptr<TensorTie>* tie) {
auto tie_impl = std::make_unique<DefaultTensorTie>(def, internal_object);
RETURN_IF_ERROR(tie_impl->Init(converter_builder, env));
*tie = std::move(tie_impl);
return absl::OkStatus();
}
absl::Status CopyToExternalObject() final {
if (!converter_to_) {
return absl::UnavailableError("Conversion is not available");
}
return converter_to_->Convert(internal_obj_, GetExternalObject());
}
absl::Status CopyFromExternalObject() final {
if (!converter_from_) {
return absl::UnavailableError("Conversion is not available");
}
return converter_from_->Convert(GetExternalObject(), internal_obj_);
}
absl::Status SetExternalObject(TensorObject obj) final {
if (!def().external_def.object_def.user_provided) {
return absl::InvalidArgumentError("External object is read-only");
}
if (!IsValid(def().external_def, obj)) {
return absl::InvalidArgumentError("Given object is not valid");
}
external_obj_ = obj;
return absl::OkStatus();
}
TensorObject GetExternalObject() final { return external_obj_; }
private:
absl::Status Init(TensorObjectConverterBuilder* converter_builder,
Environment* env) {
#ifdef CL_DELEGATE_ALLOW_GL
if (def().external_def.object_def.user_provided &&
GlClBufferCopier::IsSupported(def().external_def.object_def,
def().internal_def.object_def)) {
converter_from_ = std::make_unique<GlClBufferCopier>(
def().internal_def, def().external_def, env);
} else {
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().external_def, def().internal_def, &converter_from_));
}
if (def().external_def.object_def.user_provided &&
GlClBufferCopier::IsSupported(def().internal_def.object_def,
def().external_def.object_def)) {
converter_to_ = std::make_unique<GlClBufferCopier>(
def().internal_def, def().external_def, env);
} else {
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().internal_def, def().external_def, &converter_to_));
}
#else
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().external_def, def().internal_def, &converter_from_));
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().internal_def, def().external_def, &converter_to_));
#endif
return MaybeAllocateExternalObject(env);
}
absl::Status MaybeAllocateExternalObject(Environment* env) {
const TensorObjectDef& d = def().external_def;
if (d.object_def.user_provided) {
return absl::OkStatus();
}
switch (d.object_def.object_type) {
case ObjectType::CPU_MEMORY: {
size_t bytes_size = NumElements(d) * SizeOf(d.object_def.data_type);
cpu_memory_.resize(bytes_size);
external_obj_ = CpuMemory{cpu_memory_.data(), cpu_memory_.size()};
break;
}
case ObjectType::OPENCL_TEXTURE:
case ObjectType::OPENCL_BUFFER: {
auto& dims = d.dimensions;
const BHWC shape(dims.b, dims.h, dims.w, dims.c);
TensorStorageType storage_type = ToTensorStorageType(
d.object_def.object_type, d.object_def.data_layout);
TensorDescriptor desc = CreateBhwcTensorDescriptor(
d.object_def.data_type, storage_type, shape);
RETURN_IF_ERROR(
AllocateTensorMemory(env->context(), desc, &cl_memory_));
if (d.object_def.object_type == ObjectType::OPENCL_TEXTURE) {
external_obj_ = OpenClTexture{cl_memory_.memory()};
} else {
external_obj_ = OpenClBuffer{cl_memory_.memory()};
}
break;
}
default:
return absl::InternalError("Unexpected object type");
}
return absl::OkStatus();
}
const TensorObject internal_obj_;
TensorObject external_obj_;
CLMemory cl_memory_;
std::vector<uint8_t> cpu_memory_;
std::unique_ptr<TensorObjectConverter> converter_to_;
std::unique_ptr<TensorObjectConverter> converter_from_;
};
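// Handles conversions that need an intermediate step: the outer tie moves data
// between the external object and a temporary OpenCL buffer, and the inner tie
// moves it between that buffer and the internal tensor.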
class TwoStepTensorTie : public TensorTie {
public:
explicit TwoStepTensorTie(const TensorTieDef& def) : TensorTie(def) {}
static bool IsSupported(
const TensorTieDef& def,
const TensorObjectConverterBuilder& converter_builder) {
auto defs = MakeOuterInnerDefs(def);
return DefaultTensorTie::IsSupported(defs.first, converter_builder) &&
DefaultTensorTie::IsSupported(defs.second, converter_builder);
}
static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
Environment* env, std::unique_ptr<TensorTie>* tie) {
auto tie_impl = std::make_unique<TwoStepTensorTie>(def);
RETURN_IF_ERROR(tie_impl->Init(internal_object, converter_builder, env));
*tie = std::move(tie_impl);
return absl::OkStatus();
}
absl::Status CopyToExternalObject() final {
RETURN_IF_ERROR(inner_tie_->CopyToExternalObject());
return outer_tie_->CopyToExternalObject();
}
absl::Status CopyFromExternalObject() final {
RETURN_IF_ERROR(outer_tie_->CopyFromExternalObject());
return inner_tie_->CopyFromExternalObject();
}
absl::Status SetExternalObject(TensorObject obj) final {
return outer_tie_->SetExternalObject(obj);
}
TensorObject GetExternalObject() final {
return outer_tie_->GetExternalObject();
}
private:
static std::pair<TensorTieDef, TensorTieDef> MakeOuterInnerDefs(
const TensorTieDef& def) {
TensorTieDef outer_def;
outer_def.external_def = def.external_def;
outer_def.internal_def = def.external_def;
outer_def.internal_def.object_def.object_type = ObjectType::OPENCL_BUFFER;
outer_def.internal_def.object_def.user_provided = true;
TensorTieDef inner_def;
inner_def.external_def = outer_def.internal_def;
inner_def.external_def.object_def.user_provided = false;
inner_def.internal_def = def.internal_def;
return std::make_pair(outer_def, inner_def);
}
absl::Status Init(TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
Environment* env) {
auto defs = MakeOuterInnerDefs(def());
RETURN_IF_ERROR(DefaultTensorTie::New(defs.second, internal_object,
converter_builder, env, &inner_tie_));
return DefaultTensorTie::New(defs.first, inner_tie_->GetExternalObject(),
converter_builder, env, &outer_tie_);
}
std::unique_ptr<TensorTie> inner_tie_;
std::unique_ptr<TensorTie> outer_tie_;
};
#ifdef CL_DELEGATE_ALLOW_GL
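// Wraps a user-provided OpenGL SSBO: the buffer is registered with the GL
// interop fabric and exposed to the rest of the pipeline as an OpenCL buffer.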
class GlBufferHolder : public TensorTie {
public:
GlBufferHolder(const TensorTieDef& def, GlInteropFabric* gl_interop_fabric,
Environment* env)
: TensorTie(def),
gl_interop_fabric_(gl_interop_fabric),
environment_(env) {}
static bool IsSupported(
const TensorTieDef& def,
const TensorObjectConverterBuilder& converter_builder) {
if (!def.external_def.object_def.user_provided ||
def.external_def.object_def.object_type != ObjectType::OPENGL_SSBO) {
return false;
}
return DefaultTensorTie::IsSupported(MakeClDef(def), converter_builder);
}
static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
GlInteropFabric* gl_interop_fabric, Environment* env,
std::unique_ptr<TensorTie>* tie) {
auto tie_impl =
std::make_unique<GlBufferHolder>(def, gl_interop_fabric, env);
RETURN_IF_ERROR(DefaultTensorTie::New(MakeClDef(def), internal_object,
converter_builder, env,
&tie_impl->tie_));
*tie = std::move(tie_impl);
return absl::OkStatus();
}
absl::Status SetExternalObject(TensorObject obj) final {
auto ssbo = std::get_if<OpenGlBuffer>(&obj);
if (!ssbo) {
return absl::InvalidArgumentError("Missing OpenGL SSBO");
}
auto old_ssbo = std::get_if<OpenGlBuffer>(&external_obj_);
if (old_ssbo && ssbo->id == old_ssbo->id) {
return absl::OkStatus();
}
if (cl_object_.memory()) {
gl_interop_fabric_->UnregisterMemory(cl_object_.memory());
}
RETURN_IF_ERROR(CreateClMemoryFromGlBuffer(
ssbo->id, def().access_type, &environment_->context(), &cl_object_));
external_obj_ = obj;
RETURN_IF_ERROR(tie_->SetExternalObject(OpenClBuffer{cl_object_.memory()}));
gl_interop_fabric_->RegisterMemory(cl_object_.memory());
return absl::OkStatus();
}
TensorObject GetExternalObject() final { return external_obj_; }
absl::Status CopyFromExternalObject() final {
return tie_->CopyFromExternalObject();
}
absl::Status CopyToExternalObject() final {
return tie_->CopyToExternalObject();
}
private:
static TensorTieDef MakeClDef(const TensorTieDef& def) {
auto cl_def = def;
cl_def.external_def.object_def.object_type = ObjectType::OPENCL_BUFFER;
cl_def.external_def.object_def.user_provided = true;
return cl_def;
}
CLMemory cl_object_;
GlInteropFabric* gl_interop_fabric_;
Environment* environment_;
std::unique_ptr<TensorTie> tie_;
TensorObject external_obj_;
};
#endif
TensorObject TensorToObj(const Tensor& tensor) {
if (tensor.GetStorageType() == TensorStorageType::BUFFER) {
return OpenClBuffer{tensor.GetMemoryPtr()};
}
if (tensor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
return OpenClBuffer{tensor.GetMemoryPtrForWriting()};
}
return OpenClTexture{tensor.GetMemoryPtr()};
}
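// Picks the cheapest supported tie for a definition, trying the no-op,
// single-step, GL-buffer and two-step variants in that order.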
class TensorTieFactory {
public:
TensorTieFactory(Environment* env, InferenceContext* context
#ifdef CL_DELEGATE_ALLOW_GL
,
GlInteropFabric* gl_interop_fabric
#endif
)
: env_(*env),
context_(*context),
#ifdef CL_DELEGATE_ALLOW_GL
gl_interop_fabric_(gl_interop_fabric),
#endif
converter_builder_(NewConverterBuilder(env)) {
}
bool IsSupported(const TensorTieDef& def) const {
return IsValid(def.external_def.object_def) &&
(NoopTensorTie::IsSupported(def) ||
DefaultTensorTie::IsSupported(def, *converter_builder_) ||
#ifdef CL_DELEGATE_ALLOW_GL
(gl_interop_fabric_ &&
GlBufferHolder::IsSupported(def, *converter_builder_)) ||
#endif
TwoStepTensorTie::IsSupported(def, *converter_builder_));
}
absl::Status NewTensorTie(const TensorTieDef& def,
std::unique_ptr<TensorTie>* tie) {
TensorObject internal_object = TensorToObj(*context_.GetTensor(def.id));
auto converter = converter_builder_.get();
if (NoopTensorTie::IsSupported(def)) {
*tie = std::make_unique<NoopTensorTie>(def, internal_object);
return absl::OkStatus();
}
if (DefaultTensorTie::IsSupported(def, *converter)) {
return DefaultTensorTie::New(def, internal_object, converter, &env_, tie);
}
#ifdef CL_DELEGATE_ALLOW_GL
if (gl_interop_fabric_ && GlBufferHolder::IsSupported(def, *converter)) {
return GlBufferHolder::New(def, internal_object, converter,
gl_interop_fabric_, &env_, tie);
}
#endif
if (TwoStepTensorTie::IsSupported(def, *converter)) {
return TwoStepTensorTie::New(def, internal_object, converter, &env_, tie);
}
return absl::UnimplementedError("Unsupported tensor tie definition.");
}
private:
Environment& env_;
InferenceContext& context_;
#ifdef CL_DELEGATE_ALLOW_GL
GlInteropFabric* gl_interop_fabric_;
#endif
std::unique_ptr<TensorObjectConverterBuilder> converter_builder_;
};
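// Owns the inference context and the per-tensor ties. Run() copies inputs in,
// enqueues the context, copies outputs out, and waits for queue completion
// only when asynchronous copies into CPU memory are pending.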
class InferenceRunnerImpl : public CLInferenceRunner {
public:
InferenceRunnerImpl(Environment* environment,
std::unique_ptr<InferenceContext> context
#ifdef CL_DELEGATE_ALLOW_GL
,
std::unique_ptr<GlInteropFabric> gl_interop_fabric
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
,
int gpu_invoke_loop_times
#endif
)
: queue_(environment->queue()),
profiling_queue_(environment->profiling_queue()),
context_(std::move(context))
#ifdef CL_DELEGATE_ALLOW_GL
,
gl_interop_fabric_(std::move(gl_interop_fabric))
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
,
gpu_invoke_loop_times_(gpu_invoke_loop_times)
#endif
{
}
absl::Status Initialize(const std::vector<TensorTieDef>& inputs,
const std::vector<TensorTieDef>& outputs,
TensorTieFactory* factory) {
RETURN_IF_ERROR(LinkTensors(inputs, factory, &inputs_));
return LinkTensors(outputs, factory, &outputs_);
}
std::vector<TensorObjectDef> inputs() const override {
return GetExternalDefinitions(inputs_);
}
std::vector<TensorObjectDef> outputs() const override {
return GetExternalDefinitions(outputs_);
}
absl::Status GetInputObject(int index, TensorObject* object) override {
if (index < 0 || index >= inputs_.size()) {
return absl::OutOfRangeError("Index is out of range");
}
*object = inputs_[index]->GetExternalObject();
return absl::OkStatus();
}
absl::Status GetOutputObject(int index, TensorObject* object) override {
if (index < 0 || index >= outputs_.size()) {
return absl::OutOfRangeError("Index is out of range");
}
*object = outputs_[index]->GetExternalObject();
return absl::OkStatus();
}
absl::Status SetInputObject(int index, TensorObject object) override {
if (index < 0 || index >= inputs_.size()) {
return absl::OutOfRangeError("Input index is out of range");
}
return inputs_[index]->SetExternalObject(object);
}
absl::Status SetOutputObject(int index, TensorObject object) override {
if (index < 0 || index >= outputs_.size()) {
return absl::OutOfRangeError("Output index is out of range");
}
return outputs_[index]->SetExternalObject(object);
}
absl::Status CopyFromExternalInput(int index) override {
    if (index < 0 || index >= inputs_.size()) {
return absl::NotFoundError(
absl::StrCat("Input id ", index, " is an invalid input index."));
}
return inputs_[index]->CopyFromExternalObject();
}
absl::Status CopyToExternalOutput(int index) override {
    if (index < 0 || index >= outputs_.size()) {
return absl::NotFoundError(
absl::StrCat("Output id ", index, " is an invalid output index"));
}
return outputs_[index]->CopyToExternalObject();
}
absl::Status Run() override {
#ifdef CL_DELEGATE_ALLOW_GL
if (gl_interop_fabric_) {
RETURN_IF_ERROR(gl_interop_fabric_->Start());
}
#endif
for (const auto& input : inputs_) {
RETURN_IF_ERROR(input->CopyFromExternalObject());
}
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
if (gpu_invoke_loop_times_ <= 0) {
return absl::InvalidArgumentError(
"gpu_invoke_loop_times must be positive");
}
for (int i = 0; i < gpu_invoke_loop_times_; i++) {
RETURN_IF_ERROR(RunWithoutExternalBufferCopy());
}
#else
RETURN_IF_ERROR(RunWithoutExternalBufferCopy());
#endif
bool has_async_copies = false;
for (const auto& output : outputs_) {
RETURN_IF_ERROR(output->CopyToExternalObject());
if (output->def().external_def.object_def.object_type ==
ObjectType::CPU_MEMORY) {
has_async_copies = true;
}
}
#ifdef CL_DELEGATE_ALLOW_GL
if (gl_interop_fabric_) {
RETURN_IF_ERROR(gl_interop_fabric_->Finish());
}
#endif
if (has_async_copies) {
RETURN_IF_ERROR(queue_->WaitForCompletion());
}
return absl::OkStatus();
}
absl::Status RunWithoutExternalBufferCopy() override {
if (IsTfLiteProfilerActive()) {
ProfilingInfo profiling_info;
RETURN_IF_ERROR(context_->Profile(profiling_queue_, &profiling_info));
AddTfLiteProfilerEvents(&profiling_info);
}
RETURN_IF_ERROR(context_->AddToQueue(queue_));
context_->FlushQueue(queue_);
return absl::OkStatus();
}
private:
static absl::Status LinkTensors(
const std::vector<TensorTieDef>& defs, TensorTieFactory* factory,
std::vector<std::unique_ptr<TensorTie>>* objects) {
objects->reserve(defs.size());
for (auto& def : defs) {
std::unique_ptr<TensorTie> object;
RETURN_IF_ERROR(factory->NewTensorTie(def, &object));
objects->push_back(std::move(object));
}
return absl::OkStatus();
}
static std::vector<TensorObjectDef> GetExternalDefinitions(
const std::vector<std::unique_ptr<TensorTie>>& objects) {
std::vector<TensorObjectDef> defs;
defs.reserve(objects.size());
for (auto& obj : objects) {
defs.push_back(obj->def().external_def);
}
return defs;
}
CLCommandQueue* queue_;
ProfilingCommandQueue* profiling_queue_;
std::unique_ptr<InferenceContext> context_;
#ifdef CL_DELEGATE_ALLOW_GL
std::unique_ptr<GlInteropFabric> gl_interop_fabric_;
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
int gpu_invoke_loop_times_;
#endif
std::vector<std::unique_ptr<TensorTie>> inputs_;
std::vector<std::unique_ptr<TensorTie>> outputs_;
};
TensorObjectDef TensorToDef(const Tensor& tensor) {
TensorObjectDef def;
def.dimensions.b = tensor.Batch();
def.dimensions.h = tensor.Height();
def.dimensions.w = tensor.Width();
def.dimensions.c = tensor.Channels();
def.object_def.data_layout = ToDataLayout(tensor.GetStorageType());
def.object_def.data_type = tensor.GetDataType();
def.object_def.object_type = ToObjectType(tensor.GetStorageType());
def.object_def.user_provided = false;
return def;
}
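// Maps the position of the MAX_PRECISION priority to a calculation precision,
// then falls back to the closest precision the device actually supports.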
CalculationsPrecision GetPrecision(const Environment& env,
const InferenceOptions& options) {
CalculationsPrecision precision;
switch (GetPosition(options, InferencePriority::MAX_PRECISION)) {
case 1:
precision = CalculationsPrecision::F32;
break;
case 2:
precision = CalculationsPrecision::F32_F16;
break;
case 3:
precision = CalculationsPrecision::F16;
break;
default:
precision = CalculationsPrecision::F16;
break;
}
if (!env.IsSupported(precision)) {
precision = CalculationsPrecision::F32_F16;
if (!env.IsSupported(precision)) {
precision = CalculationsPrecision::F32;
}
}
return precision;
}
TensorStorageType GetStorageTypeFromOptions(const Environment& env,
const InferenceOptions& options) {
std::vector<TensorStorageType> preferred_storage_types;
if (GetRelativeImportance(options, InferencePriority::MIN_LATENCY,
InferencePriority::MIN_MEMORY_USAGE) ==
PriorityImportance::HIGHER) {
preferred_storage_types = {GetFastestStorageType(env.device().GetInfo()),
TensorStorageType::BUFFER};
} else {
preferred_storage_types = {
GetStorageTypeWithMinimalMemoryConsumption(env.device().GetInfo()),
TensorStorageType::BUFFER};
}
for (TensorStorageType storage_type : preferred_storage_types) {
if (env.IsSupported(storage_type)) {
return storage_type;
}
}
return TensorStorageType::UNKNOWN;
}
CreateGpuModelInfo GetCreateInfo(const Environment& environment,
const InferenceOptions& options) {
CreateGpuModelInfo create_info;
create_info.precision = GetPrecision(environment, options);
create_info.storage_type = GetStorageTypeFromOptions(environment, options);
if (options.usage == InferenceUsage::FAST_SINGLE_ANSWER) {
create_info.hints.Add(ModelHints::kReduceKernelsCount);
create_info.hints.Add(ModelHints::kFastTuning);
} else if (options.usage == InferenceUsage::BALANCED) {
create_info.hints.Add(ModelHints::kReduceKernelsCount);
} else if (options.usage == InferenceUsage::SUSTAINED_SPEED) {
create_info.hints.Add(ModelHints::kAllowSpecialKernels);
}
if (GetRelativeImportance(options, InferencePriority::MIN_MEMORY_USAGE,
InferencePriority::MIN_LATENCY) ==
PriorityImportance::HIGHER) {
create_info.hints.Add(ModelHints::kNoWinogradOptimizations);
create_info.hints.Add(ModelHints::kReuseConvWeights);
}
return create_info;
}
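// Builds an InferenceContext from a graph or a serialized model, lets the
// caller adjust input/output object definitions, and finally produces an
// InferenceRunnerImpl wired up with the matching tensor ties.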
class InferenceBuilderImpl : public InferenceBuilder {
public:
explicit InferenceBuilderImpl(Environment* environment)
: environment_(environment) {}
absl::Status Initialize(const InferenceOptions& options,
const InferenceEnvironmentOptions& env_options,
const GraphFloat32& graph) {
context_ = std::make_unique<InferenceContext>();
CreateGpuModelInfo create_info = GetCreateInfo(*environment_, options);
RETURN_IF_ERROR(context_->InitFromGraph(create_info, graph, environment_));
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
gpu_invoke_loop_times_ = options.gpu_invoke_loop_times;
#endif
#ifdef CL_DELEGATE_ALLOW_GL
if (env_options.IsGlAware() &&
IsGlSharingSupported(environment_->device())) {
gl_interop_fabric_ = std::make_unique<GlInteropFabric>(
env_options.egl_display, environment_);
}
tie_factory_ = std::make_unique<TensorTieFactory>(
environment_, context_.get(), gl_interop_fabric_.get());
#else
tie_factory_ =
std::make_unique<TensorTieFactory>(environment_, context_.get());
#endif
inputs_ = LinkTensors(context_->GetInputIds(), AccessType::READ);
outputs_ = LinkTensors(context_->GetOutputIds(), AccessType::WRITE);
return absl::OkStatus();
}
absl::Status Initialize(const InferenceEnvironmentOptions& env_options,
const absl::Span<const uint8_t> serialized_model) {
context_ = std::make_unique<InferenceContext>();
RETURN_IF_ERROR(
context_->RestoreDeserialized(serialized_model, environment_));
#ifdef CL_DELEGATE_ALLOW_GL
if (env_options.IsGlAware() &&
IsGlSharingSupported(environment_->device())) {
gl_interop_fabric_ = std::make_unique<GlInteropFabric>(
env_options.egl_display, environment_);
}
tie_factory_ = std::make_unique<TensorTieFactory>(
environment_, context_.get(), gl_interop_fabric_.get());
#else
tie_factory_ =
std::make_unique<TensorTieFactory>(environment_, context_.get());
#endif
inputs_ = LinkTensors(context_->GetInputIds(), AccessType::READ);
outputs_ = LinkTensors(context_->GetOutputIds(), AccessType::WRITE);
return absl::OkStatus();
}
std::vector<TensorObjectDef> inputs() const override {
return GetExternalDefinitions(inputs_);
}
std::vector<TensorObjectDef> outputs() const override {
return GetExternalDefinitions(outputs_);
}
absl::Status SetInputShape(int index, const Dimensions& dimensions) override {
if (index < 0 || index >= inputs_.size()) {
return absl::OutOfRangeError("Index is out of range");
}
return absl::UnimplementedError("Changing input shapes is not supported");
}
absl::Status SetInputObjectDef(int index, ObjectDef new_def) override {
if (index < 0 || index >= inputs_.size()) {
return absl::OutOfRangeError("Input index is out of range");
}
auto def = inputs_[index];
def.external_def.object_def = new_def;
if (!tie_factory_->IsSupported(def)) {
return absl::InvalidArgumentError(
"New input object definition is not supported.");
}
inputs_[index] = def;
return absl::OkStatus();
}
absl::Status SetOutputObjectDef(int index, ObjectDef new_def) override {
if (index < 0 || index >= outputs_.size()) {
return absl::OutOfRangeError("Output index is out of range");
}
auto def = outputs_[index];
def.external_def.object_def = new_def;
if (!tie_factory_->IsSupported(def)) {
return absl::InvalidArgumentError(
"New output object definition is not supported.");
}
outputs_[index] = def;
return absl::OkStatus();
}
absl::Status Build(std::unique_ptr<InferenceRunner>* runner) override {
#ifdef CL_DELEGATE_ALLOW_GL
if (gl_interop_fabric_ && !HasGlObjects()) {
gl_interop_fabric_.reset(nullptr);
}
auto runner_impl = std::make_unique<InferenceRunnerImpl>(
environment_, std::move(context_), std::move(gl_interop_fabric_)
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
,
gpu_invoke_loop_times_
#endif
);
#else
auto runner_impl =
std::make_unique<InferenceRunnerImpl>(environment_, std::move(context_)
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
,
gpu_invoke_loop_times_
#endif
);
#endif
RETURN_IF_ERROR(
runner_impl->Initialize(inputs_, outputs_, tie_factory_.get()));
*runner = std::move(runner_impl);
return absl::OkStatus();
}
private:
std::vector<TensorTieDef> LinkTensors(const std::vector<ValueId>& ids,
AccessType access) {
std::vector<TensorTieDef> links;
links.reserve(ids.size());
for (const auto& id : ids) {
TensorObjectDef def = TensorToDef(*context_->GetTensor(id));
links.push_back({id, access, def, def});
}
return links;
}
bool HasGlObjects() const {
#ifdef CL_DELEGATE_ALLOW_GL
auto is_gl = [](ObjectType t) {
return t == ObjectType::OPENGL_SSBO || t == ObjectType::OPENGL_TEXTURE;
};
for (const TensorTieDef& def : inputs_) {
if (is_gl(def.external_def.object_def.object_type)) {
return true;
}
}
for (const TensorTieDef& def : outputs_) {
if (is_gl(def.external_def.object_def.object_type)) {
return true;
}
}
#endif
return false;
}
static std::vector<TensorObjectDef> GetExternalDefinitions(
const std::vector<TensorTieDef>& links) {
std::vector<TensorObjectDef> defs;
defs.reserve(links.size());
for (auto& desc : links) {
defs.push_back(desc.external_def);
}
return defs;
}
std::unique_ptr<InferenceContext> context_;
#ifdef CL_DELEGATE_ALLOW_GL
std::unique_ptr<GlInteropFabric> gl_interop_fabric_;
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
int gpu_invoke_loop_times_;
#endif
Environment* environment_;
std::vector<TensorTieDef> inputs_;
std::vector<TensorTieDef> outputs_;
std::unique_ptr<TensorTieFactory> tie_factory_;
};
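// Creates or adopts the OpenCL device, context and command queue (optionally
// shared with an EGL context), and hands out builders and serialized models
// backed by that environment.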
class InferenceEnvironmentImpl : public InferenceEnvironment {
public:
explicit InferenceEnvironmentImpl(const InferenceEnvironmentOptions& options)
: options_(options) {}
absl::Status Init() {
RETURN_IF_ERROR(LoadOpenCL());
properties_.is_opencl_available = true;
CLDevice device;
if (options_.device) {
cl_platform_id platform;
RETURN_IF_ERROR(GetDeviceInfo<cl_platform_id>(
options_.device, CL_DEVICE_PLATFORM, &platform));
device = CLDevice(options_.device, platform);
} else {
RETURN_IF_ERROR(CreateDefaultGPUDevice(&device));
}
#ifdef CL_DELEGATE_ALLOW_GL
properties_.is_gl_sharing_supported = IsGlSharingSupported(device);
properties_.is_gl_to_cl_fast_sync_supported =
IsClEventFromEglSyncSupported(device);
properties_.is_cl_to_gl_fast_sync_supported =
IsEglSyncFromClEventSupported();
#endif
CLContext context;
if (options_.context) {
#ifdef CL_DELEGATE_ALLOW_GL
if (options_.IsGlAware()) {
return absl::InvalidArgumentError(
"OpenCL context and EGL parameters are set in the same time.");
}
#endif
context =
CLContext(options_.context, false, device);
} else {
#ifdef CL_DELEGATE_ALLOW_GL
if (options_.IsGlAware() && properties_.is_gl_sharing_supported) {
RETURN_IF_ERROR(CreateCLGLContext(
device,
reinterpret_cast<cl_context_properties>(options_.egl_context),
reinterpret_cast<cl_context_properties>(options_.egl_display),
&context));
} else {
RETURN_IF_ERROR(CreateCLContext(device, &context));
}
#else
RETURN_IF_ERROR(CreateCLContext(device, &context));
#endif
}
CLCommandQueue queue;
if (options_.command_queue) {
queue =
CLCommandQueue(options_.command_queue, false);
} else {
RETURN_IF_ERROR(CreateCLCommandQueue(device, context, &queue));
}
ProfilingCommandQueue profiling_queue;
RETURN_IF_ERROR(
CreateProfilingCommandQueue(device, context, &profiling_queue));
environment_ = Environment(std::move(device), std::move(context),
std::move(queue), std::move(profiling_queue));
return environment_.Init();
}
absl::Status BuildSerializedModel(
const InferenceOptions& options, GraphFloat32 model,
std::vector<uint8_t>* serialized_model) final {
if (!IsValid(options)) {
return absl::InvalidArgumentError("InferenceOptions are invalid.");
}
InferenceOptions resolved_options = options;
ResolveAutoPriority(&resolved_options);
if (environment_.program_cache() &&
!options_.serialized_binary_cache.empty()) {
environment_.program_cache()
->AddSerializedCache(environment_.context(), environment_.device(),
options_.serialized_binary_cache)
.IgnoreError();
}
RETURN_IF_ERROR(RunGraphTransformsForGpuModel(&model));
InferenceContext context;
CreateGpuModelInfo create_info = GetCreateInfo(environment_, options);
RETURN_IF_ERROR(context.InitFromGraph(create_info, model, &environment_,
serialized_model));
return absl::OkStatus();
}
absl::Status NewInferenceBuilder(
const InferenceOptions& options, GraphFloat32 model,
std::unique_ptr<InferenceBuilder>* builder) final {
if (!IsValid(options)) {
return absl::InvalidArgumentError("InferenceOptions are invalid.");
}
InferenceOptions resolved_options = options;
ResolveAutoPriority(&resolved_options);
if (environment_.program_cache() &&
!options_.serialized_binary_cache.empty()) {
environment_.program_cache()
->AddSerializedCache(environment_.context(), environment_.device(),
options_.serialized_binary_cache)
.IgnoreError();
}
RETURN_IF_ERROR(RunGraphTransformsForGpuModel(&model));
auto builder_impl = std::make_unique<InferenceBuilderImpl>(&environment_);
RETURN_IF_ERROR(
builder_impl->Initialize(resolved_options, options_, model));
*builder = std::move(builder_impl);
return absl::OkStatus();
}
absl::Status NewInferenceBuilder(
const absl::Span<const uint8_t> serialized_model,
std::unique_ptr<InferenceBuilder>* builder) final {
if (environment_.program_cache() &&
!options_.serialized_binary_cache.empty()) {
environment_.program_cache()
->AddSerializedCache(environment_.context(), environment_.device(),
options_.serialized_binary_cache)
.IgnoreError();
}
auto builder_impl = std::make_unique<InferenceBuilderImpl>(&environment_);
RETURN_IF_ERROR(builder_impl->Initialize(options_, serialized_model));
*builder = std::move(builder_impl);
return absl::OkStatus();
}
std::vector<uint8_t> GetSerializedBinaryCache() const final {
std::vector<uint8_t> data;
environment_.program_cache()
->GetSerializedCache(environment_.device(), &data)
.IgnoreError();
return data;
}
const InferenceEnvironmentProperties& properties() const {
return properties_;
}
private:
const InferenceEnvironmentOptions options_;
Environment environment_;
InferenceEnvironmentProperties properties_;
};
}
absl::Status NewInferenceEnvironment(
const InferenceEnvironmentOptions& options,
std::unique_ptr<InferenceEnvironment>* environment,
InferenceEnvironmentProperties* properties) {
auto env_impl = std::make_unique<InferenceEnvironmentImpl>(options);
absl::Status status = env_impl->Init();
if (properties) {
*properties = env_impl->properties();
}
RETURN_IF_ERROR(status);
*environment = std::move(env_impl);
return absl::OkStatus();
}
}
}
} | #include <ctype.h>
#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFilePattern[] = "api_def_*.pbtxt";
string DefaultApiDefDir() {
return GetDataDependencyFilepath(
io::JoinPath("tensorflow", "core", "api_def", "base_api"));
}
string PythonApiDefDir() {
return GetDataDependencyFilepath(
io::JoinPath("tensorflow", "core", "api_def", "python_api"));
}
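// Loads every api_def_*.pbtxt file under the given directory, expands the
// multi-line fields, and indexes each parsed ApiDef by its graph op name.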
void GetGoldenApiDefs(Env* env, const string& api_files_dir,
std::unordered_map<string, ApiDef>* name_to_api_def) {
std::vector<string> matching_paths;
TF_CHECK_OK(env->GetMatchingPaths(
io::JoinPath(api_files_dir, kApiDefFilePattern), &matching_paths));
for (auto& file_path : matching_paths) {
string file_contents;
TF_CHECK_OK(ReadFileToString(env, file_path, &file_contents));
file_contents = PBTxtFromMultiline(file_contents);
ApiDefs api_defs;
QCHECK(tensorflow::protobuf::TextFormat::ParseFromString(file_contents,
&api_defs))
<< "Failed to load " << file_path;
CHECK_EQ(api_defs.op_size(), 1);
(*name_to_api_def)[api_defs.op(0).graph_op_name()] = api_defs.op(0);
}
}
void TestAllApiDefsHaveCorrespondingOp(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
std::unordered_set<string> op_names;
for (const auto& op : ops.op()) {
op_names.insert(op.name());
}
for (const auto& name_and_api_def : api_defs_map) {
ASSERT_TRUE(op_names.find(name_and_api_def.first) != op_names.end())
<< name_and_api_def.first << " op has ApiDef but missing from ops. "
<< "Does api_def_" << name_and_api_def.first << " need to be deleted?";
}
}
void TestAllApiDefInputArgsAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_arg : api_def.in_arg()) {
bool found_arg = false;
for (const auto& op_arg : op.input_arg()) {
if (api_def_arg.name() == op_arg.name()) {
found_arg = true;
break;
}
}
ASSERT_TRUE(found_arg)
<< "Input argument " << api_def_arg.name()
<< " (overwritten in api_def_" << op.name()
<< ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestAllApiDefOutputArgsAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_arg : api_def.out_arg()) {
bool found_arg = false;
for (const auto& op_arg : op.output_arg()) {
if (api_def_arg.name() == op_arg.name()) {
found_arg = true;
break;
}
}
ASSERT_TRUE(found_arg)
<< "Output argument " << api_def_arg.name()
<< " (overwritten in api_def_" << op.name()
<< ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestAllApiDefAttributeNamesAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_attr : api_def.attr()) {
bool found_attr = false;
for (const auto& op_attr : op.attr()) {
if (api_def_attr.name() == op_attr.name()) {
found_attr = true;
}
}
ASSERT_TRUE(found_attr)
<< "Attribute " << api_def_attr.name() << " (overwritten in api_def_"
<< op.name() << ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestDeprecatedAttributesSetCorrectly(
const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& name_and_api_def : api_defs_map) {
int num_deprecated_endpoints = 0;
const auto& api_def = name_and_api_def.second;
for (const auto& endpoint : api_def.endpoint()) {
if (endpoint.deprecated()) {
++num_deprecated_endpoints;
}
}
const auto& name = name_and_api_def.first;
ASSERT_TRUE(api_def.deprecation_message().empty() ||
num_deprecated_endpoints == 0)
<< "Endpoints are set to 'deprecated' for deprecated op " << name
<< ". If an op is deprecated (i.e. deprecation_message is set), "
<< "all the endpoints are deprecated implicitly and 'deprecated' "
<< "field should not be set.";
if (num_deprecated_endpoints > 0) {
ASSERT_NE(num_deprecated_endpoints, api_def.endpoint_size())
<< "All " << name << " endpoints are deprecated. Please, set "
<< "deprecation_message in api_def_" << name << ".pbtxt instead. "
<< "to indicate that the op is deprecated.";
}
}
}
void TestDeprecationVersionSetCorrectly(
const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& name_and_api_def : api_defs_map) {
const auto& name = name_and_api_def.first;
const auto& api_def = name_and_api_def.second;
if (api_def.deprecation_version() != 0) {
ASSERT_TRUE(api_def.deprecation_version() > 0)
<< "Found ApiDef with negative deprecation_version";
ASSERT_FALSE(api_def.deprecation_message().empty())
<< "ApiDef that includes deprecation_version > 0 must also specify "
<< "a deprecation_message. Op " << name
<< " has deprecation_version > 0 but deprecation_message is not set.";
}
}
}
class BaseApiTest : public ::testing::Test {
protected:
BaseApiTest() {
OpRegistry::Global()->Export(false, &ops_);
const std::vector<string> multi_line_fields = {"description"};
Env* env = Env::Default();
GetGoldenApiDefs(env, DefaultApiDefDir(), &api_defs_map_);
}
OpList ops_;
std::unordered_map<string, ApiDef> api_defs_map_;
};
TEST_F(BaseApiTest, AllOpsAreInApiDef) {
auto* excluded_ops = GetExcludedOps();
for (const auto& op : ops_.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
EXPECT_TRUE(api_defs_map_.find(op.name()) != api_defs_map_.end())
<< op.name() << " op does not have api_def_*.pbtxt file. "
<< "Please add api_def_" << op.name() << ".pbtxt file "
<< "under tensorflow/core/api_def/base_api/ directory.";
}
}
TEST_F(BaseApiTest, AllApiDefsHaveCorrespondingOp) {
TestAllApiDefsHaveCorrespondingOp(ops_, api_defs_map_);
}
string GetOpDefHasDocStringError(const string& op_name) {
return strings::Printf(
"OpDef for %s has a doc string. "
"Doc strings must be defined in ApiDef instead of OpDef. "
"Please, add summary and descriptions in api_def_%s"
".pbtxt file instead",
op_name.c_str(), op_name.c_str());
}
TEST_F(BaseApiTest, OpDefsShouldNotHaveDocs) {
auto* excluded_ops = GetExcludedOps();
for (const auto& op : ops_.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
ASSERT_TRUE(op.summary().empty()) << GetOpDefHasDocStringError(op.name());
ASSERT_TRUE(op.description().empty())
<< GetOpDefHasDocStringError(op.name());
for (const auto& arg : op.input_arg()) {
ASSERT_TRUE(arg.description().empty())
<< GetOpDefHasDocStringError(op.name());
}
for (const auto& arg : op.output_arg()) {
ASSERT_TRUE(arg.description().empty())
<< GetOpDefHasDocStringError(op.name());
}
for (const auto& attr : op.attr()) {
ASSERT_TRUE(attr.description().empty())
<< GetOpDefHasDocStringError(op.name());
}
}
}
TEST_F(BaseApiTest, AllApiDefInputArgsAreValid) {
TestAllApiDefInputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, AllApiDefOutputArgsAreValid) {
TestAllApiDefOutputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, DeprecationSetCorrectly) {
TestDeprecatedAttributesSetCorrectly(api_defs_map_);
}
TEST_F(BaseApiTest, DeprecationVersionSetCorrectly) {
TestDeprecationVersionSetCorrectly(api_defs_map_);
}
class PythonApiTest : public ::testing::Test {
protected:
PythonApiTest() {
OpRegistry::Global()->Export(false, &ops_);
const std::vector<string> multi_line_fields = {"description"};
Env* env = Env::Default();
GetGoldenApiDefs(env, PythonApiDefDir(), &api_defs_map_);
}
OpList ops_;
std::unordered_map<string, ApiDef> api_defs_map_;
};
TEST_F(PythonApiTest, AllApiDefsHaveCorrespondingOp) {
TestAllApiDefsHaveCorrespondingOp(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefInputArgsAreValid) {
TestAllApiDefInputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefOutputArgsAreValid) {
TestAllApiDefOutputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, DeprecationSetCorrectly) {
TestDeprecatedAttributesSetCorrectly(api_defs_map_);
}
TEST_F(PythonApiTest, DeprecationVersionSetCorrectly) {
TestDeprecationVersionSetCorrectly(api_defs_map_);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/api_def/api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8fa38a8-c603-4c99-9ab0-8297cb05f5a3 | cpp | tensorflow/tensorflow | delegate | tensorflow/lite/delegates/flex/delegate.cc | tensorflow/lite/delegates/flex/delegate_test.cc | #include "tensorflow/lite/delegates/flex/delegate.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#include "tensorflow/lite/delegates/flex/buffer_map.h"
#include "tensorflow/lite/delegates/flex/kernel.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
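// Wraps the given FlexDelegate (or a default-constructed one) in a
// TfLiteDelegate that allows dynamic tensors and per-operator profiling.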
TfLiteDelegateUniquePtr FlexDelegate::Create(
std::unique_ptr<FlexDelegate> base_delegate) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"Created TensorFlow Lite delegate for select TF ops.");
if (base_delegate == nullptr) {
base_delegate.reset(new FlexDelegate());
}
auto flex_delegate = TfLiteDelegateFactory::Create(std::move(base_delegate));
flex_delegate->flags |= kTfLiteDelegateFlagsAllowDynamicTensors;
flex_delegate->flags |= kTfLiteDelegateFlagsPerOperatorProfiling;
reinterpret_cast<FlexDelegate*>(flex_delegate->data_)->base_delegate_ =
flex_delegate.get();
return flex_delegate;
}
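// Prepares the TensorFlow execution context, forwarding the interpreter's
// recommended thread count and installing a cancellation manager.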
TfLiteStatus FlexDelegate::Initialize(TfLiteContext* context) {
tensorflow::SessionOptions session_options;
session_options.config.set_inter_op_parallelism_threads(-1);
if (context->recommended_num_threads > 0) {
session_options.config.set_intra_op_parallelism_threads(
context->recommended_num_threads);
}
auto status = delegate_data_.Prepare(
session_options, reinterpret_cast<Subgraph*>(context->impl_),
base_delegate_);
if (!status.ok()) {
TF_LITE_KERNEL_LOG(context, "Failed to initialize TensorFlow context: %s",
absl::StatusMessageAsCStr(status));
return kTfLiteError;
}
if (!cancellation_manager_) {
cancellation_manager_ = std::make_unique<tensorflow::CancellationManager>();
delegate_data_.SetCancellationManager(cancellation_manager_.get());
}
return kTfLiteOk;
}
const char* FlexDelegate::Name() const {
static constexpr char kName[] = "TfLiteFlexDelegate";
return kName;
}
bool FlexDelegate::IsNodeSupportedByDelegate(
const TfLiteRegistration* registration, const TfLiteNode* node,
TfLiteContext* context) const {
return IsFlexOp(registration->custom_name);
}
std::unique_ptr<SimpleDelegateKernelInterface>
FlexDelegate::CreateDelegateKernelInterface() {
return std::unique_ptr<SimpleDelegateKernelInterface>(
new tflite::flex::DelegateKernel());
}
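// Copies a tensor computed by TensorFlow back into the corresponding TFLite
// tensor, handling string, resource/variant and plain numeric buffers.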
TfLiteStatus FlexDelegate::CopyFromBufferHandle(
TfLiteContext* context, TfLiteBufferHandle buffer_handle,
TfLiteTensor* output) {
flex::BufferMap* buffer_map = delegate_data_.GetBufferMap(context);
if (!buffer_map->HasTensor(buffer_handle)) {
TF_LITE_KERNEL_LOG(context, "Invalid tensor index %d.", buffer_handle);
return kTfLiteError;
}
tensorflow::Tensor t = buffer_map->GetTensor(buffer_handle);
if (output->type == kTfLiteString) {
if (t.dtype() != tensorflow::DT_STRING) {
TF_LITE_KERNEL_LOG(context,
"Inconsistent type for TF string tensor index %d.",
buffer_handle);
return kTfLiteError;
}
DynamicBuffer dynamic_buffer;
auto tf_data = t.flat<tensorflow::tstring>();
for (int i = 0; i < t.NumElements(); ++i) {
dynamic_buffer.AddString(tf_data(i).data(), tf_data(i).size());
}
dynamic_buffer.WriteToTensor(output, nullptr);
return kTfLiteOk;
}
if (IsResourceOrVariant(output)) {
const size_t required_bytes = sizeof(tensorflow::Tensor**);
const tensorflow::Tensor** tf_tensor_ptr =
reinterpret_cast<const tensorflow::Tensor**>(malloc(required_bytes));
*tf_tensor_ptr = buffer_map->GetTensorPtr(buffer_handle);
TfLiteTensorDataFree(output);
output->data.raw = reinterpret_cast<char*>(tf_tensor_ptr);
output->bytes = required_bytes;
output->data_is_stale = true;
return kTfLiteOk;
}
tensorflow::StringPiece t_data = t.tensor_data();
if (output->bytes != t_data.size()) {
TF_LITE_KERNEL_LOG(context,
absl::StrCat("The given ", output->bytes,
" bytes are not enough to store "
"TensorFlow's aligned buffer of size ",
t_data.size(), " bytes.")
.c_str());
return kTfLiteError;
}
memcpy(output->data.raw, t_data.data(), t_data.size());
return kTfLiteOk;
}
void FlexDelegate::Cancel() { cancellation_manager_->StartCancel(); }
bool FlexDelegate::HasCancelled(void* data) {
if (data == nullptr) {
return false;
}
auto* flex_delegate = static_cast<FlexDelegate*>(data);
return flex_delegate->cancellation_manager_->IsCancelled();
}
} | #include "tensorflow/lite/delegates/flex/delegate.h"
#include <chrono>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/flex/test_util.h"
#include "tensorflow/lite/shared_library.h"
namespace tflite {
namespace flex {
namespace {
using ::testing::ElementsAre;
class DelegateTest : public testing::FlexModelTest {
public:
DelegateTest() : delegate_(FlexDelegate::Create()) {
flex_delegate_ = static_cast<FlexDelegate*>(delegate_->data_);
interpreter_ = std::make_unique<Interpreter>(&error_reporter_);
}
~DelegateTest() override {
interpreter_.reset();
delegate_.reset();
}
void ConfigureDelegate() {
interpreter_->SetCancellationFunction(flex_delegate_,
FlexDelegate::HasCancelled);
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()),
kTfLiteOk);
}
void Cancel() { flex_delegate_->Cancel(); }
private:
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)> delegate_;
FlexDelegate* flex_delegate_;
};
TEST_F(DelegateTest, FullGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
}
TEST_F(DelegateTest, NonFloatTypeInference) {
AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetTypedValues<int>(0, {1, 2, 3, 4});
SetShape(1, {2, 2});
SetTypedValues<int>(1, {4, 3, 2, 1});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
ASSERT_THAT(GetTypedValues<int>(2), ElementsAre(5, 5, 5, 5));
ASSERT_EQ(GetType(2), kTfLiteInt32);
}
TEST_F(DelegateTest, StringInference) {
AddTensors(3, {0, 1}, {2}, kTfLiteString, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetStringValues(0, {"1", "2", "3", "4"});
SetShape(1, {2, 2});
SetStringValues(1, {"4", "3", "2", "1"});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
ASSERT_THAT(GetStringValues(2), ElementsAre("14", "23", "32", "41"));
ASSERT_EQ(GetType(2), kTfLiteString);
}
TEST_F(DelegateTest, MixedGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfLiteMulOp({6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
}
TEST_F(DelegateTest, SplitGraph) {
AddTensors(10, {0}, {9}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kAdd, {1, 2}, {3});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfLiteMulOp({4, 5}, {6});
AddTfOp(testing::kUnpack, {6}, {7, 8});
AddTfOp(testing::kAdd, {7, 8}, {9});
ConfigureDelegate();
SetShape(0, {2, 2, 2, 1});
SetValues(0, {3.0f, 1.0f, 0.5f, -1.0f, 0.0f, 1.0f, 1.5f, 3.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(9), ElementsAre(1));
ASSERT_THAT(GetValues(9), ElementsAre(10.0f));
}
TEST_F(DelegateTest, OnlyTFLite) {
AddTensors(10, {0, 1}, {2}, kTfLiteFloat32, {3});
AddTfLiteMulOp({0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(1, {2, 2, 1});
SetValues(1, {1.0f, 2.0f, 3.0f, 4.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2, 1));
ASSERT_THAT(GetValues(2), ElementsAre(1.1f, 4.4f, 9.9f, 17.6f));
}
TEST_F(DelegateTest, MultipleInvokeCalls) {
AddTensors(10, {0, 1}, {2}, kTfLiteFloat32, {3});
AddTfLiteMulOp({0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(1, {2, 2, 1});
SetValues(1, {1.0f, 2.0f, 3.0f, 4.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2, 1));
ASSERT_THAT(GetValues(2), ElementsAre(1.1f, 4.4f, 9.9f, 17.6f));
SetShape(0, {2, 2, 1});
SetValues(1, {4.0f, 3.0f, 2.0f, 1.0f});
SetShape(1, {2, 2, 1});
SetValues(0, {4.4f, 3.3f, 2.2f, 1.1f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2, 1));
ASSERT_THAT(GetValues(2), ElementsAre(17.6f, 9.9f, 4.4f, 1.1f));
}
TEST_F(DelegateTest, MultipleInterpretersSameDelegate) {
{
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
}
std::unique_ptr<Interpreter> interpreter(new Interpreter(&error_reporter_));
interpreter_.swap(interpreter);
{
AddTensors(10, {0}, {9}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kAdd, {1, 2}, {3});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfLiteMulOp({4, 5}, {6});
AddTfOp(testing::kUnpack, {6}, {7, 8});
AddTfOp(testing::kAdd, {7, 8}, {9});
ConfigureDelegate();
SetShape(0, {2, 2, 2, 1});
SetValues(0, {3.0f, 1.0f, 0.5f, -1.0f, 0.0f, 1.0f, 1.5f, 3.0f});
}
interpreter_.swap(interpreter);
{
ASSERT_TRUE(Invoke());
EXPECT_THAT(GetShape(8), ElementsAre(2, 1));
EXPECT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
}
interpreter_.swap(interpreter);
{
ASSERT_TRUE(Invoke());
EXPECT_THAT(GetShape(9), ElementsAre(1));
EXPECT_THAT(GetValues(9), ElementsAre(10.0f));
}
}
TEST_F(DelegateTest, SingleThreaded) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
interpreter_->SetNumThreads(1);
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
}
TEST_F(DelegateTest, MultiThreaded) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
interpreter_->SetNumThreads(4);
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
}
#if !defined(__ANDROID__)
TEST_F(DelegateTest, TF_AcquireFlexDelegate) {
auto TF_AcquireFlexDelegate =
reinterpret_cast<Interpreter::TfLiteDelegatePtr (*)()>(
SharedLibrary::GetSymbol("TF_AcquireFlexDelegate"));
ASSERT_TRUE(TF_AcquireFlexDelegate);
auto delegate_ptr = TF_AcquireFlexDelegate();
ASSERT_TRUE(delegate_ptr != nullptr);
}
#endif
TEST_F(DelegateTest, StaticOutput) {
AddTensors(7, {0, 1, 2, 3}, {6}, kTfLiteFloat32, {2});
AddTfOp(testing::kAdd, {0, 2}, {4});
AddTfOp(testing::kAdd, {1, 3}, {5});
AddTfOp(testing::kMul, {4, 5}, {6});
ConfigureDelegate();
SetShape(0, {2});
SetShape(1, {2});
SetShape(2, {2});
SetShape(3, {2});
SetValues(0, {1.1f, 2.2f});
SetValues(1, {3.3f, 4.4f});
SetValues(2, {1.1f, 2.2f});
SetValues(3, {3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(6), ElementsAre(2));
ASSERT_THAT(GetValues(6), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(6), kTfLiteFloat32);
ASSERT_FALSE(IsDynamicTensor(6));
}
TEST_F(DelegateTest, StaticOutputRFFT) {
AddTensors(4, {0, 1}, {3}, kTfLiteFloat32, {3, 257});
int32_t rfft_length[] = {512};
SetConstTensor(1, {1}, kTfLiteInt32,
reinterpret_cast<const char*>(&rfft_length),
sizeof(rfft_length));
AddTfOp(testing::kRfft, {0, 1}, {2});
AddTfOp(testing::kImag, {2}, {3});
ConfigureDelegate();
SetShape(0, {3, 512});
SetValues(0, std::vector<float>(3 * 512, 1.0f));
ASSERT_TRUE(Invoke());
ASSERT_EQ(GetType(3), kTfLiteFloat32);
ASSERT_FALSE(IsDynamicTensor(3));
}
TEST_F(DelegateTest, DynamicOutputAfterReshape) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
ASSERT_TRUE(IsDynamicTensor(8));
}
TEST_F(DelegateTest, TestCancellation1) {
AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetTypedValues<int>(0, {1, 2, 3, 4});
SetShape(1, {2, 2});
SetTypedValues<int>(1, {4, 3, 2, 1});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
ASSERT_THAT(GetTypedValues<int>(2), ElementsAre(5, 5, 5, 5));
ASSERT_EQ(GetType(2), kTfLiteInt32);
Cancel();
ASSERT_FALSE(Invoke());
EXPECT_EQ(error_reporter_.error_messages(),
"Client requested cancel during Invoke()");
}
TEST_F(DelegateTest, TestCancellation2) {
AddTensors(2, {0}, {1}, kTfLiteBool, {1});
AddTfOp(testing::kLoopCond, {0}, {1});
ConfigureDelegate();
SetShape(0, {1});
ASSERT_TRUE(Invoke());
Cancel();
ASSERT_FALSE(Invoke());
EXPECT_EQ(error_reporter_.error_messages(),
"Client requested cancel during Invoke()");
}
TEST_F(DelegateTest, TestCancellationTwoThreads) {
AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetTypedValues<int>(0, {1, 2, 3, 4});
SetShape(1, {2, 2});
SetTypedValues<int>(1, {4, 3, 2, 1});
std::thread invoke_thread([this]() {
bool result = true;
result = this->Invoke();
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
result = this->Invoke();
ASSERT_FALSE(result);
});
std::thread cancel_thread([this]() { this->Cancel(); });
invoke_thread.join();
cancel_thread.join();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
edecf673-cc94-4a5f-b4eb-dbb5af005541 | cpp | tensorflow/tensorflow | async_buffers | tensorflow/lite/delegates/gpu/async_buffers.cc | tensorflow/lite/delegates/gpu/async_buffers_test.cc | #include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl31.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_errors.h"
namespace {
PFNGLBUFFERSTORAGEEXTERNALEXTPROC glBufferStorageExternalEXT;
PFNEGLGETNATIVECLIENTBUFFERANDROIDPROC eglGetNativeClientBufferANDROID;
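// Resolves the EGL/GL extension entry points needed to back a GL buffer with
// an AHardwareBuffer; returns true only if both symbols are available.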
bool IsGlSupported() {
static const bool extensions_allowed = [] {
eglGetNativeClientBufferANDROID =
reinterpret_cast<PFNEGLGETNATIVECLIENTBUFFERANDROIDPROC>(
eglGetProcAddress("eglGetNativeClientBufferANDROID"));
glBufferStorageExternalEXT =
reinterpret_cast<PFNGLBUFFERSTORAGEEXTERNALEXTPROC>(
eglGetProcAddress("glBufferStorageExternalEXT"));
return eglGetNativeClientBufferANDROID && glBufferStorageExternalEXT;
}();
return extensions_allowed;
}
}
namespace tflite {
namespace gpu {
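// Binds ahwb_ as external storage for the currently bound GL shader storage
// buffer via glBufferStorageExternalEXT.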
absl::Status AsyncBuffer::MapAHardwareBufferToGlBuffer() {
if (!IsGlSupported()) {
return absl::UnknownError(
"No GL extension functions found to bind AHardwareBuffer and "
"OpenGL buffer");
}
EGLClientBuffer native_buffer = eglGetNativeClientBufferANDROID(ahwb_);
if (!native_buffer) {
return absl::UnknownError("Can't get native buffer");
}
glBufferStorageExternalEXT(GL_SHADER_STORAGE_BUFFER, 0, bytes_, native_buffer,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT |
GL_MAP_COHERENT_BIT_EXT |
GL_MAP_PERSISTENT_BIT_EXT);
return gl::GetOpenGlErrors();
}
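// Creates the GL buffer on first use; if external mapping fails, releases the
// AHardwareBuffer and falls back to ordinary glBufferData storage.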
absl::Status AsyncBuffer::AllocateOpenGlBuffer() {
if (opengl_buffer_ == GL_INVALID_INDEX) {
glGenBuffers(1, &opengl_buffer_);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, opengl_buffer_);
absl::Status status = MapAHardwareBufferToGlBuffer();
if (!status.ok()) {
if (ahwb_ != nullptr) {
if (OptionalAndroidHardwareBuffer::Instance().Supported()) {
OptionalAndroidHardwareBuffer::Instance().Release(ahwb_);
}
ahwb_ = nullptr;
}
glBufferData(GL_SHADER_STORAGE_BUFFER, bytes_, nullptr, GL_STREAM_COPY);
}
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
}
return absl::OkStatus();
}
absl::Status AsyncBuffer::GetOpenGlBuffer(GLuint& buffer_ref) {
if (!valid_) {
absl::Status status = AllocateOpenGlBuffer();
if (!status.ok()) {
return status;
}
}
valid_ = true;
buffer_ref = opengl_buffer_;
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/api.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
namespace tflite {
namespace gpu {
namespace {
TEST(AsyncBufferTest, DuplicateTest) {
if (__builtin_available(android 26, *)) {
auto Instance = OptionalAndroidHardwareBuffer::Instance;
TensorObjectDef* tie = new TensorObjectDef();
tie->object_def.data_type = DataType::FLOAT32;
tie->object_def.data_layout = DataLayout::BHWC;
tie->dimensions = Dimensions(2, 2, 2, 2);
AHardwareBuffer_Desc buffDesc = {};
buffDesc.width = 1000;
buffDesc.height = 1;
buffDesc.layers = 1;
buffDesc.format = AHARDWAREBUFFER_FORMAT_BLOB;
buffDesc.usage = AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
AHardwareBuffer* ahwb;
EXPECT_TRUE(Instance().IsSupported(&buffDesc));
EXPECT_EQ(Instance().Allocate(&buffDesc, &ahwb), 0);
std::unique_ptr<gl::EglEnvironment> env;
EXPECT_OK(gl::EglEnvironment::NewEglEnvironment(&env));
AsyncBuffer async_buffer1 = AsyncBuffer(*tie, ahwb);
GLuint buffer1, buffer2;
EXPECT_OK(async_buffer1.GetOpenGlBuffer(buffer1));
EXPECT_GE(buffer1, 0);
EXPECT_OK(async_buffer1.GetOpenGlBuffer(buffer2));
EXPECT_EQ(buffer1, buffer2);
AsyncBuffer async_buffer2 = AsyncBuffer(*tie, ahwb);
EXPECT_OK(async_buffer2.GetOpenGlBuffer(buffer2));
EXPECT_NE(buffer1, buffer2);
} else {
GTEST_SKIP();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/async_buffers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/async_buffers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94a1be68-c733-423d-b882-6f72879d2d5f | cpp | tensorflow/tensorflow | android_hardware_buffer | tensorflow/lite/delegates/gpu/android_hardware_buffer.cc | tensorflow/lite/delegates/gpu/android_hardware_buffer_test.cc | #include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include <dlfcn.h>
namespace tflite::gpu {
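// Loads libnativewindow.so at runtime and resolves the AHardwareBuffer API;
// the feature is marked unsupported if any symbol is missing or off Android.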
OptionalAndroidHardwareBuffer::OptionalAndroidHardwareBuffer() {
#ifdef __ANDROID__
dlopen_handle_ = dlopen("libnativewindow.so", RTLD_NOW);
if (dlopen_handle_ == nullptr) {
supported_ = false;
return;
}
allocate_ = reinterpret_cast<decltype(allocate_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_allocate"));
acquire_ = reinterpret_cast<decltype(acquire_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_acquire"));
release_ = reinterpret_cast<decltype(release_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_release"));
describe_ = reinterpret_cast<decltype(describe_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_describe"));
is_supported_ = reinterpret_cast<decltype(is_supported_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_isSupported"));
supported_ =
(allocate_ != nullptr && acquire_ != nullptr && release_ != nullptr &&
describe_ != nullptr && is_supported_ != nullptr);
#else
dlopen_handle_ = nullptr;
allocate_ = nullptr;
acquire_ = nullptr;
release_ = nullptr;
describe_ = nullptr;
is_supported_ = nullptr;
supported_ = false;
#endif
}
} | #include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include <gtest/gtest.h>
using tflite::gpu::OptionalAndroidHardwareBuffer;
auto Instance = OptionalAndroidHardwareBuffer::Instance;
namespace {
#ifndef __ANDROID__
TEST(OptionalAndroidHardwareBufferTest, NotSupportedOnNonAndroid) {
EXPECT_EQ(Instance().Supported(), false);
}
#else
TEST(OptionalAndroidHardwareBufferTest, SupportedOnAndroid) {
EXPECT_EQ(Instance().Supported(), true);
}
TEST(OptionalAndroidHardwareBufferTest, CanAllocateAndReleaseOnAndroid) {
EXPECT_EQ(Instance().Supported(), true);
AHardwareBuffer* buffer;
AHardwareBuffer_Desc description{};
description.width = 1600;
description.height = 1;
description.layers = 1;
description.rfu0 = 0;
description.rfu1 = 0;
description.stride = 1;
description.format = AHARDWAREBUFFER_FORMAT_BLOB;
description.usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
EXPECT_TRUE(Instance().IsSupported(&description));
EXPECT_EQ(Instance().Allocate(&description, &buffer), 0);
Instance().Release(buffer);
}
TEST(OptionalAndroidHardwareBufferTest, CanAcquireAndReleaseOnAndroid) {
EXPECT_EQ(Instance().Supported(), true);
AHardwareBuffer* buffer;
AHardwareBuffer_Desc description{};
description.width = 1600;
description.height = 1;
description.layers = 1;
description.rfu0 = 0;
description.rfu1 = 0;
description.stride = 1;
description.format = AHARDWAREBUFFER_FORMAT_BLOB;
description.usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
EXPECT_TRUE(Instance().IsSupported(&description));
EXPECT_EQ(Instance().Allocate(&description, &buffer), 0);
Instance().Acquire(buffer);
Instance().Release(buffer);
Instance().Release(buffer);
}
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/android_hardware_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/android_hardware_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f71fad68-e03b-400c-b9ab-ee61e52d3905 | cpp | tensorflow/tensorflow | buffer | tensorflow/lite/delegates/gpu/cl/buffer.cc | tensorflow/lite/delegates/gpu/cl/buffer_test.cc | #include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include <string>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
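// Allocates a cl_mem of the requested size, optionally read-only and
// initialized from `data`, and wraps it in a Buffer.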
absl::Status CreateBuffer(size_t size_in_bytes, bool gpu_read_only,
const void* data, CLContext* context,
Buffer* result) {
cl_mem buffer;
RETURN_IF_ERROR(CreateCLBuffer(context->context(), size_in_bytes,
gpu_read_only, const_cast<void*>(data),
&buffer));
*result = Buffer(buffer, size_in_bytes);
return absl::OkStatus();
}
absl::Status CreateSubBuffer(const Buffer& parent, size_t origin_in_bytes,
size_t size_in_bytes, bool gpu_read_only,
CLContext* context, Buffer* result) {
cl_mem buffer;
if (parent.IsSubBuffer()) {
return absl::InvalidArgumentError(
"Cannot create a sub-buffer from a sub-buffer!");
}
RETURN_IF_ERROR(CreateCLSubBuffer(context->context(), parent.GetMemoryPtr(),
origin_in_bytes, size_in_bytes,
gpu_read_only, &buffer));
*result = Buffer(buffer, size_in_bytes, true);
return absl::OkStatus();
}
}
Buffer::Buffer(cl_mem buffer, size_t size_in_bytes, bool is_sub_buffer)
: buffer_(buffer), size_(size_in_bytes), is_sub_buffer_(is_sub_buffer) {}
Buffer::Buffer(cl_mem buffer)
: buffer_(buffer), size_(0), is_sub_buffer_(false), owner_(false) {}
Buffer::Buffer(Buffer&& buffer)
: buffer_(buffer.buffer_),
size_(buffer.size_),
is_sub_buffer_(buffer.is_sub_buffer_),
owner_(buffer.owner_) {
buffer.buffer_ = nullptr;
buffer.size_ = 0;
buffer.is_sub_buffer_ = false;
}
Buffer& Buffer::operator=(Buffer&& buffer) {
if (this != &buffer) {
Release();
std::swap(size_, buffer.size_);
std::swap(buffer_, buffer.buffer_);
std::swap(is_sub_buffer_, buffer.is_sub_buffer_);
std::swap(owner_, buffer.owner_);
}
return *this;
}
void Buffer::Release() {
if (owner_ && buffer_) {
clReleaseMemObject(buffer_);
buffer_ = nullptr;
size_ = 0;
is_sub_buffer_ = false;
}
}
absl::Status Buffer::GetGPUResources(const GPUObjectDescriptor* obj_ptr,
GPUResourcesWithValue* resources) const {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(obj_ptr);
if (!buffer_desc) {
return absl::InvalidArgumentError("Expected BufferDescriptor on input.");
}
resources->buffers.push_back({"buffer", buffer_});
return absl::OkStatus();
}
absl::Status Buffer::CreateFromBufferDescriptor(const BufferDescriptor& desc,
CLContext* context) {
bool read_only = desc.memory_type == MemoryType::CONSTANT;
uint8_t* data_ptr = desc.data.empty()
? nullptr
: const_cast<unsigned char*>(desc.data.data());
size_ = desc.size;
return CreateCLBuffer(context->context(), desc.size, read_only, data_ptr,
&buffer_);
}
Buffer CreateBufferShared(cl_mem buffer) { return Buffer(buffer); }
absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
Buffer* result) {
return CreateBuffer(size_in_bytes, true, nullptr, context, result);
}
absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
CLContext* context, Buffer* result) {
return CreateBuffer(size_in_bytes, true, data, context, result);
}
absl::Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
Buffer* result) {
return CreateBuffer(size_in_bytes, false, nullptr, context, result);
}
absl::Status CreateReadWriteSubBuffer(const Buffer& parent,
size_t origin_in_bytes,
size_t size_in_bytes, CLContext* context,
Buffer* result) {
return CreateSubBuffer(parent, origin_in_bytes, size_in_bytes,
false, context, result);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLTest, BufferTestFloat) {
const std::vector<float> data = {1.0, 2.0, 3.0, -4.0, 5.1};
Buffer buffer;
ASSERT_OK(CreateReadWriteBuffer(sizeof(float) * 5, &env_.context(), &buffer));
ASSERT_OK(buffer.WriteData(env_.queue(),
absl::MakeConstSpan(data.data(), data.size())));
std::vector<float> gpu_data;
ASSERT_OK(buffer.ReadData<float>(env_.queue(), &gpu_data));
EXPECT_THAT(gpu_data, Pointwise(FloatNear(0.0f), data));
}
TEST_F(OpenCLTest, BufferTestHalf) {
const std::vector<half> data = {half(1.4), half(2.1), half(2.2)};
Buffer buffer;
ASSERT_OK(CreateReadWriteBuffer(sizeof(half) * 3, &env_.context(), &buffer));
ASSERT_OK(buffer.WriteData(env_.queue(),
absl::MakeConstSpan(data.data(), data.size())));
std::vector<half> gpu_data;
ASSERT_OK(buffer.ReadData<half>(env_.queue(), &gpu_data));
EXPECT_THAT(gpu_data, Pointwise(FloatNear(0.0f), data));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e76a92d-b3d4-4a7b-a177-4658f864cc89 | cpp | tensorflow/tensorflow | model_builder_helper | tensorflow/lite/delegates/gpu/common/model_builder_helper.cc | tensorflow/lite/delegates/gpu/common/model_builder_helper_test.cc | #include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include <stddef.h>
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include <vector>
#include "fp16.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace gpu {
namespace {
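// Inserts a new node right after `node`: the passthrough node takes over
// producing `output`, while `node` now writes to a freshly created copy of it.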
absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node,
const Value* output, Node** passthru_node) {
*passthru_node = graph->NewNode();
RETURN_IF_ERROR(graph->SetProducer((*passthru_node)->id, output->id));
Value* copy_output = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(node->id, copy_output->id));
RETURN_IF_ERROR(graph->AddConsumer((*passthru_node)->id, copy_output->id));
copy_output->tensor = output->tensor;
copy_output->tensor.ref = -1;
return absl::OkStatus();
}
}
absl::Status GetNodeAndRegistration(TfLiteContext* context, int node_id,
TfLiteNode** tflite_node,
TfLiteRegistration** registration) {
if (context->GetNodeAndRegistration(context, node_id, tflite_node,
registration) != kTfLiteOk) {
return absl::InvalidArgumentError(absl::StrCat(
"Couldn't get node and registration info for op: ", node_id));
}
return absl::OkStatus();
}
DataType ToDataType(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
return DataType::FLOAT32;
case kTfLiteInt32:
return DataType::INT32;
case kTfLiteInt64:
return DataType::INT64;
case kTfLiteInt8:
return DataType::INT8;
case kTfLiteUInt8:
return DataType::UINT8;
case kTfLiteBool:
return DataType::BOOL;
default:
return DataType::UNKNOWN;
}
}
absl::Status ExtractTensorShape(const TfLiteTensor& tflite_tensor, BHWC* bhwc) {
const TfLiteIntArray* dims = tflite_tensor.dims;
switch (dims->size) {
case 1:
*bhwc = BHWC(dims->data[0], 1, 1, 1);
return absl::OkStatus();
case 2:
*bhwc = BHWC(dims->data[0], 1, 1, dims->data[1]);
return absl::OkStatus();
case 3:
*bhwc = BHWC(dims->data[0], 1, dims->data[1], dims->data[2]);
return absl::OkStatus();
case 4:
*bhwc = BHWC(dims->data[0], dims->data[1], dims->data[2], dims->data[3]);
return absl::OkStatus();
default:
return absl::InvalidArgumentError(absl::StrCat(
"Tensor \"", tflite_tensor.name ? tflite_tensor.name : "nullptr",
"\" has bad input dims size: ", dims->size, "."));
}
}
absl::Status ExtractAxisFromIndex(const TfLiteTensor& tflite_tensor, int index,
Axis* axis) {
const TfLiteIntArray* dims = tflite_tensor.dims;
if (index < 0) {
index = dims->size + index;
}
if (index < 0 || index >= dims->size) {
return absl::OutOfRangeError("Index for axis out of range");
}
std::vector<Axis> index_to_axis;
switch (dims->size) {
case 1:
index_to_axis = {Axis::BATCH};
break;
case 2:
index_to_axis = {Axis::BATCH, Axis::CHANNELS};
break;
case 3:
index_to_axis = {Axis::BATCH, Axis::WIDTH, Axis::CHANNELS};
break;
case 4:
index_to_axis = {Axis::BATCH, Axis::HEIGHT, Axis::WIDTH, Axis::CHANNELS};
break;
default:
return absl::UnavailableError("Unknown layout.");
}
*axis = index_to_axis[index];
return absl::OkStatus();
}
absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
TensorRef<BHWC>* tensor_ref) {
tensor_ref->type = ToDataType(tflite_tensor.type);
return ExtractTensorShape(tflite_tensor, &tensor_ref->shape);
}
absl::Status PopulateQuantParams(const TfLiteTensor& tensor,
QuantizationParams* quant_params) {
const TfLiteQuantization& quant = tensor.quantization;
if (quant.type != TfLiteQuantizationType::kTfLiteAffineQuantization) {
return absl::InvalidArgumentError(
absl::StrCat("Tensor not quantized: ", std::string(tensor.name)));
}
const TfLiteAffineQuantization* params =
static_cast<const TfLiteAffineQuantization*>(quant.params);
if (params->scale->size > 1) {
return absl::InvalidArgumentError(
absl::StrCat("Non-constant per-channel quantized tensor: ",
std::string(tensor.name)));
}
const float scale = params->scale->data[0];
const float zero_point = static_cast<float>(params->zero_point->data[0]);
float qmin_value = 0;
float qmax_value = 0;
if (tensor.type == kTfLiteUInt8) {
qmin_value = static_cast<float>(std::numeric_limits<uint8_t>::min());
qmax_value = static_cast<float>(std::numeric_limits<uint8_t>::max());
} else if (tensor.type == kTfLiteInt8) {
qmin_value = static_cast<float>(std::numeric_limits<int8_t>::min());
qmax_value = static_cast<float>(std::numeric_limits<int8_t>::max());
} else {
return absl::InvalidArgumentError(absl::StrCat(
"Type invalid for quantized tensor: ", std::string(tensor.name)));
}
quant_params->min = scale * (static_cast<float>(qmin_value) - zero_point);
quant_params->max = scale * (static_cast<float>(qmax_value) - zero_point);
quant_params->scale = scale;
return absl::OkStatus();
}
int GetNumberOfRuntimeInputsForNode(const TfLiteContext* context,
const TfLiteNode* tflite_node) {
int number_of_runtime_inputs = 0;
for (int i = 0; i < NumInputs(tflite_node); i++) {
const TfLiteTensor* tensor =
GetOptionalInputTensor(context, tflite_node, i);
if (tensor != nullptr && !IsConstantTensor(tensor)) {
number_of_runtime_inputs++;
}
}
return number_of_runtime_inputs;
}
int GetNumberOfConstInputsForNode(const TfLiteContext* context,
const TfLiteNode* tflite_node) {
return NumInputs(tflite_node) -
GetNumberOfRuntimeInputsForNode(context, tflite_node);
}
absl::Status CheckInputsOutputs(const TfLiteContext* context,
const TfLiteNode* tflite_node,
int runtime_inputs, int outputs) {
const int runtime_inputs_from_model =
GetNumberOfRuntimeInputsForNode(context, tflite_node);
if (runtime_inputs_from_model != runtime_inputs) {
return absl::InternalError(absl::StrCat(
"Expected ", runtime_inputs, " runtime input tensor(s), but node has ",
runtime_inputs_from_model, " runtime input(s)."));
}
const int outputs_from_model = NumOutputs(tflite_node);
if (outputs_from_model != outputs) {
return absl::InternalError(absl::StrCat("Expected ", outputs,
" output tensor(s), but node has ",
outputs_from_model, " output(s)."));
}
return absl::OkStatus();
}
absl::Status CheckInputsConstsOutputs(const TfLiteContext* context,
const TfLiteNode* tflite_node,
int runtime_inputs, int const_inputs,
int outputs) {
const int const_inputs_from_model =
GetNumberOfConstInputsForNode(context, tflite_node);
if (const_inputs_from_model != const_inputs) {
return absl::InternalError(absl::StrCat(
"Expected ", const_inputs, " const input tensor(s), but node has ",
const_inputs_from_model, " const input(s)."));
}
return CheckInputsOutputs(context, tflite_node, runtime_inputs, outputs);
}
void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src,
float* dst) {
for (size_t i = 0; i < num_elements; i++) {
*dst++ = fp16_ieee_to_fp32_value(*src++);
}
}
template <>
absl::Status CreateVectorCopyData<float>(const TfLiteTensor& src, float* dst) {
switch (src.type) {
case kTfLiteFloat32:
std::memcpy(dst, src.data.f, src.bytes);
return absl::OkStatus();
case kTfLiteFloat16:
ConvertFloat16ToFloat32(NumElements(&src),
reinterpret_cast<uint16_t const*>(src.data.f16),
dst);
return absl::OkStatus();
case kTfLiteInt8:
DequantizeConstantTensor(src, src.data.int8, dst);
return absl::OkStatus();
case kTfLiteUInt8:
DequantizeConstantTensor(src, src.data.uint8, dst);
return absl::OkStatus();
case kTfLiteInt32:
DequantizeConstantTensor(src, src.data.i32, dst);
return absl::OkStatus();
default:
return absl::InvalidArgumentError(
"Unsupported data type for float32 tensor");
}
}
std::string GetDimensionString(const TfLiteIntArray* dimensions) {
return absl::StrJoin(TfLiteIntArrayView(dimensions), "x");
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Scalar* shape) {
if (dimensions->size < 0) {
return absl::InvalidArgumentError("Invalid Scalar dimensions");
}
for (int i = 0; i < dimensions->size; ++i) {
if (dimensions->data[i] != 1) {
return absl::InvalidArgumentError(absl::StrCat(
GetDimensionString(dimensions), " cannot be reduced to scalar."));
}
}
shape->v = 1;
return absl::OkStatus();
}
absl::Status CheckIfLinearConvertible(const TfLiteIntArray* dimensions) {
if (dimensions->size <= 0) {
return absl::InvalidArgumentError("Dimension is empty.");
}
for (int i = 0; i < dimensions->size - 1; ++i) {
if (dimensions->data[i] != 1) {
return absl::InvalidArgumentError(absl::StrCat(
GetDimensionString(dimensions), " cannot be reduced to linear."));
}
}
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Linear* shape) {
RETURN_IF_ERROR(CheckIfLinearConvertible(dimensions));
shape->v = dimensions->data[dimensions->size - 1];
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HWC* shape) {
if (dimensions->size == 3) {
shape->h = dimensions->data[0];
shape->w = dimensions->data[1];
shape->c = dimensions->data[2];
return absl::OkStatus();
}
if (dimensions->size == 4) {
if (dimensions->data[0] != 1) {
return absl::UnimplementedError("Batch size is not equal to 1.");
}
shape->h = dimensions->data[1];
shape->w = dimensions->data[2];
shape->c = dimensions->data[3];
return absl::OkStatus();
}
return absl::InvalidArgumentError(
absl::StrCat("Expected a 3D tensor of shape HxWxC or a 4D tensor of "
"shape 1xHxWxC but got ",
GetDimensionString(dimensions)));
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HW* shape) {
if (dimensions->size != 2) {
return absl::InvalidArgumentError(
absl::StrCat("Expected a 2D tensor of shape HxW but got ",
GetDimensionString(dimensions)));
}
shape->h = dimensions->data[0];
shape->w = dimensions->data[1];
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, OHWI* shape) {
if (dimensions->size != 4) {
return absl::InvalidArgumentError(
absl::StrCat("Expected a 4D tensor of shape OxHxWxI but got ",
GetDimensionString(dimensions)));
}
shape->o = dimensions->data[0];
shape->h = dimensions->data[1];
shape->w = dimensions->data[2];
shape->i = dimensions->data[3];
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, BHWC* shape) {
if (dimensions->size != 4) {
return absl::InvalidArgumentError(
absl::StrCat("Expected a 4D tensor of shape BxHxWxC but got ",
GetDimensionString(dimensions)));
}
shape->b = dimensions->data[0];
shape->h = dimensions->data[1];
shape->w = dimensions->data[2];
shape->c = dimensions->data[3];
return absl::OkStatus();
}
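// Materializes a TFLite fused activation by appending a RELU/TANH/SIGMOID
// node after `node`; kTfLiteActNone is a no-op.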
absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
GraphFloat32* graph, Node* node) {
const auto outputs = graph->FindOutputs(node->id);
if (outputs.size() != 1) {
return absl::InternalError("Number of outputs != 1");
}
switch (fused_activation) {
case kTfLiteActNone:
return absl::OkStatus();
case kTfLiteActRelu:
case kTfLiteActReluN1To1:
case kTfLiteActRelu6: {
ReLUAttributes attr;
attr.activation_max =
fused_activation == kTfLiteActRelu
? 0.0f
: (fused_activation == kTfLiteActReluN1To1 ? 1.0f : 6.0f);
attr.activation_min =
fused_activation == kTfLiteActReluN1To1 ? -1.0f : 0.0f;
Node* activation_node;
RETURN_IF_ERROR(
NewPassthroughNode(graph, node, outputs[0], &activation_node));
activation_node->operation.type = ToString(OperationType::RELU);
activation_node->operation.attributes = attr;
return absl::OkStatus();
}
case kTfLiteActTanh: {
Node* activation_node;
RETURN_IF_ERROR(
NewPassthroughNode(graph, node, outputs[0], &activation_node));
activation_node->operation.type = ToString(OperationType::TANH);
return absl::OkStatus();
}
case kTfLiteActSigmoid: {
Node* activation_node;
RETURN_IF_ERROR(
NewPassthroughNode(graph, node, outputs[0], &activation_node));
activation_node->operation.type = ToString(OperationType::SIGMOID);
return absl::OkStatus();
    }
default:
return absl::NotFoundError(
absl::StrCat("Unsupported fused activation: ", fused_activation));
}
}
}
} | #include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(ModelBuilderHelperTest, CreateVectorCopyDataDifferentSize) {
TfLiteTensor tflite_tensor;
tflite_tensor.type = kTfLiteInt32;
int32_t src_data[4] = {1, 2, 3, 4};
tflite_tensor.data.i32 = src_data;
tflite_tensor.dims = TfLiteIntArrayCreate(1);
tflite_tensor.dims->data[0] = sizeof(src_data) / sizeof(src_data[0]);
tflite_tensor.bytes = sizeof(src_data);
int16_t dst[4];
ASSERT_OK(CreateVectorCopyData(tflite_tensor, dst));
EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4));
TfLiteIntArrayFree(tflite_tensor.dims);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
372fea3a-ecdc-4712-95c9-436ca07dd10c | cpp | tensorflow/tensorflow | winograd_util | tensorflow/lite/delegates/gpu/common/winograd_util.cc | tensorflow/lite/delegates/gpu/common/winograd_util_test.cc | #include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
#include <cmath>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
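// Builds the transposed Winograd transform matrix for the given size, using
// interpolation points 0, +-sqrt(2)/2 * k and the point at infinity.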
std::vector<float> GetTransposedMatrixForWinograd(int width, int height) {
const float kDelta = std::sqrt(2.0f) / 2.0f;
std::vector<float> px(width);
px[0] = 0.0f;
const int points_count = (width - 1) / 2;
for (int i = 0; i < points_count; ++i) {
px[i * 2 + 1] = kDelta * (i + 1.0f);
px[i * 2 + 2] = -kDelta * (i + 1.0f);
}
px[width - 1] = 1.0f;
std::vector<float> py(width, 1.0f);
py[width - 1] = 0.0f;
std::vector<float> result(height * width);
for (int y = 0; y < width; ++y) {
for (int x = 0; x < height; ++x) {
result[x * width + y] =
std::pow(px[y], 1.0f * x) * std::pow(py[y], (height - 1.0f) - x);
}
}
return result;
}
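// Inverts the square Winograd matrix via Gauss-Jordan style elimination.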
std::vector<float> GetInversedMatrixForWinograd(int rank) {
auto matrix = GetTransposedMatrixForWinograd(rank, rank);
std::vector<float> inverted(rank * rank, 0.0f);
for (int i = 0; i < rank; ++i) {
inverted[i * rank + i] = 1.0f;
}
for (int i = 1; i < rank - 1; ++i) {
float inv_t = 1.0f / matrix[i * rank + i];
for (int x = i; x < rank; ++x) {
matrix[i * rank + x] *= inv_t;
}
for (int x = 0; x < rank; ++x) {
inverted[i * rank + x] *= inv_t;
}
for (int y = 0; y < rank; ++y) {
if (y == i) continue;
float t = matrix[y * rank + i];
for (int x = i; x < rank; ++x) {
matrix[y * rank + x] -= t * matrix[i * rank + x];
}
for (int x = 0; x < rank; ++x) {
inverted[y * rank + x] -= t * inverted[i * rank + x];
}
}
}
return inverted;
}
std::vector<float> Multiply(const std::vector<float>& a_mat,
const std::vector<float>& b_mat, int m, int n,
int k) {
std::vector<float> result(m * k);
for (int y = 0; y < m; ++y) {
for (int x = 0; x < k; ++x) {
float sum = 0.0f;
for (int i = 0; i < n; ++i) {
sum += a_mat[y * n + i] * b_mat[i * k + x];
}
result[y * k + x] = sum;
}
}
return result;
}
}
std::vector<float> AtMatrixForWinograd4x4To6x6() {
return GetTransposedMatrixForWinograd(6, 4);
}
std::vector<float> BtMatrixForWinograd4x4To6x6() {
return GetInversedMatrixForWinograd(6);
}
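// Transforms 3x3 convolution weights into the 6x6 Winograd domain, computing
// G * w * G^T for every (output, input) channel pair.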
void RearrangeWeightsToWinograd4x4To6x6Weights(
const Tensor<OHWI, DataType::FLOAT32>& src_weights,
Tensor<OHWI, DataType::FLOAT32>* dst_weights) {
OHWI dst_shape;
dst_shape.o = src_weights.shape.o;
dst_shape.h = 6;
dst_shape.w = 6;
dst_shape.i = src_weights.shape.i;
dst_weights->shape = dst_shape;
dst_weights->data.resize(dst_shape.DimensionsProduct());
auto gt_mat = GetTransposedMatrixForWinograd(6, 3);
std::vector<float> g_mat(gt_mat.size());
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 6; ++x) {
g_mat[x * 3 + y] = gt_mat[y * 6 + x];
}
}
for (int d = 0; d < src_weights.shape.o; ++d) {
for (int s = 0; s < src_weights.shape.i; ++s) {
std::vector<float> in_vals(9);
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
const int f_index = src_weights.shape.LinearIndex({d, y, x, s});
in_vals[y * 3 + x] = src_weights.data[f_index];
}
}
auto temp_vals = Multiply(g_mat, in_vals, 6, 3, 3);
auto out_vals = Multiply(temp_vals, gt_mat, 6, 3, 6);
for (int y = 0; y < 6; ++y) {
for (int x = 0; x < 6; ++x) {
const int f_index = dst_shape.LinearIndex({d, y, x, s});
dst_weights->data[f_index] = out_vals[y * 6 + x];
}
}
}
}
}
bool IsSuitableForWinograd4x4To6x6(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 3 && attr.weights.shape.h == 3 &&
attr.dilations == HW(1, 1) && attr.strides == HW(1, 1) &&
attr.groups == 1;
}
}
} | #include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
namespace tflite {
namespace gpu {
TEST(Winograd, CorrectAttributesFor4x4To6x6) {
Convolution2DAttributes attr;
attr.padding.prepended = HW(1, 2);
attr.padding.appended = HW(0, 1);
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.weights.shape = OHWI(1, 3, 3, 1);
EXPECT_TRUE(IsSuitableForWinograd4x4To6x6(attr));
}
TEST(Winograd, IncorrectAttributesFor4x4To6x6) {
Convolution2DAttributes attr;
attr.padding.prepended = HW(1, 2);
attr.padding.appended = HW(0, 1);
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.weights.shape = OHWI(1, 2, 3, 1);
EXPECT_FALSE(IsSuitableForWinograd4x4To6x6(attr));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/winograd_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/winograd_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05aaaaf2-d827-4812-8afe-3e128f7a72e5 | cpp | tensorflow/tensorflow | gpu_model | tensorflow/lite/delegates/gpu/common/gpu_model.cc | tensorflow/lite/delegates/gpu/cl/testing/gpu_model_test.cc | #include "tensorflow/lite/delegates/gpu/common/gpu_model.h"
#include <algorithm>
#include <any>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/operation_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/special_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/task/serialization_base.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/add_bias.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
namespace tflite {
namespace gpu {
namespace {
bool IsReady(const absl::flat_hash_set<ValueId>& ready_tensors,
const GpuNode& node) {
for (const ValueId in_id : node.inputs) {
if (ready_tensors.find(in_id) == ready_tensors.end()) {
return false;
}
}
return true;
}
absl::Status MergeGpuNodes(const GpuInfo& gpu_info, GpuNode* src,
GpuNode* dst) {
for (int j = 1; j < src->inputs.size(); ++j) {
dst->inputs.push_back(src->inputs[j]);
}
dst->outputs[0] = src->outputs[0];
dst->name += " -> " + src->name;
return dst->gpu_operation->AddOperation(gpu_info, src->gpu_operation.get());
}
flatbuffers::Offset<data::TensorDescWithId> Encode(
const TensorDescriptor& desc, const ValueId& id,
flatbuffers::FlatBufferBuilder* builder) {
auto desc_fb = Encode(desc, builder);
data::TensorDescWithIdBuilder desc_builder(*builder);
desc_builder.add_desc(desc_fb);
desc_builder.add_id(id);
return desc_builder.Finish();
}
flatbuffers::Offset<data::GpuNode> Encode(
const GpuNode& node, flatbuffers::FlatBufferBuilder* builder) {
auto op_fb = Encode(*node.gpu_operation, builder);
std::vector<int32_t> in_ids(node.inputs.size());
for (int i = 0; i < in_ids.size(); ++i) {
in_ids[i] = node.inputs[i];
}
std::vector<int32_t> out_ids(node.outputs.size());
for (int i = 0; i < out_ids.size(); ++i) {
out_ids[i] = node.outputs[i];
}
auto in_ids_fb = builder->CreateVector(in_ids);
auto out_ids_fb = builder->CreateVector(out_ids);
auto name_fb = builder->CreateString(node.name);
data::GpuNodeBuilder node_builder(*builder);
node_builder.add_gpu_op(op_fb);
node_builder.add_input_ids(in_ids_fb);
node_builder.add_output_ids(out_ids_fb);
node_builder.add_name(name_fb);
return node_builder.Finish();
}
absl::Status Decode(const data::GpuNode* fb_node, GpuNode* node) {
GPUOperation op;
RETURN_IF_ERROR(Decode(fb_node->gpu_op(), &op));
node->gpu_operation = std::make_unique<GPUOperation>(std::move(op));
for (auto in_fb : *fb_node->input_ids()) {
node->inputs.push_back(in_fb);
}
for (auto out_fb : *fb_node->output_ids()) {
node->outputs.push_back(out_fb);
}
node->name = std::string(fb_node->name()->c_str(), fb_node->name()->size());
return absl::OkStatus();
}
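// True for multi-input ADD/MUL nodes whose inputs all match the output shape
// without broadcasting, so their inputs can be reordered and linked freely.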
bool IsAssociativeLinkableOp(const Node& node,
const std::vector<Value*>& inputs,
const std::vector<Value*>& outputs) {
if (inputs.size() == 1) {
return false;
}
const OperationType op_type = OperationTypeFromString(node.operation.type);
if (op_type != OperationType::ADD && op_type != OperationType::MUL) {
return false;
}
const auto dst_shape = outputs[0]->tensor.shape;
for (int i = 0; i < inputs.size(); ++i) {
const auto src_shape = inputs[i]->tensor.shape;
if (dst_shape.b != src_shape.b && src_shape.b == 1) {
return false;
}
if (dst_shape.h != src_shape.h && src_shape.h == 1) {
return false;
}
if (dst_shape.w != src_shape.w && src_shape.w == 1) {
return false;
}
if (dst_shape.c != src_shape.c && src_shape.c == 1) {
return false;
}
}
return true;
}
absl::Status CheckExternalTensorDescription(const GpuInfo& gpu_info,
const TensorDescriptor& tensor_desc,
const BHWC& shape,
DataType data_type) {
if (tensor_desc.GetDataType() != data_type) {
return absl::InvalidArgumentError(
"Global precision and precision of predefined/external tensors must be "
"synchronized.");
}
if (tensor_desc.HasAxis(Axis::DEPTH)) {
return absl::InvalidArgumentError(
"Currently no support of Depth dimension in predefined/external "
"tensors.");
}
if (tensor_desc.HasAxis(Axis::BATCH) && shape.b == 1) {
return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
}
if (!tensor_desc.HasAxis(Axis::BATCH) && shape.b != 1) {
return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
}
if (!tensor_desc.CanCreateTensorWithShape(gpu_info, shape).ok()) {
return absl::UnavailableError(
"Current device can not allocate tensor with this shape for "
"predefined/external descriptor.");
}
return absl::OkStatus();
}
class TensorReserver {
public:
TensorReserver() : next_(0) {}
ValueId Add(const TensorDescriptor& dummy) {
reservations_[next_] = dummy;
return next_++;
}
void Add(ValueId id, const TensorDescriptor& dummy) {
reservations_[id] = dummy;
}
ValueId GetNewId() { return next_++; }
void SetNext(ValueId id) { next_ = id; }
TensorDescriptor Get(ValueId id) { return reservations_[id]; }
public:
absl::flat_hash_map<ValueId, TensorDescriptor> reservations_;
ValueId next_;
};
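// Chooses a TensorDescriptor (data type, storage type, layout) for every
// graph value, honoring predefined/external tensors and falling back to the
// best storage type supported by the device.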
absl::Status ReserveGraphTensors(const CreateGpuModelInfo& create_info,
const GpuInfo& gpu_info,
const GraphFloat32& graph,
TensorReserver* tensor_reserver) {
ValueId max_id = 0;
auto tensors = graph.values();
for (auto& t : tensors) {
auto data_type = DeduceDataTypeFromPrecision(create_info.precision);
if (t->tensor.type != DataType::FLOAT32 &&
t->tensor.type != DataType::FLOAT16) {
data_type = t->tensor.type;
}
const auto shape = graph.GetValue(t->id)->tensor.shape;
auto it_predefined = create_info.predefined.find(t->id);
auto it_immutable_external =
create_info.external_immutable_tensors.find(t->id);
auto it_mutable_external = create_info.external_mutable_tensors.find(t->id);
int external_categories_count = 0;
TensorDescriptor tensor_desc;
if (it_predefined != create_info.predefined.end()) {
external_categories_count++;
tensor_desc = it_predefined->second;
}
if (it_immutable_external != create_info.external_immutable_tensors.end()) {
external_categories_count++;
tensor_desc = it_immutable_external->second->GetDescriptor();
}
if (it_mutable_external != create_info.external_mutable_tensors.end()) {
external_categories_count++;
tensor_desc = it_mutable_external->second;
}
if (external_categories_count > 1) {
return absl::InvalidArgumentError(
"Tensors ids from predefined / external_immutable_tensors / "
"external_mutable_tensors should not intersect.");
}
if (external_categories_count == 1) {
if (!(graph.IsGraphInput(t->id) || graph.IsGraphOutput(t->id))) {
return absl::InvalidArgumentError(
"Currently external can be used only for graph inputs/outputs");
}
RETURN_IF_ERROR(CheckExternalTensorDescription(gpu_info, tensor_desc,
shape, data_type));
} else {
TensorStorageType storage_type = create_info.storage_type;
Layout layout = shape.b == 1 ? Layout::HWC : Layout::BHWC;
const bool can_use_single_texture =
storage_type == TensorStorageType::TEXTURE_2D ||
storage_type == TensorStorageType::TEXTURE_3D ||
storage_type == TensorStorageType::TEXTURE_ARRAY;
if (shape.c < 4 && can_use_single_texture &&
TensorDescriptor{data_type, TensorStorageType::SINGLE_TEXTURE_2D,
layout}
.CanCreateTensorWithShape(gpu_info, shape)
.ok()) {
storage_type = TensorStorageType::SINGLE_TEXTURE_2D;
}
tensor_desc = TensorDescriptor{data_type, storage_type, layout};
RETURN_IF_ERROR(
tensor_desc.UpdateToSupportedStorageType(gpu_info, shape));
if (gpu_info.IsApiMetal() &&
storage_type == TensorStorageType::TEXTURE_2D) {
if (!(gpu_info.IsApple() && gpu_info.apple_info.IsFamilyApple1())) {
tensor_desc.SetUseBufferForWriteOnlyTexture2d(true);
}
}
}
tensor_desc.SetBHWCShape(shape);
tensor_reserver->Add(t->id, tensor_desc);
max_id = std::max(max_id, t->id);
}
tensor_reserver->SetNext(max_id + 1);
return absl::OkStatus();
}
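// Lowers graph nodes into GPU operations: constants become const_tensors,
// fused subgraphs are used when available, and newly created intermediate
// tensors are assigned global ids.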
absl::Status ConvertOperations(const GpuInfo& gpu_info,
const GraphFloat32& graph,
const CreateGpuModelInfo& create_info,
TensorReserver* tensor_reserver,
GpuModel* gpu_model) {
std::map<ValueId, TensorDescriptor> tensor_descriptors;
const auto values = graph.values();
for (auto value : values) {
tensor_descriptors[value->id] = tensor_reserver->Get(value->id);
}
std::set<NodeId> consumed_nodes;
std::vector<Node*> graph_nodes = graph.nodes();
std::map<ValueId, int>
tensor_usages;
for (const auto& input : gpu_model->input_ids_and_refs) {
tensor_usages[input.first] = -1;
}
std::vector<SharedWeightsConvDesc> shared_conv_weights;
std::vector<SharedWeightsConvDesc>* shared_conv_weights_ptr =
create_info.hints.Check(ModelHints::kReuseConvWeights)
? &shared_conv_weights
: nullptr;
for (int i = 0; i < graph_nodes.size(); ++i) {
const Node& node = *graph_nodes[i];
if (consumed_nodes.find(node.id) != consumed_nodes.end()) {
continue;
}
auto op_type = OperationTypeFromString(node.operation.type);
if (op_type == OperationType::CONSTANT) {
auto attr =
std::any_cast<ConstTensorAttributes>(node.operation.attributes);
auto outputs = graph.FindOutputs(node.id);
gpu_model->const_tensors[outputs[0]->id] =
tensor_reserver->Get(outputs[0]->id);
gpu_model->const_tensors[outputs[0]->id].UploadData(attr.tensor);
continue;
}
GPUOperationsSubgraph gpu_subgraph;
if (GPUSubgraphFromGraph(create_info.hints, gpu_info, create_info.precision,
graph, node.id, tensor_descriptors,
&consumed_nodes, &gpu_subgraph)
.ok()) {
} else {
auto inputs = graph.FindInputs(node.id);
auto outputs = graph.FindOutputs(node.id);
if (IsAssociativeLinkableOp(node, inputs, outputs)) {
int latest_written_tensor_index = 0;
int last_usage = tensor_usages[inputs[0]->id];
for (int j = 1; j < inputs.size(); ++j) {
if (tensor_usages[inputs[j]->id] > last_usage) {
last_usage = tensor_usages[inputs[j]->id];
latest_written_tensor_index = j;
}
}
std::swap(inputs[0], inputs[latest_written_tensor_index]);
}
consumed_nodes.insert(node.id);
OperationDef op_def;
op_def.precision = create_info.precision;
for (int j = 0; j < inputs.size(); ++j) {
op_def.src_tensors.push_back(tensor_reserver->Get(inputs[j]->id));
}
for (int j = 0; j < outputs.size(); ++j) {
op_def.dst_tensors.push_back(tensor_reserver->Get(outputs[j]->id));
}
RETURN_IF_ERROR(GPUOperationFromNode(
gpu_info, op_def, create_info.hints, inputs, outputs, node,
shared_conv_weights_ptr, &gpu_subgraph));
}
absl::flat_hash_map<int, ValueId> mapping_to_global_ids;
for (int j = 0; j < gpu_subgraph.new_tensors.size(); ++j) {
const auto& t = gpu_subgraph.new_tensors[j];
if (!t.GetData().empty()) {
auto global_id = tensor_reserver->GetNewId();
gpu_model->const_tensors[global_id] =
std::move(gpu_subgraph.new_tensors[j]);
mapping_to_global_ids[j] = global_id;
} else {
auto global_id = tensor_reserver->Add(t);
mapping_to_global_ids[j] = global_id;
}
}
if (!shared_conv_weights.empty() && !mapping_to_global_ids.empty()) {
shared_conv_weights.back().RemapIds(mapping_to_global_ids);
}
for (auto& gpu_op : gpu_subgraph.operations) {
GpuNode gpu_node;
gpu_node.gpu_operation = std::move(gpu_op.operation);
gpu_node.inputs.resize(gpu_op.input_ids.size());
for (int j = 0; j < gpu_op.input_ids.size(); ++j) {
int id = gpu_op.input_ids[j];
if (id >= 0) {
gpu_node.inputs[j] = id;
} else {
gpu_node.inputs[j] = mapping_to_global_ids[-(id + 1)];
}
}
gpu_node.outputs.resize(gpu_op.output_ids.size());
for (int j = 0; j < gpu_op.output_ids.size(); ++j) {
int id = gpu_op.output_ids[j];
if (id >= 0) {
gpu_node.outputs[j] = id;
tensor_usages[id] = i;
} else {
gpu_node.outputs[j] = mapping_to_global_ids[-(id + 1)];
}
}
gpu_node.name = gpu_op.name;
gpu_model->nodes.push_back(std::move(gpu_node));
}
}
return absl::OkStatus();
}
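// Fuses chains of linkable element-wise nodes (roots with one or two inputs)
// into a single GPUOperation when every intermediate tensor has exactly one
// consumer; the node list is rewritten in place.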
absl::Status MergeElementwiseNodes(const GpuInfo& gpu_info,
GpuModel* gpu_model) {
auto& nodes = gpu_model->nodes;
for (int elem_root_index = 1; elem_root_index < nodes.size();
++elem_root_index) {
auto& elem_root = nodes[elem_root_index];
if (!(elem_root.inputs.size() == 1 || elem_root.inputs.size() == 2) ||
elem_root.outputs.size() != 1 ||
!elem_root.gpu_operation->IsLinkable()) {
continue;
}
std::map<int, int> prev_nodes;
for (int j = elem_root_index - 1; j >= 0; --j) {
for (int k = 0; k < elem_root.inputs.size(); ++k) {
if (elem_root.inputs[k] == nodes[j].outputs[0]) {
prev_nodes[k] = j;
break;
}
}
}
if (prev_nodes.size() == 1) {
if (elem_root.inputs.size() != 1) {
continue;
}
const int prev_first_node_index = prev_nodes[0];
auto& prev_node = nodes[prev_first_node_index];
if (prev_node.inputs.size() != 1 || prev_node.outputs.size() != 1 ||
!prev_node.gpu_operation->IsLinkable()) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(FuseSimpleElemWithSimpleElem(
gpu_info, std::move(*prev_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_first_node_index] = std::move(new_node);
elem_root_index = prev_first_node_index;
continue;
}
if (prev_nodes.size() == 2) {
if (elem_root.inputs.size() != 2 ||
elem_root.gpu_operation->GetElementwiseInputsCount() != 2) {
continue;
}
const int prev_first_node_index = prev_nodes[0];
const int prev_second_node_index = prev_nodes[1];
auto& prev_first_node = nodes[prev_first_node_index];
auto& prev_second_node = nodes[prev_second_node_index];
if (prev_first_node.gpu_operation->IsLinkable() &&
!prev_second_node.gpu_operation->IsLinkable() &&
prev_second_node.outputs.size() == 1 &&
prev_first_node.inputs.size() == 1 &&
prev_first_node.outputs.size() == 1) {
int first_node_parent_index = -1;
for (int j = prev_first_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
first_node_parent_index = j;
break;
}
}
if (first_node_parent_index == -1 ||
first_node_parent_index != prev_second_node_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsFirstInput(
gpu_info, std::move(*prev_first_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_first_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_first_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_first_node_index] = std::move(new_node);
elem_root_index = prev_first_node_index;
continue;
}
if (!prev_first_node.gpu_operation->IsLinkable() &&
prev_second_node.gpu_operation->IsLinkable() &&
prev_first_node.outputs.size() == 1 &&
prev_second_node.inputs.size() == 1 &&
prev_second_node.outputs.size() == 1) {
int second_node_parent_index = -1;
for (int j = prev_second_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
second_node_parent_index = j;
break;
}
}
if (second_node_parent_index == -1 ||
second_node_parent_index != prev_first_node_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[1]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsSecondInput(
gpu_info, std::move(*prev_second_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_second_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_second_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_second_node_index] = std::move(new_node);
elem_root_index = prev_second_node_index;
continue;
}
if (prev_first_node.gpu_operation->IsLinkable() &&
prev_second_node.gpu_operation->IsLinkable() &&
prev_first_node.inputs.size() == 1 &&
prev_first_node.outputs.size() == 1 &&
prev_second_node.inputs.size() == 1 &&
prev_second_node.outputs.size() == 1) {
int first_node_parent_index = -1;
for (int j = prev_first_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
first_node_parent_index = j;
break;
}
}
int second_node_parent_index = -1;
for (int j = prev_second_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
second_node_parent_index = j;
break;
}
}
if (first_node_parent_index == -1 || second_node_parent_index == -1 ||
first_node_parent_index != second_node_parent_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[1]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWith2SimpleElem(
gpu_info, std::move(*prev_first_node.gpu_operation.get()),
std::move(*prev_second_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_first_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_first_node.name + " -> " + prev_second_node.name +
" -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
int first_prev_node_index =
std::min(prev_first_node_index, prev_second_node_index);
int second_prev_node_index =
std::max(prev_first_node_index, prev_second_node_index);
nodes.erase(nodes.begin() + elem_root_index);
nodes.erase(nodes.begin() + second_prev_node_index);
nodes[first_prev_node_index] = std::move(new_node);
elem_root_index = first_prev_node_index - 1;
continue;
}
}
}
return absl::OkStatus();
}
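// Merges each single-output node into its unique linkable consumer when the
// output is not a graph output, it feeds the consumer's first input, and the
// consumer's remaining inputs are already computed.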
absl::Status MergeNodes(const GpuInfo& gpu_info, GpuModel* gpu_model) {
absl::flat_hash_set<ValueId> ready_tensors;
absl::flat_hash_set<ValueId> output_tensors;
for (const auto& input : gpu_model->input_ids_and_refs) {
ready_tensors.insert(input.first);
}
for (const auto& output : gpu_model->output_ids_and_refs) {
output_tensors.insert(output.first);
}
auto& nodes = gpu_model->nodes;
for (int i = 0; i < nodes.size(); ++i) {
auto& node = nodes[i];
bool node_has_graph_output = false;
for (const auto& out_id : node.outputs) {
ready_tensors.insert(out_id);
if (output_tensors.find(out_id) != output_tensors.end()) {
node_has_graph_output = true;
}
}
if (node_has_graph_output || node.outputs.size() != 1) {
continue;
}
std::vector<int> next_nodes;
int link_index = 0;
for (int j = i + 1; j < nodes.size(); ++j) {
for (int k = 0; k < nodes[j].inputs.size(); ++k) {
if (nodes[j].inputs[k] == node.outputs[0]) {
next_nodes.push_back(j);
link_index = k;
}
}
}
if (next_nodes.size() != 1 || link_index != 0) {
continue;
}
auto& linkable_node = nodes[next_nodes[0]];
if (!linkable_node.gpu_operation->IsLinkable() ||
linkable_node.outputs.size() != 1 ||
!IsReady(ready_tensors, linkable_node)) {
continue;
}
RETURN_IF_ERROR(MergeGpuNodes(gpu_info, &linkable_node, &node));
nodes.erase(nodes.begin() + next_nodes[0]);
i -= 1;
}
return absl::OkStatus();
}
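// Records the graph's input, variable, and output value ids together with
// their tensor refs in the GpuModel.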
void CopyExternals(const GraphFloat32& graph, GpuModel* gpu_model) {
const auto inputs = graph.inputs();
for (const auto& value : inputs) {
gpu_model->input_ids_and_refs.push_back({value->id, value->tensor.ref});
}
const auto variable_inputs = graph.variable_inputs();
for (const auto& value : variable_inputs) {
gpu_model->variable_ids_and_refs.push_back({value->id, value->tensor.ref});
}
const auto outputs = graph.outputs();
for (const auto& value : outputs) {
gpu_model->output_ids_and_refs.push_back({value->id, value->tensor.ref});
}
}
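// Drops tensor descriptors that are referenced neither by any node nor by the
// model's inputs/outputs.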
void RemoveUnusedTensors(GpuModel* gpu_model) {
absl::flat_hash_set<ValueId> used_tensors;
for (const auto& node : gpu_model->nodes) {
for (const auto& id : node.inputs) {
used_tensors.insert(id);
}
for (const auto& id : node.outputs) {
used_tensors.insert(id);
}
}
for (const auto& inputs : gpu_model->input_ids_and_refs) {
used_tensors.insert(inputs.first);
}
for (const auto& outputs : gpu_model->output_ids_and_refs) {
used_tensors.insert(outputs.first);
}
for (auto it = gpu_model->tensors.begin(); it != gpu_model->tensors.end();) {
if (used_tensors.find(it->first) == used_tensors.end()) {
gpu_model->tensors.erase(it++);
} else {
++it;
}
}
}
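// Binds shape-only placeholder tensors to every operation so that
// shape-dependent arguments and grid sizes can be resolved without allocating
// real device memory.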
absl::Status ResolvePolymorphicArgs(GpuModel* gpu_model) {
class DummySpatialTensor : public GpuSpatialTensor {
public:
DummySpatialTensor() = default;
explicit DummySpatialTensor(const BHWDC& shape,
const TensorDescriptor& tensor_desc)
: shape_(shape), tensor_desc_(tensor_desc) {}
~DummySpatialTensor() override = default;
int Width() const override { return shape_.w; }
int Height() const override { return shape_.h; }
int Depth() const override { return shape_.d; }
int Channels() const override { return shape_.c; }
int Slices() const override { return DivideRoundUp(shape_.c, 4); }
int Batch() const override { return shape_.b; }
TensorDescriptor GetDescriptor() const override { return tensor_desc_; }
private:
BHWDC shape_;
TensorDescriptor tensor_desc_;
};
for (auto& node : gpu_model->nodes) {
std::vector<DummySpatialTensor> src_tensors(node.inputs.size());
for (int i = 0; i < node.inputs.size(); ++i) {
const auto& tensor_desc = gpu_model->tensors[node.inputs[i]];
src_tensors[i] =
DummySpatialTensor(tensor_desc.GetBHWDCShape(), tensor_desc);
node.gpu_operation->SetSrc(&src_tensors[i], i);
}
std::vector<DummySpatialTensor> dst_tensors(node.outputs.size());
for (int i = 0; i < node.outputs.size(); ++i) {
const auto& tensor_desc = gpu_model->tensors[node.outputs[i]];
dst_tensors[i] =
DummySpatialTensor(tensor_desc.GetBHWDCShape(), tensor_desc);
node.gpu_operation->SetDst(&dst_tensors[i], i);
}
RETURN_IF_ERROR(
node.gpu_operation->BindArguments(&node.gpu_operation->args_));
node.gpu_operation->RecalculateGridSize();
}
return absl::OkStatus();
}
}
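// Full conversion pipeline: reserve tensor descriptors, copy externals, lower
// nodes to GPU operations, run the fusion passes, prune unused tensors,
// assemble kernel code, and resolve shape-dependent arguments.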
absl::Status GraphToGpuModel(const GraphFloat32& graph,
const CreateGpuModelInfo& create_info,
const GpuInfo& gpu_info, GpuModel* gpu_model) {
TensorReserver tensor_reserver;
RETURN_IF_ERROR(
ReserveGraphTensors(create_info, gpu_info, graph, &tensor_reserver));
CopyExternals(graph, gpu_model);
RETURN_IF_ERROR(ConvertOperations(gpu_info, graph, create_info,
&tensor_reserver, gpu_model));
RETURN_IF_ERROR(MergeElementwiseNodes(gpu_info, gpu_model));
RETURN_IF_ERROR(MergeNodes(gpu_info, gpu_model));
gpu_model->tensors = std::move(tensor_reserver.reservations_);
RemoveUnusedTensors(gpu_model);
for (auto& node : gpu_model->nodes) {
RETURN_IF_ERROR(node.gpu_operation->AssembleCode(gpu_info));
}
return ResolvePolymorphicArgs(gpu_model);
}
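// Serializes a GpuModel (ids/refs, nodes, tensor and const-tensor descriptors,
// variable id pairs) into a FlatBuffer.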
flatbuffers::Offset<data::GpuModel> Encode(
const GpuModel& gpu_model, flatbuffers::FlatBufferBuilder* builder) {
std::vector<int32_t> in_ids(gpu_model.input_ids_and_refs.size());
std::vector<int64_t> in_refs(gpu_model.input_ids_and_refs.size());
for (int i = 0; i < in_ids.size(); ++i) {
in_ids[i] = gpu_model.input_ids_and_refs[i].first;
in_refs[i] = gpu_model.input_ids_and_refs[i].second;
}
auto in_ids_fb = builder->CreateVector(in_ids);
auto in_refs_fb = builder->CreateVector(in_refs);
std::vector<int32_t> out_ids(gpu_model.output_ids_and_refs.size());
std::vector<int64_t> out_refs(gpu_model.output_ids_and_refs.size());
for (int i = 0; i < out_ids.size(); ++i) {
out_ids[i] = gpu_model.output_ids_and_refs[i].first;
out_refs[i] = gpu_model.output_ids_and_refs[i].second;
}
auto out_ids_fb = builder->CreateVector(out_ids);
auto out_refs_fb = builder->CreateVector(out_refs);
std::vector<flatbuffers::Offset<data::GpuNode>> nodes_fb;
for (int i = 0; i < gpu_model.nodes.size(); ++i) {
auto node_fb = Encode(gpu_model.nodes[i], builder);
nodes_fb.push_back(node_fb);
}
auto nodes_fb_vec = builder->CreateVector(nodes_fb);
std::vector<flatbuffers::Offset<data::TensorDescWithId>> tensors_fb;
for (const auto& tensor : gpu_model.tensors) {
auto tensor_fb = Encode(tensor.second, tensor.first, builder);
tensors_fb.push_back(tensor_fb);
}
auto tensors_fb_vec = builder->CreateVector(tensors_fb);
std::vector<flatbuffers::Offset<data::TensorDescWithId>> const_tensors_fb;
for (const auto& tensor : gpu_model.const_tensors) {
auto tensor_fb = Encode(tensor.second, tensor.first, builder);
const_tensors_fb.push_back(tensor_fb);
}
auto const_tensors_fb_vec = builder->CreateVector(const_tensors_fb);
std::vector<flatbuffers::Offset<data::PairOfValueIds>>
variable_ids_and_refs_fb;
for (auto& pair : gpu_model.variable_ids_and_refs) {
data::PairOfValueIdsBuilder pair_builder(*builder);
pair_builder.add_first(pair.first);
pair_builder.add_second(pair.second);
variable_ids_and_refs_fb.push_back(pair_builder.Finish());
}
auto variable_ids_and_refs_fb_vec =
builder->CreateVector(variable_ids_and_refs_fb);
data::GpuModelBuilder gpu_model_builder(*builder);
gpu_model_builder.add_nodes(nodes_fb_vec);
gpu_model_builder.add_tensors(tensors_fb_vec);
gpu_model_builder.add_const_tensors(const_tensors_fb_vec);
gpu_model_builder.add_input_ids(in_ids_fb);
gpu_model_builder.add_output_ids(out_ids_fb);
gpu_model_builder.add_variable_ids_and_refs(variable_ids_and_refs_fb_vec);
gpu_model_builder.add_input_refs(in_refs_fb);
gpu_model_builder.add_output_refs(out_refs_fb);
return gpu_model_builder.Finish();
}
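// Reconstructs a GpuModel from its FlatBuffer representation; the inverse of
// Encode above.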
absl::Status Decode(const data::GpuModel* fb_gpu_model, GpuModel* gpu_model) {
gpu_model->nodes.resize(fb_gpu_model->nodes()->size());
int counter = 0;
for (auto node_fb : *fb_gpu_model->nodes()) {
RETURN_IF_ERROR(Decode(node_fb, &gpu_model->nodes[counter]));
counter++;
}
for (const auto& tensor_fb : *fb_gpu_model->tensors()) {
TensorDescriptor desc;
Decode(tensor_fb->desc(), &desc);
gpu_model->tensors[tensor_fb->id()] = std::move(desc);
}
for (const auto& tensor_fb : *fb_gpu_model->const_tensors()) {
TensorDescriptor desc;
Decode(tensor_fb->desc(), &desc);
gpu_model->const_tensors[tensor_fb->id()] = std::move(desc);
}
for (int i = 0; i < fb_gpu_model->input_ids()->size(); ++i) {
gpu_model->input_ids_and_refs.push_back(
{(*fb_gpu_model->input_ids())[i], (*fb_gpu_model->input_refs())[i]});
}
for (int i = 0; i < fb_gpu_model->output_ids()->size(); ++i) {
gpu_model->output_ids_and_refs.push_back(
{(*fb_gpu_model->output_ids())[i], (*fb_gpu_model->output_refs())[i]});
}
for (auto variable_id : *fb_gpu_model->variable_ids_and_refs()) {
gpu_model->variable_ids_and_refs.push_back(
{variable_id->first(), variable_id->second()});
}
return absl::OkStatus();
}
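// Graph-level preparation for the GPU backend: applies the add_bias,
// merge_padding, and global-pooling-to-mean transforms.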
absl::Status RunGraphTransformsForGpuModel(GraphFloat32* graph) {
auto merge_padding_transform = NewMergePaddingWithAdd();
auto add_bias_transform = NewAddBias();
auto pooling_to_reduce_op = NewGlobalPoolingToReduceOp();
ModelTransformer transformer(graph);
if (!transformer.Apply("add_bias", add_bias_transform.get())) {
return absl::InternalError("Invalid add_bias transform");
}
if (!transformer.Apply("merge_padding", merge_padding_transform.get())) {
return absl::InternalError("Invalid merge_padding transform");
}
if (!transformer.Apply("global pooling to mean",
pooling_to_reduce_op.get())) {
return absl::InternalError("Invalid global pooling to mean transform");
}
return absl::OkStatus();
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_model_test_util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
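// End-to-end fusion tests: each case exercises one linking pattern through the
// GpuModel pipeline on OpenCL and asserts the returned status is OK.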
TEST_F(OpenCLOperationTest, LinkingConvolutionAndCosOp) {
auto status = TestLinkingConvolutionAndCosOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMul) {
auto status = TestLinkingConvolution2InputMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputBroadcastMul2InputMul) {
auto status = TestLinkingConvolution2InputBroadcastMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputBroadcastMul) {
auto status = TestLinkingConvolution2InputMul2InputBroadcastMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMulCos) {
auto status = TestLinkingConvolution2InputMul2InputMulCos(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanh2InputDiff) {
auto status = TestLinkingConvolutionFirstTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionSecondTanh2InputDiff) {
auto status = TestLinkingConvolutionSecondTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanhSecondCos2InputDiff) {
auto status = TestLinkingConvolutionFirstTanhSecondCos2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingComplex0) {
auto status = TestLinkingComplex0(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvElem2InputAddElemsOp) {
auto status = TestLinkingConvElem2InputAddElemsOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingSliceCastOp) {
auto status = TestLinkingSliceCastOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddAddMulOp) {
  auto status = TestLinkingAddAddMulOp(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddMulOp) {
  auto status = TestLinkingAddAddMulOp(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/gpu_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/testing/gpu_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
747b49b9-8414-4351-bedf-958d67aaa510 | cpp | tensorflow/tensorflow | convert | tensorflow/lite/delegates/gpu/common/convert.cc | third_party/xla/xla/tests/convert_test.cc | #include "tensorflow/lite/delegates/gpu/common/convert.h"
#include <stdint.h>
#include <string.h>
#include <string>
#include <vector>
#include "fp16.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace {
constexpr int kPhwc4ChannelsInPlane = 4;
constexpr int kPhwo4i4ChannelsInPlane = 4;
constexpr int kPiohw4ChannelsInPlane = 4;
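// Repacks OHWI weights into PHWO4I4 layout: planes of 4 output channels, each
// carrying groups of 4 input channels per spatial position, zero-padded.
// When reverse_space is true the spatial dimensions are mirrored.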
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
absl::Span<float> out, bool reverse_space) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
}
float* output = out.data();
for (int p = 0; p < DivideRoundUp(shape.o, kPhwo4i4ChannelsInPlane); ++p) {
for (int h = 0; h < shape.h; ++h) {
for (int w = 0; w < shape.w; ++w) {
for (int c = 0; c < DivideRoundUp(shape.i, kPhwo4i4ChannelsInPlane);
++c) {
for (int co = 0; co < kPhwo4i4ChannelsInPlane; ++co) {
for (int ci = 0; ci < kPhwo4i4ChannelsInPlane; ++ci) {
float value = 0;
if (c * kPhwo4i4ChannelsInPlane + ci < shape.i &&
p * kPhwo4i4ChannelsInPlane + co < shape.o) {
int tensor_o = p * kPhwo4i4ChannelsInPlane + co;
int tensor_i = c * kPhwo4i4ChannelsInPlane + ci;
const int in_h = reverse_space ? shape.h - 1 - h : h;
const int in_w = reverse_space ? shape.w - 1 - w : w;
value = in[shape.LinearIndex({tensor_o, in_h, in_w, tensor_i})];
}
(*output++) = value;
}
}
}
}
}
}
return absl::OkStatus();
}
}
uint32_t GetElementsSizeForPHWO4I4(const OHWI& shape) {
return AlignByN(shape.i, kPhwo4i4ChannelsInPlane) *
AlignByN(shape.o, kPhwo4i4ChannelsInPlane) * shape.h * shape.w;
}
uint32_t GetElementsSizeForPHWO4I4(const IHWO& shape) {
return AlignByN(shape.i, kPhwo4i4ChannelsInPlane) *
AlignByN(shape.o, kPhwo4i4ChannelsInPlane) * shape.h * shape.w;
}
std::vector<float> ConvertToPHWO4I4(
const Tensor<OHWI, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWO4I4(tensor.shape));
ConvertToPHWO4I4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()),
                   /*reverse_space=*/false)
.IgnoreError();
return transposed;
}
std::vector<float> ConvertToPHWO4I4Transposed(
const Tensor<OHWI, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWO4I4(tensor.shape));
ConvertToPHWO4I4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()),
                   /*reverse_space=*/true)
.IgnoreError();
return transposed;
}
uint3 Get3DSizeForPHWO4I4(const OHWI& shape) {
return uint3(AlignByN(shape.i, 4), shape.h * shape.w,
DivideRoundUp(shape.o, 4));
}
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
absl::Span<float> out) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
}
const int dst_depth = DivideRoundUp(shape.o, 4);
const int src_depth = DivideRoundUp(shape.i, 4);
float* output = out.data();
for (int f = 0; f < dst_depth; ++f) {
for (int y = 0; y < shape.h; ++y) {
for (int x = 0; x < shape.w; ++x) {
for (int ch = 0; ch < src_depth; ++ch) {
for (int co = 0; co < 4; ++co) {
for (int ci = 0; ci < 4; ++ci) {
const int src_channel = ch * 4 + ci;
const int dst_channel = f * 4 + co;
float value = 0;
if (src_channel < shape.i && dst_channel < shape.o) {
value = in[shape.LinearIndex({src_channel, y, x, dst_channel})];
}
(*output++) = value;
}
}
}
}
}
}
return absl::OkStatus();
}
std::vector<float> ConvertToPHWO4I4(
const Tensor<IHWO, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWO4I4(tensor.shape));
ConvertToPHWO4I4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
uint32_t GetElementsSizeForPIOHW4(const OHWI& shape) {
return AlignByN(shape.o * shape.i, kPiohw4ChannelsInPlane) * shape.h *
shape.w;
}
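// Packs OHWI weights into PIOHW4 layout: the o*i values of each spatial
// position are grouped into planes of 4 and zero-padded.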
absl::Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
absl::Span<float> out) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPIOHW4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPIOHW4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPIOHW4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPIOHW4(shape)));
}
int32_t output_channels = shape.o * shape.i;
int32_t num_planes = DivideRoundUp(output_channels, kPiohw4ChannelsInPlane);
float* output = out.data();
for (int p = 0; p < num_planes; ++p) {
for (int h = 0; h < shape.h; ++h) {
for (int w = 0; w < shape.w; ++w) {
for (int c = 0; c < kPiohw4ChannelsInPlane; ++c) {
int output_c = p * kPiohw4ChannelsInPlane + c;
(*output++) = output_c >= output_channels
? 0
: in[shape.LinearIndex({output_c % shape.o, h, w,
output_c / shape.o})];
}
}
}
}
return absl::OkStatus();
}
std::vector<float> ConvertToPIOHW4(
const Tensor<OHWI, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPIOHW4(tensor.shape));
ConvertToPIOHW4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
template <typename T>
absl::Status ValidateConvertToPHWC4(absl::Span<const float> in,
const BHWC& shape, absl::Span<T> out) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWC4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPHWC4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWC4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPHWC4(shape)));
}
return absl::OkStatus();
}
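// Repacks a BHWC float tensor into PHWC4: channels are grouped into planes of
// 4 and zero-padded; the exact 4-channel case reduces to a plain memcpy.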
absl::Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
absl::Span<float> out) {
RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
if (shape.c == 4) {
std::memcpy(out.data(), in.data(),
shape.DimensionsProduct() * sizeof(float));
return absl::OkStatus();
}
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
float* dest =
out.data() + b * num_pixels * num_planes * kPhwc4ChannelsInPlane;
for (int p = 0; p < num_full_planes; p++) {
const float* src =
in.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
std::memcpy(dest, src, kPhwc4ChannelsInPlane * sizeof(float));
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
}
}
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const float* src =
in.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
float* dest = out.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
for (int p = 0; p < num_pixels; p++) {
std::memcpy(dest, src, remaining_channels * sizeof(float));
std::memset(dest + remaining_channels, 0,
(4 - remaining_channels) * sizeof(float));
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
}
return absl::OkStatus();
}
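// Same PHWC4 repacking as above, converting every value from fp32 to IEEE
// fp16 bits along the way.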
absl::Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
absl::Span<HalfBits> out) {
RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
HalfBits* dest =
out.data() + b * num_pixels * num_planes * kPhwc4ChannelsInPlane;
for (int p = 0; p < num_full_planes; p++) {
const float* src =
in.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = fp16_ieee_from_fp32_value(src[1]);
dest[2] = fp16_ieee_from_fp32_value(src[2]);
dest[3] = fp16_ieee_from_fp32_value(src[3]);
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
}
}
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const float* src =
in.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
HalfBits* dest = out.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
switch (remaining_channels) {
case 1:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = 0;
dest[2] = 0;
dest[3] = 0;
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
break;
case 2:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = fp16_ieee_from_fp32_value(src[1]);
dest[2] = 0;
dest[3] = 0;
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
break;
case 3:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = fp16_ieee_from_fp32_value(src[1]);
dest[2] = fp16_ieee_from_fp32_value(src[2]);
dest[3] = 0;
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
break;
default:
return absl::UnimplementedError(
"ConvertToPHWC4Half: Unsupported channels per planes count.");
}
}
return absl::OkStatus();
}
std::vector<float> ConvertToPHWC4(
const Tensor<BHWC, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWC4(tensor.shape));
ConvertToPHWC4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
std::vector<float> ConvertToPHWC4(
const Tensor<HWC, DataType::FLOAT32>& tensor) {
const BHWC batched_shape =
BHWC(1, tensor.shape.h, tensor.shape.w, tensor.shape.c);
std::vector<float> transposed(GetElementsSizeForPHWC4(batched_shape));
ConvertToPHWC4(tensor.data, batched_shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
uint32_t GetElementsSizeForPHWC4(const BHWC& shape) {
return shape.b * shape.h * shape.w * AlignByN(shape.c, kPhwc4ChannelsInPlane);
}
template <typename T>
absl::Status ValidateConvertFromPHWC4(absl::Span<const T> in, const BHWC& shape,
absl::Span<float> out) {
if (in.size() != GetElementsSizeForPHWC4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertFromPHWC4: Input data size does not match expected size: ",
in.size(), " != ", GetElementsSizeForPHWC4(shape)));
}
if (out.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertFromPHWC4: Output data size does not match expected size: ",
out.size(), " != ", shape.DimensionsProduct()));
}
return absl::OkStatus();
}
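// Inverse of ConvertToPHWC4: unpacks the padded planes back into a dense BHWC
// float buffer.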
absl::Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
absl::Span<float> out) {
RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
if (shape.c == 4) {
std::memcpy(out.data(), in.data(),
shape.DimensionsProduct() * sizeof(float));
return absl::OkStatus();
}
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
const float* src = in.data() + b * padded_size;
for (int p = 0; p < num_full_planes; p++) {
float* dest =
out.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
std::memcpy(dest, src, kPhwc4ChannelsInPlane * sizeof(float));
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
}
}
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const float* src = in.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
float* dest =
out.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
for (int p = 0; p < num_pixels; p++) {
std::memcpy(dest, src, remaining_channels * sizeof(float));
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
}
return absl::OkStatus();
}
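// Inverse of ConvertToPHWC4Half: converts fp16 bits back to fp32 while
// unpacking the padded planes.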
absl::Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in,
const BHWC& shape, absl::Span<float> out) {
RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
const HalfBits* src = in.data() + b * padded_size;
for (int p = 0; p < num_full_planes; p++) {
float* dest =
out.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
dest[1] = fp16_ieee_to_fp32_value(src[1]);
dest[2] = fp16_ieee_to_fp32_value(src[2]);
dest[3] = fp16_ieee_to_fp32_value(src[3]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
}
}
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const HalfBits* src = in.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
float* dest =
out.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
switch (remaining_channels) {
case 1:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
break;
case 2:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
dest[1] = fp16_ieee_to_fp32_value(src[1]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
break;
case 3:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
dest[1] = fp16_ieee_to_fp32_value(src[1]);
dest[2] = fp16_ieee_to_fp32_value(src[2]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
break;
default:
return absl::UnimplementedError(
"ConvertToPHWC4Half: Unsupported channels per planes count.");
}
}
return absl::OkStatus();
}
}
} | #include <array>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
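// The fixture disables algebraic simplification, inlining, and fp-conversion
// simplification so the Convert/BitcastConvert ops under test are executed as
// written rather than folded away.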
class ConvertTest : public ClientLibraryTestBase {
public:
explicit ConvertTest(se::Platform* platform = nullptr)
: ClientLibraryTestBase(platform) {
mutable_debug_options()->add_xla_disable_hlo_passes("algsimp");
mutable_debug_options()->add_xla_disable_hlo_passes("inline");
mutable_debug_options()->add_xla_disable_hlo_passes(
"simplify-fp-conversions");
mutable_debug_options()->set_xla_allow_excess_precision(false);
}
};
template <typename T>
class ConvertTestT : public ConvertTest {
public:
using ConvertTest::ConvertTest;
};
using FloatingPointTypeList =
::testing::Types<tsl::float8_e5m2, tsl::float8_e4m3, tsl::float8_e4m3fn,
tsl::float8_e5m2fnuz, tsl::float8_e4m3fnuz,
tsl::float8_e3m4, Eigen::half, bfloat16, float, double>;
TYPED_TEST_SUITE(ConvertTestT, FloatingPointTypeList);
template <typename T>
class ConvertTestF16 : public ConvertTest {
public:
using ConvertTest::ConvertTest;
};
using F16TypeList = ::testing::Types<Eigen::half, bfloat16>;
TYPED_TEST_SUITE(ConvertTestF16, F16TypeList);
TEST_F(ConvertTest, ConvertR1S32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {42, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 0, -64});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {42, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 0, 64});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 0.0f, 64.0f});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {1, 0, 1};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {1, 0, 1};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, F32);
std::vector<float> expected = {1., 0., 1.};
ComputeAndCompareR1<float>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1S0S32ToR1S0F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {});
ConvertElementType(a, F32);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.6, 64.4});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1S64ToR1F32) {
XlaBuilder builder(TestName());
std::vector<int64_t> arg{
-9223371216516022272,
-2,
-1,
-0x7FFFFFFF,
-0x80000000,
0,
1,
2,
1073742145,
1073742656,
0x7FFFFFFF,
0x80000000,
826720496944058148,
4296062029846194332,
0x0007FB72E4000000LL,
0x0007FB72E4000001LL,
0x0007FB72E6000000LL,
0x0007FB72E7000000LL,
0x0007FB72E7FFFFFFLL,
0x0007FB72E8000000LL,
0x0007FB72E8000001LL,
0x0007FB72EA000000LL,
0x0007FB72EB000000LL,
0x0007FB72EBFFFFFFLL,
0x0007FB72EC000000LL,
0x7FFFFF0000000000LL,
0x7FFFFF8000000000LL,
0x7FFFFFFFFFFFFF00,
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF),
static_cast<int64_t>(0x0000f234e67e0001LL),
static_cast<int64_t>(0x8000000000000000),
static_cast<int64_t>(0x8000000000000000LL),
static_cast<int64_t>(0x8000000000000001LL),
static_cast<int64_t>(0x8000008000000000LL),
static_cast<int64_t>(0x8000010000000000LL),
};
Literal arg_literal = LiteralUtil::CreateR1<int64_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<float>(arg[i]);
}
ComputeAndCompareR1<float>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1F32) {
XlaBuilder builder(TestName());
std::vector<uint32_t> arg{0, 1, 0x1000, 0x7fffffff,
0x80000000, 0x80000001, 0x80000002, 0x80000003,
0x80000080, 0x80000081, 0x80000082, 0xFFFFFFFF};
Literal arg_literal = LiteralUtil::CreateR1<uint32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<float>(arg[i]);
}
ComputeAndCompareR1<float>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1U32) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f, 1.0f, 16777216.0f,
16777218.0f, 2147483647.0f, 4294967040.0f};
Literal arg_literal = LiteralUtil::CreateR1<float>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, U32);
std::vector<uint32_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<uint32_t>(arg[i]);
}
ComputeAndCompareR1<uint32_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<uint32_t> arg{0, 1, 0x1000, 0x7fffffff, 0x80000082, 0xFFFFFFFF};
Literal arg_literal = LiteralUtil::CreateR1<uint32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1S32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<int32_t> arg{0, 1, 0x1000, -1, -0x1000};
Literal arg_literal = LiteralUtil::CreateR1<int32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f,
0.5f,
0.99f,
1.0f,
1.5f,
1.99f,
2.0f,
2.01f,
2147483648.f,
-0.5f,
-0.99f,
-1.0f,
-1.5f,
-1.99f,
-2.0f,
-2.01f,
9223371487098961920.f,
9223370937343148032.f,
-9223371487098961920.f,
-9223370937343148032.f};
Literal arg_literal = LiteralUtil::CreateR1<float>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, F32);
std::vector<float> expected = {32.0, 64.0};
ComputeAndCompareR1<float>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {32, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {32, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F64) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {32.0f, 64.0f});
ConvertElementType(a, F64);
std::vector<double> expected = {32.0, 64.0};
ComputeAndCompareR1<double>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1F64ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<double>(&builder, {32.0, 64.0});
ConvertElementType(a, F32);
std::vector<float> expected = {32.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertS32Extremes) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max()});
ConvertElementType(a, F32);
std::vector<float> expected = {
static_cast<float>(std::numeric_limits<int32_t>::min()),
static_cast<float>(std::numeric_limits<int32_t>::max())};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
TEST_F(ConvertTest, ConvertMapToS32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "in");
ConvertElementType(param, S32);
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertMapToF32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(S32, {}), "in");
ConvertElementType(param, F32);
auto a = ConstantR1<int32_t>(&builder, {42, 64});
Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
TEST_F(ConvertTest, ConvertReshape) {
XlaBuilder builder(TestName());
auto input = ConstantR1<int32_t>(&builder, {42});
auto reshape = Reshape(input, {0}, {});
ConvertElementType(reshape, F32);
ComputeAndCompareR0<float>(&builder, 42.0f, {}, ErrorSpec(0.0001));
}
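// Boundary values for f32<->f16 conversion: infinities, the largest/smallest
// f16 normals and subnormals, zeros, and values just outside the f16 range.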
std::vector<float> GetInterestingF16ConversionTestCases() {
float infinity = std::numeric_limits<float>::infinity();
float half_min_positive_normal = absl::bit_cast<float, uint32_t>(0x38800000);
float half_max_subnormal = absl::bit_cast<float, uint32_t>(0x387fc000);
float half_min_positive_subnormal =
absl::bit_cast<float, uint32_t>(0x33800000);
float half_max = 65504.0f;
std::vector<float> test_cases(
{-infinity, -(half_max * 2 + 1), -half_max, -42.0f, -1.0f,
-half_min_positive_subnormal, -half_max_subnormal,
-half_min_positive_normal, -0.0f, 0.0f, half_min_positive_subnormal,
half_max_subnormal, half_min_positive_normal, 1.0f, 42.0f, half_max,
(half_max * 2 + 1), infinity});
return test_cases;
}
XLA_TEST_F(ConvertTest, ConvertR1F16ToR1F32) {
std::vector<float> test_cases = GetInterestingF16ConversionTestCases();
std::vector<half> input;
absl::c_transform(test_cases, std::back_inserter(input),
[](float f) { return Eigen::half(f); });
std::vector<float> expected_output;
absl::c_transform(input, std::back_inserter(expected_output),
[](Eigen::half h) { return static_cast<float>(h); });
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
client_->TransferToServer(LiteralUtil::CreateR1<half>(input)));
XlaBuilder builder(TestName());
ConvertElementType(
Parameter(&builder, 0,
ShapeUtil::MakeShape(F16, {static_cast<int64_t>(input.size())}),
"param"),
F32);
ComputeAndCompareR1<float>(&builder, expected_output, {dot_lhs_handle.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F16) {
std::vector<float> input = GetInterestingF16ConversionTestCases();
std::vector<half> expected_output;
absl::c_transform(input, std::back_inserter(expected_output),
[](float f) { return Eigen::half(f); });
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
client_->TransferToServer(LiteralUtil::CreateR1<float>(input)));
XlaBuilder builder(TestName());
ConvertElementType(
Parameter(&builder, 0,
ShapeUtil::MakeShape(F32, {static_cast<int64_t>(input.size())}),
"param"),
F16);
ComputeAndCompareR1<half>(&builder, expected_output, {dot_lhs_handle.get()});
}
XLA_TEST_F(ConvertTest, ConvertC64ToC64) {
XlaBuilder builder(TestName());
std::vector<complex64> x = {{42.0f, 64.0f}};
ConvertElementType(ConstantR1<complex64>(&builder, x), C64);
ComputeAndCompareR1<complex64>(&builder, x, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ConvertTest, ConvertS64S64) {
XlaBuilder builder(TestName());
std::vector<int64_t> x = {{-42, 64}};
ConvertElementType(ConstantR1<int64_t>(&builder, x), S64);
ComputeAndCompareR1<int64_t>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64U64) {
XlaBuilder builder(TestName());
std::vector<uint64_t> x = {{42, 64}};
ConvertElementType(ConstantR1<uint64_t>(&builder, x), U64);
ComputeAndCompareR1<uint64_t>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64S64) {
XlaBuilder builder(TestName());
std::vector<uint64_t> unsigned_x = {{42, UINT64_MAX}};
ConvertElementType(ConstantR1<uint64_t>(&builder, unsigned_x), S64);
std::vector<int64_t> signed_x = {{42, -1}};
ComputeAndCompareR1<int64_t>(&builder, signed_x, {});
}
XLA_TEST_F(ConvertTest, ConvertS64U64) {
XlaBuilder builder(TestName());
std::vector<int64_t> signed_x = {{42, -1, INT64_MIN}};
ConvertElementType(ConstantR1<int64_t>(&builder, signed_x), U64);
std::vector<uint64_t> unsigned_x = {{42, UINT64_MAX, IPow<uint64_t>(2, 63)}};
ComputeAndCompareR1<uint64_t>(&builder, unsigned_x, {});
}
TEST_F(ConvertTest, ConvertR1S4ToR1S8) {
XlaBuilder builder(TestName());
auto a = ConstantR1<s4>(&builder, {s4(0), s4(1), s4(2), s4(-8)});
ConvertElementType(a, S8);
std::vector<int8_t> expected = {0, 1, 2, -8};
ComputeAndCompareR1<int8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S4ParameterToR1S8) {
XlaBuilder builder(TestName());
Literal arg_literal =
LiteralUtil::CreateR1<s4>({s4(0), s4(1), s4(2), s4(-8)});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S8);
std::vector<int8_t> expected = {0, 1, 2, -8};
ComputeAndCompareR1<int8_t>(&builder, expected, {arg_data.get()});
}
TEST_F(ConvertTest, ConvertR1U4ToR1U8) {
XlaBuilder builder(TestName());
auto a = ConstantR1<u4>(&builder, {u4(0), u4(1), u4(2), u4(15)});
ConvertElementType(a, U8);
std::vector<uint8_t> expected = {0, 1, 2, 15};
ComputeAndCompareR1<uint8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U4ParameterToR1U8) {
XlaBuilder builder(TestName());
Literal arg_literal =
LiteralUtil::CreateR1<u4>({u4(0), u4(1), u4(2), u4(15)});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, U8);
std::vector<uint8_t> expected = {0, 1, 2, 15};
ComputeAndCompareR1<uint8_t>(&builder, expected, {arg_data.get()});
}
TEST_F(ConvertTest, ConvertR1S8ToR1S4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int8_t>(&builder, {0, 1, 2, -8});
ConvertElementType(a, S4);
std::vector<s4> expected = {s4(0), s4(1), s4(2), s4(-8)};
ComputeAndCompareR1<s4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U8ToR1U4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {0, 1, 2, 15});
ConvertElementType(a, U4);
std::vector<u4> expected = {u4(0), u4(1), u4(2), u4(15)};
ComputeAndCompareR1<u4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S8ToR1S4Roundtrip) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int8_t>(&builder, {0, 8, -8, -9, 127, -128});
auto b = ConvertElementType(a, S4);
ConvertElementType(b, S8);
std::vector<int8_t> expected = {0, -8, -8, 7, -1, 0};
ComputeAndCompareR1<int8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {0., 2.5, -2.5});
ConvertElementType(a, S4);
std::vector<s4> expected = {s4(0), s4(2), s4(-2)};
ComputeAndCompareR1<s4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S4ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<s4>(&builder, {s4(0), s4(1), s4(2), s4(-8)});
ConvertElementType(a, F32);
std::vector<float> expected = {0, 1, 2, -8};
ComputeAndCompareR1<float>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertBF16F32) {
XlaBuilder builder(TestName());
std::vector<bfloat16> all_bfloats(1 << 16);
for (int i = 0; i < all_bfloats.size(); ++i) {
all_bfloats[i] =
Eigen::numext::bit_cast<bfloat16>(static_cast<uint16_t>(i));
}
std::vector<uint32_t> expected(all_bfloats.size());
for (int i = 0; i < expected.size(); ++i) {
expected[i] = (1U << 16) * i;
}
xla::XlaOp all_bfloats_bf16 = ConstantR1<bfloat16>(&builder, all_bfloats);
xla::XlaOp all_bfloats_f32 = ConvertElementType(all_bfloats_bf16, F32);
BitcastConvertType(all_bfloats_f32, U32);
TF_ASSERT_OK_AND_ASSIGN(const auto results, ExecuteAndTransfer(&builder, {}));
for (int i = 0; i < expected.size(); ++i) {
const auto result = results.Get<uint32_t>({i});
const auto correct = expected[i];
if (all_bfloats[i] != 0.0f &&
all_bfloats[i] < std::numeric_limits<float>::min()) {
const float same_signed_zero =
Eigen::numext::signbit(all_bfloats[i]) ? -0.0f : 0.0f;
if (result != correct) {
EXPECT_EQ(result, absl::bit_cast<uint32_t>(same_signed_zero));
}
} else if (Eigen::numext::isnan(all_bfloats[i])) {
ASSERT_TRUE(std::isnan(absl::bit_cast<float>(correct)));
EXPECT_TRUE(std::isnan(absl::bit_cast<float>(result)));
} else {
EXPECT_EQ(result, correct);
}
}
}
XLA_TEST_F(ConvertTest, ConvertF32BF16) {
XlaBuilder builder(TestName());
std::vector<float> floats(100);
std::minstd_rand0 generator;
for (int i = 0; i < floats.size(); ++i) {
floats[i] = generator();
if (i < 10) {
auto val = absl::bit_cast<uint32_t>(floats[i]);
val |= 1 << 15;
floats[i] = absl::bit_cast<float>(val);
}
}
floats.push_back(std::numeric_limits<float>::quiet_NaN());
floats.push_back(-std::numeric_limits<float>::quiet_NaN());
floats.push_back(absl::bit_cast<float>(0x7F800001));
floats.push_back(absl::bit_cast<float>(0xFF800001));
std::vector<bfloat16> expected(floats.size());
for (int i = 0; i < expected.size(); ++i) {
expected[i] = static_cast<bfloat16>(floats[i]);
}
xla::XlaOp lit_f32 = ConstantR1<float>(&builder, floats);
xla::XlaOp lit_bf16 = ConvertElementType(lit_f32, BF16);
BitcastConvertType(lit_bf16, U16);
TF_ASSERT_OK_AND_ASSIGN(const auto results, ExecuteAndTransfer(&builder, {}));
for (int i = 0; i < expected.size(); ++i) {
const auto result = results.Get<uint16_t>({i});
const auto correct = absl::bit_cast<uint16_t>(expected[i]);
if (floats[i] != 0.0f && floats[i] < std::numeric_limits<float>::min()) {
const bfloat16 same_signed_zero =
bfloat16(std::signbit(floats[i]) ? -0.0f : 0.0f);
if (result != correct) {
EXPECT_EQ(result, absl::bit_cast<uint16_t>(same_signed_zero));
}
} else if (std::isnan(floats[i])) {
ASSERT_TRUE(std::isnan(absl::bit_cast<bfloat16>(correct)));
EXPECT_TRUE(std::isnan(absl::bit_cast<bfloat16>(result)));
if (client_->platform()->Name() == "Host") {
EXPECT_EQ(result >> 15, correct >> 15);
}
} else {
EXPECT_EQ(result, correct);
}
}
}
XLA_TYPED_TEST(ConvertTestT, ConvertFPToPred) {
XlaBuilder builder(this->TestName());
using FP = TypeParam;
auto a = ConstantR1<FP>(&builder, {FP{0.0}, FP{0.25}, FP{2.0}, FP{-0.0}});
ConvertElementType(a, PRED);
std::array<bool, 4> expected = {false, true, true, false};
this->template ComputeAndCompareR1<bool>(&builder, expected, {});
}
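// The cases below are pairs of {input, value expected after an
// F16 -> F8E5M2 -> F16 round trip}; they exercise round-to-nearest-even,
// overflow to infinity, and subnormal handling.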
XLA_TEST_F(ConvertTest, ConvertF16F8e5m2Roundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, inf},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFCp15, 0x1.Cp15},
{0x1.Ep15, inf},
{0x1p16, inf},
{0x1p-14, 0x1p-14},
{0x1.Cp-15, 0x1p-14},
{0x0.8p-14, 0x0.8p-14},
{0x0.Ap-14, 0x0.8p-14},
{0x0.Ep-14, 0x1.0p-14},
{0x0.98p-14, 0x0.8p-14},
{0x0.A8p-14, 0x0.Cp-14},
{0x0.2p-14, 0},
{0x0.204p-14, 0x0.4p-14},
{0x0.DFCp-14, 0x0.Cp-14},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E5M2);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e5m2Roundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, inf},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFFFFEp15, 0x1.Cp15},
{0x1.Ep15, inf},
{0x1p16, inf},
{0x1p-14, 0x1p-14},
{0x1.Cp-15, 0x1p-14},
{0x1.0p-15, 0x0.8p-14},
{0x1.4p-15, 0x0.8p-14},
{0x1.Cp-15, 0x1.0p-14},
{0x1.3p-15, 0x0.8p-14},
{0x1.5p-15, 0x0.Cp-14},
{0x1p-17, 0},
{0x1.000002p-17, 0x0.4p-14},
{0x1.BFFFFEp-15, 0x0.Cp-14},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E5M2);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
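// Exhaustive round trip: all 256 F8E5M2 bit patterns are converted to the
// wider test type and back, and the result is compared against the reference
// evaluation with zero tolerance.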
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2RoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e5m2;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2RoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
if constexpr (std::is_same_v<TypeParam, tsl::float8_e3m4>) {
GTEST_SKIP() << "Skipping test for E3M4 as it requires an ml_dtypes "
"release with https:
} else {
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(static_cast<TypeParam>(
Eigen::numext::bit_cast<tsl::float8_e5m2>(static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2RoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e5m2;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e5m2F16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<TypeParam>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3Roundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{-nan, -nan},
{inf, inf},
{-inf, -inf},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep7, 0x1.Ep7},
{0x1.EFCp7, 0x1.Ep7},
{0x1.Fp7, inf},
{0x1p8, inf},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x0.2p-6, 0x0.2p-6},
{0x0.Ep-6, 0x0.Ep-6},
{0x0.8p-6, 0x0.8p-6},
{0x0.9p-6, 0x0.8p-6},
{0x0.Fp-6, 0x0.8p-5},
{0x0.8Fp-6, 0x0.8p-6},
{0x0.91p-6, 0x0.Ap-6},
{0x1p-10, 0},
{0x1.004p-10, 0x0.2p-6},
{0x0.EFCp-6, 0x0.Ep-6},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e4m3Roundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{-nan, -nan},
{inf, inf},
{-inf, -inf},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep7, 0x1.Ep7},
{0x1.EFFFFEp7, 0x1.Ep7},
{0x1.Fp7, inf},
{0x1p8, inf},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x0.2p-6, 0x0.2p-6},
{0x0.Ep-6, 0x0.Ep-6},
{0x0.8p-6, 0x0.8p-6},
{0x0.9p-6, 0x0.8p-6},
{0x0.Fp-6, 0x0.8p-5},
{0x0.8Fp-6, 0x0.8p-6},
{0x0.91p-6, 0x0.Ap-6},
{0x1p-10, 0},
{0x1.000002p-10, 0x0.2p-6},
{0x0.EFFFFEp-6, 0x0.Ep-6},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3RoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E4M3);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3RoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(static_cast<TypeParam>(
Eigen::numext::bit_cast<tsl::float8_e4m3>(static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E4M3);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3RoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3F16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<TypeParam>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E4M3);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
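// F8E4M3FN is a finite-only format: the cases below expect infinities and
// out-of-range values to round-trip as NaN rather than infinity.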
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3fnRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Cp8, 0x1.Cp8},
{0x1.Dp8, 0x1.Cp8},
{0x1.D04p8, nan},
{0x1p9, nan},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x1.0p-8, 0x0.4p-6},
{0x1.4p-8, 0x0.4p-6},
{0x1.Cp-8, 0x0.8p-6},
{0x1.3p-8, 0x0.4p-6},
{0x1.5p-8, 0x0.6p-6},
{0x1p-10, 0},
{0x1.004p-10, 0x0.2p-6},
{0x1.DFCp-7, 0x0.Ep-6},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3FN);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e4m3fnRoundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Cp8, 0x1.Cp8},
{0x1.Dp8, 0x1.Cp8},
{0x1.D00002p8, nan},
{0x1p9, nan},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x1.0p-8, 0x0.4p-6},
{0x1.4p-8, 0x0.4p-6},
{0x1.Cp-8, 0x0.8p-6},
{0x1.3p-8, 0x0.4p-6},
{0x1.5p-8, 0x0.6p-6},
{0x1p-10, 0},
{0x1.000002p-10, 0x0.2p-6},
{0x1.DFFFFEp-7, 0x0.Ep-6},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3FN);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnRoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3fn;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E4M3FN);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnRoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(static_cast<TypeParam>(
Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E4M3FN);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnRoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3fn;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3fnF16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<TypeParam>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E4M3FN);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
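// FNUZ formats have neither infinities nor a negative zero: -0.0 converts to
// +0.0 and values out of range become NaN.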
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3b11fnuzRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep4, 0x1.Ep4},
{0x1.EFCp4, 0x1.Ep4},
{0x1.Fp4, nan},
{0x1p5, nan},
{0x1p-10, 0x1p-10},
{0x1.Ep-11, 0x1p-10},
{0x1.0p-12, 0x0.4p-10},
{0x1.4p-12, 0x0.4p-10},
{0x1.Cp-12, 0x0.8p-10},
{0x1.3p-12, 0x0.4p-10},
{0x1.5p-12, 0x0.6p-10},
{0x1p-14, 0},
{0x1.004p-14, 0x0.2p-10},
{0x1.DFCp-11, 0x0.Ep-10},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 = ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs),
F8E4M3B11FNUZ);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e4m3b11fnuzRoundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep4, 0x1.Ep4},
{0x1.EFFFFEp4, 0x1.Ep4},
{0x1.Fp4, nan},
{0x1p5, nan},
{0x1p-10, 0x1p-10},
{0x1.Ep-11, 0x1p-10},
{0x1.0p-12, 0x0.4p-10},
{0x1.4p-12, 0x0.4p-10},
{0x1.Cp-12, 0x0.8p-10},
{0x1.3p-12, 0x0.4p-10},
{0x1.5p-12, 0x0.6p-10},
{0x1p-14, 0},
{0x1.000002p-14, 0x0.2p-10},
{0x1.DFFFFEp-11, 0x0.Ep-10},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 =
ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3B11FNUZ);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3b11fnuzRoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3b11fnuz;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E4M3B11FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3b11fnuzRoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(
static_cast<TypeParam>(Eigen::numext::bit_cast<tsl::float8_e4m3b11fnuz>(
static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E4M3B11FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3b11fnuzRoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3b11fnuz;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3b11fnuzF16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f16;
for (int i = 0; i < 65536; i++) {
all_f16.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f16), F8E4M3B11FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e5m2fnuzRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, nan},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFCp15, 0x1.Cp15},
{0x1.Ep15, nan},
{0x1p16, nan},
{0x1p-15, 0x1p-15},
{0x1.Cp-16, 0x1p-15},
{0x0.4p-14, 0x0.8p-15},
{0x0.5p-14, 0x0.8p-15},
{0x0.7p-14, 0x1.0p-15},
{0x0.4Cp-14, 0x0.8p-15},
{0x0.54p-14, 0x0.Cp-15},
{0x0.1p-14, 0},
{0x0.104p-14, 0x0.4p-15},
{0x0.6FCp-14, 0x0.Cp-15},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E5M2FNUZ);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF32F8e5m2fnuzRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, nan},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFFFFEp15, 0x1.Cp15},
{0x1.Ep15, nan},
{0x1p16, nan},
{0x1p-15, 0x1p-15},
{0x1.Cp-16, 0x1p-15},
{0x1.0p-16, 0x0.8p-15},
{0x1.4p-16, 0x0.8p-15},
{0x1.Cp-16, 0x1.0p-15},
{0x1.3p-16, 0x0.8p-15},
{0x1.5p-16, 0x0.Cp-15},
{0x1p-18, 0},
{0x1.000002p-18, 0x0.4p-15},
{0x1.BFFFFEp-16, 0x0.Cp-15},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E5M2FNUZ);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2fnuzRoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e5m2fnuz;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E5M2FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2fnuzRoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
if constexpr (std::is_same_v<TypeParam, tsl::float8_e3m4>) {
GTEST_SKIP() << "Skipping test for E3M4 as it requires an ml_dtypes "
"release with https:
} else {
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(
static_cast<TypeParam>(Eigen::numext::bit_cast<tsl::float8_e5m2fnuz>(
static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E5M2FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2fnuzRoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e5m2fnuz;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e5m2fnuzF16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f16;
for (int i = 0; i < 65536; i++) {
all_f16.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f16), F8E5M2FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3fnuzRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep7, 0x1.Ep7},
{0x1.EFCp7, 0x1.Ep7},
{0x1.Fp7, nan},
{0x1p8, nan},
{0x1p-7, 0x1p-7},
{0x1.Ep-8, 0x1p-7},
{0x1.0p-9, 0x0.4p-7},
{0x1.4p-9, 0x0.4p-7},
{0x1.Cp-9, 0x0.8p-7},
{0x1.3p-9, 0x0.4p-7},
{0x1.5p-9, 0x0.6p-7},
{0x1p-11, 0},
{0x1.004p-11, 0x0.2p-7},
{0x1.DFCp-8, 0x0.Ep-7},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3FNUZ);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF32F8e4m3fnuzRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep7, 0x1.Ep7},
{0x1.EFFFFEp7, 0x1.Ep7},
{0x1.Fp7, nan},
{0x1p8, nan},
{0x1p-7, 0x1p-7},
{0x1.Ep-8, 0x1p-7},
{0x1.0p-9, 0x0.4p-7},
{0x1.4p-9, 0x0.4p-7},
{0x1.Cp-9, 0x0.8p-7},
{0x1.3p-9, 0x0.4p-7},
{0x1.5p-9, 0x0.6p-7},
{0x1p-11, 0},
{0x1.000002p-11, 0x0.2p-7},
{0x1.DFFFFEp-8, 0x0.Ep-7},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3FNUZ);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnuzRoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3fnuz;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E4M3FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnuzRoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(
static_cast<TypeParam>(Eigen::numext::bit_cast<tsl::float8_e4m3fnuz>(
static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E4M3FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnuzRoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3fnuz;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3fnuzF16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f16;
for (int i = 0; i < 65536; i++) {
all_f16.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f16), F8E4M3FNUZ);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e3m4Roundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{-nan, -nan},
{inf, inf},
{-inf, -inf},
{0x1.08p0, 0x1p0},
{0x1.18p0, 0x1.2p0},
{0x1.Fp3, 0x1.Fp3},
{0x1.F7Cp3, 0x1.Fp3},
{0x1.F8p3, inf},
{0x1p4, inf},
{0x1p-2, 0x1p-2},
{0x1.Fp-3, 0x1p-2},
{0x0.1p-2, 0x0.1p-2},
{0x0.Fp-2, 0x0.Fp-2},
{0x0.8p-2, 0x0.8p-2},
{0x0.88p-2, 0x0.8p-2},
{0x0.F8p-2, 0x0.8p-1},
{0x0.87p-2, 0x0.8p-2},
{0x0.89p-2, 0x0.9p-2},
{0x1p-7, 0},
{0x1.004p-7, 0x0.1p-2},
{0x0.F7Cp-2, 0x0.Fp-2},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E3M4);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e3m4Roundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{-nan, -nan},
{inf, inf},
{-inf, -inf},
{0x1.08p0, 0x1p0},
{0x1.18p0, 0x1.2p0},
{0x1.Fp3, 0x1.Fp3},
{0x1.F7FFFEp3, 0x1.Fp3},
{0x1.F8p3, inf},
{0x1p4, inf},
{0x1p-2, 0x1p-2},
{0x1.Fp-3, 0x1p-2},
{0x0.1p-2, 0x0.1p-2},
{0x0.Fp-2, 0x0.Fp-2},
{0x0.8p-2, 0x0.8p-2},
{0x0.88p-2, 0x0.8p-2},
{0x0.F8p-2, 0x0.8p-1},
{0x0.87p-2, 0x0.8p-2},
{0x0.89p-2, 0x0.9p-2},
{0x1p-7, 0},
{0x1.000002p-7, 0x0.1p-2},
{0x0.F7FFFEp-2, 0x0.Fp-2},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E3M4);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e3m4RoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e3m4;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E3M4);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e3m4RoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(static_cast<TypeParam>(
Eigen::numext::bit_cast<tsl::float8_e3m4>(static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E3M4);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e3m4RoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e3m4;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e3m4F16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<TypeParam>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E3M4);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/convert.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/convert_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02a0ac9b-8137-474c-8c98-7dfed739d9d5 | cpp | tensorflow/tensorflow | data_type | tensorflow/lite/delegates/gpu/common/data_type.cc | tensorflow/lite/delegates/gpu/common/data_type_test.cc | #include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <stddef.h>
#include <string>
#include "absl/strings/str_cat.h"
namespace tflite {
namespace gpu {
namespace {
std::string ToGlslType(const std::string& scalar_type,
const std::string& vec_type, int vec_size) {
return vec_size == 1 ? scalar_type : absl::StrCat(vec_type, vec_size);
}
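// Picks a GLSL precision qualifier from the element bit width: lowp for
// 8-bit types, mediump for 16-bit, highp for 32-bit, and none for bool.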
std::string GetGlslPrecisionModifier(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
return "lowp ";
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return "mediump ";
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return "highp ";
case DataType::BOOL:
return "";
default:
return "";
}
}
}
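// Returns the size of one element of the given type in bytes (0 for UNKNOWN).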
size_t SizeOf(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
case DataType::BOOL:
return 1;
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return 2;
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return 4;
case DataType::FLOAT64:
case DataType::INT64:
case DataType::UINT64:
return 8;
case DataType::UNKNOWN:
return 0;
}
return 0;
}
std::string ToString(DataType data_type) {
switch (data_type) {
case DataType::FLOAT16:
return "float16";
case DataType::FLOAT32:
return "float32";
case DataType::FLOAT64:
return "float64";
case DataType::INT16:
return "int16";
case DataType::INT32:
return "int32";
case DataType::INT64:
return "int64";
case DataType::INT8:
return "int8";
case DataType::UINT16:
return "uint16";
case DataType::UINT32:
return "uint32";
case DataType::UINT64:
return "uint64";
case DataType::UINT8:
return "uint8";
case DataType::BOOL:
return "bool";
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
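// Maps a DataType to its OpenCL C type name, appending the vector width when
// vec_size > 1 (e.g. "float4").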
std::string ToCLDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
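// Same mapping, but producing Metal Shading Language type names.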
std::string ToMetalDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
DataType ToMetalTextureType(DataType data_type) {
switch (data_type) {
case DataType::FLOAT32:
case DataType::FLOAT16:
case DataType::INT32:
case DataType::INT16:
case DataType::UINT32:
case DataType::UINT16:
return data_type;
case DataType::INT8:
return DataType::INT16;
case DataType::UINT8:
case DataType::BOOL:
return DataType::UINT16;
default:
return DataType::UNKNOWN;
}
}
std::string ToGlslShaderDataType(DataType data_type, int vec_size,
bool add_precision, bool explicit_fp16) {
const std::string precision_modifier =
add_precision ? GetGlslPrecisionModifier(data_type) : "";
switch (data_type) {
case DataType::FLOAT16:
if (explicit_fp16) {
return ToGlslType("float16_t", "f16vec", vec_size);
} else {
return precision_modifier + ToGlslType("float", "vec", vec_size);
}
case DataType::FLOAT32:
return precision_modifier + ToGlslType("float", "vec", vec_size);
case DataType::FLOAT64:
return precision_modifier + ToGlslType("double", "dvec", vec_size);
case DataType::INT8:
case DataType::INT16:
case DataType::INT32:
case DataType::INT64:
return precision_modifier + ToGlslType("int", "ivec", vec_size);
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
case DataType::UINT64:
return precision_modifier + ToGlslType("uint", "uvec", vec_size);
case DataType::BOOL:
return ToGlslType("bool", "bvec", vec_size);
case DataType::UNKNOWN:
return "unknown";
}
return "unknown";
}
}
} | #include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace {
TEST(DataTypeTest, GlslShaderDataTypes) {
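  // ToGlslShaderDataType arguments: (data_type, vec_size, add_precision,
  // explicit_fp16).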
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT16));
EXPECT_EQ("mediump float",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
false));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, false,
true));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
true));
EXPECT_EQ("vec4", ToGlslShaderDataType(DataType::FLOAT16, 4));
EXPECT_EQ("mediump vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
false));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, false,
true));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32));
EXPECT_EQ("highp float",
ToGlslShaderDataType(DataType::FLOAT32, 1, true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32, 1,
false));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2));
EXPECT_EQ("highp vec2",
ToGlslShaderDataType(DataType::FLOAT32, 2, true));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2,
false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT32, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT16, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT8, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, true));
EXPECT_EQ("highp int",
ToGlslShaderDataType(DataType::INT32, 1, true));
EXPECT_EQ("mediump int",
ToGlslShaderDataType(DataType::INT16, 1, true));
EXPECT_EQ("lowp int",
ToGlslShaderDataType(DataType::INT8, 1, true));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT32, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT16, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT8, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, true));
EXPECT_EQ("highp uint",
ToGlslShaderDataType(DataType::UINT32, 1, true));
EXPECT_EQ("mediump uint",
ToGlslShaderDataType(DataType::UINT16, 1, true));
EXPECT_EQ("lowp uint",
ToGlslShaderDataType(DataType::UINT8, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL));
EXPECT_EQ("bvec4", ToGlslShaderDataType(DataType::BOOL, 4));
EXPECT_EQ("bool",
ToGlslShaderDataType(DataType::BOOL, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL, 1,
false));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/data_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/data_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6b0970d5-609b-4189-820c-8068b1d8b2d2 | cpp | tensorflow/tensorflow | memory_management | tensorflow/lite/delegates/gpu/common/memory_management.cc | tensorflow/lite/delegates/gpu/common/memory_management_test.cc | #include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace {
size_t TotalSize(const ObjectsAssignment<size_t>& assignment) {
return std::accumulate(assignment.object_sizes.begin(),
assignment.object_sizes.end(), static_cast<size_t>(0));
}
}
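// Converts a shared-objects assignment into offsets within a single buffer:
// objects are laid out back to back and each tensor receives the offset of
// the object it was assigned to.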
OffsetsAssignment ObjectsToOffsets(
const ObjectsAssignment<size_t>& obj_assignment) {
size_t num_tensors = obj_assignment.object_ids.size();
size_t num_objects = obj_assignment.object_sizes.size();
OffsetsAssignment result = {std::vector<size_t>(num_tensors),
0};
std::vector<size_t> ids_to_offset(num_objects);
for (size_t i = 0; i < num_objects; ++i) {
ids_to_offset[i] = result.total_size;
result.total_size += obj_assignment.object_sizes[i];
}
for (size_t i = 0; i < num_tensors; ++i) {
result.offsets[i] = ids_to_offset[obj_assignment.object_ids[i]];
}
return result;
}
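// Runs the size/distance-priority greedy strategy and the breadth-based
// greedy strategy, keeping whichever assignment uses less total memory.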
absl::Status BestGreedy(
const std::vector<TensorUsageRecord<size_t>>& usage_records,
ObjectsAssignment<size_t>* assignment) {
RETURN_IF_ERROR(
GreedyBySizeDistPriorityAssignment(usage_records, assignment));
ObjectsAssignment<size_t> assignment_by_breadth;
if (GreedyByBreadthAssignment(usage_records, &assignment_by_breadth).ok() &&
TotalSize(assignment_by_breadth) < TotalSize(*assignment)) {
std::swap(*assignment, assignment_by_breadth);
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<size_t>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignmentWithHash(usage_records, assignment);
case MemoryStrategy::GREEDY_IN_ORDER:
return GreedyInOrderAssignment(usage_records, assignment,
reallocation_graph);
case MemoryStrategy::GREEDY_BY_BREADTH:
return GreedyByBreadthAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_BY_SIZE:
return GreedyBySizeDistPriorityAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_BEST:
return BestGreedy(usage_records, assignment);
case MemoryStrategy::MINCOSTFLOW:
return MinCostFlowAssignment(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<BHWC>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignmentWithHash(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<uint2>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_IN_ORDER:
return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<uint3>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_IN_ORDER:
return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
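// GREEDY_BY_SIZE computes offsets directly; all other strategies first assign
// shared objects and then convert that assignment into offsets.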
absl::Status AssignOffsetsToTensors(
const std::vector<TensorUsageRecord<size_t>>& usage_records,
const MemoryStrategy& strategy, OffsetsAssignment* assignment,
size_t base_addr_align_bytes, const UsageGraph* reallocation_graph) {
if (strategy == MemoryStrategy::GREEDY_BY_SIZE) {
return GreedyBySizeAssignment(usage_records, base_addr_align_bytes,
assignment);
}
ObjectsAssignment<size_t> objects_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(
usage_records, strategy, &objects_assignment, reallocation_graph));
*assignment = ObjectsToOffsets(objects_assignment);
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(Model, EmptyAssignment) {
ObjectsAssignment<size_t> objects_assignment;
OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
EXPECT_TRUE(result.offsets.empty());
EXPECT_EQ(result.total_size, 0);
}
TEST(Model, OneObjectAssignment) {
ObjectsAssignment<size_t> objects_assignment;
objects_assignment.object_sizes = {16};
objects_assignment.object_ids = {0};
OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
EXPECT_EQ(result.total_size, 16);
EXPECT_THAT(result.offsets, ElementsAre(0));
objects_assignment.object_ids = {0, 0, 0};
result = ObjectsToOffsets(objects_assignment);
EXPECT_EQ(result.total_size, 16);
EXPECT_THAT(result.offsets, ElementsAre(0, 0, 0));
}
TEST(Model, ManyObjectsAssignment) {
ObjectsAssignment<size_t> objects_assignment;
objects_assignment.object_sizes = {16, 8, 32, 32, 4, 16};
objects_assignment.object_ids = {2, 0, 2, 1, 3, 3, 1, 5};
OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
EXPECT_THAT(result.offsets, ElementsAre(24, 0, 24, 16, 56, 56, 16, 92));
}
TEST(Model, EmptyRecords) {
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::NAIVE, &assignment).ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::EQUALITY, &assignment).ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::MINCOSTFLOW, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_TRUE(offsets_assignment.offsets.empty());
EXPECT_EQ(offsets_assignment.total_size, 0);
}
TEST(Model, OneRecord) {
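  // Each TensorUsageRecord is {tensor_size, first_task, last_task}.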
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1}};
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0));
EXPECT_EQ(offsets_assignment.total_size, 16);
}
TEST(Model, ChainRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1},
{8, 1, 2},
{64, 2, 3},
{32, 3, 4},
{8, 4, 5},
};
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32, 8));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 1));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 64, 0, 64, 0));
EXPECT_EQ(offsets_assignment.total_size, 96);
}
TEST(Model, ComplexRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{32, 0, 1},
{32, 1, 4},
{8, 2, 5},
{16, 3, 5},
{8, 4, 5},
{64, 5, 7},
{8, 6, 8},
{8, 7, 8},
{16, 8, 9}};
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(32, 32, 8, 16, 8, 64, 8, 8, 16));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 4, 2, 3));
EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 32, 8, 16, 8, 64));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 3, 2, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 8, 8));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 3, 1, 3, 2, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 16, 8));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 4, 2, 1, 3, 0, 2, 3, 1));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 16, 8, 8, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(1, 0, 2, 1, 3, 0, 1, 2, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32, 8, 8));
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_THAT(offsets_assignment.offsets,
ElementsAre(0, 32, 80, 64, 88, 0, 64, 72, 0));
EXPECT_EQ(offsets_assignment.total_size, 96);
}
TEST(Model, BHWCRecords) {
std::vector<TensorUsageRecord<BHWC>> usage_records{
{BHWC(1, 1, 2, 8), 0, 1},
{BHWC(1, 1, 2, 8), 1, 2},
{BHWC(1, 1, 1, 16), 2, 4},
{BHWC(1, 1, 2, 8), 3, 5},
{BHWC(1, 1, 8, 2), 4, 5},
{BHWC(1, 1, 2, 8), 5, 7},
{BHWC(1, 16, 1, 1), 6, 8},
{BHWC(16, 1, 1, 1), 7, 8},
{BHWC(1, 1, 1, 16), 8, 9}};
ObjectsAssignment<BHWC> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(
assignment.object_sizes,
ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
BHWC(1, 1, 2, 8), BHWC(1, 1, 8, 2), BHWC(1, 1, 2, 8),
BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1), BHWC(1, 1, 1, 16)));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 1, 3, 0, 4, 5, 2));
EXPECT_THAT(
assignment.object_sizes,
ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
BHWC(1, 1, 8, 2), BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1)));
}
TEST(Model, UInt2Records) {
std::vector<TensorUsageRecord<uint2>> usage_records{
{uint2(2, 8), 0, 1},
{uint2(2, 8), 1, 2},
{uint2(1, 12), 2, 4},
{uint2(2, 8), 3, 5},
{uint2(8, 2), 4, 5},
{uint2(2, 8), 5, 7},
{uint2(1, 8), 6, 8},
{uint2(2, 8), 7, 8},
{uint2(4, 1), 8, 9}};
ObjectsAssignment<uint2> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(2, 8),
uint2(8, 2), uint2(2, 8), uint2(1, 8), uint2(2, 8),
uint2(4, 1)));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 4, 0, 5));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2),
uint2(1, 8), uint2(4, 1)));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 2, 0, 3));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2)));
}
TEST(Model, UInt3Records) {
std::vector<TensorUsageRecord<uint3>> usage_records{
{uint3(1, 2, 8), 0, 1},
{uint3(4, 3, 2), 1, 2},
{uint3(1, 1, 1), 2, 4},
{uint3(2, 4, 1), 3, 5},
{uint3(2, 2, 2), 4, 5},
{uint3(8, 1, 2), 5, 7},
{uint3(1, 2, 1), 6, 8},
{uint3(1, 1, 1), 7, 8},
{uint3(2, 2, 2), 8, 9}};
ObjectsAssignment<uint3> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1),
uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2),
uint3(1, 2, 1), uint3(1, 1, 1), uint3(2, 2, 2)));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 2, 4));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1),
uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2),
uint3(1, 2, 1)));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 1, 3, 2, 0, 1));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(2, 4, 1),
uint3(8, 1, 2)));
}
TEST(Model, OffsetAssignmentWithAlignment) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1},
{8, 1, 2},
{64, 2, 3},
{32, 3, 4},
{8, 4, 5},
};
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment,
128)
.ok());
EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 128, 0, 128, 0));
EXPECT_EQ(offsets_assignment.total_size, 160);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bc8d5b74-26e7-4156-b57d-9d1ba2f0bdd2 | cpp | tensorflow/tensorflow | model_builder | tensorflow/lite/delegates/gpu/common/model_builder.cc | tensorflow/lite/delegates/gpu/common/model_builder_test.cc | #include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/custom_parsers.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/lstm_parser.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_internal.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/object_reader.h"
#include "tensorflow/lite/delegates/gpu/common/operation_parser.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/model_transformations.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace gpu {
namespace {
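// Reads fully connected weights stored as a 2D HW tensor and repacks them as
// OHWI with o = h, i = w and 1x1 spatial dimensions; the bias tensor is read
// if present and silently skipped otherwise.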
absl::Status GetFullyConnectedAttributes(int weights_tensor_id,
int bias_tensor_id,
ObjectReader* reader,
FullyConnectedAttributes* attr) {
Tensor<HW, DataType::FLOAT32> weights;
RETURN_IF_ERROR(reader->ReadTensor(weights_tensor_id, &weights));
attr->weights.data = std::move(weights.data);
attr->weights.id = weights.id;
attr->weights.shape.h = 1;
attr->weights.shape.w = 1;
attr->weights.shape.o = weights.shape.h;
attr->weights.shape.i = weights.shape.w;
reader->ReadTensor(bias_tensor_id, &attr->bias).IgnoreError();
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveBuiltinData(const TfLiteNode* tflite_node,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(tflite_node->builtin_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve builtin_data.");
}
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveCustomInitialData(const TfLiteNode* tflite_node,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(tflite_node->custom_initial_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve custom_initial_data.");
}
return absl::OkStatus();
}
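// Wraps a constant tensor into a CONSTANT node and returns the graph value
// that the node produces.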
absl::Status NewConstNode(TensorFloat32 t, GraphFloat32* graph, Value** value) {
ConstTensorAttributes attr;
attr.tensor = std::move(t);
Node* node = graph->NewNode();
node->operation.attributes = attr;
node->operation.type = ToString(OperationType::CONSTANT);
*value = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(node->id, (*value)->id));
(*value)->tensor.ref = attr.tensor.id;
(*value)->tensor.type = attr.tensor.kType;
(*value)->tensor.shape = attr.tensor.shape;
return absl::OkStatus();
}
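// Parses the two inputs of an elementwise op where at most one of them is a
// compile-time constant (two constants are rejected). The runtime input is
// attached to the node; the constant is stored in the attributes as a scalar,
// a per-channel Linear tensor, or an HWC tensor, depending on its shape.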
template <DataType DataTypeT, typename T>
absl::Status ParseInputsWithConstTensorImpl(
Node* node, ObjectReader* reader,
TensorOrScalarBase<DataTypeT, T>* tensor_or_scalar) {
const std::string& opname = node->operation.type;
const TfLiteTensor* input0 = reader->GetInputTensor(0);
if (!input0) {
return absl::InvalidArgumentError("Couldn't get the 1st input tensor for " +
opname);
}
const TfLiteTensor* input1 = reader->GetInputTensor(1);
if (!input1) {
return absl::InvalidArgumentError("Couldn't get the 2nd input tensor for " +
opname);
}
const bool constant_tensor0 = IsConstantTensor(input0);
const bool constant_tensor1 = IsConstantTensor(input1);
if (constant_tensor0 && constant_tensor1) {
return absl::InvalidArgumentError("No runtime input tensors for " + opname);
}
const bool runtime_tensor0 = !constant_tensor0;
const bool runtime_tensor1 = !constant_tensor1;
if (runtime_tensor0 && runtime_tensor1) {
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
} else {
int runtime_tensor = 0;
int constant_tensor = 1;
TfLiteIntArray* constant_dims = input1->dims;
if (constant_tensor0 && runtime_tensor1) {
runtime_tensor = 1;
constant_tensor = 0;
constant_dims = input0->dims;
}
RETURN_IF_ERROR(reader->AddInput(node, runtime_tensor));
if (constant_dims->size <= 0 || NumElements(constant_dims) == 1) {
Tensor<Scalar, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
*tensor_or_scalar = static_cast<T>(tensor.data[0]);
} else {
if (CheckIfLinearConvertible(constant_dims).ok()) {
Tensor<Linear, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
*tensor_or_scalar = std::move(tensor);
} else if (constant_dims->size == 2) {
Tensor<HW, DataTypeT> tensor_hw;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor_hw));
Tensor<HWC, DataTypeT> tensor;
tensor.id = tensor_hw.id;
tensor.shape = HWC(1, tensor_hw.shape.h, tensor_hw.shape.w);
tensor.data = tensor_hw.data;
*tensor_or_scalar = std::move(tensor);
} else {
Tensor<HWC, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
if (tensor.data.size() == 1) {
*tensor_or_scalar = static_cast<T>(tensor.data[0]);
} else {
*tensor_or_scalar = std::move(tensor);
}
}
}
}
return absl::OkStatus();
}
absl::Status ParseInputsWithConstTensor(Node* node, ObjectReader* reader,
const TfLiteTensor* input0) {
switch (input0->type) {
case kTfLiteBool: {
ElementwiseAttributesBase<DataType::BOOL, bool> attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
case kTfLiteInt32: {
ElementwiseAttributesBase<DataType::INT32, int32_t> attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
default: {
ElementwiseAttributes attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
}
}
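// Extracts the fused activation from MUL/ADD/SUB/DIV builtin params and, if
// one is set, fuses it into the given elementwise node.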
absl::Status MaybeFuseActivationForElementwiseNode(
OperationType operation_type, const TfLiteNode* tflite_node,
GraphFloat32* graph, Node* node) {
TfLiteFusedActivation activation = kTfLiteActNone;
switch (operation_type) {
case OperationType::MUL: {
const TfLiteMulParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::ADD: {
const TfLiteAddParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::SUB: {
const TfLiteSubParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::DIV: {
const TfLiteDivParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
default:
activation = kTfLiteActNone;
}
if (activation) {
return MaybeFuseActivation(activation, graph, node);
}
return absl::OkStatus();
}
struct TensorInfo {
std::vector<std::pair<TfLiteNode*, TfLiteRegistration*>> producers;
std::vector<std::pair<TfLiteNode*, TfLiteRegistration*>> consumers;
};
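// Walks the execution plan and collects every node that produces or consumes
// the tensor with the given id.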
absl::Status GetTensorInfo(const TfLiteContext* context, int tensor_id,
TensorInfo* result) {
TfLiteIntArray* execution_plan = nullptr;
if (context->GetExecutionPlan(const_cast<TfLiteContext*>(context),
&execution_plan) != kTfLiteOk) {
return absl::UnavailableError("Unable to get graph execution plan.");
}
for (int i = 0; i < execution_plan->size; ++i) {
const int node_index = execution_plan->data[i];
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
if (context->GetNodeAndRegistration(const_cast<TfLiteContext*>(context),
node_index, &node,
                                        &registration) != kTfLiteOk) {
return absl::UnavailableError(
"Unable to get node and registration for node.");
}
for (int j = 0; j < node->inputs->size; ++j) {
if (tensor_id == node->inputs->data[j]) {
result->consumers.push_back({node, registration});
}
}
for (int j = 0; j < node->outputs->size; ++j) {
if (tensor_id == node->outputs->data[j]) {
result->producers.push_back({node, registration});
}
}
}
return absl::OkStatus();
}
bool IsLogicalCode(int32_t builtin_code) {
return builtin_code == kTfLiteBuiltinGreater ||
builtin_code == kTfLiteBuiltinGreaterEqual ||
builtin_code == kTfLiteBuiltinLess ||
builtin_code == kTfLiteBuiltinLessEqual ||
builtin_code == kTfLiteBuiltinEqual ||
builtin_code == kTfLiteBuiltinNotEqual;
}
bool IsLogicalOp(tflite::gpu::OperationType op_type) {
return op_type == tflite::gpu::OperationType::GREATER ||
op_type == tflite::gpu::OperationType::GREATER_EQUAL ||
op_type == tflite::gpu::OperationType::LESS ||
op_type == tflite::gpu::OperationType::LESS_EQUAL ||
op_type == tflite::gpu::OperationType::EQUAL ||
op_type == tflite::gpu::OperationType::NOT_EQUAL;
}
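// BATCHED_MATMUL with two runtime inputs maps to a BATCHED_MATMUL node; with a
// constant 2D second input it is lowered to a 1x1 CONVOLUTION_2D whose weights
// are the transposed matrix.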
class BatchedMatMulOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
if (reader->GetNumberOfRuntimeInputs() == 2) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::BATCHED_MATMUL);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
} else if (reader->GetNumberOfRuntimeInputs() == 1) {
const TfLiteTensor* second_input = reader->GetInputTensor(1);
if (!IsConstantTensor(second_input) || second_input->dims->size != 2) {
return absl::UnavailableError("Not supported batched mat mul case");
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
Tensor<HW, DataType::FLOAT32> weights;
RETURN_IF_ERROR(reader->ReadTensor(1, &weights));
Convolution2DAttributes attr;
attr.weights.data.resize(weights.shape.w * weights.shape.h);
for (int i = 0; i < weights.shape.w; ++i) {
for (int j = 0; j < weights.shape.h; ++j) {
attr.weights.data[i * weights.shape.h + j] =
weights.data[j * weights.shape.w + i];
}
}
attr.weights.id = weights.id;
attr.weights.shape.h = 1;
attr.weights.shape.w = 1;
attr.weights.shape.o = weights.shape.w;
attr.weights.shape.i = weights.shape.h;
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.padding.prepended = HW(0, 0);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
} else {
return absl::UnavailableError("Not supported batched mat mul case");
}
return absl::OkStatus();
}
};
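// CAST maps to a CAST node. Casts from bool to float are only accepted when
// the bool tensor is produced by a single logical op and consumed exactly
// once.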
class CastOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
TfLiteType src_type = context->tensors[tflite_node->inputs->data[0]].type;
TfLiteType dst_type = context->tensors[tflite_node->outputs->data[0]].type;
if (src_type == kTfLiteBool &&
(dst_type == kTfLiteFloat16 || dst_type == kTfLiteFloat32)) {
TensorInfo input_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->inputs->data[0],
&input_tensor_info));
if (input_tensor_info.producers.size() != 1 ||
input_tensor_info.consumers.size() != 1) {
return absl::UnavailableError("Not supported cast case");
}
TensorInfo output_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->outputs->data[0],
&output_tensor_info));
if (output_tensor_info.consumers.size() != 1) {
return absl::UnavailableError(
"Cast from bool not supported for outputs");
}
if (IsLogicalCode(input_tensor_info.producers[0].second->builtin_code)) {
return absl::OkStatus();
}
}
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CAST);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
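// Expresses clamp(x, clamp_a, clamp_b) as three chained nodes:
// add(-clamp_a) -> relu(max = clamp_b - clamp_a) -> add(clamp_a).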
class ClampOperationsParser : public TFLiteOperationParser {
public:
explicit ClampOperationsParser(float clamp_a, float clamp_b)
: clamp_a_(clamp_a), clamp_b_(clamp_b) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node_sub = graph->NewNode();
Node* node_relu = graph->NewNode();
Node* node_add = graph->NewNode();
ElementwiseAttributes sub_attr;
sub_attr.param = -clamp_a_;
node_sub->operation.type = ToString(OperationType::ADD);
node_sub->operation.attributes = std::move(sub_attr);
ReLUAttributes relu_attr;
relu_attr.alpha = 0.0f;
relu_attr.activation_max = clamp_b_ - clamp_a_;
node_relu->operation.type = ToString(OperationType::RELU);
node_relu->operation.attributes = relu_attr;
ElementwiseAttributes add_attr;
add_attr.param = clamp_a_;
node_add->operation.type = ToString(OperationType::ADD);
node_add->operation.attributes = std::move(add_attr);
RETURN_IF_ERROR(reader->AddInput(node_sub, 0));
auto input = graph->FindInputs(node_sub->id)[0];
Value* v0 = graph->NewValue();
Value* v1 = graph->NewValue();
v0->tensor.type = input->tensor.type;
v0->tensor.shape = input->tensor.shape;
v1->tensor.type = input->tensor.type;
v1->tensor.shape = input->tensor.shape;
RETURN_IF_ERROR(graph->SetProducer(node_sub->id, v0->id));
RETURN_IF_ERROR(graph->AddConsumer(node_relu->id, v0->id));
RETURN_IF_ERROR(graph->SetProducer(node_relu->id, v1->id));
RETURN_IF_ERROR(graph->AddConsumer(node_add->id, v1->id));
RETURN_IF_ERROR(reader->AddOutputs(node_add));
return absl::OkStatus();
}
private:
const float clamp_a_, clamp_b_;
};
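// CONCATENATION: constant inputs are materialized as CONSTANT nodes, repeated
// inputs are decoupled with COPY nodes, and the concat axis is derived from
// the input and output shapes.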
class ConcatenationOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
ConcatAttributes attr;
std::vector<const Value*> inputs;
for (uint32_t idx = 0; idx < tflite_node->inputs->size; ++idx) {
Value* value;
const auto status = reader->ReadValue(idx, &value);
if (status.ok()) {
inputs.push_back(value);
} else {
TensorFloat32 tensor;
RETURN_IF_ERROR(reader->ReadTensor(idx, &tensor));
Value* value;
RETURN_IF_ERROR(NewConstNode(std::move(tensor), graph, &value));
inputs.push_back(value);
}
}
for (int i = 0; i < inputs.size(); ++i) {
for (int j = 0; j < i; ++j) {
if (inputs[i] == inputs[j]) {
Node* node_copy = graph->NewNode();
node_copy->operation.type = ToString(OperationType::COPY);
RETURN_IF_ERROR(graph->AddConsumer(node_copy->id, inputs[j]->id));
Value* copy_value = graph->NewValue();
copy_value->tensor.type = inputs[j]->tensor.type;
copy_value->tensor.shape = inputs[j]->tensor.shape;
RETURN_IF_ERROR(graph->SetProducer(node_copy->id, copy_value->id));
inputs[i] = copy_value;
break;
}
}
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONCAT);
RETURN_IF_ERROR(reader->AddOutputs(node));
for (int i = 0; i < inputs.size(); ++i) {
RETURN_IF_ERROR(graph->AddConsumer(node->id, inputs[i]->id));
}
std::vector<BHWC> input_shapes;
for (auto input : graph->FindInputs(node->id)) {
input_shapes.push_back(input->tensor.shape);
}
RETURN_IF_ERROR(SetAxis(input_shapes, &attr.axis));
BHWC output_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
for (auto input : graph->FindInputs(node->id)) {
if (input->tensor.shape.h != output_shape.h) {
attr.axis = Axis::HEIGHT;
break;
}
if (input->tensor.shape.w != output_shape.w) {
attr.axis = Axis::WIDTH;
break;
}
if (input->tensor.shape.c != output_shape.c) {
attr.axis = Axis::CHANNELS;
break;
}
}
const TfLiteConcatenationParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status SetAxis(const std::vector<BHWC>& input_shapes, Axis* axis) {
*axis = Axis::BATCH;
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].h != input_shapes[i].h &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::HEIGHT;
break;
}
}
if (*axis == Axis::BATCH) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::WIDTH;
break;
}
}
if (*axis == Axis::HEIGHT) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].h != input_shapes[i].h &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::CHANNELS;
break;
}
}
if (*axis == Axis::WIDTH) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].h != input_shapes[i].h) {
return absl::UnimplementedError(
"Can concatenate tensors only by batch, height, width, or "
"channels.");
}
}
return absl::OkStatus();
}
};
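// CONV_2D: supports runtime weights, rewrites filters with a single input
// channel (and matching input/output channel counts) as
// DEPTHWISE_CONVOLUTION, and resolves grouped convolutions that are not
// natively supported by splitting them per group.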
class Conv2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 6));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
Convolution2DAttributes attr;
RETURN_IF_ERROR(ReadAttributes(tflite_node, tf_options, reader, &attr));
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
const TfLiteTensor* weights_tensor = reader->GetInputTensor(1);
BHWC src_shape, weights_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
RETURN_IF_ERROR(ExtractTensorShape(*weights_tensor, &weights_shape));
if (src_shape.c != weights_shape.c) {
return absl::InternalError(
"No support of CONVOLUTION_2D with runtime grouped weights.");
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
} else {
BHWC src_shape, dst_shape;
RETURN_IF_ERROR(
ExtractTensorShape(*reader->GetInputTensor(0), &src_shape));
RETURN_IF_ERROR(
ExtractTensorShape(*reader->GetOutputTensor(0), &dst_shape));
const int src_group_size = attr.weights.shape.i;
if (attr.weights.shape.i == 1 && src_shape.c == dst_shape.c) {
DepthwiseConvolution2DAttributes dw_attr;
dw_attr.weights.id = attr.weights.id;
dw_attr.weights.shape =
OHWI(attr.weights.shape.i, attr.weights.shape.h,
attr.weights.shape.w, attr.weights.shape.o);
dw_attr.weights.data.resize(dw_attr.weights.shape.DimensionsProduct());
for (int o = 0; o < dw_attr.weights.shape.o; ++o) {
for (int h = 0; h < dw_attr.weights.shape.h; ++h) {
for (int w = 0; w < dw_attr.weights.shape.w; ++w) {
for (int i = 0; i < dw_attr.weights.shape.i; ++i) {
dw_attr.weights
.data[dw_attr.weights.shape.LinearIndex({o, h, w, i})] =
attr.weights
.data[attr.weights.shape.LinearIndex({i, h, w, o})];
}
}
}
}
dw_attr.bias = attr.bias;
dw_attr.strides = attr.strides;
dw_attr.dilations = attr.dilations;
dw_attr.padding = attr.padding;
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTHWISE_CONVOLUTION);
node->operation.attributes = std::move(dw_attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
}
const int dst_group_size = attr.weights.shape.o / attr.groups;
const bool supported_grouped_conv =
src_group_size % 4 == 0 && dst_group_size % 4 == 0;
if (attr.groups != 1 && !supported_grouped_conv) {
return ResolveGroupedConvolution(attr, tf_options, reader, graph);
} else {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
}
}
}
private:
absl::Status ReadAttributes(const TfLiteNode* tflite_node,
const TfLiteConvParams* tf_options,
ObjectReader* reader,
Convolution2DAttributes* attr) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
BHWC src_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 1) {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr->weights));
attr->groups = src_shape.c / attr->weights.shape.i;
} else {
const TfLiteTensor* weights_tensor = reader->GetInputTensor(1);
if (!weights_tensor) {
return absl::InternalError("Expected second runtime tensor.");
}
BHWC weights_shape;
RETURN_IF_ERROR(ExtractTensorShape(*weights_tensor, &weights_shape));
attr->weights.shape = OHWI(weights_shape.b, weights_shape.h,
weights_shape.w, weights_shape.c);
attr->groups = 1;
}
reader->ReadTensor(2, &attr->bias).IgnoreError();
attr->strides = ToHW(tf_options->stride_height, tf_options->stride_width);
attr->dilations = HW(tf_options->dilation_height_factor,
tf_options->dilation_width_factor);
UpdatePadding(tf_options->padding, src_shape, attr);
return absl::OkStatus();
}
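  // Lowers a grouped convolution that cannot run directly: the input is SPLIT
  // along channels, each group runs its own CONVOLUTION_2D with its slice of
  // the weights and bias, and the results are joined with CONCAT.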
absl::Status ResolveGroupedConvolution(const Convolution2DAttributes& attr,
const TfLiteConvParams* tf_options,
ObjectReader* reader,
GraphFloat32* graph) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
const TfLiteTensor* dst_tensor = reader->GetOutputTensor(0);
BHWC src_shape, dst_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
RETURN_IF_ERROR(ExtractTensorShape(*dst_tensor, &dst_shape));
DataType src_type = DataType::FLOAT32;
if (src_tensor->type == kTfLiteFloat16) {
src_type = DataType::FLOAT16;
}
DataType dst_type = DataType::FLOAT32;
if (dst_tensor->type == kTfLiteFloat16) {
dst_type = DataType::FLOAT16;
}
const int src_group_size = attr.weights.shape.i;
const int dst_group_size = attr.weights.shape.o / attr.groups;
Node* split_node = graph->NewNode();
RETURN_IF_ERROR(reader->AddInput(split_node, 0));
{
SplitAttributes split_attr;
split_attr.axis = Axis::CHANNELS;
split_node->operation.type = ToString(OperationType::SPLIT);
split_node->operation.attributes = split_attr;
}
std::vector<Node*> conv_nodes(attr.groups);
std::vector<Value*> conv_src(attr.groups);
std::vector<Value*> conv_dst(attr.groups);
for (int i = 0; i < attr.groups; ++i) {
conv_nodes[i] = graph->NewNode();
conv_src[i] = graph->NewValue();
conv_dst[i] = graph->NewValue();
conv_src[i]->tensor.shape = src_shape;
conv_src[i]->tensor.type = src_type;
conv_src[i]->tensor.shape.c = src_group_size;
conv_dst[i]->tensor.shape = dst_shape;
conv_dst[i]->tensor.type = dst_type;
conv_dst[i]->tensor.shape.c = dst_group_size;
Convolution2DAttributes conv_attr;
conv_attr = attr;
conv_attr.groups = 1;
conv_attr.weights.id = -1;
conv_attr.weights.shape.o = dst_group_size;
conv_attr.weights.data.resize(
conv_attr.weights.shape.DimensionsProduct());
for (int out_i = 0; out_i < dst_group_size; ++out_i) {
for (int in_i = 0; in_i < src_group_size; ++in_i) {
for (int ky = 0; ky < attr.weights.shape.h; ++ky) {
for (int kx = 0; kx < attr.weights.shape.w; ++kx) {
const int src_index = attr.weights.shape.LinearIndex(
{{i * dst_group_size + out_i, ky, kx, in_i}});
const int dst_index =
conv_attr.weights.shape.LinearIndex({{out_i, ky, kx, in_i}});
conv_attr.weights.data[dst_index] = attr.weights.data[src_index];
}
}
}
}
conv_attr.bias.shape.v = dst_group_size;
conv_attr.bias.data.resize(conv_attr.bias.shape.DimensionsProduct());
for (int out_i = 0; out_i < dst_group_size; ++out_i) {
if (i * dst_group_size + out_i < attr.bias.data.size()) {
conv_attr.bias.data[out_i] =
attr.bias.data[i * dst_group_size + out_i];
} else {
conv_attr.bias.data[out_i] = 0.0f;
}
}
conv_nodes[i]->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_nodes[i]->operation.attributes = conv_attr;
RETURN_IF_ERROR(graph->SetProducer(split_node->id, conv_src[i]->id));
RETURN_IF_ERROR(graph->AddConsumer(conv_nodes[i]->id, conv_src[i]->id));
RETURN_IF_ERROR(graph->SetProducer(conv_nodes[i]->id, conv_dst[i]->id));
}
Node* concat_node = graph->NewNode();
{
ConcatAttributes concat_attr;
concat_attr.axis = Axis::CHANNELS;
concat_node->operation.type = ToString(OperationType::CONCAT);
concat_node->operation.attributes = concat_attr;
}
for (int i = 0; i < attr.groups; ++i) {
RETURN_IF_ERROR(graph->AddConsumer(concat_node->id, conv_dst[i]->id));
}
RETURN_IF_ERROR(reader->AddOutputs(concat_node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, concat_node));
return absl::OkStatus();
}
};
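// CUMSUM: the TFLite axis index is remapped into BHWC space based on the rank
// of the input tensor.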
class CumsumOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
CumsumAttributes attr;
const TfLiteTensor* input_tensor = reader->GetInputTensor(0);
const TfLiteTensor* axis_tensor = reader->GetInputTensor(1);
const TfLiteIntArray* shape = input_tensor->dims;
const int tflite_axis = GetTensorData<int32_t>(axis_tensor)[0];
const Axis axes[4] = {Axis::BATCH, Axis::HEIGHT, Axis::WIDTH,
Axis::CHANNELS};
attr.axis = axes[tflite_axis + 4 - shape->size];
node->operation.type = ToString(OperationType::CUMSUM);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
class DensifyOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DENSIFY);
const TfLiteTensor* const_tensor = reader->GetInputTensor(0);
if (!const_tensor->sparsity) {
return absl::InvalidArgumentError("Input tensor must be sparse.");
}
TensorFloat32 sparse_tensor;
RETURN_IF_ERROR(reader->ReadTensor(0, &sparse_tensor));
DensifyAttributes attributes;
attributes.tensor = std::move(sparse_tensor);
node->operation.attributes = attributes;
return reader->AddOutputs(node);
}
};
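// DEPTHWISE_CONV_2D: weights come either from a second runtime input or a
// constant tensor, and are reshuffled when depth_multiplier != 1.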
class DepthwiseConvolutionOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 6));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTHWISE_CONVOLUTION);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
DepthwiseConvolution2DAttributes attr;
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
RETURN_IF_ERROR(reader->AddInput(node, 1));
auto weights_shape = graph->FindInputs(node->id)[1]->tensor.shape;
attr.weights.shape = OHWI(weights_shape.b, weights_shape.h,
weights_shape.w, weights_shape.c);
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
}
reader->ReadTensor(2, &attr.bias).IgnoreError();
const TfLiteDepthwiseConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
attr.strides = ToHW(tf_options->stride_height, tf_options->stride_width);
attr.dilations = HW(std::max(1, tf_options->dilation_height_factor),
std::max(1, tf_options->dilation_width_factor));
UpdatePadding(tf_options->padding,
graph->FindInputs(node->id)[0]->tensor.shape, &attr);
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
const int depth_multiplier = tf_options->depth_multiplier;
if (depth_multiplier != 1) {
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* filter = reader->GetInputTensor(1);
const TfLiteTensor* output = reader->GetOutputTensor(0);
TransposeWeights(input, filter, output, depth_multiplier, &attr);
}
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
private:
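  // Repacks the TFLite depthwise filter, whose innermost dimension is
  // output_depth (= input_depth * depth_multiplier), into OHWI order so each
  // output channel owns a contiguous block. Only used when
  // depth_multiplier != 1.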
static void TransposeWeights(const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* output, int depth_multiplier,
DepthwiseConvolution2DAttributes* attr) {
const int input_depth = input->dims->data[3];
const int filter_height = filter->dims->data[1];
const int filter_width = filter->dims->data[2];
const int output_depth = output->dims->data[3];
Tensor<OHWI, DataType::FLOAT32> weights;
weights.id = attr->weights.id;
weights.shape =
OHWI(output_depth, filter_height, filter_width, input_depth);
weights.data.resize(weights.shape.DimensionsProduct());
float* dst = &weights.data[0];
for (int j = 0; j < output_depth; ++j) {
const float* src = attr->weights.data.data() + j;
for (int i = 0; i < filter_height * filter_width; ++i) {
*dst = *src;
dst++;
src += output_depth;
}
}
attr->weights = std::move(weights);
}
};
class DepthToSpaceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTH_TO_SPACE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteDepthToSpaceParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
SpaceToDepthAttributes attr;
attr.block_size = tf_options->block_size;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
class DequantizeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 0) {
ConstTensorAttributes attr;
RETURN_IF_ERROR(reader->ReadTensor(0, &attr.tensor));
Node* node = graph->NewNode();
node->operation.attributes = attr;
node->operation.type = ToString(OperationType::CONSTANT);
return reader->AddOutputs(node);
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
auto input_value = graph->FindInputs(node->id)[0];
if (!input_value->quant_params) {
if (runtime_inputs == 1) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(
"Encountered Dequantize input with no quant params");
}
QuantizeAndDequantizeAttributes attr;
attr.min = input_value->quant_params.value().min;
attr.max = input_value->quant_params.value().max;
attr.scale = input_value->quant_params.value().scale;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
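// Generic parser for unary and binary elementwise ops. Binary ops may take two
// runtime inputs or one runtime input plus a constant; special cases with two
// identical inputs are rewritten (x * x -> SQUARE, x + x -> MUL by 2).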
class ElementwiseOperationParser : public TFLiteOperationParser {
public:
explicit ElementwiseOperationParser(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
const int kMaxSupportedOpVersion =
operation_type_ == OperationType::MUL ? 3 : 2;
RETURN_IF_ERROR(
CheckMaxSupportedOpVersion(registration, kMaxSupportedOpVersion));
if (IsLogicalOp(operation_type_)) {
TensorInfo output_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->outputs->data[0],
&output_tensor_info));
if (output_tensor_info.producers.size() != 1 ||
output_tensor_info.consumers.size() != 1) {
return absl::UnavailableError("Not supported logical op case");
}
const auto& next_node = output_tensor_info.consumers[0];
TfLiteType dst_type =
context->tensors[next_node.first->outputs->data[0]].type;
int next_code = next_node.second->builtin_code;
if ((next_code == kTfLiteBuiltinCast ||
next_code == kTfLiteBuiltinSelect ||
next_code == kTfLiteBuiltinSelectV2) &&
(dst_type == kTfLiteFloat16 || dst_type == kTfLiteFloat32)) {
return absl::OkStatus();
} else {
return absl::UnimplementedError("Not supported logical op case.");
}
}
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(operation_type_);
if (operation_type_ == OperationType::ADD) {
ElementwiseAttributes attr;
node->operation.attributes = std::move(attr);
}
if (IsOneArgumentOperation()) {
RETURN_IF_ERROR(reader->VerifyInputsConstsOutputs(tflite_node,
                                                        /*runtime_inputs=*/1,
                                                        /*const_inputs=*/0,
                                                        /*outputs=*/1));
RETURN_IF_ERROR(reader->AddInput(node, 0));
} else if (IsTwoArgumentOperation() &&
reader
->VerifyInputsConstsOutputs(tflite_node,
                                               /*runtime_inputs=*/2,
                                               /*const_inputs=*/0,
                                               /*outputs=*/1)
.ok()) {
if (tflite_node->inputs->size != 2) {
return absl::InvalidArgumentError("Applies only two input tensors");
}
const TfLiteTensor* input0 = reader->GetInputTensor(0);
const TfLiteTensor* input1 = reader->GetInputTensor(1);
if (input0 == input1) {
if (operation_type_ == OperationType::MUL) {
node->operation.type = ToString(OperationType::SQUARE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
} else if (operation_type_ == OperationType::ADD) {
node->operation.type = ToString(OperationType::MUL);
ElementwiseAttributes attr;
attr.param = 2.0f;
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
} else {
return absl::UnimplementedError(
"No support of few identical inputs in the same operation.");
}
} else {
int input_tensor0 = 0;
int input_tensor1 = 1;
if (operation_type_ == OperationType::MUL ||
operation_type_ == OperationType::ADD) {
BHWC shape0;
RETURN_IF_ERROR(ExtractTensorShape(*input0, &shape0));
BHWC shape1;
RETURN_IF_ERROR(ExtractTensorShape(*input1, &shape1));
if (shape0.h <= shape1.h && shape0.w <= shape1.w &&
shape0.c == shape1.c) {
input_tensor0 = 1;
input_tensor1 = 0;
}
}
RETURN_IF_ERROR(reader->AddInput(node, input_tensor0));
RETURN_IF_ERROR(reader->AddInput(node, input_tensor1));
}
} else if (IsTwoArgumentOperationWithConst()) {
RETURN_IF_ERROR(reader->VerifyInputsConstsOutputs(tflite_node,
                                                        /*runtime_inputs=*/1,
                                                        /*const_inputs=*/1,
                                                        /*outputs=*/1));
const TfLiteTensor* input_tensor0 = reader->GetInputTensor(0);
const TfLiteTensor* constant_tensor = IsConstantTensor(input_tensor0)
? input_tensor0
: reader->GetInputTensor(1);
RETURN_IF_ERROR(
ParseInputsWithConstTensor(node, reader, constant_tensor));
} else {
return absl::InvalidArgumentError("Incorrect operation type passed");
}
RETURN_IF_ERROR(reader->AddOutputs(node));
return MaybeFuseActivationForElementwiseNode(operation_type_, tflite_node,
graph, node);
}
private:
absl::Status GetActivation(const TfLiteNode* tflite_node,
TfLiteFusedActivation* activation) const {
if (operation_type_ == OperationType::DIV) {
const TfLiteDivParams* tf_options;
auto status = RetrieveBuiltinData(tflite_node, &tf_options);
*activation = status.ok() ? tf_options->activation : kTfLiteActNone;
return absl::OkStatus();
}
if (operation_type_ == OperationType::SUB) {
const TfLiteSubParams* tf_options;
auto status = RetrieveBuiltinData(tflite_node, &tf_options);
*activation = status.ok() ? tf_options->activation : kTfLiteActNone;
return absl::OkStatus();
}
*activation = kTfLiteActNone;
return absl::OkStatus();
}
bool IsOneArgumentOperation() const {
switch (operation_type_) {
case OperationType::ABS:
case OperationType::COPY:
case OperationType::COS:
case OperationType::ELU:
case OperationType::EXP:
case OperationType::FLOOR:
case OperationType::GELU:
case OperationType::LOG:
case OperationType::NEG:
case OperationType::RSQRT:
case OperationType::SIGMOID:
case OperationType::SIGN:
case OperationType::SIN:
case OperationType::SQRT:
case OperationType::SQUARE:
case OperationType::TANH:
return true;
default:
return false;
}
}
bool IsTwoArgumentOperation() const {
switch (operation_type_) {
case OperationType::ADD:
case OperationType::DIV:
case OperationType::EQUAL:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::GREATER:
case OperationType::GREATER_EQUAL:
case OperationType::LESS:
case OperationType::LESS_EQUAL:
case OperationType::LOGICAL_AND:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::MUL:
case OperationType::NOT_EQUAL:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return true;
default:
return false;
}
}
bool IsTwoArgumentOperationWithConst() const {
switch (operation_type_) {
case OperationType::ADD:
case OperationType::DIV:
case OperationType::EQUAL:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::GREATER:
case OperationType::GREATER_EQUAL:
case OperationType::LESS:
case OperationType::LESS_EQUAL:
case OperationType::LOGICAL_AND:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::MUL:
case OperationType::NOT_EQUAL:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return true;
default:
return false;
}
}
OperationType operation_type_;
};
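// FULLY_CONNECTED: with two runtime inputs it becomes a CONVOLUTION_2D (plus a
// trailing RESHAPE when the output shape differs); with constant weights it
// becomes FULLY_CONNECTED, or CONVOLUTION_2D when the input has spatial
// dimensions other than 1x1.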
class FullyConnectedOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 9));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteFullyConnectedParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (reader->GetNumberOfRuntimeInputs() == 2) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
const TfLiteTensor* input_tensor = reader->GetInputTensor(0);
BHWC input_shape;
RETURN_IF_ERROR(ExtractTensorShape(*input_tensor, &input_shape));
const TfLiteTensor* input2_tensor = reader->GetInputTensor(1);
BHWC input2_shape;
RETURN_IF_ERROR(ExtractTensorShape(*input2_tensor, &input2_shape));
const TfLiteTensor* output_tensor = reader->GetOutputTensor(0);
BHWC output_shape;
RETURN_IF_ERROR(ExtractTensorShape(*output_tensor, &output_shape));
BHWC output_ref_shape = input_shape;
output_ref_shape.c = input2_shape.b;
if (output_ref_shape != output_shape) {
Value* copy_value = graph->NewValue();
auto input_value = graph->FindInputs(node->id)[0];
copy_value->tensor.type = input_value->tensor.type;
copy_value->tensor.shape = output_ref_shape;
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
reshape_attr.new_shape = output_shape;
node_reshape->operation.attributes = reshape_attr;
RETURN_IF_ERROR(graph->SetProducer(node->id, copy_value->id));
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, copy_value->id));
RETURN_IF_ERROR(reader->AddOutputs(node_reshape));
} else {
RETURN_IF_ERROR(reader->AddOutputs(node));
}
Convolution2DAttributes attr;
reader->ReadTensor(2, &attr.bias).IgnoreError();
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.padding.prepended = HW(0, 0);
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
Node* node = graph->NewNode();
RETURN_IF_ERROR(reader->AddInput(node, 0));
if (tf_options->weights_format !=
kTfLiteFullyConnectedWeightsFormatDefault) {
return absl::UnimplementedError(
"Unsupported FullyConnected weights format.");
}
FullyConnectedAttributes attr;
RETURN_IF_ERROR(GetFullyConnectedAttributes(1, 2, reader, &attr));
auto input = graph->FindInputs(node->id)[0];
if (input->tensor.shape.c != attr.weights.shape.i) {
return absl::UnimplementedError(
"Amount of input channels should match weights width");
}
Node* conv = node;
if (input->tensor.shape.h != 1 || input->tensor.shape.w != 1) {
Convolution2DAttributes conv_attr;
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.padding.appended = HW(0, 0);
conv_attr.padding.prepended = HW(0, 0);
conv_attr.weights = attr.weights;
conv_attr.bias = attr.bias;
conv->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv->operation.attributes = std::move(conv_attr);
} else {
conv->operation.type = ToString(OperationType::FULLY_CONNECTED);
conv->operation.attributes = std::move(attr);
}
RETURN_IF_ERROR(reader->AddOutputs(conv));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, conv));
return absl::OkStatus();
}
};
class GatherOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::GATHER);
GatherAttributes attr;
const TfLiteTensor* input_tensor = reader->GetInputTensor(0);
const TfLiteGatherParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input_tensor, tf_options->axis, &attr.axis));
RETURN_IF_ERROR(reader->AddInput(node, 0));
const TfLiteTensor* idx_tensor = reader->GetInputTensor(1);
if (!IsConstantTensor(idx_tensor)) {
RETURN_IF_ERROR(reader->AddInput(node, 1));
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.indices));
}
node->operation.attributes = std::move(attr);
return reader->AddOutputs(node);
}
};
class HardSwishOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode*, const TfLiteRegistration*,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::HARD_SWISH);
RETURN_IF_ERROR(reader->AddInput(node, 0));
return reader->AddOutputs(node);
}
};
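// LSTM: dispatches on the kernel type. The basic kernel is expanded into
// CONCAT + FULLY_CONNECTED + LSTM nodes; the full kernel is handled by
// ParseLSTMAttributes and may register new variable-input value ids.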
class LSTMOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 4));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteLSTMParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
switch (tf_options->kernel_type) {
case kTfLiteLSTMFullKernel:
return ParseFull(tflite_node, registration, graph, reader, tf_options);
case kTfLiteLSTMBasicKernel:
return ParseBasic(tflite_node, registration, graph, reader, tf_options);
}
    // Defensive fallback so control cannot fall off the end if an unexpected
    // kernel type is ever encountered.
    return absl::InvalidArgumentError("Unsupported LSTM kernel type.");
  }
absl::flat_hash_map<int, ValueId> GetNewValueIdsForVariableInputNodes()
final {
return new_variable_input_value_map_;
}
private:
absl::Status ParseBasic(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader,
const TfLiteLSTMParams* tf_options) {
if (tflite_node->inputs->size != 5) {
return absl::InvalidArgumentError("LSTM should have 5 input tensors");
}
if (tflite_node->outputs->size != 4) {
return absl::InvalidArgumentError("LSTM should have 4 output tensors");
}
RETURN_IF_ERROR(CheckBasicParameters(tf_options));
Node* concat_node = graph->NewNode();
concat_node->operation.type = ToString(OperationType::CONCAT);
ConcatAttributes concat_attr;
concat_attr.axis = Axis::CHANNELS;
concat_node->operation.attributes = concat_attr;
Node* fc_node = graph->NewNode();
fc_node->operation.type = ToString(OperationType::FULLY_CONNECTED);
FullyConnectedAttributes fc_attr;
RETURN_IF_ERROR(GetFullyConnectedAttributes(2, 3, reader, &fc_attr));
fc_node->operation.attributes = std::move(fc_attr);
Node* lstm_node = graph->NewNode();
lstm_node->operation.type = ToString(OperationType::LSTM);
LstmAttributes lstm_attr;
lstm_attr.kernel_type = LstmKernelType::BASIC;
lstm_node->operation.attributes = lstm_attr;
Value* concat_temp;
int concat_tensor_idx = tflite_node->outputs->data[2];
RETURN_IF_ERROR(
reader->ReadValueByTensorIdx(concat_tensor_idx, &concat_temp));
Value* activ_temp;
int activ_tensor_idx = tflite_node->outputs->data[3];
RETURN_IF_ERROR(
reader->ReadValueByTensorIdx(activ_tensor_idx, &activ_temp));
RETURN_IF_ERROR(reader->AddInput(concat_node, 0));
RETURN_IF_ERROR(reader->AddInput(concat_node, 1));
RETURN_IF_ERROR(graph->SetProducer(concat_node->id, concat_temp->id));
RETURN_IF_ERROR(graph->AddConsumer(fc_node->id, concat_temp->id));
RETURN_IF_ERROR(graph->SetProducer(fc_node->id, activ_temp->id));
RETURN_IF_ERROR(graph->AddConsumer(lstm_node->id, activ_temp->id));
RETURN_IF_ERROR(reader->AddInput(lstm_node, 4));
RETURN_IF_ERROR(reader->AddOutput(lstm_node, 1));
RETURN_IF_ERROR(reader->AddOutput(lstm_node, 0));
return absl::OkStatus();
}
absl::Status CheckBasicParameters(const TfLiteLSTMParams* tf_options) {
if (tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError("Only TANH activation is supported.");
}
if (tf_options->cell_clip != 0.0f) {
return absl::UnimplementedError("cell_clip is not supported.");
}
if (tf_options->proj_clip != 0.0f) {
return absl::UnimplementedError("proj_clip is not supported.");
}
return absl::OkStatus();
}
absl::Status ParseFull(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader,
const TfLiteLSTMParams* tf_options) {
RETURN_IF_ERROR(ParseLSTMAttributes(tflite_node, registration, graph,
reader, tf_options,
&new_variable_input_value_map_));
return absl::OkStatus();
}
absl::Status CheckFullParameters(const TfLiteLSTMParams* tf_options) {
if (tf_options->activation != kTfLiteActSigmoid &&
tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(
"Only sigmoid or tanh activation is supported.");
}
return absl::OkStatus();
}
absl::flat_hash_map<int, ValueId> new_variable_input_value_map_;
};
class OneHotOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
OneHotAttributes attr;
const TfLiteTensor* on_tensor = reader->GetInputTensor(2);
const TfLiteTensor* off_tensor = reader->GetInputTensor(3);
attr.on_value = GetTensorData<float>(on_tensor)[0];
attr.off_value = GetTensorData<float>(off_tensor)[0];
node->operation.type = ToString(OperationType::ONE_HOT);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
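// PACK: a single input degenerates into a RESHAPE; otherwise every input is
// reshaped to the required per-slice shape if needed and the slices are joined
// with CONCAT along the pack axis.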
class PackOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
if (tflite_node->inputs->size == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
} else {
const TfLitePackParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
std::vector<const Value*> inputs;
for (uint32_t idx = 0; idx < tflite_node->inputs->size; ++idx) {
Value* value;
const auto status = reader->ReadValue(idx, &value);
if (status.ok()) {
inputs.push_back(value);
} else {
TensorFloat32 tensor;
RETURN_IF_ERROR(reader->ReadTensor(idx, &tensor));
Value* value;
RETURN_IF_ERROR(NewConstNode(std::move(tensor), graph, &value));
inputs.push_back(value);
}
}
const TfLiteTensor* output = reader->GetOutputTensor(0);
ConcatAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*output, tf_options->axis, &attr.axis));
BHWC output_shape;
RETURN_IF_ERROR(ExtractTensorShape(*output, &output_shape));
BHWC input_required_shape = output_shape;
input_required_shape.set(attr.axis, 1);
for (int i = 0; i < inputs.size(); ++i) {
BHWC input_shape = inputs[i]->tensor.shape;
if (input_shape != input_required_shape) {
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
reshape_attr.new_shape = input_required_shape;
node_reshape->operation.attributes = reshape_attr;
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, inputs[i]->id));
Value* copy_value = graph->NewValue();
copy_value->tensor.type = inputs[i]->tensor.type;
copy_value->tensor.shape = input_required_shape;
RETURN_IF_ERROR(graph->SetProducer(node_reshape->id, copy_value->id));
inputs[i] = copy_value;
}
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONCAT);
RETURN_IF_ERROR(reader->AddOutputs(node));
for (const Value* input : inputs) {
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
}
node->operation.attributes = attr;
return absl::OkStatus();
}
}
};
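// PRELU: alpha is read either as a per-channel Linear tensor or as a full HWC
// tensor, and its shape must match the input.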
class PReLUOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::PRELU);
RETURN_IF_ERROR(reader->AddInput(node, 0));
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
PReLUAttributes attr;
Tensor<Linear, DataType::FLOAT32> linear_alpha;
absl::Status status = reader->ReadTensor(1, &linear_alpha);
if (status.ok()) {
if (linear_alpha.shape.v != input_shape.c) {
return absl::InvalidArgumentError(
"Linear alpha shape does not match the number of input channels.");
}
attr.alpha = std::move(linear_alpha);
} else {
Tensor<HWC, DataType::FLOAT32> hwc_alpha;
RETURN_IF_ERROR(reader->ReadTensor(1, &hwc_alpha));
if (hwc_alpha.shape.h != input_shape.h ||
hwc_alpha.shape.w != input_shape.w ||
hwc_alpha.shape.c != input_shape.c) {
return absl::InvalidArgumentError(
"Alpha shape does not match input shape.");
}
attr.alpha = std::move(hwc_alpha);
}
node->operation.attributes = std::move(attr);
return reader->AddOutputs(node);
}
};
class PadOperationParser : public TFLiteOperationParser {
public:
explicit PadOperationParser(bool mirror_pad) : mirror_pad_(mirror_pad) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::PAD);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
PadAttributes attr;
if (mirror_pad_) {
attr.type = PaddingContentType::REFLECT;
} else {
attr.type = PaddingContentType::ZEROS;
}
Tensor<HW, DataType::INT32> paddings;
RETURN_IF_ERROR(reader->ReadTensor(1, &paddings));
if (registration->builtin_code == kTfLiteBuiltinPadv2 &&
tflite_node->inputs->size == 3) {
const TfLiteTensor* const_tensor = reader->GetInputTensor(2);
attr.constant_values = GetTensorData<float>(const_tensor)[0];
}
if (paddings.shape.h == 4 && paddings.shape.w == 2) {
attr.prepended = BHWC(paddings.data[0], paddings.data[2],
paddings.data[4], paddings.data[6]);
attr.appended = BHWC(paddings.data[1], paddings.data[3], paddings.data[5],
paddings.data[7]);
} else if (paddings.shape.h == 3 && paddings.shape.w == 2) {
attr.prepended =
BHWC(1, paddings.data[0], paddings.data[2], paddings.data[4]);
attr.appended =
BHWC(1, paddings.data[1], paddings.data[3], paddings.data[5]);
} else {
return absl::InvalidArgumentError(
"Paddings tensor has unexpected shape.");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
bool mirror_pad_ = false;
};
class Pooling2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
public:
explicit Pooling2DOperationParser(PoolingType type) : type_(type) {}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::POOLING_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutput(node, 0));
Pooling2DAttributes attr;
attr.type = type_;
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
const TfLitePoolParams* tf_options;
if (!RetrieveCustomInitialData(tflite_node, &tf_options).ok()) {
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
}
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
reader->AddOutput(node, 1).IgnoreError();
auto outputs = graph->FindOutputs(node->id);
attr.output_indices = outputs.size() == 2;
if (attr.output_indices) {
outputs[1]->tensor.type = DataType::INT32;
}
RETURN_IF_ERROR(ParsePoolingAttributes(tf_options, input_shape, &attr));
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
const PoolingType type_;
};
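// Parses reduce ops (REDUCE_MAX / REDUCE_MIN / REDUCE_PROD / SUM). When
// keep_dims is false, a trailing RESHAPE node drops the reduced axes.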
class ReduceOperationParser : public TFLiteOperationParser {
public:
explicit ReduceOperationParser(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(operation_type_);
RETURN_IF_ERROR(reader->AddInput(node, 0));
const TfLiteReducerParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
ReduceAttributes attr;
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* axes = reader->GetInputTensor(1);
for (int i = 0; i < NumElements(axes->dims); i++) {
Axis axis;
RETURN_IF_ERROR(ExtractAxisFromIndex(*input, axes->data.i32[i], &axis));
attr.dims.insert(axis);
}
node->operation.attributes = attr;
if (!tf_options->keep_dims) {
const auto& input_tensor = graph->FindInputs(node->id)[0]->tensor;
auto reduce_output_shape = input_tensor.shape;
for (auto axis : attr.dims) {
reduce_output_shape.set(axis, 1);
}
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
const TfLiteTensor* output = reader->GetOutputTensor(0);
RETURN_IF_ERROR(ExtractTensorShape(*output, &reshape_attr.new_shape));
node_reshape->operation.attributes = reshape_attr;
Value* reduce_result = graph->NewValue();
reduce_result->tensor.type = input_tensor.type;
reduce_result->tensor.shape = reduce_output_shape;
RETURN_IF_ERROR(graph->SetProducer(node->id, reduce_result->id));
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, reduce_result->id));
RETURN_IF_ERROR(reader->AddOutputs(node_reshape));
} else {
RETURN_IF_ERROR(reader->AddOutputs(node));
}
return absl::OkStatus();
}
private:
const OperationType operation_type_;
};
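// Maps QUANTIZE onto QUANTIZE_AND_DEQUANTIZE, taking min/max/scale from the
// output tensor's quantization parameters.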
class QuantizeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
auto output_value = graph->FindOutputs(node->id)[0];
if (!output_value->quant_params) {
return absl::InvalidArgumentError(
"Encountered Quantize output with no quant params");
}
QuantizeAndDequantizeAttributes attr;
attr.min = output_value->quant_params.value().min;
attr.max = output_value->quant_params.value().max;
attr.scale = output_value->quant_params.value().scale;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
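// Handles RELU / RELU6 / RELU_N1_TO_1 / LEAKY_RELU. Clip bounds come from
// the constructor; alpha is read from the LeakyRelu builtin params when
// present and defaults to 0 otherwise.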
class ReLUOperationParser : public TFLiteOperationParser {
public:
explicit ReLUOperationParser(int activation_min, int activation_max)
: activation_min_(activation_min), activation_max_(activation_max) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RELU);
RETURN_IF_ERROR(reader->AddInput(node, 0));
ReLUAttributes attr;
const TfLiteLeakyReluParams* tf_options;
auto status = RetrieveBuiltinData(tflite_node, &tf_options);
attr.alpha = status.ok() ? tf_options->alpha : 0;
attr.activation_min = activation_min_;
attr.activation_max = activation_max_;
node->operation.attributes = attr;
return reader->AddOutputs(node);
}
private:
const int activation_min_;
const int activation_max_;
};
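// Parses the custom Resampler op; the output keeps batch/channels from the
// source tensor and takes height/width from the warp tensor.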
class ResamplerOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
node->operation.type = ToString(OperationType::RESAMPLER);
auto src_shape = graph->FindInputs(node->id)[0]->tensor.shape;
auto warp_shape = graph->FindInputs(node->id)[1]->tensor.shape;
auto output_value = graph->FindOutputs(node->id)[0];
output_value->tensor.shape =
BHWC(src_shape.b, warp_shape.h, warp_shape.w, src_shape.c);
return absl::OkStatus();
}
};
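// Parses RESHAPE; the new shape is taken from the already-resolved output
// tensor shape.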
class ReshapeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
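// Parses RESIZE_BILINEAR / RESIZE_NEAREST_NEIGHBOR. For bilinear sampling,
// align_corners and half_pixel_centers must not both be set.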
class Resize2DOperationParser : public TFLiteOperationParser {
public:
explicit Resize2DOperationParser(SamplingType sampling_type)
: sampling_type_(sampling_type) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESIZE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
Resize2DAttributes attr;
RETURN_IF_ERROR(GetAlignCornersValue(tflite_node, &attr.align_corners));
RETURN_IF_ERROR(
GetHalfPixelCentersValue(tflite_node, &attr.half_pixel_centers));
attr.type = sampling_type_;
attr.new_shape.CopyAllDefinedAxis(
graph->FindOutputs(node->id)[0]->tensor.shape);
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status GetAlignCornersValue(const TfLiteNode* tflite_node,
bool* align_corners) {
switch (sampling_type_) {
case SamplingType::BILINEAR:
return GetAlignCornersValueForType<TfLiteResizeBilinearParams>(
tflite_node, align_corners);
case SamplingType::NEAREST:
return GetAlignCornersValueForType<TfLiteResizeNearestNeighborParams>(
tflite_node, align_corners);
case SamplingType::UNKNOWN:
return absl::InternalError("Sampling type is not specified");
}
return absl::OkStatus();
}
template <class T>
absl::Status GetAlignCornersValueForType(const TfLiteNode* tflite_node,
bool* align_corners) {
const T* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
*align_corners = tf_options->align_corners;
return absl::OkStatus();
}
absl::Status GetHalfPixelCentersValue(const TfLiteNode* tflite_node,
bool* half_pixel_centers) {
if (sampling_type_ == SamplingType::BILINEAR) {
const TfLiteResizeBilinearParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (tf_options->align_corners && tf_options->half_pixel_centers) {
return absl::InternalError(
"If half_pixel_centers is True, align_corners must be False.");
}
*half_pixel_centers = tf_options->half_pixel_centers;
} else {
const TfLiteResizeNearestNeighborParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
*half_pixel_centers = tf_options->half_pixel_centers;
}
return absl::OkStatus();
}
SamplingType sampling_type_ = SamplingType::UNKNOWN;
};
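// Parses SELECT / SELECT_V2. Constant true/false branches are materialized
// as const nodes; scalar operands are flagged for broadcasting.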
class SelectV2OperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
SelectV2Attributes attr;
attr.scalar_cond = NumElements(reader->GetInputTensor(0)) < 2;
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SELECT_V2);
RETURN_IF_ERROR(reader->AddInput(node, 0));
{
const TfLiteTensor* tfl_tensor = reader->GetInputTensor(1);
attr.broadcast_true = NumElements(tfl_tensor) < 2;
if (IsConstantTensor(tfl_tensor)) {
Tensor<BHWC, DataType::FLOAT32> tensor;
if (attr.broadcast_true) {
Tensor<Scalar, DataType::FLOAT32> temp;
RETURN_IF_ERROR(reader->ReadTensor(1, &temp));
tensor.shape = BHWC(1, 1, 1, 1);
tensor.data.push_back(temp.data[0]);
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &tensor));
}
Value* value;
RETURN_IF_ERROR(NewConstNode(tensor, graph, &value));
RETURN_IF_ERROR(graph->AddConsumer(node->id, value->id));
} else {
RETURN_IF_ERROR(reader->AddInput(node, 1));
}
}
{
const TfLiteTensor* tfl_tensor = reader->GetInputTensor(2);
attr.broadcast_false = NumElements(tfl_tensor) < 2;
if (IsConstantTensor(tfl_tensor)) {
Tensor<BHWC, DataType::FLOAT32> tensor;
if (attr.broadcast_false) {
Tensor<Scalar, DataType::FLOAT32> temp;
RETURN_IF_ERROR(reader->ReadTensor(2, &temp));
tensor.shape = BHWC(1, 1, 1, 1);
tensor.data.push_back(temp.data[0]);
} else if (absl::IsInvalidArgument(reader->ReadTensor(2, &tensor))) {
Tensor<HWC, DataType::FLOAT32> temp;
RETURN_IF_ERROR(reader->ReadTensor(2, &temp));
tensor.shape = BHWC(1, temp.shape.h, temp.shape.w, temp.shape.c);
tensor.id = temp.id;
tensor.data.reserve(temp.data.size());
for (float data : temp.data) tensor.data.push_back(data);
}
Value* value;
RETURN_IF_ERROR(NewConstNode(tensor, graph, &value));
RETURN_IF_ERROR(graph->AddConsumer(node->id, value->id));
} else {
RETURN_IF_ERROR(reader->AddInput(node, 2));
}
}
RETURN_IF_ERROR(reader->AddOutputs(node));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
};
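// Parses SLICE for 3-D and 4-D tensors: starts/sizes are converted into
// BHWC starts/ends, -1 sizes are resolved against the input shape, and the
// result is validated against the output shape.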
class SliceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SLICE);
RETURN_IF_ERROR(reader->AddOutputs(node));
Value* input;
RETURN_IF_ERROR(reader->ReadValue(0, &input));
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
const TfLiteTensor* tfl_input = reader->GetInputTensor(0);
const int input_dims = tfl_input->dims->size;
SliceAttributes attr;
attr.strides = BHWC(1, 1, 1, 1);
Tensor<Linear, DataType::INT32> starts, sizes;
RETURN_IF_ERROR(reader->ReadTensor(1, &starts));
RETURN_IF_ERROR(reader->ReadTensor(2, &sizes));
if (starts.data.size() != sizes.data.size()) {
return absl::InvalidArgumentError("Starts amount != sizes amount.");
}
BHWC bhwc_starts(0, 0, 0, 0);
BHWC bhwc_sizes = input->tensor.shape;
if (input_dims == 4) {
if (starts.data.size() == 4) {
bhwc_starts.b = starts.data[0];
bhwc_starts.h = starts.data[1];
bhwc_starts.w = starts.data[2];
bhwc_starts.c = starts.data[3];
bhwc_sizes.b = sizes.data[0];
bhwc_sizes.h = sizes.data[1];
bhwc_sizes.w = sizes.data[2];
bhwc_sizes.c = sizes.data[3];
} else if (starts.data.size() == 3) {
bhwc_starts.h = starts.data[0];
bhwc_starts.w = starts.data[1];
bhwc_starts.c = starts.data[2];
bhwc_sizes.h = sizes.data[0];
bhwc_sizes.w = sizes.data[1];
bhwc_sizes.c = sizes.data[2];
} else {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
} else if (input_dims == 3) {
if (starts.data.size() == 3) {
bhwc_starts.b = starts.data[0];
bhwc_starts.w = starts.data[1];
bhwc_starts.c = starts.data[2];
bhwc_sizes.b = sizes.data[0];
bhwc_sizes.w = sizes.data[1];
bhwc_sizes.c = sizes.data[2];
} else {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
} else {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
const auto& in_shape = input->tensor.shape;
if (bhwc_sizes.b == -1) {
bhwc_sizes.b = in_shape.b - bhwc_starts.b;
}
if (bhwc_sizes.h == -1) {
bhwc_sizes.h = in_shape.h - bhwc_starts.h;
}
if (bhwc_sizes.w == -1) {
bhwc_sizes.w = in_shape.w - bhwc_starts.w;
}
if (bhwc_sizes.c == -1) {
bhwc_sizes.c = in_shape.c - bhwc_starts.c;
}
attr.starts = bhwc_starts;
attr.ends =
BHWC(bhwc_starts.b + bhwc_sizes.b, bhwc_starts.h + bhwc_sizes.h,
bhwc_starts.w + bhwc_sizes.w, bhwc_starts.c + bhwc_sizes.c);
RETURN_IF_ERROR(UpdateIfNegative(in_shape, &attr));
auto out_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
if ((attr.ends.b - attr.starts.b) != out_shape.b) {
      return absl::UnimplementedError("Output batch doesn't match");
}
if ((attr.ends.h - attr.starts.h) != out_shape.h) {
return absl::UnimplementedError("Output height doesn't match");
}
if ((attr.ends.w - attr.starts.w) != out_shape.w) {
return absl::UnimplementedError("Output width doesn't match");
}
if ((attr.ends.c - attr.starts.c) != out_shape.c) {
return absl::UnimplementedError("Output channels don't match");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status UpdateIfNegative(const BHWC& input_shape,
SliceAttributes* attr) {
if (attr->ends.h < 0) {
attr->ends.h = input_shape.h + attr->ends.h;
}
if (attr->ends.w < 0) {
attr->ends.w = input_shape.w + attr->ends.w;
}
if (attr->ends.c < 0) {
attr->ends.c = input_shape.c + attr->ends.c;
}
if (attr->ends.b < 0) {
attr->ends.b = input_shape.b + attr->ends.b;
}
return absl::OkStatus();
}
};
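// Parses SOFTMAX over the channels axis; only beta == 1 is supported.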
class SoftmaxOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SOFTMAX);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteSoftmaxParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (tf_options->beta != 1) {
return absl::UnimplementedError("Softmax.beta != 1 is not supported.");
}
SoftmaxAttributes attr;
attr.axis = Axis::CHANNELS;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
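// Parses SPACE_TO_DEPTH with the block size from the builtin options.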
class SpaceToDepthOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPACE_TO_DEPTH);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteSpaceToDepthParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
SpaceToDepthAttributes attr;
attr.block_size = tf_options->block_size;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
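// Parses SPLIT (axis in input 0, data in input 1). A single split
// degenerates to a RESHAPE.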
class SplitOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteSplitParams* split_params;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &split_params));
if (split_params->num_splits == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
const TfLiteTensor* input = reader->GetInputTensor(1);
const TfLiteTensor* axis_tensor = reader->GetInputTensor(0);
SplitAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input, axis_tensor->data.i32[0], &attr.axis));
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPLIT);
node->operation.attributes = attr;
RETURN_IF_ERROR(reader->AddInput(node, 1));
for (int i = 0; i < tflite_node->outputs->size; ++i) {
RETURN_IF_ERROR(reader->AddOutput(node, i));
}
return absl::OkStatus();
}
};
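// Parses SPLIT_V (data in input 0, axis in input 2). A single split
// degenerates to a RESHAPE.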
class SplitVOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteSplitVParams* split_params;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &split_params));
if (split_params->num_splits == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* axis_tensor = reader->GetInputTensor(2);
SplitAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input, axis_tensor->data.i32[0], &attr.axis));
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPLIT);
node->operation.attributes = attr;
RETURN_IF_ERROR(reader->AddInput(node, 0));
for (int i = 0; i < tflite_node->outputs->size; ++i) {
RETURN_IF_ERROR(reader->AddOutput(node, i));
}
return absl::OkStatus();
}
};
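// Parses STRIDED_SLICE for 3-D and 4-D tensors. Begin/end masks are applied;
// ellipsis/new_axis/shrink_axis masks, zero strides and reverse (negative)
// strides are rejected, and the result is checked against the output shape.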
class StridedSliceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 4));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SLICE);
RETURN_IF_ERROR(reader->AddOutputs(node));
Value* input;
RETURN_IF_ERROR(reader->ReadValue(0, &input));
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
Tensor<Linear, DataType::INT32> tmp;
RETURN_IF_ERROR(reader->ReadTensor(1, &tmp));
bool read_without_batch = tmp.data.size() == 3;
bool read_with_batch = tmp.data.size() == 4;
if (!read_without_batch && !read_with_batch) {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
const TfLiteStridedSliceParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(CheckOptionsSupport(tf_options));
auto out_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
SliceAttributes attr;
if (read_without_batch) {
RETURN_IF_ERROR(ReadAttribsWithoutBatch(reader, tf_options,
input->tensor.shape, &attr));
}
if (read_with_batch) {
RETURN_IF_ERROR(
ReadAttribsWithBatch(reader, tf_options, input->tensor.shape, &attr));
}
if (attr.strides.b == 0 || attr.strides.h == 0 || attr.strides.w == 0 ||
attr.strides.c == 0) {
return absl::InvalidArgumentError("stride values must be non-zero");
}
if (attr.strides.b < 0 || attr.strides.h < 0 || attr.strides.w < 0 ||
attr.strides.c < 0) {
return absl::UnimplementedError("Reverse slices are not supported.");
}
if ((attr.ends.b - attr.starts.b + attr.strides.b - 1) / attr.strides.b !=
out_shape.b) {
      return absl::UnimplementedError("Output batch doesn't match");
}
if ((attr.ends.h - attr.starts.h + attr.strides.h - 1) / attr.strides.h !=
out_shape.h) {
return absl::UnimplementedError("Output height doesn't match");
}
if ((attr.ends.w - attr.starts.w + attr.strides.w - 1) / attr.strides.w !=
out_shape.w) {
return absl::UnimplementedError("Output width doesn't match");
}
if ((attr.ends.c - attr.starts.c + attr.strides.c - 1) / attr.strides.c !=
out_shape.c) {
return absl::UnimplementedError("Output channels don't match");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status UpdateWithMask(const TfLiteStridedSliceParams* tf_options,
const BHWC& input_shape, int ignore_b,
int ignore_h, int ignore_w, int ignore_c,
SliceAttributes* attr) {
if (tf_options->begin_mask & ignore_h) {
attr->starts.h = 0;
}
if (tf_options->begin_mask & ignore_w) {
attr->starts.w = 0;
}
if (tf_options->begin_mask & ignore_c) {
attr->starts.c = 0;
}
if (tf_options->begin_mask & ignore_b) {
attr->starts.b = 0;
}
if (tf_options->end_mask & ignore_h) {
attr->ends.h = input_shape.h;
}
if (tf_options->end_mask & ignore_w) {
attr->ends.w = input_shape.w;
}
if (tf_options->end_mask & ignore_c) {
attr->ends.c = input_shape.c;
}
if (tf_options->end_mask & ignore_b) {
attr->ends.b = input_shape.b;
}
return absl::OkStatus();
}
absl::Status UpdateIfNegative(const BHWC& input_shape,
SliceAttributes* attr) {
if (attr->ends.h < 0) {
attr->ends.h = input_shape.h + attr->ends.h;
}
if (attr->ends.w < 0) {
attr->ends.w = input_shape.w + attr->ends.w;
}
if (attr->ends.c < 0) {
attr->ends.c = input_shape.c + attr->ends.c;
}
if (attr->ends.b < 0) {
attr->ends.b = input_shape.b + attr->ends.b;
}
if (attr->starts.h < 0) {
attr->starts.h = input_shape.h + attr->starts.h;
}
if (attr->starts.w < 0) {
attr->starts.w = input_shape.w + attr->starts.w;
}
if (attr->starts.c < 0) {
attr->starts.c = input_shape.c + attr->starts.c;
}
if (attr->starts.b < 0) {
attr->starts.b = input_shape.b + attr->starts.b;
}
return absl::OkStatus();
}
absl::Status ReadAttribsWithBatch(const ObjectReader* reader,
const TfLiteStridedSliceParams* tf_options,
const BHWC& input_shape,
SliceAttributes* attr) {
auto read_bhwc = [&](int tensor_index, BHWC* bhwc) -> absl::Status {
Tensor<Linear, DataType::INT32> t;
RETURN_IF_ERROR(reader->ReadTensor(tensor_index, &t));
*bhwc = BHWC(t.data[0], t.data[1], t.data[2], t.data[3]);
return absl::OkStatus();
};
RETURN_IF_ERROR(read_bhwc(1, &attr->starts));
RETURN_IF_ERROR(read_bhwc(2, &attr->ends));
RETURN_IF_ERROR(read_bhwc(3, &attr->strides));
RETURN_IF_ERROR(UpdateIfNegative(input_shape, attr));
RETURN_IF_ERROR(UpdateWithMask(tf_options, input_shape, 1, 2, 4, 8, attr));
return absl::OkStatus();
}
absl::Status ReadAttribsWithoutBatch(
const ObjectReader* reader, const TfLiteStridedSliceParams* tf_options,
const BHWC& input_shape, SliceAttributes* attr) {
auto read_hwc = [&](int tensor_index, BHWC* bhwc) -> absl::Status {
Tensor<Linear, DataType::INT32> t;
RETURN_IF_ERROR(reader->ReadTensor(tensor_index, &t));
*bhwc = BHWC(0, t.data[0], t.data[1], t.data[2]);
return absl::OkStatus();
};
RETURN_IF_ERROR(read_hwc(1, &attr->starts));
RETURN_IF_ERROR(read_hwc(2, &attr->ends));
RETURN_IF_ERROR(read_hwc(3, &attr->strides));
RETURN_IF_ERROR(UpdateIfNegative(input_shape, attr));
RETURN_IF_ERROR(UpdateWithMask(tf_options, input_shape, 0, 1, 2, 4, attr));
attr->starts.b = 0;
attr->ends.b = input_shape.b;
attr->strides.b = 1;
return absl::OkStatus();
}
absl::Status CheckOptionsSupport(const TfLiteStridedSliceParams* tf_options) {
if (tf_options->ellipsis_mask) {
return absl::UnimplementedError("Slice does not support ellipsis_mask.");
}
if (tf_options->new_axis_mask) {
return absl::UnimplementedError("Slice does not support new_axis_mask.");
}
if (tf_options->shrink_axis_mask) {
return absl::UnimplementedError(
"Slice does not support shrink_axis_mask parameter. ");
}
return absl::OkStatus();
}
};
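// Parses TILE into a TILE node; no extra attributes are read here.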
class TileOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::TILE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
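// Parses the TRANSPOSE_CONV builtin: data comes from tensor 2, weights from
// tensor 1 (runtime or constant) and an optional bias from tensor 3.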
class TransposeConvBuiltinOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
Value* input;
RETURN_IF_ERROR(reader->ReadValue(2, &input));
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
ConvolutionTransposedAttributes attr;
attr.stride = tf_options
? HW(tf_options->stride_height, tf_options->stride_width)
: HW(1, 1);
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
RETURN_IF_ERROR(reader->AddInput(node, 1));
auto weights_shape = graph->FindInputs(node->id)[1]->tensor.shape;
attr.weights.shape = OHWI(weights_shape.b, weights_shape.h,
weights_shape.w, weights_shape.c);
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
}
reader->ReadTensor(3, &attr.bias).IgnoreError();
UpdatePadding(tf_options->padding,
graph->FindInputs(node->id)[0]->tensor.shape, &attr);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
};
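// Parses the "Convolution2DTransposeBias" custom op: stride/padding come
// from the custom init data, weights from tensor 1 and the bias from tensor 2.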
class TransposeConvCustomOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteTransposeConvParams* tf_options;
auto status = RetrieveCustomInitialData(tflite_node, &tf_options);
ConvolutionTransposedAttributes attr;
attr.stride = status.ok()
? HW(tf_options->stride_height, tf_options->stride_width)
: HW(1, 1);
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
reader->ReadTensor(2, &attr.bias).IgnoreError();
UpdatePadding(status.ok() ? tf_options->padding : kTfLitePaddingUnknown,
graph->FindInputs(node->id)[0]->tensor.shape, &attr);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
};
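// Parses TRANSPOSE, expanding 2-D and 3-D permutations to a full BHWC
// permutation.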
class TransposeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 4));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::TRANSPOSE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
TransposeAttributes attr;
Tensor<Linear, DataType::INT32> perm;
RETURN_IF_ERROR(reader->ReadTensor(1, &perm));
std::map<Axis, int> axis_to_index = {{Axis::BATCH, 0},
{Axis::HEIGHT, 1},
{Axis::WIDTH, 2},
{Axis::CHANNELS, 3}};
if (perm.data.size() == 4) {
attr.perm = BHWC(perm.data[0], perm.data[1], perm.data[2], perm.data[3]);
} else if (perm.data.size() == 3) {
std::vector<Axis> index_to_axis = {Axis::BATCH, Axis::WIDTH,
Axis::CHANNELS};
attr.perm.b = axis_to_index[index_to_axis[perm.data[0]]];
attr.perm.h = 1;
attr.perm.w = axis_to_index[index_to_axis[perm.data[1]]];
attr.perm.c = axis_to_index[index_to_axis[perm.data[2]]];
} else if (perm.data.size() == 2) {
std::vector<Axis> index_to_axis = {Axis::BATCH, Axis::CHANNELS};
attr.perm.b = axis_to_index[index_to_axis[perm.data[0]]];
attr.perm.h = 1;
attr.perm.w = 2;
attr.perm.c = axis_to_index[index_to_axis[perm.data[1]]];
} else {
return absl::InvalidArgumentError(
"Permutation for transpose is invalid.");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
};
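// Parses UNPACK as a SPLIT along the unpacked axis; a per-output RESHAPE is
// added whenever the requested output shape drops the split dimension.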
class UnpackOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteUnpackParams* unpack_params;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &unpack_params));
if (unpack_params->num == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
const TfLiteTensor* input = reader->GetInputTensor(0);
BHWC input_shape;
RETURN_IF_ERROR(ExtractTensorShape(*input, &input_shape));
SplitAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input, unpack_params->axis, &attr.axis));
BHWC output_required_shape = input_shape;
output_required_shape.set(attr.axis, 1);
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPLIT);
node->operation.attributes = attr;
RETURN_IF_ERROR(reader->AddInput(node, 0));
auto input_value = graph->FindInputs(node->id)[0];
for (int i = 0; i < tflite_node->outputs->size; ++i) {
const TfLiteTensor* output = reader->GetOutputTensor(i);
BHWC output_shape;
RETURN_IF_ERROR(ExtractTensorShape(*output, &output_shape));
if (output_shape != output_required_shape) {
Value* copy_value = graph->NewValue();
copy_value->tensor.type = input_value->tensor.type;
copy_value->tensor.shape = output_required_shape;
RETURN_IF_ERROR(graph->SetProducer(node->id, copy_value->id));
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
reshape_attr.new_shape = output_shape;
node_reshape->operation.attributes = reshape_attr;
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, copy_value->id));
RETURN_IF_ERROR(reader->AddOutput(node_reshape, i));
} else {
RETURN_IF_ERROR(reader->AddOutput(node, i));
}
}
return absl::OkStatus();
}
};
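// Parses the "MaxUnpooling2D" custom op into MAX_UNPOOLING_2D, with pooling
// params taken from the custom init data.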
class Unpooling2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::MAX_UNPOOLING_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
MaxUnpooling2DAttributes attr;
const TfLitePoolParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(tflite_node, &tf_options));
attr.kernel = ToHW(tf_options->filter_height, tf_options->filter_width);
attr.strides = ToHW(tf_options->stride_height, tf_options->stride_width);
UpdatePadding(tf_options->padding, input_shape, &attr);
node->operation.attributes = attr;
auto output_value = graph->FindOutputs(node->id)[0];
output_value->tensor.shape = CalculateOutputShape(input_shape, attr);
return absl::OkStatus();
}
};
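// Parses BATCH_TO_SPACE_ND with a 2-element block shape and a 2x2 crop
// tensor.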
class BatchToSpaceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::BATCH_TO_SPACE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
BatchToSpaceAttributes bs_attr;
Tensor<Linear, DataType::INT32> block;
RETURN_IF_ERROR(reader->ReadTensor(1, &block));
if (block.shape.v != 2) {
return absl::InternalError("Space has to be HxW.");
}
bs_attr.block.h = block.data[0];
bs_attr.block.w = block.data[1];
Tensor<HW, DataType::INT32> crop;
RETURN_IF_ERROR(reader->ReadTensor(2, &crop));
auto crop_shape = crop.shape;
    if (crop_shape.h != 2 || crop_shape.w != 2) {
      return absl::InternalError("Crop has to be 2x2.");
    }
bs_attr.crop.prepended.h = crop.data[0];
bs_attr.crop.prepended.w = crop.data[2];
bs_attr.crop.appended.h = crop.data[1];
bs_attr.crop.appended.w = crop.data[3];
node->operation.attributes = std::move(bs_attr);
return absl::OkStatus();
}
};
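// Parses SPACE_TO_BATCH_ND with a 2-element block shape and a 2x2 padding
// tensor.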
class SpaceToBatchOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPACE_TO_BATCH);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
SpaceToBatchAttributes sb_attr;
Tensor<Linear, DataType::INT32> block;
RETURN_IF_ERROR(reader->ReadTensor(1, &block));
if (block.shape.v != 2) {
return absl::InternalError("Space has to be HxW.");
}
sb_attr.block.h = block.data[0];
sb_attr.block.w = block.data[1];
Tensor<HW, DataType::INT32> padding;
RETURN_IF_ERROR(reader->ReadTensor(2, &padding));
auto padding_shape = padding.shape;
    if (padding_shape.h != 2 || padding_shape.w != 2) {
      return absl::InternalError("Padding has to be 2x2.");
    }
sb_attr.padding.prepended.h = padding.data[0];
sb_attr.padding.prepended.w = padding.data[2];
sb_attr.padding.appended.h = padding.data[1];
sb_attr.padding.appended.w = padding.data[3];
node->operation.attributes = std::move(sb_attr);
return absl::OkStatus();
}
};
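// Parses MEAN, collecting the reduction axes from the second input tensor.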
class MeanOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::MEAN);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
MeanAttributes attr;
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* axes = reader->GetInputTensor(1);
for (int i = 0; i < NumElements(axes->dims); i++) {
Axis axis;
RETURN_IF_ERROR(ExtractAxisFromIndex(*input, axes->data.i32[i], &axis));
attr.dims.insert(axis);
}
node->operation.attributes = attr;
return absl::OkStatus();
}
};
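// Fallback parser that rejects every node; used for excluded or unrecognized
// builtin codes.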
class UnsupportedOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::UnimplementedError("Operation is not supported.");
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
return absl::UnimplementedError("Operation is not supported.");
}
};
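// Convenience wrapper: builds the parser for this registration and asks it
// whether the node is supported.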
absl::Status IsSupported(
const TfLiteContext* context, TfLiteNode* node,
const TfLiteRegistration* registration, bool allow_quant_ops = false,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops = nullptr) {
return NewOperationParser(registration, allow_quant_ops, excluded_ops)
->IsSupported(context, node, registration);
}
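// Returns true if every listed tensor has rank < 5 and every runtime
// (arena-allocated) tensor uses one of the allowed types. Optional tensors
// are skipped.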
bool IsAllAllowedTensors(TfLiteContext* context,
const TfLiteIntArray* tensor_indices,
const std::vector<TfLiteType>& allowed_types) {
for (int i = 0; i < tensor_indices->size; ++i) {
int tensor_idx = tensor_indices->data[i];
if (tensor_idx == kTfLiteOptionalTensor) continue;
const TfLiteTensor* t = &context->tensors[tensor_idx];
if (t->dims && t->dims->size >= 5) {
return false;
}
bool type_supported = false;
for (auto allowed_type : allowed_types) {
if (t->type == allowed_type) {
type_supported = true;
break;
}
}
if (t->allocation_type == kTfLiteArenaRw && !type_supported) {
return false;
}
}
return true;
}
}  // namespace
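// Factory that maps a TFLite builtin (or custom) op code to the matching
// parser; excluded or unknown ops fall back to UnsupportedOperationParser.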
std::unique_ptr<TFLiteOperationParser> NewOperationParser(
const TfLiteRegistration* registration, bool allow_quant_ops,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops) {
const auto builtin_code = registration->builtin_code;
if (excluded_ops != nullptr &&
excluded_ops->contains(
static_cast<TfLiteBuiltinOperator>(builtin_code))) {
return std::make_unique<UnsupportedOperationParser>();
}
switch (builtin_code) {
case kTfLiteBuiltinAbs:
return std::make_unique<ElementwiseOperationParser>(OperationType::ABS);
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinAddN:
return std::make_unique<ElementwiseOperationParser>(OperationType::ADD);
case kTfLiteBuiltinAveragePool2d:
return std::make_unique<Pooling2DOperationParser>(PoolingType::AVERAGE);
case kTfLiteBuiltinBatchMatmul:
return std::make_unique<BatchedMatMulOperationParser>();
case kTfLiteBuiltinCast:
return std::make_unique<CastOperationParser>();
case kTfLiteBuiltinConcatenation:
return std::make_unique<ConcatenationOperationParser>();
case kTfLiteBuiltinConv2d:
return std::make_unique<Conv2DOperationParser>();
case kTfLiteBuiltinCos:
return std::make_unique<ElementwiseOperationParser>(OperationType::COS);
case kTfLiteBuiltinCumsum:
return std::make_unique<CumsumOperationParser>();
case kTfLiteBuiltinDensify:
return std::make_unique<DensifyOperationParser>();
case kTfLiteBuiltinDepthwiseConv2d:
return std::make_unique<DepthwiseConvolutionOperationParser>();
case kTfLiteBuiltinDepthToSpace:
return std::make_unique<DepthToSpaceOperationParser>();
case kTfLiteBuiltinDequantize:
if (allow_quant_ops) {
return std::make_unique<DequantizeOperationParser>();
}
break;
case kTfLiteBuiltinDiv:
return std::make_unique<ElementwiseOperationParser>(OperationType::DIV);
case kTfLiteBuiltinEqual:
return std::make_unique<ElementwiseOperationParser>(OperationType::EQUAL);
case kTfLiteBuiltinElu:
return std::make_unique<ElementwiseOperationParser>(OperationType::ELU);
case kTfLiteBuiltinExp:
return std::make_unique<ElementwiseOperationParser>(OperationType::EXP);
case kTfLiteBuiltinFloor:
return std::make_unique<ElementwiseOperationParser>(OperationType::FLOOR);
case kTfLiteBuiltinFloorDiv:
return std::make_unique<ElementwiseOperationParser>(
OperationType::FLOOR_DIV);
case kTfLiteBuiltinFloorMod:
return std::make_unique<ElementwiseOperationParser>(
OperationType::FLOOR_MOD);
case kTfLiteBuiltinFullyConnected:
return std::make_unique<FullyConnectedOperationParser>();
case kTfLiteBuiltinGather:
return std::make_unique<GatherOperationParser>();
case kTfLiteBuiltinGelu:
return std::make_unique<ElementwiseOperationParser>(OperationType::GELU);
case kTfLiteBuiltinGreater:
return std::make_unique<ElementwiseOperationParser>(
OperationType::GREATER);
case kTfLiteBuiltinGreaterEqual:
return std::make_unique<ElementwiseOperationParser>(
OperationType::GREATER_EQUAL);
case kTfLiteBuiltinHardSwish:
return std::make_unique<HardSwishOperationParser>();
case kTfLiteBuiltinLess:
return std::make_unique<ElementwiseOperationParser>(OperationType::LESS);
case kTfLiteBuiltinLessEqual:
return std::make_unique<ElementwiseOperationParser>(
OperationType::LESS_EQUAL);
case kTfLiteBuiltinLogistic:
return std::make_unique<ElementwiseOperationParser>(
OperationType::SIGMOID);
case kTfLiteBuiltinLog:
return std::make_unique<ElementwiseOperationParser>(OperationType::LOG);
case kTfLiteBuiltinLogicalAnd:
return std::make_unique<ElementwiseOperationParser>(
OperationType::LOGICAL_AND);
case kTfLiteBuiltinLstm:
return std::make_unique<LSTMOperationParser>();
case kTfLiteBuiltinMaximum:
return std::make_unique<ElementwiseOperationParser>(
OperationType::MAXIMUM);
case kTfLiteBuiltinMaxPool2d:
return std::make_unique<Pooling2DOperationParser>(PoolingType::MAX);
case kTfLiteBuiltinMean:
return std::make_unique<MeanOperationParser>();
case kTfLiteBuiltinMinimum:
return std::make_unique<ElementwiseOperationParser>(
OperationType::MINIMUM);
case kTfLiteBuiltinMirrorPad:
return std::make_unique<PadOperationParser>(true);
case kTfLiteBuiltinMul:
return std::make_unique<ElementwiseOperationParser>(OperationType::MUL);
case kTfLiteBuiltinNeg:
return std::make_unique<ElementwiseOperationParser>(OperationType::NEG);
case kTfLiteBuiltinNotEqual:
return std::make_unique<ElementwiseOperationParser>(
OperationType::NOT_EQUAL);
case kTfLiteBuiltinOneHot:
return std::make_unique<OneHotOperationParser>();
case kTfLiteBuiltinPack:
return std::make_unique<PackOperationParser>();
case kTfLiteBuiltinPad:
return std::make_unique<PadOperationParser>(false);
case kTfLiteBuiltinPadv2:
return std::make_unique<PadOperationParser>(false);
case kTfLiteBuiltinPow:
return std::make_unique<ElementwiseOperationParser>(OperationType::POW);
case kTfLiteBuiltinReduceMax:
return std::make_unique<ReduceOperationParser>(
OperationType::REDUCE_MAXIMUM);
case kTfLiteBuiltinReduceMin:
return std::make_unique<ReduceOperationParser>(
OperationType::REDUCE_MINIMUM);
case kTfLiteBuiltinReduceProd:
return std::make_unique<ReduceOperationParser>(
OperationType::REDUCE_PRODUCT);
case kTfLiteBuiltinQuantize:
if (allow_quant_ops) {
return std::make_unique<QuantizeOperationParser>();
}
break;
case kTfLiteBuiltinRelu:
return std::make_unique<ReLUOperationParser>(0, 0);
case kTfLiteBuiltinRelu6:
return std::make_unique<ReLUOperationParser>(0, 6);
case kTfLiteBuiltinReluN1To1:
return std::make_unique<ReLUOperationParser>(-1.0, 1.0);
case kTfLiteBuiltinLeakyRelu:
return std::make_unique<ReLUOperationParser>(0, 0);
case kTfLiteBuiltinPrelu:
return std::make_unique<PReLUOperationParser>();
case kTfLiteBuiltinReshape:
return std::make_unique<ReshapeOperationParser>();
case kTfLiteBuiltinResizeBilinear:
return std::make_unique<Resize2DOperationParser>(SamplingType::BILINEAR);
case kTfLiteBuiltinResizeNearestNeighbor:
return std::make_unique<Resize2DOperationParser>(SamplingType::NEAREST);
case kTfLiteBuiltinRsqrt:
return std::make_unique<ElementwiseOperationParser>(OperationType::RSQRT);
case kTfLiteBuiltinSelect:
case kTfLiteBuiltinSelectV2:
return std::make_unique<SelectV2OperationParser>();
case kTfLiteBuiltinSign:
return std::make_unique<ElementwiseOperationParser>(OperationType::SIGN);
case kTfLiteBuiltinSin:
return std::make_unique<ElementwiseOperationParser>(OperationType::SIN);
case kTfLiteBuiltinSlice:
return std::make_unique<SliceOperationParser>();
case kTfLiteBuiltinSoftmax:
return std::make_unique<SoftmaxOperationParser>();
case kTfLiteBuiltinSpaceToDepth:
return std::make_unique<SpaceToDepthOperationParser>();
case kTfLiteBuiltinSplit:
return std::make_unique<SplitOperationParser>();
case kTfLiteBuiltinSplitV:
return std::make_unique<SplitVOperationParser>();
case kTfLiteBuiltinSqrt:
return std::make_unique<ElementwiseOperationParser>(OperationType::SQRT);
case kTfLiteBuiltinSquare:
return std::make_unique<ElementwiseOperationParser>(
OperationType::SQUARE);
case kTfLiteBuiltinSquaredDifference:
return std::make_unique<ElementwiseOperationParser>(
OperationType::SQUARED_DIFF);
case kTfLiteBuiltinStridedSlice:
return std::make_unique<StridedSliceOperationParser>();
case kTfLiteBuiltinSub:
return std::make_unique<ElementwiseOperationParser>(OperationType::SUB);
case kTfLiteBuiltinSum:
return std::make_unique<ReduceOperationParser>(OperationType::REDUCE_SUM);
case kTfLiteBuiltinTanh:
return std::make_unique<ElementwiseOperationParser>(OperationType::TANH);
case kTfLiteBuiltinTile:
return std::make_unique<TileOperationParser>();
case kTfLiteBuiltinTranspose:
return std::make_unique<TransposeOperationParser>();
case kTfLiteBuiltinTransposeConv:
return std::make_unique<TransposeConvBuiltinOperationParser>();
case kTfLiteBuiltinUnpack:
return std::make_unique<UnpackOperationParser>();
case kTfLiteBuiltinCustom: {
const absl::string_view custom_name = registration->custom_name;
if (custom_name == "Convolution2DTransposeBias") {
return std::make_unique<TransposeConvCustomOperationParser>();
}
if (custom_name == "MaxPoolingWithArgmax2D") {
return std::make_unique<Pooling2DOperationParser>(PoolingType::MAX);
}
if (custom_name == "MaxUnpooling2D") {
return std::make_unique<Unpooling2DOperationParser>();
}
if (custom_name == "Resampler") {
return std::make_unique<ResamplerOperationParser>();
}
return NewCustomOperationParser(registration->custom_name);
}
}
return std::make_unique<UnsupportedOperationParser>();
}
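// Selects the nodes the GPU delegate should take over: partitions the graph
// with a per-node support check (op support plus tensor type/rank limits),
// keeps the N largest delegatable partitions, and logs what stays on the CPU.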
TfLiteIntArray* GetOpsToReplace(
TfLiteContext* context, bool allow_quant_ops, int max_delegated_partitions,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops,
int start_node_index, int end_node_index) {
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
const auto status =
IsSupported(context, node, registration, allow_quant_ops, excluded_ops);
if (!status.ok()) {
if (unsupported_details) {
*unsupported_details = std::string(status.message());
}
return false;
}
std::vector<TfLiteType> allowed_in_types = {kTfLiteFloat32, kTfLiteFloat16};
std::vector<TfLiteType> allowed_out_types = {kTfLiteFloat32,
kTfLiteFloat16};
if (allow_quant_ops) {
allowed_in_types.push_back(kTfLiteInt8);
allowed_in_types.push_back(kTfLiteUInt8);
allowed_out_types.push_back(kTfLiteInt8);
allowed_out_types.push_back(kTfLiteUInt8);
}
if (IsLogicalCode(registration->builtin_code)) {
allowed_out_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinCast) {
allowed_in_types.push_back(kTfLiteBool);
allowed_in_types.push_back(kTfLiteFloat32);
allowed_in_types.push_back(kTfLiteInt32);
allowed_out_types.push_back(kTfLiteFloat32);
allowed_out_types.push_back(kTfLiteInt32);
allowed_out_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinOneHot) {
allowed_in_types.push_back(kTfLiteInt32);
}
if (registration->builtin_code == kTfLiteBuiltinSelect ||
registration->builtin_code == kTfLiteBuiltinSelectV2) {
allowed_in_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinLogicalAnd) {
allowed_in_types.push_back(kTfLiteBool);
allowed_out_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinGather) {
allowed_in_types.push_back(kTfLiteInt32);
}
if (!IsAllAllowedTensors(context, node->inputs, allowed_in_types) ||
!IsAllAllowedTensors(context, node->outputs, allowed_out_types)) {
if (unsupported_details) {
*unsupported_details =
"OP is supported, but tensor type/shape isn't compatible.";
}
return false;
}
return true;
};
delegates::FP16GraphPartitionHelper partition_helper(context,
node_supported_fn);
std::set<std::string> unsupported_nodes_info;
#ifndef TFLITE_DEBUG_DELEGATE
auto res = partition_helper.Partition(&unsupported_nodes_info);
#else
auto res = partition_helper.Partition(&unsupported_nodes_info,
start_node_index, end_node_index);
#endif
if (res != kTfLiteOk) {
return TfLiteIntArrayCreate(0);
}
std::vector<int> ops_to_replace =
partition_helper.GetNodesOfFirstNLargestPartitions(
max_delegated_partitions);
if (!unsupported_nodes_info.empty() &&
partition_helper.num_total_nodes() > ops_to_replace.size()) {
std::string unsupported = absl::StrJoin(unsupported_nodes_info, "\n");
std::string error_message = absl::StrCat(
"Following operations are not supported by GPU delegate:\n",
unsupported, "\n");
if (!ops_to_replace.empty()) {
absl::StrAppend(
&error_message, ops_to_replace.size(),
" operations will run on the GPU, and the remaining ",
partition_helper.num_total_nodes() - ops_to_replace.size());
} else {
absl::StrAppend(&error_message,
"No operations will run on the GPU, and all ",
partition_helper.num_total_nodes());
}
absl::StrAppend(&error_message, " operations will run on the CPU.");
TF_LITE_KERNEL_LOG(context, error_message.c_str());
}
return ConvertVectorToTfLiteIntArray(ops_to_replace);
}
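// Pre-registers non-constant graph input tensors as values so later parsers
// can reference them.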
absl::Status PrecreateInputTensors(
TfLiteContext* context, GraphFloat32* graph,
const std::vector<int>& input_ids,
absl::flat_hash_map<int, int>* quant_conversion_map,
absl::flat_hash_map<int, Value*>* tensor_to_value) {
for (const auto& id : input_ids) {
const TfLiteTensor& tflite_tensor = context->tensors[id];
if (tflite::IsConstantTensor(&tflite_tensor)) continue;
RETURN_IF_ERROR(ObjectReader::ReadNonConstantTensor(
context, tensor_to_value, quant_conversion_map, graph, id));
}
return absl::OkStatus();
}
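// Same as above for graph outputs; each created value is also marked as a
// known graph output.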
absl::Status PrecreateOutputTensors(
TfLiteContext* context, GraphFloat32* graph,
const std::vector<int>& output_ids,
absl::flat_hash_map<int, int>* quant_conversion_map,
absl::flat_hash_map<int, Value*>* tensor_to_value) {
for (const auto& id : output_ids) {
const TfLiteTensor& tflite_tensor = context->tensors[id];
if (tflite::IsConstantTensor(&tflite_tensor)) continue;
Value* value;
RETURN_IF_ERROR(ObjectReader::ReadNonConstantTensor(
context, tensor_to_value, quant_conversion_map, graph, id, &value));
graph->AddKnownGraphOutput(value);
}
return absl::OkStatus();
}
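// Emits COPY nodes so each variable input tensor receives the new value the
// op produced for it; errors if the provided values and the node's variable
// inputs do not match up.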
absl::Status CopyVariableTensorOutputs(
TfLiteNode* tflite_node, TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader& reader,
const absl::flat_hash_map<int, ValueId>& new_variable_tensor_values) {
absl::flat_hash_map<int, ValueId> new_variable_tensor_values_copy(
new_variable_tensor_values);
for (int i = 0; i < tflite_node->inputs->size; i++) {
int tensor_idx = tflite_node->inputs->data[i];
Value* value;
if (!reader.ReadValueByTensorIdx(tensor_idx, &value).ok()) continue;
if (value->tensor.is_variable_input) {
if (new_variable_tensor_values_copy.find(i) ==
new_variable_tensor_values_copy.end()) {
return absl::InvalidArgumentError(
absl::StrCat(GetOpNameByRegistration(*registration),
" did not provide a new value for the variable input "
"tensor with index ",
tensor_idx));
} else {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::COPY);
RETURN_IF_ERROR(graph->AddConsumer(
node->id, new_variable_tensor_values_copy.at(i)));
RETURN_IF_ERROR(reader.AddUpdate(node, i));
new_variable_tensor_values_copy.erase(
new_variable_tensor_values_copy.find(i));
}
}
}
if (!new_variable_tensor_values_copy.empty()) {
return absl::InvalidArgumentError(
"More input variable tensors asked to be copied than present on the "
"node");
}
return absl::OkStatus();
}
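// Builds the GraphFloat32 for a delegate partition using the partition's own
// input/output tensors.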
absl::Status BuildModel(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph,
absl::flat_hash_map<int, int>* quant_conversion_map) {
std::vector<int> inputs(delegate_params->input_tensors->size);
std::vector<int> outputs(delegate_params->output_tensors->size);
for (int i = 0; i < delegate_params->input_tensors->size; i++) {
inputs[i] = delegate_params->input_tensors->data[i];
}
for (int i = 0; i < delegate_params->output_tensors->size; i++) {
outputs[i] = delegate_params->output_tensors->data[i];
}
return BuildModelEnforceIO(context, delegate_params, inputs, outputs, graph,
quant_conversion_map);
}
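// Like BuildModel, but with explicit graph input/output tensor ids. Skips
// fp16 DEQUANTIZE of constant (mmap'd) inputs, parses each replaced node, and
// propagates new values for variable input tensors.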
absl::Status BuildModelEnforceIO(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
const std::vector<int>& input_ids, const std::vector<int>& output_ids,
GraphFloat32* graph, absl::flat_hash_map<int, int>* quant_conversion_map) {
std::vector<std::unique_ptr<TFLiteOperationParser>> operations;
std::vector<int> tflite_nodes;
for (int i = 0; i < delegate_params->nodes_to_replace->size; ++i) {
TfLiteNode* tflite_node = nullptr;
TfLiteRegistration* registration = nullptr;
RETURN_IF_ERROR(GetNodeAndRegistration(
context, delegate_params->nodes_to_replace->data[i], &tflite_node,
®istration));
if (registration->builtin_code == kTfLiteBuiltinDequantize &&
context->tensors[tflite_node->inputs->data[0]].type ==
TfLiteType::kTfLiteFloat16 &&
context->tensors[tflite_node->inputs->data[0]].allocation_type ==
TfLiteAllocationType::kTfLiteMmapRo) {
continue;
}
auto op_parser = NewOperationParser(
registration, quant_conversion_map != nullptr);
if (!op_parser) {
return absl::UnimplementedError(
absl::StrCat("Operation ", registration->builtin_code, "(",
registration->custom_name,
") is not supported by TFLite GPU Delegate."));
}
operations.push_back(std::move(op_parser));
tflite_nodes.push_back(i);
}
absl::flat_hash_map<int, Value*> tensor_to_value;
std::vector<ValueId> variable_inputs_to_value_id;
RETURN_IF_ERROR(PrecreateInputTensors(
context, graph, input_ids, quant_conversion_map, &tensor_to_value));
RETURN_IF_ERROR(PrecreateOutputTensors(
context, graph, output_ids, quant_conversion_map, &tensor_to_value));
for (int i = 0; i < operations.size(); ++i) {
TfLiteNode* tflite_node;
TfLiteRegistration* registration;
RETURN_IF_ERROR(GetNodeAndRegistration(
context, delegate_params->nodes_to_replace->data[tflite_nodes[i]],
&tflite_node, ®istration));
ObjectReader reader(graph, context, tflite_node, &tensor_to_value,
quant_conversion_map);
const auto status =
operations[i]->Parse(tflite_node, registration, graph, &reader);
if (!status.ok()) {
return absl::InternalError(absl::StrCat(
GetOpNameByRegistration(*registration), ": ", status.message()));
}
absl::flat_hash_map<int, ValueId> new_value_for_variable_input_tensors =
operations[i]->GetNewValueIdsForVariableInputNodes();
RETURN_IF_ERROR(
CopyVariableTensorOutputs(tflite_node, registration, graph, reader,
new_value_for_variable_input_tensors));
}
for (auto value_id : variable_inputs_to_value_id) {
if (!graph->IsGraphOutput(value_id)) {
return absl::InvalidArgumentError(
absl::StrCat("Variable input tensors must be a graph output. Value ",
value_id, " is not a graph output"));
}
}
return absl::OkStatus();
}
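// Builds the graph from the delegate params and then runs the standard model transformations.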
absl::Status BuildFinalModel(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph, absl::flat_hash_map<int, int>* quant_conversion_map) {
RETURN_IF_ERROR(
BuildModel(context, delegate_params, graph, quant_conversion_map));
ModelTransformer transformer(graph);
if (!ApplyModelTransformations(&transformer)) {
return absl::InternalError("Graph transformations failed");
}
return absl::OkStatus();
}
namespace {
class DelegateContext {
public:
struct DelegateData {
std::vector<int> input_ids;
std::vector<int> output_ids;
GraphFloat32* graph;
std::unique_ptr<absl::flat_hash_map<int, int>> quant_conversion_map;
};
bool Init(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params) {
const auto* delegate_data =
reinterpret_cast<DelegateData*>(delegate_params->delegate->data_);
return delegate_data->graph &&
BuildModelEnforceIO(context, delegate_params,
delegate_data->input_ids,
delegate_data->output_ids, delegate_data->graph,
delegate_data->quant_conversion_map.get())
.ok();
}
};
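// Prepare callback: partitions the graph and replaces supported node subsets with a
// kernel whose Init runs BuildModelEnforceIO via DelegateContext.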
TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
TfLiteRegistration registration{};
registration.init = [](TfLiteContext* context, const char* buffer,
size_t) -> void* {
auto* delegate_context = new DelegateContext();
if (!delegate_context->Init(
context, reinterpret_cast<const TfLiteDelegateParams*>(buffer))) {
delete delegate_context;
return nullptr;
}
return delegate_context;
};
registration.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<DelegateContext*>(buffer);
};
registration.prepare = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
return node->user_data ? kTfLiteOk : kTfLiteError;
};
const auto* delegate_data =
reinterpret_cast<const DelegateContext::DelegateData*>(delegate->data_);
TfLiteIntArray* ops_to_replace = GetOpsToReplace(
context, static_cast<bool>(delegate_data->quant_conversion_map));
const auto status = context->ReplaceNodeSubsetsWithDelegateKernels(
context, registration, ops_to_replace, delegate);
TfLiteIntArrayFree(ops_to_replace);
return status;
}
}
absl::Status BuildFromFlatBuffer(const tflite::FlatBufferModel& flatbuffer,
const tflite::OpResolver& op_resolver,
GraphFloat32* graph, bool allow_quant_ops) {
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::InterpreterBuilder interpreter_builder(flatbuffer, op_resolver);
if (interpreter_builder(&interpreter) != kTfLiteOk || !interpreter) {
return absl::InternalError("Unable to prepare TfLite interpreter.");
}
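  // A short-lived delegate is used solely to capture the graph into GraphFloat32
  // during ModifyGraphWithDelegate.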
TfLiteDelegate delegate;
DelegateContext::DelegateData delegate_data{interpreter->inputs(),
interpreter->outputs(), graph};
if (allow_quant_ops) {
delegate_data.quant_conversion_map =
std::make_unique<absl::flat_hash_map<int, int>>();
}
delegate.data_ = &delegate_data;
delegate.flags = kTfLiteDelegateFlagsNone;
delegate.Prepare = DelegatePrepare;
delegate.CopyFromBufferHandle = nullptr;
delegate.CopyToBufferHandle = nullptr;
delegate.FreeBufferHandle = nullptr;
if (interpreter->ModifyGraphWithDelegate(&delegate) != kTfLiteOk) {
return absl::InternalError("Conversion from TfLite model failed.");
}
ModelTransformer transformer(graph);
if (!ApplyModelTransformations(&transformer)) {
return absl::InternalError("Graph transformations failed");
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include <stddef.h>
#include <stdint.h>
#include <cstdlib>
#include <cstring>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_internal.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace gpu {
namespace {
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank0) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(1);
tflite_tensor.dims->data[0] = 4;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::FLOAT32);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 1, 1));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank1) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteInt32;
tflite_tensor.dims = TfLiteIntArrayCreate(2);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::INT32);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 1, 5));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank2) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteInt64;
tflite_tensor.dims = TfLiteIntArrayCreate(3);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
tflite_tensor.dims->data[2] = 6;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::INT64);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 5, 6));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank3) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteUInt8;
tflite_tensor.dims = TfLiteIntArrayCreate(4);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
tflite_tensor.dims->data[2] = 6;
tflite_tensor.dims->data[3] = 7;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::UINT8);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 5, 6, 7));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefFailsForRankLT0) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(0);
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_FALSE(status.ok());
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefFailsForRankGT3) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(5);
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_FALSE(status.ok());
}
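// Test helper that owns an Interpreter together with a mutable execution plan and
// storage for fake delegate partition params.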
class DelegatedInterpreter {
public:
explicit DelegatedInterpreter(int num_nodes) {
exec_plan_ = TfLiteIntArrayCreate(num_nodes);
}
virtual ~DelegatedInterpreter() {
TfLiteIntArrayFree(exec_plan_);
for (auto params : delegate_params_) {
TfLiteIntArrayFree(params.nodes_to_replace);
TfLiteIntArrayFree(params.input_tensors);
TfLiteIntArrayFree(params.output_tensors);
}
}
TfLiteContext* context() { return interpreter_.primary_subgraph().context(); }
TfLiteNode* node(int index) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration =
interpreter_.primary_subgraph().node_and_registration(index);
return const_cast<TfLiteNode*>(&node_and_registration->first);
}
TfLiteRegistration* registration(int index) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration =
interpreter_.primary_subgraph().node_and_registration(index);
return const_cast<TfLiteRegistration*>(&node_and_registration->second);
}
TfLiteIntArray* exec_plan() {
const int num_nodes = exec_plan_->size;
TfLiteIntArray* new_array = TfLiteIntArrayCreate(num_nodes);
std::memcpy(new_array->data, exec_plan_->data, num_nodes * sizeof(int32_t));
TfLiteIntArrayFree(exec_plan_);
exec_plan_ = new_array;
return exec_plan_;
}
TfLiteDelegateParams* add_delegate_params() {
delegate_params_.push_back(TfLiteDelegateParams());
return &delegate_params_.back();
}
TfLiteDelegateParams* delegate_params() { return &delegate_params_.front(); }
int num_delegate_params() { return delegate_params_.size(); }
protected:
Interpreter interpreter_;
private:
TfLiteIntArray* exec_plan_ = nullptr;
std::vector<TfLiteDelegateParams> delegate_params_;
};
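// Three-node graph: two Dequantize nodes (fp16 inputs, constant by default) feeding a single two-input op.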
class InterpreterFp16 : public DelegatedInterpreter {
public:
explicit InterpreterFp16(TfLiteBuiltinOperator op,
bool const_dequantize_inputs = true)
: DelegatedInterpreter(3) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(5), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 1}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({4}), kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {
nullptr, nullptr, nullptr, nullptr, nullptr, kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_dequant0),
kTfLiteOk);
const TfLiteRegistration reg_dequant1 = {
nullptr, nullptr, nullptr, nullptr, nullptr, kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{2}, {3}, nullptr,
0, nullptr,
                  &reg_dequant1),
kTfLiteOk);
const TfLiteRegistration reg_op0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
op};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 3}, {4}, nullptr,
0,
builtin_data,
                  &reg_op0),
kTfLiteOk);
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat16, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat16, "t2", dims, quantization, false),
kTfLiteOk);
if (const_dequantize_inputs) {
auto* tensor0 = interpreter_.tensor(0);
auto* tensor2 = interpreter_.tensor(2);
tensor0->allocation_type = kTfLiteMmapRo;
tensor2->allocation_type = kTfLiteMmapRo;
}
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
}
};
InterpreterFp16* interpreter_fp16_add_op =
new InterpreterFp16(kTfLiteBuiltinAdd);
TEST(ModelBuilderTest, GetOpsToReplaceAcceptsFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_add_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_add_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_add_op->node(node_index);
*registration = interpreter_fp16_add_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_add_op->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array = interpreter_fp16_add_op->delegate_params();
*num_partitions = interpreter_fp16_add_op->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 3);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_non_constant =
    new InterpreterFp16(kTfLiteBuiltinAdd, /*const_dequantize_inputs=*/false);
TEST(ModelBuilderTest, GetOpsToReplaceRejectsNonConstantFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_non_constant->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_non_constant->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_non_constant->node(node_index);
*registration = interpreter_fp16_non_constant->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_non_constant->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array =
interpreter_fp16_non_constant->delegate_params();
*num_partitions = interpreter_fp16_non_constant->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, ops_to_replace->data[0], &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_gt_op =
new InterpreterFp16(kTfLiteBuiltinGreater);
TEST(ModelBuilderTest, GetOpsToReplaceRejectsFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_gt_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_gt_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_gt_op->node(node_index);
*registration = interpreter_fp16_gt_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 0);
*partition_params_array = nullptr;
*num_partitions = 0;
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 0);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
const int kGreaterOpIndex = 2;
context->GetNodeAndRegistration(context, kGreaterOpIndex, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
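// Two-node graph: Dequantize of a uint8 input followed by Add.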
class InterpreterFp32 : public DelegatedInterpreter {
public:
InterpreterFp32() : DelegatedInterpreter(2) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(4), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 2}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({3}), kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_dequant0),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {3}, nullptr,
0,
builtin_data,
                  &reg_add0),
kTfLiteOk);
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
}
};
InterpreterFp32* interpreter_fp32 = new InterpreterFp32();
TEST(ModelBuilderTest, GetOpsToReplaceDoesNotPruneUint8) {
TfLiteContext* context = interpreter_fp32->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp32->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp32->node(node_index);
*registration = interpreter_fp32->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
auto params = interpreter_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 1;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 2;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 3;
*partition_params_array = interpreter_fp32->delegate_params();
*num_partitions = interpreter_fp32->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
EXPECT_EQ(1, ops_to_replace->data[0]);
TfLiteIntArrayFree(ops_to_replace);
}
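// Four-node graph: Dequantize(uint8), Add, Pack, Add; used to exercise multiple delegated partitions.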
class Interpreter2Fp32 : public DelegatedInterpreter {
public:
Interpreter2Fp32() : DelegatedInterpreter(4) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(8), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 2, 4, 6}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({7}), kTfLiteOk);
const TfLiteRegistration reg_dequant = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_dequant),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {3}, nullptr,
0,
builtin_data,
                  &reg_add0),
kTfLiteOk);
const TfLiteRegistration reg_pack = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinPack};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {5}, nullptr,
0, nullptr,
                  &reg_pack),
kTfLiteOk);
const TfLiteRegistration reg_add1 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int[2]);
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{5, 6}, {7}, nullptr,
0,
builtin_data,
                  &reg_add1),
kTfLiteOk);
std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteFloat32, "t4", dims, quantization, false),
kTfLiteOk);
dims.push_back(2);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
6, TfLiteType::kTfLiteFloat32, "t6", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
}
};
Interpreter2Fp32* interpreter2_fp32 = new Interpreter2Fp32();
TEST(ModelBuilderTest, GetOpsToReplaceMultiplePartitions) {
TfLiteContext* context = interpreter2_fp32->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter2_fp32->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter2_fp32->node(node_index);
*registration = interpreter2_fp32->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
auto params = interpreter2_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 1;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 2;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 3;
params = interpreter2_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 5;
params->input_tensors->data[1] = 6;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter2_fp32->delegate_params();
*num_partitions = interpreter2_fp32->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(
context, false, 2);
ASSERT_EQ(ops_to_replace->size, 2);
EXPECT_THAT(absl::MakeConstSpan(ops_to_replace->data, 2),
testing::UnorderedElementsAre(1, 3));
TfLiteIntArrayFree(ops_to_replace);
}
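// Five-node graph: three constant-fp16 Dequantize nodes feeding two ops; the flag selects
// either two Adds (both supported) or Greater + Add.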
class InterpreterMultiNode : public DelegatedInterpreter {
public:
explicit InterpreterMultiNode(bool both_ops_supported = true)
: DelegatedInterpreter(5) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(8), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 1, 2}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({6, 7}), kTfLiteOk);
for (int i = 0; i < 3; ++i) {
const TfLiteRegistration reg_dequant = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{i}, {i + 3}, nullptr,
0, nullptr,
                    &reg_dequant),
kTfLiteOk);
}
if (both_ops_supported) {
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{4, 5}, {7}, nullptr,
0,
builtin_data,
                    &reg_add0),
kTfLiteOk);
const TfLiteRegistration reg_add1 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {6}, nullptr,
0,
builtin_data,
                    &reg_add1),
kTfLiteOk);
} else {
const TfLiteRegistration reg_greater = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinGreater};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {6}, nullptr,
0,
builtin_data,
                    &reg_greater),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{4, 5}, {7}, nullptr,
0,
builtin_data,
                    &reg_add0),
kTfLiteOk);
}
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat16, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat16, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat16, "t2", dims, quantization, false),
kTfLiteOk);
auto* tensor0 = interpreter_.tensor(0);
auto* tensor1 = interpreter_.tensor(1);
auto* tensor2 = interpreter_.tensor(2);
tensor0->allocation_type = kTfLiteMmapRo;
tensor1->allocation_type = kTfLiteMmapRo;
tensor2->allocation_type = kTfLiteMmapRo;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteFloat32, "t4", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
6, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
7, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
exec_plan()->data[4] = 4;
}
};
InterpreterMultiNode* interpreter_mn =
    new InterpreterMultiNode(/*both_ops_supported=*/false);
TEST(ModelBuilderTest,
GetOpsToReplaceSelectsCorrectFp16Nodes_SingleDelegatedPartition) {
TfLiteContext* context = interpreter_mn->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_mn->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_mn->node(node_index);
*registration = interpreter_mn->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
EXPECT_EQ(nodes_to_replace->data[0], 4);
auto params = interpreter_mn->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 4;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter_mn->delegate_params();
*num_partitions = interpreter_mn->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
EXPECT_EQ(ops_to_replace->data[0], 4);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, ops_to_replace->data[0], &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterMultiNode* interpreter_mn2 =
    new InterpreterMultiNode(/*both_ops_supported=*/true);
TEST(ModelBuilderTest,
GetOpsToReplaceSelectsCorrectFp16Nodes_MultipleDelegatedPartitions) {
TfLiteContext* context = interpreter_mn2->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_mn2->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_mn2->node(node_index);
*registration = interpreter_mn2->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 2);
EXPECT_EQ(nodes_to_replace->data[0], 3);
EXPECT_EQ(nodes_to_replace->data[1], 4);
auto params = interpreter_mn2->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 3;
params->input_tensors->data[1] = 4;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 6;
params = interpreter_mn2->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 4;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 4;
params->input_tensors->data[1] = 5;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter_mn2->delegate_params();
*num_partitions = interpreter_mn2->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(
context, false, 2);
EXPECT_EQ(ops_to_replace->size, 5);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 3, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
context->GetNodeAndRegistration(context, 4, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
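// Four-node graph: two Quantize nodes feeding an int8 Add, followed by Dequantize back to float.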
class InterpreterQuantized : public DelegatedInterpreter {
public:
InterpreterQuantized() : DelegatedInterpreter(4) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(6), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 3}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({5}), kTfLiteOk);
const TfLiteRegistration reg_quant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinQuantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_quant0),
kTfLiteOk);
const TfLiteRegistration reg_quant1 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinQuantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3}, {2}, nullptr,
0, nullptr,
                  &reg_quant1),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {4}, nullptr,
0,
builtin_data,
                  &reg_add0),
kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{4}, {5}, nullptr,
0, nullptr,
                  &reg_dequant0),
kTfLiteOk);
const std::vector<int> dims = {1, 3, 3, 2};
TfLiteQuantization no_quantization;
no_quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat32, "t0", dims, no_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, no_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, no_quantization, false),
kTfLiteOk);
float scale = 0.5f;
int32_t zero_point = 12;
TfLiteQuantization rw_quantization;
rw_quantization.type = kTfLiteAffineQuantization;
auto* rw_affine_quantization = static_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
rw_affine_quantization->scale = TfLiteFloatArrayCreate(1);
rw_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
rw_affine_quantization->scale->data[0] = scale;
rw_affine_quantization->zero_point->data[0] = zero_point;
rw_quantization.params = rw_affine_quantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteInt8, "t1", dims, rw_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteInt8, "t2", dims, rw_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteInt8, "t4", dims, rw_quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
}
};
InterpreterQuantized* interpreter_quant = new InterpreterQuantized();
TEST(ModelBuilderTest, GetOpsToReplace_AllowQuantOps) {
TfLiteContext* context = interpreter_quant->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_quant->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_quant->node(node_index);
*registration = interpreter_quant->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
if (nodes_to_replace->size == 0) {
*num_partitions = 0;
return kTfLiteOk;
} else if (nodes_to_replace->size == 4) {
auto params = interpreter_quant->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(4);
params->nodes_to_replace->data[0] = 0;
params->nodes_to_replace->data[1] = 1;
params->nodes_to_replace->data[2] = 2;
          params->nodes_to_replace->data[3] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 0;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 5;
*partition_params_array = interpreter_quant->delegate_params();
*num_partitions = interpreter_quant->num_delegate_params();
return kTfLiteOk;
} else {
return kTfLiteError;
}
};
TfLiteIntArray* ops_to_replace =
GetOpsToReplace(context, true);
EXPECT_EQ(ops_to_replace->size, 4);
TfLiteIntArray* ops_to_replace_without_quant =
GetOpsToReplace(context, false);
EXPECT_EQ(ops_to_replace_without_quant->size, 0);
TfLiteIntArrayFree(ops_to_replace);
TfLiteIntArrayFree(ops_to_replace_without_quant);
}
InterpreterFp16* interpreter_fp16_split_op =
new InterpreterFp16(kTfLiteBuiltinSplit);
TEST(ModelBuilderTest, GetOpsToReplaceAcceptsSplitOpCl) {
TfLiteContext* context = interpreter_fp16_split_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_split_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_split_op->node(node_index);
*registration = interpreter_fp16_split_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_split_op->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array = interpreter_fp16_split_op->delegate_params();
*num_partitions = interpreter_fp16_split_op->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 3);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_split_op2 =
new InterpreterFp16(kTfLiteBuiltinSplit);
TEST(ModelBuilderTest, GetOpsToReplaceRejectsSplitOpGl) {
TfLiteContext* context = interpreter_fp16_split_op2->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_split_op2->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_split_op2->node(node_index);
*registration = interpreter_fp16_split_op2->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 0);
*partition_params_array = nullptr;
*num_partitions = 0;
return kTfLiteOk;
};
absl::flat_hash_set<TfLiteBuiltinOperator> excluded_ops = {
kTfLiteBuiltinSplit};
TfLiteIntArray* ops_to_replace =
GetOpsToReplace(context, false,
1, &excluded_ops);
EXPECT_EQ(ops_to_replace->size, 0);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
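// Minimal TfLiteContext stub with a three-node plan (Add, op under test, Add) and
// rank-4 float32 tensors of shape {1, 1, 1, 1}.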
class StubTfLiteContext : public TfLiteContext {
public:
StubTfLiteContext(const int builtin_code, const int op_version,
const int num_inputs)
: TfLiteContext({0}) {
exec_plan_ = TfLiteIntArrayCreate(3);
for (int i = 0; i < 3; ++i) exec_plan_->data[i] = i;
int tensor_no = 0;
std::memset(nodes_, 0, sizeof(nodes_));
std::memset(registrations_, 0, sizeof(registrations_));
nodes_[0].inputs = TfLiteIntArrayCreate(1);
nodes_[0].inputs->data[0] = tensor_no++;
nodes_[0].outputs = TfLiteIntArrayCreate(1);
nodes_[0].outputs->data[0] = tensor_no;
nodes_[0].builtin_data = nullptr;
nodes_[1].inputs = TfLiteIntArrayCreate(num_inputs);
for (int i = 0; i < num_inputs; i++) {
nodes_[1].inputs->data[i] = tensor_no++;
}
nodes_[1].outputs = TfLiteIntArrayCreate(1);
nodes_[1].outputs->data[0] = tensor_no;
nodes_[1].builtin_data = malloc(1024);
std::memset(nodes_[1].builtin_data, 0, 1024);
nodes_[2].inputs = TfLiteIntArrayCreate(1);
nodes_[2].inputs->data[0] = tensor_no++;
nodes_[2].outputs = TfLiteIntArrayCreate(1);
nodes_[2].outputs->data[0] = tensor_no++;
nodes_[2].builtin_data = nullptr;
tensors_.resize(tensor_no);
for (size_t i = 0; i < tensors_.size(); i++) {
std::memset(&tensors_[i], 0, sizeof(tensors_[i]));
tensors_[i].buffer_handle = kTfLiteNullBufferHandle;
tensors_[i].type = kTfLiteFloat32;
tensors_[i].dims = TfLiteIntArrayCreate(4);
for (int d = 0; d < 4; d++) {
tensors_[i].dims->data[d] = 1;
}
}
tensors = tensors_.data();
tensors_size = tensors_.size();
registrations_[0].builtin_code = kTfLiteBuiltinAdd;
registrations_[1].builtin_code = builtin_code;
registrations_[1].version = op_version;
registrations_[2].builtin_code = kTfLiteBuiltinAdd;
this->GetExecutionPlan = StubGetExecutionPlan;
this->GetNodeAndRegistration = StubGetNodeAndRegistration;
}
~StubTfLiteContext() {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
if (node.builtin_data) {
free(node.builtin_data);
}
}
for (auto& tensor : tensors_) {
TfLiteIntArrayFree(tensor.dims);
}
TfLiteIntArrayFree(exec_plan_);
}
TfLiteIntArray* exec_plan() const { return exec_plan_; }
TfLiteNode* node() { return &nodes_[1]; }
  TfLiteRegistration* registration() { return &registrations_[1]; }
TfLiteNode* node(int node_index) { return &nodes_[node_index]; }
TfLiteRegistration* registration(int reg_index) {
    return &registrations_[reg_index];
}
TfLiteTensor* tensor(int tensor_index) { return &tensors_[tensor_index]; }
private:
static TfLiteStatus StubGetExecutionPlan(TfLiteContext* context,
TfLiteIntArray** execution_plan) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*execution_plan = stub->exec_plan();
return kTfLiteOk;
}
static TfLiteStatus StubGetNodeAndRegistration(
TfLiteContext* context, int node_index, TfLiteNode** node,
TfLiteRegistration** registration) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*node = stub->node(node_index);
*registration = stub->registration(node_index);
return kTfLiteOk;
}
TfLiteIntArray* exec_plan_;
TfLiteNode nodes_[3];
TfLiteRegistration registrations_[3];
std::vector<TfLiteTensor> tensors_;
};
TEST(AddOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAdd,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAdd,
2,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(BatchMatMulOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinBatchMatmul,
1,
3);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinBatchMatmul,
1,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(CastOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCast,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCast,
1,
1);
context->tensor(1)->type = kTfLiteFloat32;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt32;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt8;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteBool;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(0)->builtin_code = kTfLiteBuiltinGreater;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ClampOperationsParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReluN1To1,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ConcatenationOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConcatenation,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConcatenation,
2,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Conv2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConv2d,
6,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConv2d,
5,
2);
TfLiteConvParams* tf_options =
static_cast<TfLiteConvParams*>(context->node()->builtin_data);
tf_options->stride_width = 0;
tf_options->stride_height = 0;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->activation = kTfLiteActRelu;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(DensifyOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDensify,
2,
0);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDensify,
1,
0);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(DepthwiseConvolutionOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthwiseConv2d,
7,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthwiseConv2d,
6,
2);
TfLiteDepthwiseConvParams* tf_options =
static_cast<TfLiteDepthwiseConvParams*>(context->node()->builtin_data);
tf_options->stride_width = 0;
tf_options->stride_height = 0;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
tf_options->depth_multiplier = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
tf_options->depth_multiplier = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->depth_multiplier = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->depth_multiplier = 0;
tf_options->activation = kTfLiteActRelu;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->depth_multiplier = 1;
tf_options->activation = kTfLiteActRelu;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(DepthToSpaceOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthToSpace,
1,
0);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthToSpace,
1,
1);
TfLiteDepthToSpaceParams* d2s_params =
static_cast<TfLiteDepthToSpaceParams*>(context->node()->builtin_data);
d2s_params->block_size = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
d2s_params->block_size = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(DequantizeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDequantize,
4,
1);
auto parser =
NewOperationParser(context->registration(), true);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDequantize,
1,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDequantize,
1,
1);
context->tensor(1)->type = kTfLiteInt16;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt8;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(LogicalElementwiseOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinEqual,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinEqual,
2,
1);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(2)->builtin_code = kTfLiteBuiltinCast;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(2)->builtin_code = kTfLiteBuiltinSelect;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(2)->builtin_code = kTfLiteBuiltinSelectV2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ArithmeticUnaryElementwiseOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAbs,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAbs,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ArithmeticBinaryElementwiseOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDiv,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDiv,
2,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(FullyConnectedOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinFullyConnected,
10,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinFullyConnected,
9,
3);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinFullyConnected,
9,
2);
TfLiteFullyConnectedParams* tf_options =
static_cast<TfLiteFullyConnectedParams*>(context->node()->builtin_data);
tf_options->weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
tf_options->keep_num_dims = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->size = 3;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(GatherOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
3);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 2;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteInt32;
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(HardSwishOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinHardSwish,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinHardSwish,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(LSTMOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLstm,
5,
24);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLstm,
1,
1);
TfLiteLSTMParams* tf_options =
static_cast<TfLiteLSTMParams*>(context->node()->builtin_data);
tf_options->kernel_type = kTfLiteLSTMFullKernel;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLstm,
1,
24);
tf_options = static_cast<TfLiteLSTMParams*>(context->node()->builtin_data);
tf_options->kernel_type = kTfLiteLSTMFullKernel;
tf_options->activation = kTfLiteActRelu;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->activation = kTfLiteActSigmoid;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MulOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMul,
4,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMul,
3,
2);
TfLiteMulParams* tf_options =
static_cast<TfLiteMulParams*>(context->node()->builtin_data);
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->activation = kTfLiteActSigmoid;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->data[0] = 256;
context->tensor(2)->dims->data[1] = 256;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(PackOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPack,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(PReLUOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPrelu,
2,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPrelu,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(PadOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPad,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPad,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPad,
2,
2);
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MirrorPadOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
3,
1);
TfLiteMirrorPaddingParams* tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingSymmetric;
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
3,
1);
tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingReflect;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
2,
2);
tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingReflect;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
2,
2);
tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingReflect;
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(AveragePooling2DOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAveragePool2d,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAveragePool2d,
2,
1);
TfLitePoolParams* tf_options =
static_cast<TfLitePoolParams*>(context->node()->builtin_data);
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 0;
tf_options->stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActTanh;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MaxPooling2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMaxPool2d,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMaxPool2d,
2,
1);
TfLitePoolParams* tf_options =
static_cast<TfLitePoolParams*>(context->node()->builtin_data);
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 0;
tf_options->stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActTanh;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(CustomMaxPooling2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
2,
1);
context->registration()->custom_name = "MaxPoolingWithArgmax2D";
TfLitePoolParams tf_options;
context->node()->custom_initial_data = &tf_options;
TfLiteIntArrayFree(context->node()->outputs);
context->node()->outputs = TfLiteIntArrayCreate(2);
context->node()->outputs->data[0] = 2;
context->node()->outputs->data[1] = 3;
auto parser = NewOperationParser(context->registration());
tf_options.filter_height = 0;
tf_options.filter_width = 0;
tf_options.stride_width = 0;
tf_options.stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 0;
tf_options.filter_width = 0;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
tf_options.activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
tf_options.activation = kTfLiteActTanh;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReduceMaxOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReduceMax,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReduceMinOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReduceMin,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReduceProductOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReduceProd,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(QuantizeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinQuantize,
3,
1);
auto parser =
NewOperationParser(context->registration(), true);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinQuantize,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinQuantize,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReLUOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReLU6OperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu6,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu6,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(LeakyReLUOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLeakyRelu,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLeakyRelu,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ResamplerOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
1);
context->registration()->custom_name = "Resampler";
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "Resampler";
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReshapeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReshape,
2,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReshape,
1,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReshape,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Resize2DBilinearOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeBilinear,
4,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeBilinear,
3,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeBilinear,
3,
1);
TfLiteResizeBilinearParams* tf_options =
static_cast<TfLiteResizeBilinearParams*>(context->node()->builtin_data);
tf_options->half_pixel_centers = true;
tf_options->align_corners = true;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = true;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Resize2DNearestNeighborOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeNearestNeighbor,
4,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeNearestNeighbor,
3,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeNearestNeighbor,
3,
1);
TfLiteResizeNearestNeighborParams* tf_options =
static_cast<TfLiteResizeNearestNeighborParams*>(
context->node()->builtin_data);
tf_options->half_pixel_centers = true;
tf_options->align_corners = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = true;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SliceOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
3,
3);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
2,
3);
context->tensor(1)->dims->size = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
2,
3);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SoftmaxOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSoftmax,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSoftmax,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSoftmax,
2,
1);
TfLiteSoftmaxParams* tf_options =
static_cast<TfLiteSoftmaxParams*>(context->node()->builtin_data);
tf_options->beta = 2;
tf_options->beta = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SplitOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSplit,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SplitVOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSplitV,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(StridedSliceOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
5,
4);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
4,
3);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
4,
4);
context->tensor(1)->dims->size = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
4,
5);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TileOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTile,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTile,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TransposeConvBuiltinOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTransposeConv,
4,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTransposeConv,
3,
3);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTransposeConv,
3,
2);
TfLiteTransposeConvParams* tf_options =
static_cast<TfLiteTransposeConvParams*>(context->node()->builtin_data);
tf_options->stride_width = 0;
tf_options->stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TransposeConvCustomOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
1);
context->registration()->custom_name = "Convolution2DTransposeBias";
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "Convolution2DTransposeBias";
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "Convolution2DTransposeBias";
TfLiteTransposeConvParams tf_options;
context->node()->custom_initial_data = &tf_options;
tf_options.stride_width = 0;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TransposeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTranspose,
5,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTranspose,
4,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTranspose,
4,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Unpooling2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
1);
context->registration()->custom_name = "MaxUnpooling2D";
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "MaxUnpooling2D";
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "MaxUnpooling2D";
TfLitePoolParams tf_options;
context->node()->custom_initial_data = &tf_options;
tf_options.filter_height = 0;
tf_options.filter_width = 0;
tf_options.stride_width = 0;
tf_options.stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 0;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 0;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MeanOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
3);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
2);
context->tensor(2)->allocation_type = kTfLiteArenaRw;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
2);
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
2);
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(CumsumOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCumsum,
1,
3);
context->tensor(2)->type = kTfLiteFloat32;
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCumsum,
1,
2);
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt32;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteFloat32;
context->tensor(2)->type = kTfLiteInt32;
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(OneHotOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinOneHot,
1,
4);
auto parser = NewOperationParser(context->registration());
auto status = parser->IsSupported(context.get(), context->node(),
context->registration());
context->tensor(1)->dims->data[1] = 2;
context->tensor(1)->dims->data[2] = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
auto* params =
reinterpret_cast<TfLiteOneHotParams*>(malloc(sizeof(TfLiteOneHotParams)));
params->axis = -1;
if (context->node(1)->builtin_data) {
free(context->node(1)->builtin_data);
}
context->node(1)->builtin_data = params;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->data[1] = 1;
context->tensor(1)->dims->data[2] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(3)->dims);
context->tensor(3)->dims = TfLiteIntArrayCreate(1);
context->tensor(3)->dims->data[0] = 1;
context->tensor(3)->allocation_type = kTfLiteMmapRo;
TfLiteIntArrayFree(context->tensor(4)->dims);
context->tensor(4)->dims = TfLiteIntArrayCreate(1);
context->tensor(4)->dims->data[0] = 1;
context->tensor(4)->allocation_type = kTfLiteMmapRo;
params->axis =
context->tensor(1)->dims->data[context->tensor(1)->dims->size - 1];
context->node(1)->builtin_data = params;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->data[0] = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SelectV2OperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSelectV2,
1,
3);
auto parser = NewOperationParser(context->registration());
auto status = parser->IsSupported(context.get(), context->node(),
context->registration());
context->tensor(1)->dims->data[0] = 1;
context->tensor(1)->dims->data[1] = 2;
context->tensor(1)->dims->data[2] = 1;
context->tensor(1)->dims->data[3] = 4;
context->tensor(4)->dims->data[0] = 1;
context->tensor(4)->dims->data[1] = 2;
context->tensor(4)->dims->data[2] = 3;
context->tensor(4)->dims->data[3] = 4;
context->tensor(1)->type = kTfLiteInt32;
context->tensor(2)->type = kTfLiteInt32;
context->tensor(3)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(3)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(2)->dims);
context->tensor(2)->dims = TfLiteIntArrayCreate(2);
context->tensor(2)->dims->data[0] = 2;
context->tensor(2)->dims->data[1] = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(2)->dims);
context->tensor(2)->dims = TfLiteIntArrayCreate(1);
context->tensor(2)->dims->data[0] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(3)->dims);
context->tensor(3)->dims = TfLiteIntArrayCreate(2);
context->tensor(3)->dims->data[0] = 2;
context->tensor(3)->dims->data[1] = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(3)->dims);
context->tensor(3)->dims = TfLiteIntArrayCreate(4);
for (int i = 0; i < context->tensor(4)->dims->size; ++i) {
context->tensor(3)->dims->data[i] = context->tensor(4)->dims->data[i];
}
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37dadc8c-122b-4df4-a56e-0b8662e07769 | cpp | tensorflow/tensorflow | convolution_transposed_3x3 | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
ConvolutionTransposed3x3::ConvolutionTransposed3x3(
const OperationDef& definition, const GpuInfo& gpu_info, int2 padding)
: GPUOperation(definition), padding_(padding) {
work_group_size_ = int3(8, 4, 1);
work_group_launch_order_ = int3(2, 0, 1);
if (gpu_info.IsApple()) {
if (gpu_info.apple_info.IsBionic()) {
weights_upload_type_ = WeightsUploadType::GLOBAL_MEM;
} else {
weights_upload_type_ = WeightsUploadType::LOCAL_MEM_BY_THREADS;
}
} else if (gpu_info.IsPowerVR()) {
weights_upload_type_ = WeightsUploadType::LOCAL_MEM_ASYNC;
} else if (gpu_info.IsNvidia() || gpu_info.IsIntel()) {
weights_upload_type_ = WeightsUploadType::LOCAL_MEM_BY_THREADS;
} else if (gpu_info.IsAMD()) {
weights_upload_type_ = WeightsUploadType::CONSTANT_MEM;
} else {
weights_upload_type_ = WeightsUploadType::GLOBAL_MEM;
}
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::kOICustomSpatialO4I4;
} else {
weights_layout_ = WeightsLayout::kOICustomSpatialI4O4;
}
code_ = GenerateConvolutionTransposedCode(gpu_info, definition_,
weights_upload_type_, padding_,
work_group_launch_order_);
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
}
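// Generates the kernel source. Each work item reads a 2x2 block of source
// pixels and produces a 2x2 block of output pixels (accumulators r0..r3),
// caching the 3x3 weights according to the selected weights upload type.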
std::string ConvolutionTransposed3x3::GenerateConvolutionTransposedCode(
const GpuInfo& gpu_info, const OperationDef& op_def,
ConvolutionTransposed3x3::WeightsUploadType weights_upload_type,
int2 padding, int3 work_group_launch_order) {
auto src_desc = op_def.src_tensors[0];
AddSrcTensor("src_tensor", src_desc);
AddDstTensor("dst_tensor", op_def.src_tensors[0]);
if (op_def.src_tensors.size() == 2) {
BufferDescriptor desc;
desc.element_type = op_def.src_tensors[1].GetDataType();
desc.element_size = 4;
desc.memory_type =
weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
AddSrcBuffer("weights", desc);
}
args_.AddInt("filter_offset");
args_.AddInt("padding_x");
args_.AddInt("padding_y");
const bool need_local_mem =
weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_BY_THREADS ||
weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_ASYNC;
std::string c;
if (GetWeightsDescription().IsI4O4()) {
switch (op_def.precision) {
case CalculationsPrecision::F32:
case CalculationsPrecision::F16:
c += "#define CONV(R, SRC, F) \\\n";
c += " R += SRC.x * weights_cache[F]; \\\n";
c += " R += SRC.y * weights_cache[F + 1]; \\\n";
c += " R += SRC.z * weights_cache[F + 2]; \\\n";
c += " R += SRC.w * weights_cache[F + 3]; \n";
break;
case CalculationsPrecision::F32_F16:
c += "#define CONV(R, SRC, F) \\\n";
c += " R += TO_ACCUM_TYPE(SRC.x * weights_cache[F] + SRC.y * "
"weights_cache[F + 1] + SRC.z * weights_cache[F + 2] + SRC.w * "
"weights_cache[F + 3]);\n";
break;
}
} else {
c += "#define CONV(R, SRC, F) \\\n";
c += " R.x += dot(SRC, weights_cache[F]); \\\n";
c += " R.y += dot(SRC, weights_cache[F + 1]); \\\n";
c += " R.z += dot(SRC, weights_cache[F + 2]); \\\n";
c += " R.w += dot(SRC, weights_cache[F + 3]); \n";
}
const int wg_total_size =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string barrier =
wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32()
? "SIMD_LOCAL_MEM_BARRIER"
: "LOCAL_MEM_BARRIER";
const std::string weights_space =
weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM
? "__constant"
: "__global";
if (gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
}
c += "MAIN_FUNCTION($0) {\n";
int3 launch_remap;
launch_remap[work_group_launch_order.x] = 0;
launch_remap[work_group_launch_order.y] = 1;
launch_remap[work_group_launch_order.z] = 2;
auto GetGlobalID = [&](int id) {
std::string result;
const std::string sid = std::to_string(id);
if (work_group_launch_order[id] == id) {
return "GLOBAL_ID_" + sid;
} else {
return "GROUP_ID_" + std::to_string(launch_remap[id]) + " * GROUP_SIZE_" +
sid + " + LOCAL_ID_" + sid;
}
};
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = " + GetGlobalID(0) + ";\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = " + GetGlobalID(0) + ";\n";
}
c += " int DST_X = X * 2;\n";
c += " int SRC_X = X + args.padding_x;\n";
c += " int Y = " + GetGlobalID(1) + ";\n";
c += " int DST_Y = Y * 2;\n";
c += " int SRC_Y = Y + args.padding_y;\n";
c += " int Z = " + GetGlobalID(2) + ";\n";
if (!need_local_mem) {
c += " if (DST_X >= args.dst_tensor.Width() || DST_Y >= "
"args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;\n";
}
c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
c += " int f_offset = Z * args.filter_offset;\n";
if (need_local_mem) {
c += " __local FLT4 weights_cache[36];\n";
}
if (weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n";
}
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x0 = SRC_X >= 0 && SRC_X < args.src_tensor.Width();\n";
c += " bool in_x1 = SRC_X + 1 >= 0 && SRC_X + 1 < "
"args.src_tensor.Width();\n";
}
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y0 = SRC_Y >= 0 && SRC_Y < args.src_tensor.Height();\n";
c += " bool in_y1 = SRC_Y + 1 >= 0 && SRC_Y + 1 < "
"args.src_tensor.Height();\n";
}
auto generate_check = [&](int x, int y) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT};
const std::vector<std::string> names{"in_x" + std::to_string(x),
"in_y" + std::to_string(y)};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_desc.HasAxis(axis) &&
!src_desc.SupportsZeroClamp(axis, gpu_info)) {
if (!check.empty()) {
check += " && ";
}
check += names[i];
}
}
return check;
};
if (src_desc.IsLinear()) {
if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
c += " int addr_0 = args.src_tensor.GetAddress(SRC_X, SRC_Y, 0);\n";
c += " int addr_1 = args.src_tensor.GetAddress(SRC_X + 1, SRC_Y, 0);\n";
c += " int addr_2 = args.src_tensor.GetAddress(SRC_X, SRC_Y + 1, 0);\n";
c += " int addr_3 = args.src_tensor.GetAddress(SRC_X+1, SRC_Y+1, 0);\n";
c += " addr_0 = select(-1, addr_0, (in_x0 && in_y0));\n";
c += " addr_1 = select(-1, addr_1, (in_x1 && in_y0));\n";
c += " addr_2 = select(-1, addr_2, (in_x0 && in_y1));\n";
c += " addr_3 = select(-1, addr_3, (in_x1 && in_y1));\n";
c += " int dz_0 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
"in_y0));\n";
c += " int dz_1 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
"in_y0));\n";
c += " int dz_2 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
"in_y1));\n";
c += " int dz_3 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
"in_y1));\n";
} else {
c += " int xc0 = clamp(SRC_X, 0, args.src_tensor.Width() - 1);\n";
c += " int xc1 = clamp(SRC_X + 1, 0, args.src_tensor.Width() - 1);\n";
c += " int yc0 = clamp(SRC_Y, 0, args.src_tensor.Height() - 1);\n";
c += " int yc1 = clamp(SRC_Y + 1, 0, args.src_tensor.Height() - 1);\n";
c += " int addr_0 = args.src_tensor.GetAddress(xc0, yc0, 0);\n";
c += " int addr_1 = args.src_tensor.GetAddress(xc1, yc0, 0);\n";
c += " int addr_2 = args.src_tensor.GetAddress(xc0, yc1, 0);\n";
c += " int addr_3 = args.src_tensor.GetAddress(xc1, yc1, 0);\n";
c += " int dz = args.src_tensor.SliceStride();\n";
}
}
auto read_src = [&](int x, int y) {
if (src_desc.IsLinear()) {
const std::string id = std::to_string(y * 2 + x);
const std::string addr = "addr_" + std::to_string(y * 2 + x);
if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
return "args.src_tensor.Read(" + addr + "); " + addr + " += dz_" + id +
";\n";
} else {
return "args.src_tensor.Read(" + addr + ") * INIT_FLT(in_x" +
std::to_string(x) + " && in_y" + std::to_string(y) + "); " +
addr + " += dz;\n";
}
} else {
std::string check = generate_check(x, y);
if (!check.empty()) {
check = " * INIT_FLT(" + check + ")";
}
return "args.src_tensor.Read(SRC_X + " + std::to_string(x) +
", SRC_Y + " + std::to_string(y) + ", s)" + check + ";\n";
}
};
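  // The (accumulator, source) pairs below choose which of the four loaded
  // source pixels feeds each of the four output accumulators; the table is
  // selected by the parity of the transposed-convolution padding.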
const int padding_x_rem = abs(padding.x) % 2;
const int padding_y_rem = abs(padding.y) % 2;
std::vector<std::pair<int, int>> permutation;
if (padding_x_rem == 1 && padding_y_rem == 1) {
permutation = {{0, 0}, {1, 0}, {1, 1}, {2, 0}, {2, 2},
{3, 0}, {3, 1}, {3, 2}, {3, 3}};
} else if (padding_x_rem == 0 && padding_y_rem == 1) {
permutation = {{0, 0}, {0, 1}, {1, 1}, {2, 0}, {2, 1},
{2, 2}, {2, 3}, {3, 1}, {3, 3}};
} else if (padding_x_rem == 1 && padding_y_rem == 0) {
permutation = {{0, 0}, {0, 2}, {1, 0}, {1, 1}, {1, 2},
{1, 3}, {2, 2}, {3, 2}, {3, 3}};
} else {
permutation = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 1},
{1, 3}, {2, 2}, {2, 3}, {3, 3}};
}
c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n";
if (need_local_mem) {
c += " " + barrier + ";\n";
}
if (weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_ASYNC) {
c += " async_work_group_copy(weights_cache, "
"args.weights.GetPtr(f_offset), 36, "
"0);\n";
} else if (weights_upload_type ==
ConvolutionTransposed3x3::WeightsUploadType::
LOCAL_MEM_BY_THREADS) {
c += " weights_cache[local_id] = args.weights.Read(f_offset + "
"local_id);\n";
c += " if (local_id < 4) {\n";
c += " weights_cache[local_id + 32] = args.weights.Read(f_offset + "
"local_id + "
"32);\n";
c += " };\n";
} else {
c += " " + weights_space +
" FLT4* weights_cache = args.weights.GetPtr(f_offset);\n";
}
c += " FLT4 src0 = " + read_src(0, 0);
c += " FLT4 src1 = " + read_src(1, 0);
c += " FLT4 src2 = " + read_src(0, 1);
c += " FLT4 src3 = " + read_src(1, 1);
c += " f_offset += 36;\n";
if (need_local_mem) {
c += " " + barrier + ";\n";
}
for (int i = 0; i < 9; ++i) {
const std::string r_name = "r" + std::to_string(permutation[i].first);
const std::string s_name = "src" + std::to_string(permutation[i].second);
const std::string w_name = std::to_string(i * 4);
c += " CONV(" + r_name + ", " + s_name + ", " + w_name + ");\n";
}
c += " }\n";
if (need_local_mem) {
c += " if (DST_X >= args.dst_tensor.Width() || DST_Y >= "
"args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;\n";
}
c += " FLT4 bias_val = args.biases.Read(Z);\n";
for (int y = 0; y < 2; ++y) {
for (int x = 0; x < 2; ++x) {
const std::string s_x = std::to_string(x);
const std::string s_y = std::to_string(y);
const std::string id = std::to_string(y * 2 + x);
const std::string x_c = "DST_X + " + s_x;
const std::string y_c = "DST_Y + " + s_y;
c += " if (" + x_c + " < args.dst_tensor.Width() && " + y_c +
" < args.dst_tensor.Height()) {\n";
c += " FLT4 res0 = TO_FLT4(r" + id + ") + bias_val;\n";
c += " args.dst_tensor.Write(res0, " + x_c + ", " + y_c + ", Z);\n";
c += " }\n";
}
}
c += "}\n";
return c;
}
absl::Status ConvolutionTransposed3x3::BindArguments(ArgumentsBinder* args) {
RETURN_IF_ERROR(args->SetInt("filter_offset", 4 * 9 * src_[0]->Slices()));
const int padding_x =
padding_.x >= 1 ? (padding_.x - 1) / 2 : (padding_.x - 2) / 2;
const int padding_y =
padding_.y >= 1 ? (padding_.y - 1) / 2 : (padding_.y - 2) / 2;
RETURN_IF_ERROR(args->SetInt("padding_x", padding_x));
return args->SetInt("padding_y", padding_y);
}
void ConvolutionTransposed3x3::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
if (weights_upload_type_ == WeightsUploadType::LOCAL_MEM_ASYNC ||
weights_upload_type_ == WeightsUploadType::LOCAL_MEM_BY_THREADS) {
work_groups->push_back(work_group_size_);
return;
}
GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
}
int3 ConvolutionTransposed3x3::GetGridSize() const {
const int grid_x = DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch();
const int grid_y = DivideRoundUp(dst_[0]->Height(), 2);
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
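// Spatial order of the nine 3x3 kernel taps in the uploaded weights buffer,
// selected by the same padding parity as the CONV permutation used when
// generating the kernel code.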
std::vector<int> ConvolutionTransposed3x3::GetSpatialWeightsRemap() const {
const int padding_x_rem = abs(padding_.x) % 2;
const int padding_y_rem = abs(padding_.y) % 2;
std::vector<int> remap;
if (padding_x_rem == 1 && padding_y_rem == 1) {
return std::vector<int>{4, 5, 3, 7, 1, 8, 6, 2, 0};
} else if (padding_x_rem == 0 && padding_y_rem == 1) {
return std::vector<int>{5, 3, 4, 8, 6, 2, 0, 7, 1};
} else if (padding_x_rem == 1 && padding_y_rem == 0) {
return std::vector<int>{7, 1, 8, 6, 2, 0, 4, 5, 3};
} else {
return std::vector<int>{8, 6, 2, 0, 7, 1, 5, 3, 4};
}
}
void ConvolutionTransposed3x3::UploadWeights(
const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights) {
const auto weights_desc = GetWeightsDescription();
const int flt_count =
GetTotalElementsCountForLayout(weights_desc, weights.shape);
BufferDescriptor desc;
desc.element_type = weights_desc.type;
desc.element_size = 4;
desc.memory_type =
weights_upload_type_ ==
ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
desc.size = flt_count * SizeOf(desc.element_type);
desc.data.resize(desc.size);
RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data));
args_.AddObject("weights",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
bool IsConvolutionTransposed3x3Supported(
const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
return attr.weights.shape.w == 3 && attr.weights.shape.h == 3 &&
attr.stride.w == 2 && attr.stride.h == 2;
}
ConvolutionTransposed3x3 CreateConvolutionTransposed3x3(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
const int2 padding = int2(attr.padding.prepended.w, attr.padding.prepended.h);
ConvolutionTransposed3x3 result(definition, gpu_info, padding);
result.UploadWeights(attr.weights);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
ConvolutionTransposed3x3 CreateConvolutionTransposed3x3DynamicWeights(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
OperationDef new_def = definition;
new_def.src_tensors = {
definition.src_tensors[0]};
const DataType weights_type = definition.GetDataType();
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::BUFFER, Layout::HWC});
const int2 padding = int2(attr.padding.prepended.w, attr.padding.prepended.h);
ConvolutionTransposed3x3 result(new_def, gpu_info, padding);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3) {
auto status = ConvolutionTransposed3x3Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
78e90436-828f-4070-94d9-65ae8cf4adb5 | cpp | tensorflow/tensorflow | max_unpooling | tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc | tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class MaxUnpooling : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const MaxUnpooling2DAttributes&>(ctx.op_attr);
std::vector<Variable> parameters = {
{"stride", int2(attr.strides.w, attr.strides.h)},
{"offset", int2(attr.padding.prepended.w, attr.padding.prepended.h)},
{"window_h", attr.kernel.h},
{"window_w", attr.kernel.w},
};
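    // input_data_0 holds the pooled values and input_data_1 the corresponding
    // indices; each index encodes a position inside the pooling window as
    // y * window_w + x, and the value is written only to that position.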
std::string source = R"(
ivec2 coord = (gid.xy + $offset$) / $stride$;
ivec4 indices = $input_data_1[coord.x, coord.y, gid.z]$;
vec4 input_ = $input_data_0[coord.x, coord.y, gid.z]$;
coord = coord * $stride$ - $offset$;
for (int i = 0; i < 4; ++i) {
ivec2 t = coord + ivec2(indices[i] % $window_w$, indices[i] / $window_w$);
if (t.x == gid.x && t.y == gid.y) {
value_0[i] = input_[i];
}
}
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewMaxUnpoolingNodeShader() {
return std::make_unique<MaxUnpooling>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/max_unpooling_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, MaxUnpooling) {
auto status = MaxUnpoolingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6b473baa-c4fb-484c-959f-9e7b529ae340 | cpp | tensorflow/tensorflow | mean_stddev_normalization | tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization.cc | tensorflow/lite/delegates/gpu/cl/kernels/mean_stddev_normalization_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace {
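// Small graph-inspection helpers: check a node's operation type, read a
// scalar from elementwise attributes, and follow a single-consumer edge to
// the next node of an expected type.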
absl::Status CheckIfValidNodeOfType(const Node* node,
OperationType required_type) {
if (node == nullptr) {
return absl::NotFoundError("Invalid node.");
}
if (OperationTypeFromString(node->operation.type) != required_type) {
return absl::NotFoundError("Type mismatch.");
}
return absl::OkStatus();
}
absl::Status GetElementwiseScalarValue(const Node* node, float* result) {
auto attr = absl::any_cast<ElementwiseAttributes>(node->operation.attributes);
const float* value = absl::get_if<float>(&attr.param);
if (!value) {
return absl::NotFoundError("Not a scalar value inside attributes.");
}
*result = *value;
return absl::OkStatus();
}
absl::Status GetNextSingleNode(const GraphFloat32& graph, const Node& node,
OperationType next_type, Node** next_node) {
auto consumers = graph.FindConsumers(graph.FindOutputs(node.id)[0]->id);
if (consumers.size() != 1) {
return absl::NotFoundError("Not a single consumer.");
}
RETURN_IF_ERROR(CheckIfValidNodeOfType(consumers[0], next_type));
*next_node = consumers[0];
return absl::OkStatus();
}
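// Emits code that reduces a per-thread partial value across the work group
// along the Z dimension: for reduction sizes of at most 8, thread 0 sums the
// shared-memory entries directly; otherwise a pairwise tree reduction is
// generated.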
std::string GetReduceCode(const std::string& value, int3 work_group_size,
bool two_step) {
int reduction_size = work_group_size.z;
std::string mem_name = work_group_size.x * work_group_size.y != 1
? "shared_mem[LOCAL_ID_1][LOCAL_ID_0]"
: "shared_mem";
if (reduction_size <= 8) {
std::string result;
result += " {
result += " " + mem_name + "[local_id] = " + value + ";\n";
result += " LOCAL_MEM_BARRIER;\n";
result += " if (LOCAL_ID_2 == 0) {\n";
result += " " + value + " = " + mem_name + "[0];\n";
for (int i = 1; i < reduction_size; ++i) {
result += " " + value + " += " + mem_name + "[" + std::to_string(i) +
"];\n";
}
result += " " + mem_name + "[0] = " + value + ";\n";
result += " }\n";
result += " LOCAL_MEM_BARRIER;\n";
result += " " + value + " = " + mem_name + "[0];\n";
if (two_step) {
result += " LOCAL_MEM_BARRIER;\n";
}
result += " }\n";
return result;
} else {
return absl::Substitute(R"(
{
$2[local_id] = $1;
LOCAL_MEM_BARRIER;
int reduction_size = $0;
while (reduction_size > 1) {
int active_thread_limit = reduction_size / 2;
int offset = (reduction_size + 1) / 2;
if (local_id < active_thread_limit) {
$1 += $2[local_id + offset];
$2[local_id] = $1;
}
LOCAL_MEM_BARRIER;
reduction_size = offset;
}
$1 = $2[0];
}
)",
reduction_size, value, mem_name);
}
}
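// Zeroes the lanes of the last channel slice that lie beyond the real channel
// count so padded channels do not contribute to the sums.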
std::string ZeroClampVec4Code(const std::string& slice_name,
const std::string& channels_name,
const std::string& value_name) {
return absl::Substitute(R"(
if ($0 * 4 + 1 >= $1) { $2.y = 0.0f; }
if ($0 * 4 + 2 >= $1) { $2.z = 0.0f; }
if ($0 * 4 + 3 >= $1) { $2.w = 0.0f; }
)",
slice_name, channels_name, value_name);
}
bool UseWorkGroupReduction(const GpuInfo& gpu_info, const BHWC& shape) {
const int tensor_slices = DivideRoundUp(shape.c, 4);
if (gpu_info.IsAdreno() && tensor_slices <= 32 &&
shape.w * shape.h * shape.b >= 128) {
return false;
} else {
return true;
}
}
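// Picks a work group size whose Z extent is used as the reduction group,
// tuned per vendor and per tensor shape.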
int3 GetRecommendedWorkGroupSize(const GpuInfo& gpu_info, const BHWC& shape) {
const int tensor_slices = DivideRoundUp(shape.c, 4);
int desired_work_group_size = gpu_info.GetMaxWorkGroupSizeForZ();
if (gpu_info.IsMali()) {
desired_work_group_size = 64;
}
if (gpu_info.IsAdreno()) {
AdrenoInfo info = gpu_info.adreno_info;
desired_work_group_size = 256;
if (info.IsAdreno3xx()) {
if (info.adreno_gpu == AdrenoGpu::kAdreno320 ||
info.adreno_gpu == AdrenoGpu::kAdreno330) {
desired_work_group_size = 128;
} else {
desired_work_group_size = 64;
}
} else if (info.IsAdreno4xx()) {
if (info.adreno_gpu == AdrenoGpu::kAdreno430) {
desired_work_group_size = 256;
} else {
desired_work_group_size = 128;
}
} else if (info.IsAdreno5xx()) {
if (info.adreno_gpu == AdrenoGpu::kAdreno530 ||
info.adreno_gpu == AdrenoGpu::kAdreno540) {
desired_work_group_size = 256;
} else {
desired_work_group_size = 128;
}
}
}
if (gpu_info.IsPowerVR()) {
desired_work_group_size = 64;
}
if (gpu_info.IsApple()) {
desired_work_group_size = 64;
}
if (gpu_info.IsAMD()) {
desired_work_group_size = 512;
}
int3 work_group_size(1, 1, 1);
if (shape.w * shape.h == 1) {
desired_work_group_size =
std::min(desired_work_group_size, gpu_info.GetMaxWorkGroupSizeForZ());
while (desired_work_group_size >= tensor_slices * 2) {
desired_work_group_size /= 2;
}
work_group_size.x = 1;
work_group_size.y = 1;
work_group_size.z = desired_work_group_size;
} else {
if (tensor_slices >= 16) {
work_group_size.z = 8;
} else if (tensor_slices >= 10) {
work_group_size.z = 4;
} else {
std::map<int, int> slices_to_group_size = {
{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 3},
{6, 3}, {7, 4}, {8, 4}, {9, 3},
};
work_group_size.z = slices_to_group_size[tensor_slices];
}
desired_work_group_size =
std::min(desired_work_group_size, gpu_info.GetMaxWorkGroupTotalSize());
work_group_size.x = 1;
work_group_size.y =
desired_work_group_size / AlignByN(work_group_size.z, 4);
while (work_group_size.y > work_group_size.x) {
work_group_size.y /= 2;
work_group_size.x *= 2;
}
}
return work_group_size;
}
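// Emits the common kernel prologue that computes per-pixel mean and variance
// over channels, either in one pass (sum and sum of squares) or in two passes
// (sum, then sum of squared deviations), optionally with a work-group
// reduction over channel slices.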
std::string GetVarianceCalculationCode(const GpuInfo& gpu_info,
bool work_group_reduction,
const int3& work_group_size,
bool has_batch, bool channels_x4,
bool two_step) {
std::string c;
if (work_group_reduction && gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(" +
std::to_string(work_group_size.x) + ", " +
std::to_string(work_group_size.y) + ", " +
std::to_string(work_group_size.z) + ")))\n";
}
c += "MAIN_FUNCTION($0) {\n";
if (work_group_reduction) {
std::string accum_type = two_step ? "float" : "float2";
if (work_group_size.x * work_group_size.y == 1) {
c += "__local " + accum_type + " shared_mem[" +
std::to_string(work_group_size.z) + "];\n";
} else {
c += "__local " + accum_type + " shared_mem[" +
std::to_string(work_group_size.y) + "][" +
std::to_string(work_group_size.x) + "][" +
std::to_string(work_group_size.z) + "];\n";
}
}
if (has_batch) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
if (!work_group_reduction) {
c += " if (X >= args.dst_tensor.Width()) { return; }\n";
c += " if (Y >= args.dst_tensor.Height()) { return; }\n";
}
if (!two_step) {
c += " float4 private_sum4_sq = INIT_FLOAT4(0.0f);\n";
}
if (work_group_reduction) {
c += " int local_id = LOCAL_ID_2;\n";
c += " int reduction_group_size = GROUP_SIZE_2;\n";
} else {
c += " int local_id = 0;\n";
c += " int reduction_group_size = 1;\n";
}
c += R"(
float4 private_sum4 = INIT_FLOAT4(0.0f);
for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) {
int x_clamped = min(X, args.src_tensor.Width() - 1);
int y_clamped = min(Y, args.src_tensor.Height() - 1);
float4 t = args.src_tensor.Read<float>(x_clamped, y_clamped, S);
)";
if (!channels_x4) {
c += ZeroClampVec4Code("S", "args.src_tensor.Channels()", "t");
}
if (two_step) {
c += " private_sum4 += t;\n";
c += " }\n";
c += " float sum = dot(private_sum4, INIT_FLOAT4(1.0f));\n";
} else {
c += " private_sum4 += t;\n";
c += " private_sum4_sq += t * t;\n";
c += " }\n";
c += " float2 sum;\n";
c += " sum.x = dot(private_sum4, INIT_FLOAT4(1.0f));\n";
c += " sum.y = dot(private_sum4_sq, INIT_FLOAT4(1.0f));\n";
}
if (work_group_reduction) {
c += GetReduceCode("sum", work_group_size, two_step);
}
if (two_step) {
c += R"(
float mean = sum * args.inv_ch_count;
float4 private_sum_diff_sq4 = INIT_FLOAT4(0.0f);
for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) {
int x_clamped = min(X, args.src_tensor.Width() - 1);
int y_clamped = min(Y, args.src_tensor.Height() - 1);
float4 t = args.src_tensor.Read<float>(x_clamped, y_clamped, S);
float4 diff = t - mean;)";
if (!channels_x4) {
c += ZeroClampVec4Code("S", "args.src_tensor.Channels()", "diff");
}
c += R"(
private_sum_diff_sq4 += diff * diff;
}
float sum_diff_sq = dot(private_sum_diff_sq4, INIT_FLOAT4(1.0f));
)";
if (work_group_reduction) {
c += GetReduceCode("sum_diff_sq", work_group_size, two_step);
}
c += " float variance = sum_diff_sq * args.inv_ch_count;\n";
} else {
c += " float mean = sum.x * args.inv_ch_count;\n";
c += " float mean_sq = sum.y * args.inv_ch_count;\n";
c += " float variance = mean_sq - mean * mean;\n";
}
if (work_group_reduction) {
c += "
c += " if (X >= args.dst_tensor.Width()) { return; }\n";
c += " if (Y >= args.dst_tensor.Height()) { return; }\n";
}
return c;
}
}
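// Chooses between a work-group reduction kernel and a plain per-pixel kernel
// depending on the GPU and tensor shape.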
MeanStdDevNormalization::MeanStdDevNormalization(const OperationDef& definition,
const GpuInfo& gpu_info,
const BHWC& shape,
float variance_bias,
bool two_step)
: GPUOperation(definition) {
work_group_reduction_ = UseWorkGroupReduction(gpu_info, shape);
if (work_group_reduction_) {
work_group_size_ = GetRecommendedWorkGroupSize(gpu_info, shape);
} else {
work_group_size_ = int3(8, 8, 1);
}
args_.AddFloat("variance_bias", variance_bias);
args_.AddFloat("inv_ch_count", 1.0f / shape.c);
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
code_ = GetNormalizationCode(gpu_info, shape.c % 4 == 0, two_step);
}
std::string MeanStdDevNormalization::GetNormalizationCode(
const GpuInfo& gpu_info, bool channels_x4, bool two_step) {
std::string c = GetVarianceCalculationCode(
gpu_info, work_group_reduction_, work_group_size_,
definition_.dst_tensors[0].HasAxis(Axis::BATCH), channels_x4, two_step);
c += R"(
float stddev_inv = rsqrt(variance + args.variance_bias);
for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) {
float4 t = args.src_tensor.Read<float>(X, Y, S);
FLT4 result = TO_FLT4((t - mean) * stddev_inv);
args.dst_tensor.Write(result, X, Y, S);
}
})";
return c;
}
int3 MeanStdDevNormalization::GetGridSize() const {
const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
const int grid_y = dst_[0]->Height();
const int grid_z = work_group_reduction_ ? work_group_size_.z : 1;
return int3(grid_x, grid_y, grid_z);
}
MeanStdDevNormalization CreateMeanStdDevNormalization(
const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape,
float variance_bias, bool two_step) {
return MeanStdDevNormalization(definition, gpu_info, shape, variance_bias,
two_step);
}
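// Pattern-matches the MEAN -> SUB -> (SQUARE -> MEAN -> ADD -> RSQRT) -> MUL
// subgraph produced by mean/stddev normalization and replaces it with a single
// fused MeanStdDevNormalization operation.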
absl::Status TryMeanStdDevNormalization(
const GpuInfo& gpu_info, CalculationsPrecision precision,
const GraphFloat32& graph, NodeId first_node_id,
const std::map<ValueId, TensorDescriptor>& tensor_descriptors,
std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph) {
Node* first_mean_node = graph.GetNode(first_node_id);
RETURN_IF_ERROR(CheckIfValidNodeOfType(first_mean_node, OperationType::MEAN));
auto first_mean_attr =
absl::any_cast<MeanAttributes>(first_mean_node->operation.attributes);
if (first_mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
}
Node* sub_node;
RETURN_IF_ERROR(GetNextSingleNode(graph, *first_mean_node, OperationType::SUB,
&sub_node));
auto sub_inputs = graph.FindInputs(sub_node->id);
if (sub_inputs.size() != 2) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
} else {
Node* sub_first_parent = graph.FindProducer(sub_inputs[0]->id);
Node* sub_second_parent = graph.FindProducer(sub_inputs[1]->id);
if (sub_second_parent != first_mean_node) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
}
auto mean_inputs = graph.FindInputs(first_mean_node->id);
Node* mean_parent = graph.FindProducer(mean_inputs[0]->id);
if (mean_parent != sub_first_parent) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
}
}
auto sub_output = graph.FindOutputs(sub_node->id)[0]->id;
auto consumers = graph.FindConsumers(sub_output);
if (consumers.size() != 2) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
}
Node* square_node = consumers[0];
Node* sub_child_mul_node = consumers[1];
if (!CheckIfValidNodeOfType(square_node, OperationType::SQUARE).ok()) {
square_node = consumers[1];
sub_child_mul_node = consumers[0];
}
RETURN_IF_ERROR(CheckIfValidNodeOfType(square_node, OperationType::SQUARE));
RETURN_IF_ERROR(
CheckIfValidNodeOfType(sub_child_mul_node, OperationType::MUL));
Node* second_mean_node;
RETURN_IF_ERROR(GetNextSingleNode(graph, *square_node, OperationType::MEAN,
&second_mean_node));
auto second_mean_attr =
absl::any_cast<MeanAttributes>(second_mean_node->operation.attributes);
if (second_mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
}
Node* add_node;
RETURN_IF_ERROR(GetNextSingleNode(graph, *second_mean_node,
OperationType::ADD, &add_node));
float add_value;
RETURN_IF_ERROR(GetElementwiseScalarValue(add_node, &add_value));
Node* rsqrt_node;
RETURN_IF_ERROR(
GetNextSingleNode(graph, *add_node, OperationType::RSQRT, &rsqrt_node));
Node* mul_node;
RETURN_IF_ERROR(
GetNextSingleNode(graph, *rsqrt_node, OperationType::MUL, &mul_node));
if (sub_child_mul_node != mul_node) {
return absl::NotFoundError("MeanStdDevNormalization not suitable.");
}
OperationDef op_def;
op_def.precision = precision;
auto input_id = graph.FindInputs(first_mean_node->id)[0]->id;
auto it = tensor_descriptors.find(input_id);
if (it != tensor_descriptors.end()) {
op_def.src_tensors.push_back(it->second);
}
auto output_id = graph.FindOutputs(mul_node->id)[0]->id;
it = tensor_descriptors.find(output_id);
if (it != tensor_descriptors.end()) {
op_def.dst_tensors.push_back(it->second);
}
auto subgraph_inputs = graph.FindInputs(first_mean_node->id);
auto subgraph_outputs = graph.FindOutputs(mul_node->id);
std::unique_ptr<GPUOperation>* gpu_op =
InitSingleOpSubgraph(subgraph_inputs, subgraph_outputs, gpu_subgraph);
*gpu_op =
std::make_unique<MeanStdDevNormalization>(CreateMeanStdDevNormalization(
op_def, gpu_info, subgraph_inputs[0]->tensor.shape, add_value,
false));
consumed_nodes->insert(first_mean_node->id);
consumed_nodes->insert(sub_node->id);
consumed_nodes->insert(square_node->id);
consumed_nodes->insert(second_mean_node->id);
consumed_nodes->insert(add_node->id);
consumed_nodes->insert(rsqrt_node->id);
consumed_nodes->insert(mul_node->id);
return absl::OkStatus();
}
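// Same normalization as MeanStdDevNormalization, followed by a constant
// per-channel scale (mul_linear) and offset (sub_linear).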
LayerNormalization::LayerNormalization(
const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape,
float variance_bias, const Tensor<Linear, DataType::FLOAT32>& mul_linear,
const Tensor<Linear, DataType::FLOAT32>& sub_linear, bool two_step)
: GPUOperation(definition) {
work_group_reduction_ = UseWorkGroupReduction(gpu_info, shape);
if (work_group_reduction_) {
work_group_size_ = GetRecommendedWorkGroupSize(gpu_info, shape);
} else {
work_group_size_ = int3(8, 8, 1);
}
args_.AddFloat("variance_bias", variance_bias);
args_.AddFloat("inv_ch_count", 1.0f / shape.c);
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
TensorDescriptor mul_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), mul_linear);
args_.AddObject("mul_linear", std::make_unique<TensorDescriptor>(
std::move(mul_tensor_desc)));
TensorDescriptor sub_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), sub_linear);
args_.AddObject("sub_linear", std::make_unique<TensorDescriptor>(
std::move(sub_tensor_desc)));
code_ = GetNormalizationCode(gpu_info, shape.c % 4 == 0, two_step);
}
std::string LayerNormalization::GetNormalizationCode(const GpuInfo& gpu_info,
bool channels_x4,
bool two_step) {
std::string c = GetVarianceCalculationCode(
gpu_info, work_group_reduction_, work_group_size_,
definition_.dst_tensors[0].HasAxis(Axis::BATCH), channels_x4, two_step);
c += R"(
float stddev_inv = rsqrt(variance + args.variance_bias);
for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) {
float4 t = args.src_tensor.Read<float>(X, Y, S);
float4 mul0_res = stddev_inv * args.mul_linear.Read<float>(S);
float4 mul1_res = mul0_res * t;
float4 mul2_res = mul0_res * mean;
float4 sub_res = args.sub_linear.Read<float>(S) - mul2_res;
FLT4 result = TO_FLT4(mul1_res + sub_res);
args.dst_tensor.Write(result, X, Y, S);
}
})";
return c;
}
int3 LayerNormalization::GetGridSize() const {
const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
const int grid_y = dst_[0]->Height();
const int grid_z = work_group_reduction_ ? work_group_size_.z : 1;
return int3(grid_x, grid_y, grid_z);
}
LayerNormalization CreateLayerNormalization(
const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape,
float variance_bias, const Tensor<Linear, DataType::FLOAT32>& mul_linear,
const Tensor<Linear, DataType::FLOAT32>& sub_linear, bool two_step) {
return LayerNormalization(definition, gpu_info, shape, variance_bias,
mul_linear, sub_linear, two_step);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, MeanStddevNormSeparateBatches) {
auto status = MeanStddevNormSeparateBatchesTest(0.0f, 0.0f, 0.0f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = MeanStddevNormSeparateBatchesTest(0.0f, 0.01f, 2.63e-4f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status =
MeanStddevNormSeparateBatchesTest(0.0f, 100.0f, 2.63e-4f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = MeanStddevNormSeparateBatchesTest(0.01f, 0.0f, 0.0f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status =
MeanStddevNormSeparateBatchesTest(0.01f, 0.01f, 3.57e-4f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status =
MeanStddevNormSeparateBatchesTest(1.0f, 100.0f, 2.63e-4f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = MeanStddevNormSeparateBatchesTest(100.0f, 0.0f, 0.0f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status =
MeanStddevNormSeparateBatchesTest(100.0f, 1.0f, 2.63e-4f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status =
MeanStddevNormSeparateBatchesTest(100.0f, 100.0f, 2.63e-4f, &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, MeanStddevNormalizationAllBatches) {
auto status = MeanStddevNormalizationAllBatchesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, MeanStddevNormalizationLargeVector) {
auto status = MeanStddevNormalizationLargeVectorTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/mean_stddev_normalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e1b571f-877a-4ea3-8859-49202fc0709f | cpp | tensorflow/tensorflow | resampler | tensorflow/lite/delegates/gpu/gl/kernels/resampler.cc | tensorflow/lite/delegates/gpu/cl/kernels/resampler_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/resampler.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
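// Bilinear resampling: input 1 provides per-pixel sampling coordinates into
// input 0; taps that fall outside the source tensor contribute zero.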
class Resampler : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::vector<Variable> parameters = {
{"src_height", static_cast<int>(ctx.input_shapes[0][1])},
{"src_width", static_cast<int>(ctx.input_shapes[0][2])},
};
std::string source = R"(
highp int X = int(gid.x);
highp int Y = int(gid.y);
highp int S = int(gid.z);
highp vec2 f_coords = ($input_data_1[X, Y, 0]$).xy;
highp vec2 f_coords_floor = floor(f_coords);
highp ivec4 st;
st.xy = ivec2(f_coords_floor.x, f_coords_floor.y);
st.zw = st.xy + ivec2(1, 1);
highp vec2 t = f_coords - f_coords_floor;
bool stx_in = st.x >= 0 && st.x < $src_width$;
bool stz_in = st.z >= 0 && st.z < $src_width$;
bool sty_in = st.y >= 0 && st.y < $src_height$;
bool stw_in = st.w >= 0 && st.w < $src_height$;
vec4 src0 = (stx_in && sty_in) ? $input_data_0[st.x, st.y, S]$ : vec4(0.0);
vec4 src1 = (stz_in && sty_in) ? $input_data_0[st.z, st.y, S]$ : vec4(0.0);
vec4 src2 = (stx_in && stw_in) ? $input_data_0[st.x, st.w, S]$ : vec4(0.0);
vec4 src3 = (stz_in && stw_in) ? $input_data_0[st.z, st.w, S]$ : vec4(0.0);
value_0 = mix(mix(src0, src1, t.x), mix(src2, src3, t.x), t.y);
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewResamplerNodeShader() {
return std::make_unique<Resampler>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/resampler_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ResamplerIdentity) {
auto status = ResamplerIdentityTest(BHWC(1, 2, 2, 1), &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = ResamplerIdentityTest(BHWC(1, 3, 5, 3), &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = ResamplerIdentityTest(BHWC(1, 6, 1, 7), &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/resampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/resampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a493795-3ee7-4cb2-a7b7-639d43aba311 | cpp | tensorflow/tensorflow | softmax | tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc | tensorflow/lite/delegates/xnnpack/softmax_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class ConvertSoftmax : public OpConverterBase<ConvertSoftmax> {
public:
explicit ConvertSoftmax(const OpConverterParams *params)
: OpConverterBase<ConvertSoftmax>(params) {}
static constexpr std::array<DataType, 3> AllowedDataTypes() {
return {DataType::DT_FLOAT, DataType::DT_HALF};
}
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return std::array<InputArgSpec, 1>{
InputArgSpec::Create("logits", TrtInputArg::kTensor)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
if (!num_trt_dims && params.use_implicit_batch) {
return errors::InvalidArgument(
"TensorRT Softmax cannot apply on the batch dimension");
}
return OkStatus();
}
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &node_def = params.node_def;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
nvinfer1::ISoftMaxLayer *layer =
params.converter->network()->addSoftMax(*logits_tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params.converter->SetLayerName(layer, node_def);
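    // setAxes takes a bit mask over dimensions; select the innermost one.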
layer->setAxes(1 << (num_trt_dims - 1));
ITensorProxyPtr output_tensor = layer->getOutput(0);
params.outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertSoftmax>(),
"Softmax");
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/softmax_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Softmax, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester()
.Shape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Softmax, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester().Shape({batch, width, channels}).Test(xnnpack_delegate.get());
}
TEST(Softmax, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
SoftmaxTester().Shape({batch, channels}).Test(xnnpack_delegate.get());
}
TEST(Softmax, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
SoftmaxTester().Shape({batch}).Test(xnnpack_delegate.get());
}
TEST(Softmax, DISABLED_Beta) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester()
.Shape({batch, height, width, channels})
.Beta(0.1f)
.Test(xnnpack_delegate.get());
SoftmaxTester()
.Shape({batch, height, width, channels})
.Beta(10.0f)
.Test(xnnpack_delegate.get());
}
TEST(Softmax, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester()
.Shape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/softmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e700953-00ce-48d8-ab45-ea6a1b6f1a9f | cpp | tensorflow/tensorflow | prelu | tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc | tensorflow/lite/delegates/xnnpack/prelu_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/prelu.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
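// PReLU with a 1-D (per-channel) alpha tensor.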
class PReLULinearAlpha : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr);
auto alpha = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.alpha);
if (!alpha) {
return absl::InvalidArgumentError("Alpha is missing");
}
if (alpha->shape.v != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Alpha shape does not match the number of channels.");
}
*generated_code = GeneratedCode{
{},
{{"alpha", MakeReadonlyObject(alpha->data)}},
{},
uint3(static_cast<int>(ctx.output_shapes[0][2]),
static_cast<int>(ctx.output_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)),
uint3(),
"value_0 = max(value_0, 0.0) + $alpha[gid.z]$ * min(value_0, "
"0.0);",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
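// PReLU with a full HWC alpha tensor matching the output shape.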
class PReLUFull : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr);
auto alpha = std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha);
if (!alpha) {
return absl::InvalidArgumentError("Alpha is missing");
}
if (alpha->shape.h != ctx.output_shapes[0][1] ||
alpha->shape.w != ctx.output_shapes[0][2] ||
alpha->shape.c != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Alpha shape does not match input shape.");
}
ObjectSize obj_size =
uint3(static_cast<int>(ctx.output_shapes[0][2]),
static_cast<int>(ctx.output_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4));
*generated_code = GeneratedCode{
{},
{{"alpha", MakeReadonlyObject(obj_size, ConvertToPHWC4(*alpha))}},
{},
uint3(static_cast<int>(ctx.output_shapes[0][2]),
static_cast<int>(ctx.output_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)),
uint3(),
"value_0 = max(value_0, 0.0) + $alpha[gid.x, gid.y, gid.z]$ "
"* min(value_0, 0.0);",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
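// Dispatches to the HWC or per-channel implementation based on alpha's shape.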
class PReLU : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr);
auto* alpha = std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha);
return alpha ? full_.GenerateCode(ctx, generated_code)
: linear_.GenerateCode(ctx, generated_code);
}
private:
PReLULinearAlpha linear_;
PReLUFull full_;
};
}
std::unique_ptr<NodeShader> NewPReLUNodeShader() {
return std::make_unique<PReLU>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/prelu_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Prelu, DISABLED_4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({1, 1, 1, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({1, 1, width, 1})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({1, height, 1, 1})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({batch, 1, 1, 1})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({1, height, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({height, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_3DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({batch, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 3DBy3DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({1, 1, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_3DBy3DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({1, width, 1})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_3DBy3DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({batch, 1, 1})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_3DBy3DBroadcastWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({1, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_3DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 3DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_3DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, width, channels})
.SlopeShape({})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, channels})
.SlopeShape({batch, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 2DBy2DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, channels})
.SlopeShape({1, channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_2DBy2DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, channels})
.SlopeShape({batch, 1})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, channels})
.SlopeShape({channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, channels})
.SlopeShape({})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, 1DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
PreluTester().InputShape({batch}).SlopeShape({batch}).Test(
xnnpack_delegate.get());
}
TEST(Prelu, DISABLED_1DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
PreluTester().InputShape({batch}).SlopeShape({}).Test(xnnpack_delegate.get());
}
TEST(Prelu, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(Prelu, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.INT8Weights()
.Test(xnnpack_delegate.get());
}
TEST(Prelu, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.INT8ChannelWiseWeights()
.Test(xnnpack_delegate.get());
}
TEST(Prelu, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.SparseWeights()
.Test(xnnpack_delegate.get());
}
TEST(Prelu, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.Test(xnnpack_delegate.get());
}
TEST(Prelu, WeightsCache) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
std::unique_ptr<TfLiteXNNPackDelegateWeightsCache,
decltype(&TfLiteXNNPackDelegateWeightsCacheDelete)>
weights_cache(TfLiteXNNPackDelegateWeightsCacheCreate(),
TfLiteXNNPackDelegateWeightsCacheDelete);
delegate_options.weights_cache = weights_cache.get();
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
PreluTester()
.InputShape({batch, height, width, channels})
.SlopeShape({channels})
.WeightsCache(weights_cache.get())
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/prelu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5cf01089-e79f-4ce7-a37b-9125f745fbbb | cpp | tensorflow/tensorflow | convolution_transposed | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
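// Weights are passed through a plain buffer (rather than textures) on Mali,
// Apple and AMD GPUs.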
bool UseBufferForWeights(const GpuInfo& gpu_info) {
return gpu_info.IsMali() || gpu_info.IsApple() || gpu_info.IsAMD();
}
}
ConvolutionTransposed::ConvolutionTransposed(
const OperationDef& definition, const ConvolutionTransposedAttributes& attr,
const GpuInfo& gpu_info)
: GPUOperation(definition),
stride_(attr.stride.w, attr.stride.h, 1, 1),
block_size_(2, 2, 1, 2) {
if (UseBufferForWeights(gpu_info)) {
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::kOSpatialIOGroupO4I4;
} else {
weights_layout_ = WeightsLayout::kOSpatialIOGroupI4O4;
}
} else {
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4;
} else {
weights_layout_ = WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4;
}
}
const bool is_f16 = definition.precision == CalculationsPrecision::F16;
if (gpu_info.IsMali()) {
if (gpu_info.mali_info.IsMidgard()) {
block_size_ = is_f16 ? int4(2, 1, 1, 2) : int4(2, 1, 1, 1);
} else {
block_size_ = is_f16 ? int4(2, 2, 1, 2) : int4(2, 2, 1, 1);
}
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
if (dst_depth == 1 || dst_depth == 3) {
if (!gpu_info.IsMali()) {
block_size_.y *= block_size_.w;
}
block_size_.w = 1;
}
args_.AddInt("stride_x", stride_.x);
args_.AddInt("stride_y", stride_.y);
args_.AddInt("padding_x", attr.padding.prepended.w);
args_.AddInt("padding_y", attr.padding.prepended.h);
args_.AddInt("kernel_size_x", attr.weights.shape.w);
args_.AddInt("kernel_size_y", attr.weights.shape.h);
code_ = GenerateConvolutionTransposedCode(definition_, gpu_info, block_size_);
}
ConvolutionTransposed::ConvolutionTransposed(
const OperationDef& definition,
const ConvolutionTransposed3DAttributes& attr, const GpuInfo& gpu_info)
: GPUOperation(definition),
stride_(attr.stride.w, attr.stride.h, attr.stride.d, 1),
block_size_(2, 2, 1, 2) {
if (UseBufferForWeights(gpu_info)) {
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::kOSpatialIOGroupO4I4;
} else {
weights_layout_ = WeightsLayout::kOSpatialIOGroupI4O4;
}
} else {
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4;
} else {
weights_layout_ = WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4;
}
}
const bool is_f16 = definition.precision == CalculationsPrecision::F16;
if (gpu_info.IsMali()) {
if (gpu_info.mali_info.IsMidgard()) {
block_size_ = is_f16 ? int4(2, 1, 1, 2) : int4(2, 1, 1, 1);
} else {
block_size_ = is_f16 ? int4(2, 2, 1, 2) : int4(2, 2, 1, 1);
}
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
if (dst_depth == 1 || dst_depth == 3) {
if (!gpu_info.IsMali()) {
block_size_.y *= block_size_.w;
}
block_size_.w = 1;
}
args_.AddInt("stride_x", stride_.x);
args_.AddInt("stride_y", stride_.y);
args_.AddInt("stride_z", stride_.z);
args_.AddInt("padding_x", attr.padding.prepended.w);
args_.AddInt("padding_y", attr.padding.prepended.h);
args_.AddInt("padding_z", attr.padding.prepended.d);
args_.AddInt("kernel_size_x", attr.weights.shape.w);
args_.AddInt("kernel_size_y", attr.weights.shape.h);
args_.AddInt("kernel_size_z", attr.weights.shape.d);
args_.AddInt("grid_size_y");
code_ = GenerateConvolutionTransposedCode(definition_, gpu_info, block_size_);
}
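// Generates the kernel source: each work item produces a block_size block of
// output pixels/slices and iterates over the source positions that map onto
// those outputs under the transposed-convolution stride and padding.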
std::string ConvolutionTransposed::GenerateConvolutionTransposedCode(
const OperationDef& op_def, const GpuInfo& gpu_info,
const int4& block_size) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
if (op_def.src_tensors.size() != 1) {
if (weights_layout_ == WeightsLayout::kOSpatialIOGroupI4O4 ||
weights_layout_ == WeightsLayout::kOSpatialIOGroupO4I4) {
BufferDescriptor desc;
desc.element_type = op_def.src_tensors[1].GetDataType();
desc.element_size = 4;
desc.memory_type = MemoryType::GLOBAL;
AddSrcBuffer("weights", desc);
} else {
for (int i = 0; i < 4; ++i) {
const std::string name = "weights" + std::to_string(i);
AddSrcTensor(name, definition_.src_tensors[1 + i]);
}
}
}
const auto& src_def = op_def.src_tensors[0];
std::string c;
const bool weights_are_buffer = UseBufferForWeights(gpu_info);
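  // Emit one CONV<s> macro per output slice in the block; each macro
  // accumulates the contribution of one source slice, either as per-lane
  // multiply-adds (I4O4 layout) or as dot products (O4I4 layout).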
for (int s = 0; s < block_size.w; ++s) {
std::string f0, f1, f2, f3;
if (weights_are_buffer) {
if (gpu_info.SupportsPointersInKernels()) {
f0 = "weights_cache[" + std::to_string(s * 4 + 0) + "]";
f1 = "weights_cache[" + std::to_string(s * 4 + 1) + "]";
f2 = "weights_cache[" + std::to_string(s * 4 + 2) + "]";
f3 = "weights_cache[" + std::to_string(s * 4 + 3) + "]";
} else {
f0 = "f0";
f1 = "f1";
f2 = "f2";
f3 = "f3";
}
} else {
f0 = "f" + std::to_string(s * 4 + 0);
f1 = "f" + std::to_string(s * 4 + 1);
f2 = "f" + std::to_string(s * 4 + 2);
f3 = "f" + std::to_string(s * 4 + 3);
}
bool use_fma = gpu_info.IsAMD() && gpu_info.IsApiOpenCl();
if (GetWeightsDescription().IsI4O4()) {
switch (op_def.precision) {
case CalculationsPrecision::F32:
case CalculationsPrecision::F16:
if (use_fma) {
c += "#define CONV" + std::to_string(s) + "(R, S) \\\n";
c += "R = fma(" + f0 + ", S.x, R); \\\n";
c += "R = fma(" + f1 + ", S.y, R); \\\n";
c += "R = fma(" + f2 + ", S.z, R); \\\n";
c += "R = fma(" + f3 + ", S.w, R); \n";
} else {
c += "#define CONV" + std::to_string(s) + "(R, S) \\\n";
c += "R += S.x * " + f0 + "; \\\n";
c += "R += S.y * " + f1 + "; \\\n";
c += "R += S.z * " + f2 + "; \\\n";
c += "R += S.w * " + f3 + "; \n";
}
break;
case CalculationsPrecision::F32_F16:
c += "#define CONV" + std::to_string(s) + "(R, S) \\\n";
c += "R += TO_ACCUM_TYPE(S.x * " + f0 + " + S.y * " + f1 +
" + S.z * " + f2 + " + S.w * " + f3 + ");\n";
break;
}
} else {
c += "#define CONV" + std::to_string(s) + "(R, S) \\\n";
c += "R.x += dot(S, " + f0 + "); \\\n";
c += "R.y += dot(S, " + f1 + "); \\\n";
c += "R.z += dot(S, " + f2 + "); \\\n";
c += "R.w += dot(S, " + f3 + "); \n";
}
}
auto generate_id = [&](const std::string& x, const std::string& y,
const std::string& z) {
std::string id;
if (src_def.HasAxis(Axis::WIDTH)) {
id += "_w" + x;
}
if (src_def.HasAxis(Axis::HEIGHT)) {
id += "_h" + y;
}
if (src_def.HasAxis(Axis::DEPTH)) {
id += "_d" + z;
}
return id;
};
auto generate_id_full = [&](const std::string& x, const std::string& y,
const std::string& z, const std::string& s) {
return generate_id(x, y, z) + "_s" + s;
};
auto generate_check = [&](const std::string& x, const std::string& y,
const std::string& z) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH};
const std::vector<std::string> names{"in_x", "in_y", "in_z"};
const std::vector<std::string> coords{x, y, z};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_def.HasAxis(axis) && !src_def.SupportsZeroClamp(axis, gpu_info) &&
block_size[i] != 1) {
if (!check.empty()) {
check += " && ";
}
check += names[i] + coords[i];
}
}
return check;
};
c += "MAIN_FUNCTION($0) {\n";
if (op_def.IsBatchSupported()) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int dst_x = (linear_id / args.dst_tensor.Batch());\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int dst_x = GLOBAL_ID_0;\n";
}
c += " int rem_x = dst_x % args.stride_x;\n";
c += " int ceil_x = dst_x / args.stride_x;\n";
c += " dst_x = ceil_x * args.stride_x * " + std::to_string(block_size.x) +
" + rem_x;\n";
if (src_def.HasAxis(Axis::DEPTH)) {
c += " int linear_id_y = GLOBAL_ID_1;\n";
c += " int dst_y = linear_id_y % args.grid_size_y;\n";
c += " int dst_z = linear_id_y / args.grid_size_y;\n";
c += " int rem_z = dst_z % args.stride_z;\n";
c += " int ceil_z = dst_z / args.stride_z;\n";
c += " dst_z = ceil_z * args.stride_z * " + std::to_string(block_size.z) +
" + rem_z;\n";
c += " if (dst_z >= args.dst_tensor.Depth()) return;\n";
} else {
c += " int dst_y = GLOBAL_ID_1;\n";
}
c += " int rem_y = dst_y % args.stride_y;\n";
c += " int ceil_y = dst_y / args.stride_y;\n";
c += " dst_y = ceil_y * args.stride_y * " + std::to_string(block_size.y) +
" + rem_y;\n";
c += " int dst_s = GLOBAL_ID_2 * " + std::to_string(block_size.w) + ";\n";
c += " if (dst_x >= args.dst_tensor.Width() || dst_y >= "
"args.dst_tensor.Height() || dst_s >= "
"args.dst_tensor.Slices()) return;\n";
if (weights_are_buffer) {
c += " int f_base = dst_s * args.src_tensor.Slices() * args.kernel_size_x "
"* args.kernel_size_y";
if (src_def.HasAxis(Axis::DEPTH)) {
c += " * args.kernel_size_z";
}
c += " * 4;\n";
}
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
c += " ACCUM_FLT4 r" + generate_id_full(xind, yind, zind, sind) +
" = INIT_ACCUM_FLT4(0.0f);\n";
}
}
}
}
c += " int kernel_first_dst_x = dst_x + args.padding_x;\n";
c += " int kernel_first_dst_y = dst_y + args.padding_y;\n";
c += " int kernel_last_dst_x = kernel_first_dst_x - args.kernel_size_x;\n";
c += " int kernel_last_dst_y = kernel_first_dst_y - args.kernel_size_y;\n";
c += " int offset_x = abs(args.padding_x);\n";
c += " int offset_x_strided = offset_x * args.stride_x;\n";
c +=
" int src_x = (kernel_first_dst_x + offset_x_strided) / args.stride_x - "
"offset_x;\n";
c += " int offset_y = abs(args.padding_y);\n";
c += " int offset_y_strided = offset_y * args.stride_y;\n";
c +=
" int src_y = (kernel_first_dst_y + offset_y_strided) / args.stride_y - "
"offset_y;\n";
if (src_def.HasAxis(Axis::DEPTH)) {
c += " int kernel_first_dst_z = dst_z + args.padding_z;\n";
c += " int kernel_last_dst_z = kernel_first_dst_z - args.kernel_size_z;\n";
c += " int offset_z = abs(args.padding_z);\n";
c += " int offset_z_strided = offset_z * args.stride_z;\n";
c += " int src_z = (kernel_first_dst_z + offset_z_strided) / "
"args.stride_z - offset_z;\n";
c += " int src_as_dst_z = src_z * args.stride_z;\n";
c +=
" for (;src_as_dst_z > kernel_last_dst_z; src_z -= 1, src_as_dst_z -= "
"args.stride_z) {\n";
for (int z = 0; z < block_size.z; ++z) {
const std::string zindex = std::to_string(z);
c += " int sz" + zindex + " = src_z + " + zindex + ";\n";
if (!src_def.SupportsZeroClamp(Axis::DEPTH, gpu_info)) {
c += " bool in_z" + zindex + " = sz" + zindex + " >= 0 && sz" +
zindex + " < args.src_tensor.Depth();\n";
if (!src_def.CanReadOutOfBorder(Axis::DEPTH)) {
c += " sz" + zindex + " = clamp(sz" + zindex +
", 0, args.src_tensor.Depth() - 1);\n";
}
}
}
if (block_size.z == 1 &&
!src_def.SupportsZeroClamp(Axis::DEPTH, gpu_info)) {
c += " if (!in_z0) continue;\n";
}
c += " int kernel_z = kernel_first_dst_z - src_as_dst_z;\n";
c += " int src_as_dst_y = src_y * args.stride_y;\n";
c += " int src_y_copy = src_y;\n";
c += " for (;src_as_dst_y > kernel_last_dst_y; src_y_copy -= 1, "
"src_as_dst_y -= args.stride_y) {\n";
} else {
c += " int src_as_dst_y = src_y * args.stride_y;\n";
c += " for (;src_as_dst_y > kernel_last_dst_y; src_y -= 1, src_as_dst_y "
"-= args.stride_y) {\n";
}
for (int y = 0; y < block_size.y; ++y) {
const std::string yindex = std::to_string(y);
const std::string src_y =
src_def.HasAxis(Axis::DEPTH) ? "src_y_copy" : "src_y";
c += " int sy" + yindex + " = " + src_y + " + " + yindex + ";\n";
if (!src_def.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y" + yindex + " = sy" + yindex + " >= 0 && sy" +
yindex + " < args.src_tensor.Height();\n";
if (!src_def.CanReadOutOfBorder(Axis::HEIGHT)) {
c += " sy" + yindex + " = clamp(sy" + yindex +
", 0, args.src_tensor.Height() - 1);\n";
}
}
}
if (block_size.y == 1 && !src_def.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " if (!in_y0) continue;\n";
}
c += " int kernel_y = kernel_first_dst_y - src_as_dst_y;\n";
c += " int src_as_dst_x = src_x * args.stride_x;\n";
c += " int src_x_copy = src_x;\n";
c += " for (;src_as_dst_x > kernel_last_dst_x; src_x_copy -= 1, "
"src_as_dst_x "
"-= args.stride_x) {\n";
for (int x = 0; x < block_size.x; ++x) {
const std::string xindex = std::to_string(x);
c += " int sx" + xindex + " = src_x_copy + " + xindex + ";\n";
if (!src_def.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x" + xindex + " = sx" + xindex + " >= 0 && sx" +
xindex + " < args.src_tensor.Width();\n";
if (!src_def.CanReadOutOfBorder(Axis::WIDTH)) {
c += " sx" + xindex + " = clamp(sx" + xindex +
", 0, args.src_tensor.Width() - 1);\n";
}
}
}
if (block_size.x == 1 && !src_def.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " if (!in_x0) continue;\n";
}
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id(xind, yind, zind);
const std::string check = generate_check(xind, yind, zind);
std::string coords = "sx" + xind + ", sy" + yind;
if (src_def.HasAxis(Axis::DEPTH)) {
coords += ", sz" + zind;
}
if (src_def.IsLinear()) {
c += " int addr" + id + " = args.src_tensor.GetAddress(" +
coords + ", 0);\n";
if (src_def.ReturnsZeroForNegOneRead(gpu_info)) {
c += " addr" + id + " = select(-1, addr" + id + ", (" + check +
"));\n";
c += " int ds" + id +
" = select(0, args.src_tensor.SliceStride(), (" + check +
"));\n";
}
}
}
}
}
if (src_def.IsLinear() && !src_def.ReturnsZeroForNegOneRead(gpu_info)) {
c += " int ds = args.src_tensor.SliceStride();\n";
}
c += " int kernel_x = kernel_first_dst_x - src_as_dst_x;\n";
if (src_def.HasAxis(Axis::DEPTH)) {
c += " int kernel_index = (kernel_z * args.kernel_size_y + kernel_y) "
"* args.kernel_size_x + kernel_x;\n";
} else {
c += " int kernel_index = kernel_y * args.kernel_size_x + kernel_x;\n";
}
if (weights_are_buffer) {
c += " int f_offset = f_base + kernel_index * "
"args.src_tensor.Slices() * " +
std::to_string(block_size.w * 4) + ";\n";
} else {
c += " int x_c = kernel_index * args.src_tensor.Slices();\n";
}
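// Innermost loop over source slices: read the source block, fetch or cache the
// weights, then accumulate with the CONV macros defined above.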
c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n";
const bool conditional_read = gpu_info.IsMali();
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id(xind, yind, zind);
std::string address;
if (src_def.IsLinear()) {
address = "addr" + id;
} else {
address = "sx" + xind + ", sy" + yind;
if (src_def.HasAxis(Axis::DEPTH)) {
address += ", sz" + zind;
}
address += ", s";
}
if (src_def.ReturnsZeroForNegOneRead(gpu_info)) {
c += " FLT4 src" + id + " = args.src_tensor.Read(" + address +
"); " + address + " += ds" + id + ";\n";
} else {
const std::string check = generate_check(xind, yind, zind);
if (!check.empty()) {
if (conditional_read) {
c += " FLT4 src" + id + " = " + check +
" ? args.src_tensor.Read(" + address +
") : INIT_FLT4(0.0f);\n";
} else {
c += " FLT4 src" + id + " = args.src_tensor.Read(" +
address + ") * INIT_FLT(" + check + ");\n";
}
} else {
c += " FLT4 src" + id + " = args.src_tensor.Read(" +
address + ");\n";
}
if (src_def.IsLinear()) {
c += " addr" + id + " += ds;\n";
}
}
}
}
}
if (weights_are_buffer) {
if (gpu_info.SupportsPointersInKernels()) {
c += " __global FLT4* weights_cache = "
"args.weights.GetPtr(f_offset);\n";
}
} else {
for (int s = 0; s < block_size.w; ++s) {
c += absl::Substitute(
R"( FLT4 f$1 = args.weights0.Read(dst_s + $0, x_c);
FLT4 f$2 = args.weights1.Read(dst_s + $0, x_c);
FLT4 f$3 = args.weights2.Read(dst_s + $0, x_c);
FLT4 f$4 = args.weights3.Read(dst_s + $0, x_c);
)",
s, s * 4 + 0, s * 4 + 1, s * 4 + 2, s * 4 + 3);
}
c += " x_c++;\n";
}
if (weights_are_buffer && !gpu_info.SupportsPointersInKernels()) {
c += " FLT4 f0, f1, f2, f3;\n";
}
for (int s = 0; s < block_size.w; ++s) {
if (weights_are_buffer && !gpu_info.SupportsPointersInKernels()) {
c += " f0 = args.weights.Read(f_offset + " +
std::to_string(s * 4 + 0) + ");\n";
c += " f1 = args.weights.Read(f_offset + " +
std::to_string(s * 4 + 1) + ");\n";
c += " f2 = args.weights.Read(f_offset + " +
std::to_string(s * 4 + 2) + ");\n";
c += " f3 = args.weights.Read(f_offset + " +
std::to_string(s * 4 + 3) + ");\n";
}
const std::string sind = std::to_string(s);
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id(xind, yind, zind);
const std::string full_id = generate_id_full(xind, yind, zind, sind);
c += " CONV" + sind + "(r" + full_id + ", src" + id + ");\n";
}
}
}
}
if (weights_are_buffer) {
c += " f_offset += " + std::to_string(block_size.w * 4) + ";\n";
}
c += " }\n";
c += " }\n";
c += " }\n";
if (src_def.HasAxis(Axis::DEPTH)) {
c += " }\n";
}
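// Epilogue: add the bias and write every accumulated value that lands inside
// the destination bounds.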
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
c += " if (dst_s < args.dst_tensor.Slices()) {\n";
c += " FLT4 bias_val = args.biases.Read(dst_s);\n";
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id_full(xind, yind, zind, sind);
std::string checks =
"xc < args.dst_tensor.Width() && yc < args.dst_tensor.Height()";
std::string coords = "xc, yc";
c += " {\n";
c += " int xc = dst_x + args.stride_x * " + xind + ";\n";
c += " int yc = dst_y + args.stride_y * " + yind + ";\n";
if (src_def.HasAxis(Axis::DEPTH)) {
c += " int zc = dst_z + args.stride_z * " + zind + ";\n";
checks += " && zc < args.dst_tensor.Depth()";
coords += ", zc";
}
c += " if (" + checks + ") {\n";
c += " FLT4 res = TO_FLT4(r" + id + ") + bias_val;\n";
c += " args.dst_tensor.Write(res, " + coords + ", dst_s);\n";
c += " }\n";
c += " }\n";
}
}
}
c += " }\n";
c += " dst_s++;\n";
}
c += "}\n";
return c;
}
absl::Status ConvolutionTransposed::BindArguments(ArgumentsBinder* args) {
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
const int aligned_h =
AlignByN(dst_[0]->Height(), stride_.y * block_size_.y);
RETURN_IF_ERROR(
args->SetInt("grid_size_y", DivideRoundUp(aligned_h, block_size_.y)));
}
return absl::OkStatus();
}
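// Grid: X spans width blocks times batch, Y packs height and depth blocks
// together, Z spans output-slice blocks.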
int3 ConvolutionTransposed::GetGridSize() const {
const int aligned_w = AlignByN(dst_[0]->Width(), stride_.x * block_size_.x);
const int aligned_h = AlignByN(dst_[0]->Height(), stride_.y * block_size_.y);
const int aligned_d = AlignByN(dst_[0]->Depth(), stride_.z * block_size_.z);
const int grid_x = DivideRoundUp(aligned_w, block_size_.x) * dst_[0]->Batch();
const int grid_y = DivideRoundUp(aligned_h, block_size_.y) *
DivideRoundUp(aligned_d, block_size_.z);
const int grid_z = DivideRoundUp(dst_[0]->Slices(), block_size_.w);
return int3(grid_x, grid_y, grid_z);
}
void ConvolutionTransposed::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
}
ConvolutionTransposed CreateConvolutionTransposed(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
ConvolutionTransposed result(definition, attr, gpu_info);
result.UploadWeights(attr.weights, UseBufferForWeights(gpu_info));
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
ConvolutionTransposed CreateConvolutionTransposed3D(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposed3DAttributes& attr) {
ConvolutionTransposed result(definition, attr, gpu_info);
result.UploadWeights(attr.weights, UseBufferForWeights(gpu_info));
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
ConvolutionTransposed CreateConvolutionTransposedDynamicWeights(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
OperationDef new_def = definition;
new_def.src_tensors = {
definition.src_tensors[0]};
const DataType weights_type = definition.GetDataType();
if (UseBufferForWeights(gpu_info)) {
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::BUFFER, Layout::HWC});
} else {
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::TEXTURE_2D, Layout::HW});
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::TEXTURE_2D, Layout::HW});
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::TEXTURE_2D, Layout::HW});
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::TEXTURE_2D, Layout::HW});
}
ConvolutionTransposed result(new_def, attr, gpu_info);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, ConvolutionTransposedSimpleWeights) {
auto status = ConvolutionTransposedSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvolutionTransposed) {
auto status = ConvolutionTransposedTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec564571-bff9-401b-a529-0fcf0afbe055 | cpp | tensorflow/tensorflow | convolution_transposed_thin | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
ConvolutionTransposedThin::ConvolutionTransposedThin(
const OperationDef& definition, const ConvolutionTransposedAttributes& attr,
const GpuInfo& gpu_info)
: GPUOperation(definition) {
code_ = GenerateConvolutionTransposedCode(
definition_, DivideRoundUp(attr.weights.shape.i, 4), attr.weights.shape.o,
int2(attr.weights.shape.w, attr.weights.shape.h));
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
}
}
ConvolutionTransposedThin::ConvolutionTransposedThin(
ConvolutionTransposedThin&& operation)
: GPUOperation(std::move(operation)) {}
ConvolutionTransposedThin& ConvolutionTransposedThin::operator=(
ConvolutionTransposedThin&& operation) {
if (this != &operation) {
GPUOperation::operator=(std::move(operation));
}
return *this;
}
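// Emits kernel source in which every work-item reads one source pixel across
// all slices and writes a kernel_size.x x kernel_size.y block of outputs with
// at most four channels held in registers.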
std::string ConvolutionTransposedThin::GenerateConvolutionTransposedCode(
const OperationDef& op_def, int src_depth, int dst_channels,
const int2& kernel_size) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
const std::string channel_x = dst_channels == 1 ? "" : ".x";
const std::vector<std::string> postfix = {channel_x, ".y", ".z", ".w"};
const std::vector<std::string> channel = {".x", ".y", ".z", ".w"};
const std::string type_postfix =
dst_channels == 1 ? "" : std::to_string(dst_channels);
std::string accum_type;
switch (op_def.precision) {
case CalculationsPrecision::F32:
case CalculationsPrecision::F32_F16:
accum_type = "float" + type_postfix;
break;
case CalculationsPrecision::F16:
accum_type = "half" + type_postfix;
break;
}
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.IsBatchSupported()) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " if (X >= args.src_tensor.Width() || Y >= args.src_tensor.Height()) "
"return;\n";
c += " " + accum_type + " r[" + std::to_string(kernel_size.y) + "][" +
std::to_string(kernel_size.x) + "];\n";
c += " {\n";
c += " FLT4 src = args.src_tensor.Read(X, Y, 0);\n";
int index = 0;
for (int y = 0; y < kernel_size.y; ++y) {
for (int x = 0; x < kernel_size.x; ++x) {
std::string r_s =
" r[" + std::to_string(y) + "][" + std::to_string(x) + "]";
for (int d = 0; d < dst_channels; ++d) {
c += r_s + postfix[d] + " = dot(src, args.weights.Read(" +
std::to_string(index) + "));\n";
index++;
}
}
}
c += " }\n";
for (int i = 1; i < src_depth; ++i) {
c += " if (X > " + std::to_string(-i) +
") {
c +=
" FLT4 src = args.src_tensor.Read(X, Y, " + std::to_string(i) + ");\n";
for (int y = 0; y < kernel_size.y; ++y) {
for (int x = 0; x < kernel_size.x; ++x) {
std::string r_s =
" r[" + std::to_string(y) + "][" + std::to_string(x) + "]";
for (int d = 0; d < dst_channels; ++d) {
c += r_s + postfix[d] + " += dot(src, args.weights.Read(" +
std::to_string(index) + "));\n";
index++;
}
}
}
c += " }\n";
}
c += " X *= " + std::to_string(kernel_size.x) + ";\n";
c += " Y *= " + std::to_string(kernel_size.y) + ";\n";
for (int y = 0; y < kernel_size.y; ++y) {
for (int x = 0; x < kernel_size.x; ++x) {
const std::string x_coord = "X + " + std::to_string(x);
const std::string y_coord = "Y + " + std::to_string(y);
c += " if (" + x_coord + " < args.dst_tensor.Width() && " + y_coord +
" < args.dst_tensor.Height()) {\n";
c += " FLT4 result = args.weights.Read(" + std::to_string(index) +
");\n";
for (int d = 0; d < dst_channels; ++d) {
c += " result" + channel[d] + " += r[" + std::to_string(y) + "][" +
std::to_string(x) + "]" + postfix[d] + ";\n";
}
c += " args.dst_tensor.Write(result, " + x_coord + ", " + y_coord +
", 0);\n";
c += " }\n";
}
}
c += "}\n";
return c;
}
int3 ConvolutionTransposedThin::GetGridSize() const {
const int grid_x = src_[0]->Width() * dst_[0]->Batch();
const int grid_y = src_[0]->Height();
const int grid_z = 1;
return int3(grid_x, grid_y, grid_z);
}
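// The thin path only covers "pixel shuffle"-style cases: kernel size equal to
// stride, zero padding and at most four output channels.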
bool IsConvolutionTransposedThinSupported(
const ConvolutionTransposedAttributes& attr) {
return attr.weights.shape.o <= 4 && attr.weights.shape.w == attr.stride.w &&
attr.weights.shape.h == attr.stride.h &&
attr.padding.prepended.w == 0 && attr.padding.prepended.h == 0 &&
attr.padding.appended.w == 0 && attr.padding.appended.h == 0;
}
ConvolutionTransposedThin CreateConvolutionTransposedThin(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
ConvolutionTransposedThin result(definition, attr, gpu_info);
result.UploadData(attr.weights, attr.bias);
return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvolutionTransposedThinSimpleWeights) {
auto status = ConvolutionTransposedThinSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvolutionTransposedThin) {
auto status = ConvolutionTransposedThinTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a4879a0a-281e-4b1f-9aaa-7bf86d954051 | cpp | tensorflow/tensorflow | convolution_transposed_3x3_thin | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/weights_conversion.h"
namespace tflite {
namespace gpu {
namespace {
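// Emits the multiply-accumulate of one source vector against four weight
// vectors; the form depends on whether weights are laid out I4O4 (per-channel
// scaling) or O4I4 (dot products).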
std::string ConvInstr(CalculationsPrecision precision, bool is_i4_o4,
const std::string& dst_name, const std::string& src_name,
int weights_offset) {
std::string c;
if (is_i4_o4) {
switch (precision) {
case CalculationsPrecision::F32:
case CalculationsPrecision::F16:
c += " $0 += $1.x * args.weights.Read($2); \n";
c += " $0 += $1.y * args.weights.Read($3); \n";
c += " $0 += $1.z * args.weights.Read($4); \n";
c += " $0 += $1.w * args.weights.Read($5); \n";
break;
case CalculationsPrecision::F32_F16:
c += " $0 += TO_ACCUM_TYPE($1.x * args.weights.Read($2) + $1.y * "
"args.weights.Read($3) + $1.z * args.weights.Read($4) + $1.w * "
"args.weights.Read($5)); \n";
break;
}
} else {
c += " $0.x += dot($1, args.weights.Read($2)); \n";
c += " $0.y += dot($1, args.weights.Read($3)); \n";
c += " $0.z += dot($1, args.weights.Read($4)); \n";
c += " $0.w += dot($1, args.weights.Read($5)); \n";
}
return absl::Substitute(c, dst_name, src_name, weights_offset,
weights_offset + 1, weights_offset + 2,
weights_offset + 3);
}
}
ConvolutionTransposed3x3Thin::ConvolutionTransposed3x3Thin(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr)
: GPUOperation(definition) {
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::kOICustomSpatialO4I4;
} else {
weights_layout_ = WeightsLayout::kOICustomSpatialI4O4;
}
code_ = GenerateConvolutionTransposedCode(
definition_, gpu_info, DivideRoundUp(attr.weights.shape.i, 4),
DivideRoundUp(attr.weights.shape.o, 4));
}
std::string ConvolutionTransposed3x3Thin::GenerateConvolutionTransposedCode(
const OperationDef& op_def, const GpuInfo& gpu_info, int src_depth,
int dst_depth) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
if (op_def.src_tensors.size() == 2) {
BufferDescriptor desc;
desc.element_type = op_def.src_tensors[1].GetDataType();
desc.element_size = 4;
desc.memory_type = MemoryType::CONSTANT;
AddSrcBuffer("weights", desc);
}
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.IsBatchSupported()) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " if (X >= args.src_tensor.Width() || Y >= args.src_tensor.Height()) "
"return;\n";
for (int d = 0; d < dst_depth; ++d) {
const std::string layer = std::to_string(d);
c += " ACCUM_FLT4 r" + layer + "[2][2];\n";
c += " r" + layer + "[0][0] = INIT_ACCUM_FLT4(0.0f);\n";
c += " r" + layer + "[0][1] = INIT_ACCUM_FLT4(0.0f);\n";
c += " r" + layer + "[1][0] = INIT_ACCUM_FLT4(0.0f);\n";
c += " r" + layer + "[1][1] = INIT_ACCUM_FLT4(0.0f);\n";
}
for (int s = 0; s < src_depth; ++s) {
const std::string z = std::to_string(s);
c += " {\n";
if (op_def.src_tensors[0].SupportsZeroClamp(Axis::WIDTH, gpu_info) &&
op_def.src_tensors[0].SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " FLT4 src0 = args.src_tensor.Read(X, Y, " + z + ");\n";
c += " FLT4 src1 = args.src_tensor.Read(X + 1, Y, " + z + ");\n";
c += " FLT4 src2 = args.src_tensor.Read(X, Y + 1, " + z + ");\n";
c += " FLT4 src3 = args.src_tensor.Read(X + 1, Y + 1, " + z + ");\n";
} else if (op_def.src_tensors[0].IsLinear() &&
op_def.src_tensors[0].ReturnsZeroForNegOneRead(gpu_info)) {
c += " int c0 = args.src_tensor.GetAddress(X, Y, " + z + ");\n";
c += " int c1 = args.src_tensor.GetAddress(X + 1, Y, " + z + ");\n";
c += " int c2 = args.src_tensor.GetAddress(X, Y + 1, " + z + ");\n";
c += " int c3 = args.src_tensor.GetAddress(X + 1, Y + 1, " + z + ");\n";
c += " bool x_in = X + 1 < args.src_tensor.Width();\n";
c += " bool y_in = Y + 1 < args.src_tensor.Height();\n";
c += " c1 = select(-1, c1, x_in);\n";
c += " c2 = select(-1, c2, y_in);\n";
c += " c3 = select(-1, c3, x_in && y_in);\n";
c += " FLT4 src0 = args.src_tensor.Read(c0);\n";
c += " FLT4 src1 = args.src_tensor.Read(c1);\n";
c += " FLT4 src2 = args.src_tensor.Read(c2);\n";
c += " FLT4 src3 = args.src_tensor.Read(c3);\n";
} else {
c += " bool x_in = X + 1 < args.src_tensor.Width();\n";
c += " bool y_in = Y + 1 < args.src_tensor.Height();\n";
c += " FLT4 src0 = args.src_tensor.Read(X, Y, " + z + ");\n";
c += " FLT4 src1 = INIT_FLT4(0.0);\n";
c += " FLT4 src2 = INIT_FLT4(0.0);\n";
c += " FLT4 src3 = INIT_FLT4(0.0);\n";
c += " if (x_in) {\n";
c += " src1 = args.src_tensor.Read(X + 1, Y, " + z + ");\n";
c += " }\n";
c += " if (y_in) {\n";
c += " src2 = args.src_tensor.Read(X, Y + 1, " + z + ");\n";
c += " }\n";
c += " if (x_in && y_in) {\n";
c += " src3 = args.src_tensor.Read(X + 1, Y + 1, " + z + ");\n";
c += " }\n";
}
for (int d = 0; d < dst_depth; ++d) {
const std::string layer = std::to_string(d);
const int filters_index = (s * dst_depth + d) * 36;
const bool is_i4_o4 = GetWeightsDescription().IsI4O4();
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[0][0]", "src0",
filters_index);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[0][1]", "src0",
filters_index + 4);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[0][1]", "src1",
filters_index + 8);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][0]", "src0",
filters_index + 12);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][0]", "src2",
filters_index + 16);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src0",
filters_index + 20);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src1",
filters_index + 24);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src2",
filters_index + 28);
c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src3",
filters_index + 32);
}
c += " }\n";
}
c += " X *= 2;\n";
c += " Y *= 2;\n";
for (int d = 0; d < dst_depth; ++d) {
const std::string layer = std::to_string(d);
c += " {\n";
c += " FLT4 bias_val = args.biases.Read(" + layer + ");\n";
for (int y = 0; y < 2; ++y) {
for (int x = 0; x < 2; ++x) {
const std::string x_coord = "X + " + std::to_string(x);
const std::string y_coord = "Y + " + std::to_string(y);
c += " {\n";
c += " FLT4 result = TO_FLT4(r" + layer + "[" + std::to_string(y) +
"][" + std::to_string(x) + "]) + bias_val;\n";
c += " args.dst_tensor.Write(result, " + x_coord + ", " + y_coord +
", " + layer + ");\n";
c += " }\n";
}
}
c += " }\n";
}
c += "}\n";
return c;
}
int3 ConvolutionTransposed3x3Thin::GetGridSize() const {
const int grid_x = src_[0]->Width() * dst_[0]->Batch();
const int grid_y = src_[0]->Height();
const int grid_z = 1;
return int3(grid_x, grid_y, grid_z);
}
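// Storage order of the nine 3x3 spatial taps so the generated code can read
// them with sequential weight indices.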
std::vector<int> ConvolutionTransposed3x3Thin::GetSpatialWeightsRemap() const {
return std::vector<int>{4, 5, 3, 7, 1, 8, 6, 2, 0};
}
void ConvolutionTransposed3x3Thin::UploadWeights(
const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights) {
const auto weights_desc = GetWeightsDescription();
const int flt_count =
GetTotalElementsCountForLayout(weights_desc, weights.shape);
BufferDescriptor desc;
desc.element_type = weights_desc.type;
desc.element_size = 4;
desc.memory_type = MemoryType::CONSTANT;
desc.size = flt_count * SizeOf(desc.element_type);
desc.data.resize(desc.size);
RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data));
args_.AddObject("weights",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
bool IsConvolutionTransposed3x3ThinSupported(
const ConvolutionTransposedAttributes& attr) {
return attr.weights.shape.o <= 8 && attr.weights.shape.w == 3 &&
attr.weights.shape.h == 3 && attr.stride.w == 2 &&
attr.stride.h == 2 && attr.padding.prepended.w == 1 &&
attr.padding.prepended.h == 1 && attr.padding.appended.w == 1 &&
attr.padding.appended.h == 1;
}
ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3Thin(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
ConvolutionTransposed3x3Thin result(gpu_info, definition, attr);
result.UploadWeights(attr.weights);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3ThinDynamicWeights(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
OperationDef new_def = definition;
new_def.src_tensors = {
definition.src_tensors[0]};
const DataType weights_type = definition.GetDataType();
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::BUFFER, Layout::HWC});
ConvolutionTransposed3x3Thin result(gpu_info, new_def, attr);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3ThinSimpleWeights) {
auto status = ConvolutionTransposed3x3ThinSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3Thin) {
auto status = ConvolutionTransposed3x3ThinTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8136dfc5-5900-48d6-82d2-d733ec8bfd59 | cpp | tensorflow/tensorflow | relu | tensorflow/lite/delegates/gpu/gl/kernels/relu.cc | tensorflow/lite/delegates/xnnpack/relu_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
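// Generates a one-statement shader: the lower bound is either activation_min
// or the leaky term min(alpha * value_0, 0), and the result is max'ed or
// clamp'ed against activation_max when that bound is non-zero.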
class ReLU : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const ReLUAttributes&>(ctx.op_attr);
std::vector<Variable> params;
std::string min;
if (attr.alpha == 0) {
min = "vec4($activation_min$)";
params.push_back({"activation_min", attr.activation_min});
} else {
min = "min($alpha$ * value_0, 0.0)";
params.push_back({"alpha", attr.alpha});
}
std::string code;
if (attr.activation_max == 0) {
code = "value_0 = max(value_0, " + min + ");";
} else {
code = "value_0 = clamp(value_0, " + min + ", vec4($activation_max$));";
params.push_back({"activation_max", attr.activation_max});
}
*generated_code = {
std::move(params),
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewReLUNodeShader() {
return std::make_unique<ReLU>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Relu, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
TEST(Relu, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
TEST(Relu, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
TEST(Relu, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_RELU,
xnnpack_delegate.get());
}
TEST(Relu, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/relu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/relu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6558b08c-70ef-430e-a29e-211a324b5e75 | cpp | tensorflow/tensorflow | convolution_transposed_4x4 | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
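// Chooses how the 4x4 transposed-convolution weights reach the kernel:
// constant memory on AMD, async local copies on PowerVR, cooperative local
// loads on Nvidia/Intel and pre-Bionic Apple GPUs, plain global reads
// otherwise.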
ConvolutionTransposed4x4::WeightsUploadType GetBestWeightsUploadType(
const GpuInfo& gpu_info) {
ConvolutionTransposed4x4::WeightsUploadType weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM;
if (gpu_info.IsApple()) {
if (gpu_info.apple_info.IsBionic()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM;
} else {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS;
}
} else if (gpu_info.IsPowerVR()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC;
} else if (gpu_info.IsNvidia() || gpu_info.IsIntel()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS;
} else if (gpu_info.IsAMD()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM;
} else {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM;
}
return weights_upload_type;
}
}
ConvolutionTransposed4x4::ConvolutionTransposed4x4(
const OperationDef& definition, const GpuInfo& gpu_info)
: GPUOperation(definition) {
work_group_size_ = int3(8, 4, 1);
if (gpu_info.IsApple()) {
work_group_launch_order_ = int3(2, 0, 1);
}
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::kOICustomSpatialO4I4;
} else {
weights_layout_ = WeightsLayout::kOICustomSpatialI4O4;
}
code_ = GenerateConvolutionTransposedCode(gpu_info, definition_,
GetBestWeightsUploadType(gpu_info));
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
}
std::string ConvolutionTransposed4x4::GenerateConvolutionTransposedCode(
const GpuInfo& gpu_info, const OperationDef& op_def,
WeightsUploadType weights_upload_type) {
auto src_desc = op_def.src_tensors[0];
AddSrcTensor("src_tensor", src_desc);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
if (op_def.src_tensors.size() == 2) {
BufferDescriptor desc;
desc.element_type = op_def.src_tensors[1].GetDataType();
desc.element_size = 4;
desc.memory_type =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
AddSrcBuffer("weights", desc);
}
args_.AddInt("filter_offset");
const bool need_local_mem =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS ||
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC;
const int wg_total_size =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string barrier =
wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32()
? "SIMD_LOCAL_MEM_BARRIER"
: "LOCAL_MEM_BARRIER";
std::string c;
if (GetWeightsDescription().IsI4O4()) {
switch (op_def.precision) {
case CalculationsPrecision::F32:
case CalculationsPrecision::F16:
c += "#define CONV(R, SRC, F) \\\n";
c += " R += SRC.x * weights_cache[F]; \\\n";
c += " R += SRC.y * weights_cache[F + 1]; \\\n";
c += " R += SRC.z * weights_cache[F + 2]; \\\n";
c += " R += SRC.w * weights_cache[F + 3]; \n";
break;
case CalculationsPrecision::F32_F16:
c += "#define CONV(R, SRC, F) \\\n";
c += " R += TO_ACCUM_TYPE(SRC.x * weights_cache[F] + SRC.y * "
"weights_cache[F + 1] + SRC.z * weights_cache[F + 2] + SRC.w * "
"weights_cache[F + 3]);\n";
break;
}
} else {
c += "#define CONV(R, SRC, F) \\\n";
c += " R.x += dot(SRC, weights_cache[F]); \\\n";
c += " R.y += dot(SRC, weights_cache[F + 1]); \\\n";
c += " R.z += dot(SRC, weights_cache[F + 2]); \\\n";
c += " R.w += dot(SRC, weights_cache[F + 3]); \n";
}
const std::string weights_space =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM
? "__constant"
: "__global";
if (gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
}
c += "MAIN_FUNCTION($0) {\n";
std::string grid_coords[3];
int3 launch_remap;
launch_remap[work_group_launch_order_.x] = 0;
launch_remap[work_group_launch_order_.y] = 1;
launch_remap[work_group_launch_order_.z] = 2;
if (work_group_launch_order_[0] == 0) {
grid_coords[0] = "GLOBAL_ID_0";
} else {
grid_coords[0] = "(GROUP_ID_" + std::to_string(launch_remap[0]) +
" * GROUP_SIZE_0 + LOCAL_ID_0);\n";
}
if (work_group_launch_order_[1] == 1) {
grid_coords[1] = "GLOBAL_ID_1";
} else {
grid_coords[1] = "(GROUP_ID_" + std::to_string(launch_remap[1]) +
" * GROUP_SIZE_1 + LOCAL_ID_1);\n";
}
if (work_group_launch_order_[2] == 2) {
grid_coords[2] = "GLOBAL_ID_2";
} else {
grid_coords[2] = "(GROUP_ID_" + std::to_string(launch_remap[2]) +
" * GROUP_SIZE_2 + LOCAL_ID_2);\n";
}
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = " + grid_coords[0] + ";\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = " + grid_coords[0] + ";\n";
}
c += " int Y = " + grid_coords[1] + ";\n";
c += " int Z = " + grid_coords[2] + ";\n";
if (!need_local_mem) {
c += " if (X * 2 > args.dst_tensor.Width() || Y * 2 > "
"args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) "
"return;\n";
}
c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
c += " int f_offset = Z * args.filter_offset;\n";
if (need_local_mem) {
c += " __local FLT4 weights_cache[64];\n";
}
if (weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n";
}
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x0 = X - 1 >= 0 && X - 1 < args.src_tensor.Width();\n";
c += " bool in_x1 = X >= 0 && X < args.src_tensor.Width();\n";
}
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y0 = Y - 1 >= 0 && Y - 1 < args.src_tensor.Height();\n";
c += " bool in_y1 = Y >= 0 && Y < args.src_tensor.Height();\n";
}
auto generate_check = [&](int x, int y) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT};
const std::vector<std::string> names{"in_x" + std::to_string(x),
"in_y" + std::to_string(y)};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_desc.HasAxis(axis) &&
!src_desc.SupportsZeroClamp(axis, gpu_info)) {
if (!check.empty()) {
check += " && ";
}
check += names[i];
}
}
return check;
};
if (src_desc.IsLinear()) {
if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
c += " int addr_0 = args.src_tensor.GetAddress(X - 1, Y - 1, 0);\n";
c += " int addr_1 = args.src_tensor.GetAddress(X, Y - 1, 0);\n";
c += " int addr_2 = args.src_tensor.GetAddress(X - 1, Y, 0);\n";
c += " int addr_3 = args.src_tensor.GetAddress(X, Y, 0);\n";
c += " addr_0 = select(-1, addr_0, (in_x0 && in_y0));\n";
c += " addr_1 = select(-1, addr_1, (in_x1 && in_y0));\n";
c += " addr_2 = select(-1, addr_2, (in_x0 && in_y1));\n";
c += " addr_3 = select(-1, addr_3, (in_x1 && in_y1));\n";
c += " int dz_0 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
"in_y0));\n";
c += " int dz_1 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
"in_y0));\n";
c += " int dz_2 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
"in_y1));\n";
c += " int dz_3 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
"in_y1));\n";
} else {
c += " int xc0 = clamp(X - 1, 0, args.src_tensor.Width() - 1);\n";
c += " int xc1 = clamp(X, 0, args.src_tensor.Width() - 1);\n";
c += " int yc0 = clamp(Y - 1, 0, args.src_tensor.Height() - 1);\n";
c += " int yc1 = clamp(Y, 0, args.src_tensor.Height() - 1);\n";
c += " int addr_0 = args.src_tensor.GetAddress(xc0, yc0, 0);\n";
c += " int addr_1 = args.src_tensor.GetAddress(xc1, yc0, 0);\n";
c += " int addr_2 = args.src_tensor.GetAddress(xc0, yc1, 0);\n";
c += " int addr_3 = args.src_tensor.GetAddress(xc1, yc1, 0);\n";
c += " int dz = args.src_tensor.SliceStride();\n";
}
}
auto read_src = [&](int x, int y) {
if (src_desc.IsLinear()) {
const std::string id = std::to_string(y * 2 + x);
const std::string addr = "addr_" + std::to_string(y * 2 + x);
if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
return "args.src_tensor.Read(" + addr + "); " + addr + " += dz_" + id +
";";
} else {
return "args.src_tensor.Read(" + addr + ") * INIT_FLT(in_x" +
std::to_string(x) + " && in_y" + std::to_string(y) + "); " +
addr + " += dz;";
}
} else {
std::string check = generate_check(x, y);
if (!check.empty()) {
check = " * INIT_FLT(" + check + ")";
}
return "args.src_tensor.Read(X + " + std::to_string(x - 1) + ", Y + " +
std::to_string(y - 1) + ", s)" + check + ";";
}
};
c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n";
if (need_local_mem) {
c += " " + barrier + ";\n";
}
if (weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC) {
c += " async_work_group_copy(weights_cache, "
"args.weights.GetPtr(f_offset), 64, "
"0);\n";
} else if (weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::
LOCAL_MEM_BY_THREADS) {
c += " weights_cache[local_id] = args.weights.Read(f_offset + "
"local_id);\n";
c += " weights_cache[local_id + 32] = args.weights.Read(f_offset + "
"local_id + "
"32);\n";
} else {
c += " " + weights_space +
" FLT4* weights_cache = args.weights.GetPtr(f_offset);\n";
}
c += " FLT4 src0 = " + read_src(0, 0) + ";\n";
c += " FLT4 src1 = " + read_src(1, 0) + ";\n";
c += " FLT4 src2 = " + read_src(0, 1) + ";\n";
c += " FLT4 src3 = " + read_src(1, 1) + ";\n";
c += " f_offset += 64;\n";
if (need_local_mem) {
c += " " + barrier + ";\n";
}
c += " CONV(r0, src0, 0);\n";
c += " CONV(r1, src0, 4);\n";
c += " CONV(r2, src0, 8);\n";
c += " CONV(r3, src0, 12);\n";
c += " CONV(r0, src1, 16);\n";
c += " CONV(r1, src1, 20);\n";
c += " CONV(r2, src1, 24);\n";
c += " CONV(r3, src1, 28);\n";
c += " CONV(r0, src2, 32);\n";
c += " CONV(r1, src2, 36);\n";
c += " CONV(r2, src2, 40);\n";
c += " CONV(r3, src2, 44);\n";
c += " CONV(r0, src3, 48);\n";
c += " CONV(r1, src3, 52);\n";
c += " CONV(r2, src3, 56);\n";
c += " CONV(r3, src3, 60);\n";
c += " }\n";
c += "\n";
if (need_local_mem) {
c += " if (X * 2 > args.dst_tensor.Width() || Y * 2 > "
"args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) "
"return;\n";
}
c += " X = X * 2 - 1;\n";
c += " Y = Y * 2 - 1;\n";
c += "\n";
c += " FLT4 bias_val = args.biases.Read(Z);\n";
c += " if (X >= 0 && Y >= 0) {\n";
c += " FLT4 result = TO_FLT4(r0) + bias_val;\n";
c += " args.dst_tensor.Write(result, X, Y, Z);\n";
c += " }\n";
c += " if (X + 1 < args.dst_tensor.Width() && Y >= 0) {\n";
c += " FLT4 result = TO_FLT4(r1) + bias_val;\n";
c += " args.dst_tensor.Write(result, X + 1, Y, Z);\n";
c += " }\n";
c += " if (X >= 0 && Y + 1 < args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r2) + bias_val;\n";
c += " args.dst_tensor.Write(result, X, Y + 1, Z);\n";
c += " }\n";
c += " if (X + 1 < args.dst_tensor.Width() && Y + 1 < "
"args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r3) + bias_val;\n";
c += " args.dst_tensor.Write(result, X + 1, Y + 1, Z);\n";
c += " }\n";
c += "}\n";
return c;
}
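// Each output slice consumes 64 FLT4 weights per source slice (16 spatial taps
// of the 4x4 kernel, 4 vectors each), hence filter_offset = 4 * 16 * Slices().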
absl::Status ConvolutionTransposed4x4::BindArguments(ArgumentsBinder* args) {
return args->SetInt("filter_offset", 4 * 16 * src_[0]->Slices());
}
int3 ConvolutionTransposed4x4::GetGridSize() const {
const int grid_x = DivideRoundUp(dst_[0]->Width() + 2, 2) * dst_[0]->Batch();
const int grid_y = DivideRoundUp(dst_[0]->Height() + 2, 2);
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
std::vector<int> ConvolutionTransposed4x4::GetSpatialWeightsRemap() const {
return std::vector<int>{10, 11, 14, 15, 8, 9, 12, 13, 2, 3, 6, 7, 0, 1, 4, 5};
}
void ConvolutionTransposed4x4::UploadWeights(
const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights,
WeightsUploadType weights_upload_type) {
const auto weights_desc = GetWeightsDescription();
const int flt_count =
GetTotalElementsCountForLayout(weights_desc, weights.shape);
BufferDescriptor desc;
desc.element_type = weights_desc.type;
desc.element_size = 4;
desc.memory_type =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
desc.size = flt_count * SizeOf(desc.element_type);
desc.data.resize(desc.size);
RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data));
args_.AddObject("weights",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
bool IsConvolutionTransposed4x4Supported(
const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
return attr.weights.shape.w == 4 && attr.weights.shape.h == 4 &&
attr.stride.w == 2 && attr.stride.h == 2 &&
attr.padding.prepended.w == 1 && attr.padding.prepended.h == 1;
}
ConvolutionTransposed4x4 CreateConvolutionTransposed4x4(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
ConvolutionTransposed4x4 result(definition, gpu_info);
result.UploadWeights(attr.weights, GetBestWeightsUploadType(gpu_info));
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
ConvolutionTransposed4x4 CreateConvolutionTransposed4x4DynamicWeights(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
OperationDef new_def = definition;
new_def.src_tensors = {
definition.src_tensors[0]};
const DataType weights_type = definition.GetDataType();
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::BUFFER, Layout::HWC});
ConvolutionTransposed4x4 result(new_def, gpu_info);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, ConvolutionTransposed4x4SimpleWeights) {
auto status = ConvolutionTransposed4x4SimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
179b2409-311b-4b45-94bb-c7d370c11b9f | cpp | tensorflow/tensorflow | conv_generic | tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc | tensorflow/lite/delegates/gpu/cl/kernels/conv_generic_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/conv_generic.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
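// Emits code that spreads an upload of elements_to_upload values across all
// work-items of the group; a guarded block handles the remainder.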
std::string GenerateUploadByThreads(
const std::string& local_ptr_name, const std::string& name, bool use_ptrs,
const std::string& global_offset_name, const std::string type_conversion,
const std::string& lid_name, int total_work_items, int elements_to_upload) {
std::string c;
std::string offset =
global_offset_name.empty() ? "" : global_offset_name + " + ";
const int groups = elements_to_upload / total_work_items;
const int reminder = elements_to_upload % total_work_items;
const std::string access_start = name + (use_ptrs ? "[" : ".Read(");
const std::string access_end = use_ptrs ? "]" : ")";
for (int i = 0; i < groups; ++i) {
const std::string value = access_start + offset + lid_name + " + " +
std::to_string(total_work_items * i) + access_end;
c += " " + local_ptr_name + "[" + lid_name + " + " +
std::to_string(total_work_items * i) +
"] = " + absl::Substitute(type_conversion, value) + ";\n";
}
if (reminder != 0) {
const std::string value = access_start + offset + lid_name + " + " +
std::to_string(total_work_items * groups) +
access_end;
c += " if (" + lid_name + " < " + std::to_string(reminder) + ") {\n";
c += " " + local_ptr_name + "[" + lid_name + " + " +
std::to_string(total_work_items * groups) +
"] = " + absl::Substitute(type_conversion, value) + ";\n";
c += " }\n";
}
return c;
}
std::string GenerateAsyncUpload(const std::string& local_ptr_name,
const std::string& global_ptr_name,
const std::string& global_offset_name,
int elements_to_upload) {
std::string c;
std::string offset =
global_offset_name.empty() ? "" : " + " + global_offset_name;
c += " async_work_group_copy(" + local_ptr_name + ", " + global_ptr_name +
offset + ", " + std::to_string(elements_to_upload) + ", 0);\n";
return c;
}
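// Decodes the launch grid into per-thread block origins DST_X/DST_Y/(DST_Z)/DST_S
// and scales them by the block size; supports fully linearized, spatially
// linearized and plain 2D/3D grids, with optional batch decoding.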
std::string GenerateBlockCoords(const int4& block_size,
const int3& work_group_launch_order,
bool linear_spatial, bool linear_all,
bool need_depth, bool need_batch) {
std::string c;
int3 launch_remap;
launch_remap[work_group_launch_order.x] = 0;
launch_remap[work_group_launch_order.y] = 1;
launch_remap[work_group_launch_order.z] = 2;
if (linear_all) {
c += " int linear_all = GLOBAL_ID_0;\n";
if (need_batch) {
c += " int B = linear_all % args.task_size_b;\n";
c += " linear_all = linear_all / args.task_size_b;\n";
}
c += " int DST_X = linear_all % args.task_size_x;\n";
c += " linear_all = linear_all / args.task_size_x;\n";
c += " int DST_Y = linear_all % args.task_size_y;\n";
c += " linear_all = linear_all / args.task_size_y;\n";
if (need_depth) {
c += " int DST_Z = linear_all % args.task_size_z;\n";
c += " linear_all = linear_all / args.task_size_z;\n";
}
c += " int DST_S = linear_all;\n";
} else if (linear_spatial) {
if (work_group_launch_order[0] == 0) {
c += " int linear_spatial = GLOBAL_ID_0;\n";
} else {
c += " int linear_spatial = GROUP_ID_" +
std::to_string(launch_remap[0]) + " * GROUP_SIZE_0 + LOCAL_ID_0;\n";
}
if (need_batch) {
c += " int B = linear_spatial % args.task_size_b;\n";
c += " linear_spatial = linear_spatial / args.task_size_b;\n";
}
c += " int DST_X = linear_spatial % args.task_size_x;\n";
c += " linear_spatial = linear_spatial / args.task_size_x;\n";
c += " int DST_Y = linear_spatial % args.task_size_y;\n";
c += " linear_spatial = linear_spatial / args.task_size_y;\n";
if (need_depth) {
c += " int DST_Z = linear_spatial;\n";
}
if (work_group_launch_order[1] == 1) {
c += " int DST_S = GLOBAL_ID_1;\n";
} else {
c += " int DST_S = GROUP_ID_" + std::to_string(launch_remap[1]) +
" * GROUP_SIZE_1 + LOCAL_ID_1;\n";
}
} else {
if (work_group_launch_order[0] == 0) {
c += " int DST_X = GLOBAL_ID_0;\n";
} else {
c += " int DST_X = GROUP_ID_" + std::to_string(launch_remap[0]) +
" * GROUP_SIZE_0 + LOCAL_ID_0;\n";
}
if (need_batch) {
c += " int B = DST_X % args.task_size_b;\n";
c += " DST_X = DST_X / args.task_size_b;\n";
}
std::string global_id_1;
if (work_group_launch_order[1] == 1) {
global_id_1 = "GLOBAL_ID_1";
} else {
global_id_1 = "GROUP_ID_" + std::to_string(launch_remap[1]) +
" * GROUP_SIZE_1 + LOCAL_ID_1";
}
if (need_depth) {
c += " int linear_id_1 = " + global_id_1 + ";\n";
c += " int DST_Y = linear_id_1 % args.task_size_y;\n";
c += " int DST_Z = linear_id_1 / args.task_size_y;\n";
} else {
c += " int DST_Y = " + global_id_1 + ";\n";
}
if (work_group_launch_order[2] == 2) {
c += " int DST_S = GLOBAL_ID_2;\n";
} else {
c += " int DST_S = GROUP_ID_" + std::to_string(launch_remap[2]) +
" * GROUP_SIZE_2 + LOCAL_ID_2;\n";
}
}
if (block_size.x != 1) {
c += " DST_X *= " + std::to_string(block_size.x) + ";\n";
}
if (block_size.y != 1) {
c += " DST_Y *= " + std::to_string(block_size.y) + ";\n";
}
if (need_depth && block_size.z != 1) {
c += " DST_Z *= " + std::to_string(block_size.z) + ";\n";
}
if (block_size.w != 1) {
c += " DST_S *= " + std::to_string(block_size.w) + ";\n";
}
return c;
}
}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const Convolution2DAttributes& attr,
const GpuInfo& gpu_info, const BHWC* dst_shape)
: GPUOperation(definition),
stride_(attr.strides.w, attr.strides.h, 1, 1),
padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, 0, 0),
kernel_size_(attr.weights.shape.w, attr.weights.shape.h, 1, 1),
dilation_(attr.dilations.w, attr.dilations.h, 1, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {
const int src_slices = DivideRoundUp(attr.weights.shape.i, 4);
const int dst_slices = DivideRoundUp(attr.weights.shape.o, 4);
if (attr.groups != 1) {
conv_params_.groups_support = true;
const int dst_group_slices = dst_slices / attr.groups;
if (dst_group_slices % conv_params_.block_size.w != 0) {
if (conv_params_.block_size.w == 4 && dst_group_slices % 2 == 0) {
conv_params_.block_size.w = 2;
} else {
conv_params_.block_size.w = 1;
}
}
args_.AddInt("src_group_size", src_slices);
args_.AddInt("dst_group_size", dst_slices / attr.groups);
}
}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC& weights_shape, const GpuInfo& gpu_info,
const BHWC* dst_shape)
: GPUOperation(definition),
stride_(attr.strides.w, attr.strides.h, 1, 1),
padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, 0, 0),
kernel_size_(weights_shape.w, weights_shape.h, 1, 1),
dilation_(attr.dilations.w, attr.dilations.h, 1, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, weights_shape,
dst_shape)) {}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const FullyConnectedAttributes& attr,
const GpuInfo& gpu_info, const BHWC* dst_shape)
: GPUOperation(definition),
stride_(1, 1, 1, 1),
padding_(0, 0, 0, 0),
kernel_size_(1, 1, 1, 1),
dilation_(1, 1, 1, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {}
ConvGeneric::ConvGeneric(const OperationDef& definition)
: GPUOperation(definition),
stride_(1, 1, 1, 1),
padding_(0, 0, 0, 0),
kernel_size_(1, 1, 1, 1),
dilation_(1, 1, 1, 1) {}
ConvGeneric::ConvGeneric(ConvGeneric&& operation)
: GPUOperation(std::move(operation)),
stride_(operation.stride_),
padding_(operation.padding_),
kernel_size_(operation.kernel_size_),
dilation_(operation.dilation_),
conv_params_(operation.conv_params_) {}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const Convolution3DAttributes& attr,
const GpuInfo& gpu_info, const BHWDC* dst_shape)
: GPUOperation(definition),
stride_(attr.strides.w, attr.strides.h, attr.strides.d, 1),
padding_(-attr.padding.prepended.w, -attr.padding.prepended.h,
-attr.padding.prepended.d, 0),
kernel_size_(attr.weights.shape.w, attr.weights.shape.h,
attr.weights.shape.d, 1),
dilation_(attr.dilations.w, attr.dilations.h, attr.dilations.d, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {}
ConvGeneric& ConvGeneric::operator=(ConvGeneric&& operation) {
if (this != &operation) {
std::swap(stride_, operation.stride_);
std::swap(padding_, operation.padding_);
std::swap(kernel_size_, operation.kernel_size_);
std::swap(dilation_, operation.dilation_);
std::swap(conv_params_, operation.conv_params_);
GPUOperation::operator=(std::move(operation));
}
return *this;
}
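// Assembles the kernel. The grid dimensionality is reduced to 1 (linear_all)
// or 2 (linear_spatial) for the linear thread mappings. If weights arrive as
// a second runtime tensor they are rebound either as one buffer
// (kOSpatialIOGroupI4O4 / O4I4 layouts) or as four 2D textures named
// weights0..weights3. GPU-specific compiler options are attached here: fast
// relaxed math on PowerVR (F16) and Mali, 64-register allocation on Mali,
// OpenCL 2.0 when subgroup broadcast is used, and full-SIMD dispatch on
// Adreno 3xx for trivial 1x1 kernels in F16.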
void ConvGeneric::GenerateCode(const GpuInfo& gpu_info) {
if (conv_params_.linear_all) {
grid_dimension_ = 1;
} else if (conv_params_.linear_spatial) {
grid_dimension_ = 2;
}
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
if (definition_.src_tensors.size() == 2) {
const DataType weights_type = definition_.GetDataType();
if (conv_params_.weights_layout == WeightsLayout::kOSpatialIOGroupI4O4 ||
conv_params_.weights_layout == WeightsLayout::kOSpatialIOGroupO4I4) {
definition_.src_tensors[1] = {weights_type, TensorStorageType::BUFFER,
Layout::HWC};
BufferDescriptor desc;
desc.element_type = weights_type;
desc.element_size = 4;
desc.memory_type = conv_params_.weights_upload_type ==
ConvGeneric::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
AddSrcBuffer("weights", desc);
} else {
TensorDescriptor desc{weights_type, TensorStorageType::TEXTURE_2D,
Layout::HW};
definition_.src_tensors[1] = desc;
definition_.src_tensors.push_back(desc);
definition_.src_tensors.push_back(desc);
definition_.src_tensors.push_back(desc);
for (int i = 0; i < 4; ++i) {
const std::string name = "weights" + std::to_string(i);
AddSrcTensor(name, definition_.src_tensors[1 + i]);
}
}
}
code_ = GenerateConv(gpu_info, definition_, conv_params_);
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
if (gpu_info.IsMali()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
compiler_options_.push_back(CompilerOptions::kClRegisterAllocation64);
}
if (conv_params_.IsPrivateMemBroadcast() &&
(gpu_info.IsCL20OrHigher() || gpu_info.opencl_info.IsCLVK())) {
compiler_options_.push_back(CompilerOptions::kCl20);
}
bool kernel_is_trivial =
conv_params_.x_kernel_is_1 && conv_params_.y_kernel_is_1;
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
    kernel_is_trivial = kernel_is_trivial && conv_params_.z_kernel_is_1;
}
if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx() &&
definition_.precision == CalculationsPrecision::F16 &&
kernel_is_trivial) {
compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
}
}
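// task_size_* describe the destination grid in units of the block size; they
// are consumed by the coordinate decomposition emitted in
// GenerateBlockCoords.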
absl::Status ConvGeneric::BindArguments(ArgumentsBinder* args) {
const int task_size_b = dst_[0]->Batch();
const int task_size_x =
DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x);
const int task_size_y =
DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y);
const int task_size_z =
DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z);
RETURN_IF_ERROR(args->SetInt("task_size_b", task_size_b));
RETURN_IF_ERROR(args->SetInt("task_size_x", task_size_x));
RETURN_IF_ERROR(args->SetInt("task_size_y", task_size_y));
RETURN_IF_ERROR(args->SetInt("task_size_z", task_size_z));
return absl::OkStatus();
}
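// The grid mirrors the three thread mappings: everything folded into one
// dimension (linear_all), spatial dimensions folded with slices kept
// separate (linear_spatial), or X*B / Y*Z / S.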
int3 ConvGeneric::GetGridSize() const {
const int task_size_b = dst_[0]->Batch();
const int task_size_x =
DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x);
const int task_size_y =
DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y);
const int task_size_z =
DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z);
const int task_size_s =
DivideRoundUp(dst_[0]->Slices(), conv_params_.block_size.w);
int3 wg;
if (conv_params_.linear_all) {
return int3(
task_size_x * task_size_b * task_size_y * task_size_z * task_size_s, 1,
1);
} else if (conv_params_.linear_spatial) {
return int3(task_size_x * task_size_b * task_size_y * task_size_z,
task_size_s, 1);
} else {
return int3(task_size_x * task_size_b, task_size_y * task_size_z,
task_size_s);
}
}
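// When weights are uploaded cooperatively (async copy or per-thread upload)
// or the kernel was compiled with a required work group size, the work group
// cannot be tuned and the precomputed size is returned; otherwise candidate
// work groups are generated for tuning.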
void ConvGeneric::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
if (conv_params_.weights_upload_type ==
WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP ||
conv_params_.weights_upload_type ==
WeightsUploadType::LOCAL_MEM_BY_THREADS ||
conv_params_.fixed_work_group_size) {
work_groups->push_back(work_group_size_);
return;
}
GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
}
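// Main code generator. The emitted kernel:
//  1. decomposes the launch grid into destination block coordinates and
//     exits early when out of bounds - the check is deferred when local
//     memory or SIMD broadcast is used, because every thread must still take
//     part in the cooperative weight load;
//  2. precomputes the source coordinates for each block element, clamping or
//     masking reads when the storage type cannot clamp for free;
//  3. loops over source slices, staging a chunk of weights in local memory,
//     subgroup registers, private registers or constant/texture reads, and
//     accumulates block_size.x*y*z*w partial sums;
//  4. adds the bias and writes each block element back with a per-element
//     bounds check.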
std::string ConvGeneric::GenerateConv(const GpuInfo& gpu_info,
const OperationDef& op_def,
const ConvParams& conv_params) {
const auto& src_def = op_def.src_tensors[0];
auto generate_id = [&](const std::string& x, const std::string& y,
const std::string& z) {
std::string id;
if (src_def.HasAxis(Axis::WIDTH)) {
id += "_w" + x;
}
if (src_def.HasAxis(Axis::HEIGHT)) {
id += "_h" + y;
}
if (src_def.HasAxis(Axis::DEPTH)) {
id += "_d" + z;
}
return id;
};
auto generate_id_full = [&](const std::string& x, const std::string& y,
const std::string& z, const std::string& s) {
return generate_id(x, y, z) + "_s" + s;
};
auto generate_check = [&](const std::string& x, const std::string& y,
const std::string& z) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH};
const std::vector<std::string> names{"in_x", "in_y", "in_z"};
const std::vector<bool> is_1{conv_params_.x_kernel_is_1,
conv_params_.y_kernel_is_1,
conv_params_.z_kernel_is_1};
const std::vector<std::string> coords{x, y, z};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_def.HasAxis(axis) && !src_def.SupportsZeroClamp(axis, gpu_info) &&
!is_1[i]) {
if (!check.empty()) {
check += " && ";
}
check += names[i] + coords[i];
}
}
return check;
};
if (!conv_params_.x_kernel_is_1) {
args_.AddInt("stride_x", stride_.x);
args_.AddInt("padding_x", padding_.x);
args_.AddInt("kernel_size_x", kernel_size_.x);
args_.AddInt("dilation_x", dilation_.x);
}
if (!conv_params_.y_kernel_is_1) {
args_.AddInt("stride_y", stride_.y);
args_.AddInt("padding_y", padding_.y);
args_.AddInt("kernel_size_y", kernel_size_.y);
args_.AddInt("dilation_y", dilation_.y);
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
args_.AddInt("stride_z", stride_.z);
args_.AddInt("padding_z", padding_.z);
args_.AddInt("kernel_size_z", kernel_size_.z);
args_.AddInt("dilation_z", dilation_.z);
}
args_.AddInt("task_size_b");
args_.AddInt("task_size_x");
args_.AddInt("task_size_y");
args_.AddInt("task_size_z");
const int wg_total_size =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string barrier =
wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32()
? "SIMD_LOCAL_MEM_BARRIER"
: "LOCAL_MEM_BARRIER";
const bool need_local_mem =
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS ||
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP;
const int local_mem_size =
conv_params.block_size.w * 4 * conv_params.src_depth_loop_size;
const bool use_simd_broadcast = conv_params.IsPrivateMemBroadcast();
const int simd_size = conv_params.simd_size;
const bool late_oob_check = need_local_mem || use_simd_broadcast;
const std::string weights_space =
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::CONSTANT_MEM
? "__constant"
: "__global";
std::string c;
if (use_simd_broadcast && gpu_info.IsApiOpenCl()) {
if (gpu_info.opencl_info.cl_version == OpenClVersion::kCl2_0 ||
gpu_info.SupportsExtension("cl_khr_subgroups")) {
c += "#pragma OPENCL EXTENSION cl_khr_subgroups : enable\n";
} else if (gpu_info.SupportsExtension("cl_intel_subgroups")) {
c += "#pragma OPENCL EXTENSION cl_intel_subgroups : enable\n";
}
}
const int4 block_size = conv_params.block_size;
if (conv_params.fixed_work_group_size && gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(" +
std::to_string(work_group_size_.x) + ", " +
std::to_string(work_group_size_.y) + ", " +
std::to_string(work_group_size_.z) + ")))\n";
}
if (use_simd_broadcast && gpu_info.IsApiOpenCl() &&
gpu_info.SupportsExtension("cl_intel_required_subgroup_size")) {
c += "__attribute__((intel_reqd_sub_group_size(" +
std::to_string(simd_size) + ")))\n";
}
std::string dst_oob_check;
if (src_def.HasAxis(Axis::DEPTH)) {
if (conv_params.linear_all) {
dst_oob_check = "DST_S >= args.dst_tensor.Slices()";
} else if (conv_params.linear_spatial) {
dst_oob_check =
"DST_Z >= args.dst_tensor.Depth() || DST_S >= "
"args.dst_tensor.Slices()";
} else {
dst_oob_check =
"DST_X >= args.dst_tensor.Width() || DST_Z >= "
"args.dst_tensor.Depth() || DST_S >= args.dst_tensor.Slices()";
}
} else {
if (conv_params.linear_all) {
dst_oob_check = "DST_S >= args.dst_tensor.Slices()";
} else if (conv_params.linear_spatial) {
dst_oob_check =
"DST_Y >= args.dst_tensor.Height() || DST_S >= "
"args.dst_tensor.Slices()";
} else {
dst_oob_check =
"DST_X >= args.dst_tensor.Width() || DST_Y >= "
"args.dst_tensor.Height() || DST_S >= args.dst_tensor.Slices()";
}
}
c += "MAIN_FUNCTION($0) {\n";
c += GenerateBlockCoords(conv_params.block_size, work_group_launch_order_,
conv_params.linear_spatial, conv_params.linear_all,
src_def.HasAxis(Axis::DEPTH),
src_def.HasAxis(Axis::BATCH));
if (src_def.HasAxis(Axis::BATCH)) {
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
}
if (!conv_params.need_dst_loop) {
c += " DST_S = 0;\n";
}
c += " if (DST_S >= args.dst_tensor.Slices()) return;\n";
if (!late_oob_check) {
c += " if (" + dst_oob_check + ") {\n";
c += " return;\n";
c += " }\n";
}
if (conv_params.groups_support) {
c += " int conv_group_id = DST_S / args.dst_group_size;\n";
c += " int src_start_slice = conv_group_id * args.src_group_size;\n";
c += " int src_end_slice = src_start_slice + args.src_group_size;\n";
}
const std::string src_group_start_slice =
conv_params.groups_support ? "src_start_slice" : "0";
const std::string src_group_end_slice =
conv_params.groups_support ? "src_end_slice" : "args.src_tensor.Slices()";
const std::string src_group_slices = conv_params.groups_support
? "args.src_group_size"
: "args.src_tensor.Slices()";
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
if (conv_params.linear_spatial) {
c += " int lid = LOCAL_ID_0;\n";
} else {
c += " int lid = LOCAL_ID_1 * " + std::to_string(work_group_size_.x) +
" + LOCAL_ID_0;\n";
}
}
if (use_simd_broadcast) {
c += " int simd_id = SUB_GROUP_LOCAL_ID;\n";
}
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
c += " ACCUM_FLT4 r" + generate_id_full(xind, yind, zind, sind) +
" = INIT_ACCUM_FLT4(0.0f);\n";
}
}
}
}
if (!conv_params_.x_kernel_is_1) {
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string xc = "(DST_X + " + xind + ")";
c += " int xc" + xind + " = " + xc +
" * args.stride_x + args.padding_x;\n";
}
} else {
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
c += " int xc" + xind + " = DST_X + " + xind + ";\n";
if (!src_def.CanReadOutOfBorder(Axis::WIDTH)) {
c += " xc" + xind + " = clamp(xc" + xind +
", 0, args.src_tensor.Width() - 1);\n";
}
}
}
if (!conv_params_.y_kernel_is_1) {
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
const std::string yc = "(DST_Y + " + yind + ")";
c += " int yc" + yind + " = " + yc +
" * args.stride_y + args.padding_y;\n";
}
} else {
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
c += " int yc" + yind + " = DST_Y + " + yind + ";\n";
if (!src_def.CanReadOutOfBorder(Axis::HEIGHT)) {
c += " yc" + yind + " = clamp(yc" + yind +
", 0, args.src_tensor.Height() - 1);\n";
}
}
}
if (src_def.HasAxis(Axis::DEPTH)) {
if (!conv_params_.z_kernel_is_1) {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
const std::string zc = "(DST_Z + " + zind + ")";
c += " int zc" + zind + " = " + zc +
" * args.stride_z + args.padding_z;\n";
}
} else {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
c += " int zc" + zind + " = DST_Z + " + zind + ";\n";
if (!src_def.CanReadOutOfBorder(Axis::DEPTH)) {
c += " zc" + zind + " = clamp(zc" + zind +
", 0, args.src_tensor.Depth() - 1);\n";
}
}
}
}
bool trivial_kernel_size =
conv_params_.x_kernel_is_1 && conv_params_.y_kernel_is_1;
if (src_def.HasAxis(Axis::DEPTH)) {
trivial_kernel_size = trivial_kernel_size && conv_params_.z_kernel_is_1;
}
const std::string weights_global_ptr =
weights_space + " " + ToCLDataType(conv_params.weights_data_type, 4) +
"*";
DataType summable_data_type = conv_params.weights_data_type;
if (gpu_info.IsPowerVR() &&
op_def.precision == CalculationsPrecision::F32_F16 &&
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
summable_data_type = DataType::FLOAT32;
}
if (need_local_mem) {
c += " __local " + ToCLDataType(summable_data_type, 4) +
" weights_cache[" + std::to_string(local_mem_size) + "];\n";
} else if (conv_params.AreWeightsBuffer() &&
gpu_info.SupportsPointersInKernels()) {
c += " " + weights_global_ptr + " weights_cache;\n";
} else if (!trivial_kernel_size) {
c += " int filter_offset = 0;\n";
}
if (conv_params.AreWeightsBuffer()) {
std::string offset;
if (conv_params.different_weights_for_height) {
offset = "(DST_S * args.src_tensor.Height() + DST_Y * " +
std::to_string(block_size.w) +
") * 4 * args.src_tensor.Slices()";
} else {
std::string kernel_spatial_offset = "";
if (!conv_params_.x_kernel_is_1) {
kernel_spatial_offset += " * args.kernel_size_x";
}
if (!conv_params_.y_kernel_is_1) {
kernel_spatial_offset += " * args.kernel_size_y";
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
kernel_spatial_offset += " * args.kernel_size_z";
}
offset = "DST_S * 4 * " + src_group_slices + kernel_spatial_offset;
}
if (gpu_info.SupportsPointersInKernels()) {
c += " " + weights_global_ptr +
" filters_loc = args.weights.GetPtr() + " + offset + ";\n";
} else {
c += " int filters_offset = " + offset + ";\n";
}
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
c += " for (int kz = 0; kz < args.kernel_size_z; ++kz) {\n";
for (int z = 0; z < block_size.z; ++z) {
const std::string zck = "zck" + std::to_string(z);
c += " int zck" + std::to_string(z) + " = kz * args.dilation_z + zc" +
std::to_string(z) + ";\n";
if (!src_def.SupportsZeroClamp(Axis::DEPTH, gpu_info)) {
c += " bool in_z" + std::to_string(z) + " = " + zck + " >= 0 && " +
zck + " < args.src_tensor.Depth();\n";
if (!src_def.CanReadOutOfBorder(Axis::DEPTH)) {
c += " " + zck + " = clamp(" + zck +
", 0, args.src_tensor.Depth() - 1);\n";
}
}
}
}
if (!conv_params_.y_kernel_is_1) {
c += " for (int ky = 0; ky < args.kernel_size_y; ++ky) {\n";
for (int y = 0; y < block_size.y; ++y) {
const std::string yck = "yck" + std::to_string(y);
c += " int " + yck + " = ky * args.dilation_y + yc" + std::to_string(y) +
";\n";
if (!src_def.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y" + std::to_string(y) + " = " + yck + " >= 0 && " +
yck + " < args.src_tensor.Height();\n";
if (!src_def.CanReadOutOfBorder(Axis::HEIGHT)) {
c += " " + yck + " = clamp(" + yck +
", 0, args.src_tensor.Height() - 1);\n";
}
}
}
}
if (!conv_params_.x_kernel_is_1) {
c += " for (int kx = 0; kx < args.kernel_size_x; ++kx) {\n";
for (int x = 0; x < block_size.x; ++x) {
const std::string xck = "xck" + std::to_string(x);
c += " int xck" + std::to_string(x) + " = kx * args.dilation_x + xc" +
std::to_string(x) + ";\n";
if (!src_def.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x" + std::to_string(x) + " = " + xck + " >= 0 && " +
xck + " < args.src_tensor.Width();\n";
if (!src_def.CanReadOutOfBorder(Axis::WIDTH)) {
c += " " + xck + " = clamp(" + xck +
", 0, args.src_tensor.Width() - 1);\n";
}
}
}
}
const bool need_multiple_slice_strides =
src_def.ReturnsZeroForNegOneRead(gpu_info) && !trivial_kernel_size;
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string xc = conv_params.x_kernel_is_1 ? "xc" + xind : "xck" + xind;
std::string yc = conv_params.y_kernel_is_1 ? "yc" + yind : "yck" + yind;
const std::string id = generate_id(xind, yind, zind);
std::string coords = "" + xc + ", " + yc;
if (src_def.HasAxis(Axis::DEPTH)) {
std::string zc =
conv_params.z_kernel_is_1 ? "zc" + zind : "zck" + zind;
coords += ", " + zc;
}
if (src_def.IsLinear()) {
c += " int addr" + id + " = args.src_tensor.GetAddress(" + coords +
", " + src_group_start_slice + ");\n";
if (need_multiple_slice_strides) {
const std::string check = generate_check(xind, yind, zind);
c += " addr" + id + " = select(-1, addr" + id + ", (" + check +
"));\n";
c += " int ds" + id +
" = select(0, args.src_tensor.SliceStride(), (" + check +
"));\n";
}
}
}
}
}
if (src_def.IsLinear() && !need_multiple_slice_strides) {
c += " int ds = args.src_tensor.SliceStride();\n";
}
auto declare_src = [&]() {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id(xind, yind, zind);
c += " " + ToCLDataType(summable_data_type, 4) + " src" + id +
";\n";
}
}
}
};
const bool conditional_read = gpu_info.IsMali();
auto read_src = [&]() {
const std::string read_as_type = ToCLDataType(summable_data_type);
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string id = generate_id(xind, yind, zind);
const std::string check = generate_check(xind, yind, zind);
std::string address;
if (src_def.IsLinear()) {
address = "addr" + id;
} else {
std::string xc =
conv_params.x_kernel_is_1 ? "xc" + xind : "xck" + xind;
std::string yc =
conv_params.y_kernel_is_1 ? "yc" + yind : "yck" + yind;
address = "" + xc + ", " + yc;
if (src_def.HasAxis(Axis::DEPTH)) {
std::string zc =
conv_params.z_kernel_is_1 ? "zc" + zind : "zck" + zind;
address += ", " + zc;
}
address += ", s";
}
if (src_def.ReturnsZeroForNegOneRead(gpu_info)) {
c += " src" + id + " = args.src_tensor.Read<" + read_as_type +
">(" + address + ");\n";
const std::string ds = trivial_kernel_size ? "ds" : "ds" + id;
c += " " + address + " += " + ds + ";\n";
} else {
if (!check.empty()) {
if (conditional_read) {
c += " src" + id + " = " + check +
" ? args.src_tensor.Read<" + read_as_type + ">(" +
address + ") : INIT_FLT4(0.0f);\n";
} else {
c += " src" + id + " = args.src_tensor.Read<" +
read_as_type + ">(" + address + ") * INIT_FLT(" + check +
");\n";
}
} else {
c += " src" + id + " = args.src_tensor.Read<" + read_as_type +
">(" + address + ");\n";
}
if (src_def.IsLinear()) {
c += " " + address + " += ds;\n";
}
}
}
}
}
};
bool use_fma = gpu_info.IsAMD() && gpu_info.IsApiOpenCl();
auto conv_core = [&](int shared_offset) {
const std::string channels[] = {"x", "y", "z", "w"};
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
if (op_def.precision != CalculationsPrecision::F32_F16 ||
summable_data_type == DataType::FLOAT32) {
for (int ch = 0; ch < 4; ++ch) {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string R = "r" + generate_id_full(xind, yind, zind, sind);
std::string S = "src" + generate_id(xind, yind, zind);
if (use_simd_broadcast) {
int simd_id = (s * 4 + ch + shared_offset) / simd_size;
int thread_id = (s * 4 + ch + shared_offset) % simd_size;
std::string w_val_x = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".x, " +
std::to_string(thread_id) + "u)";
std::string w_val_y = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".y, " +
std::to_string(thread_id) + "u)";
std::string w_val_z = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".z, " +
std::to_string(thread_id) + "u)";
std::string w_val_w = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".w, " +
std::to_string(thread_id) + "u)";
if (GetWeightsDescription().IsI4O4()) {
c += " " + R + ".x += " + w_val_x + " * " + S + "." +
channels[ch] + ";\n";
c += " " + R + ".y += " + w_val_y + " * " + S + "." +
channels[ch] + ";\n";
c += " " + R + ".z += " + w_val_z + " * " + S + "." +
channels[ch] + ";\n";
c += " " + R + ".w += " + w_val_w + " * " + S + "." +
channels[ch] + ";\n";
} else {
c += " " + R + "." + channels[ch] + " += " + w_val_x +
" * " + S + ".x;\n";
c += " " + R + "." + channels[ch] + " += " + w_val_y +
" * " + S + ".y;\n";
c += " " + R + "." + channels[ch] + " += " + w_val_z +
" * " + S + ".z;\n";
c += " " + R + "." + channels[ch] + " += " + w_val_w +
" * " + S + ".w;\n";
}
} else {
const std::string weight_id =
std::to_string(s * 4 + ch + shared_offset);
std::string w_val;
if (conv_params.AreWeightsBuffer()) {
if (need_local_mem ||
gpu_info.SupportsPointersInKernels()) {
w_val = "weights_cache[" + weight_id + "]";
} else {
w_val = "args.weights.Read(filters_offset + " +
weight_id + ")";
}
} else {
w_val = "f" + weight_id;
}
if (GetWeightsDescription().IsI4O4()) {
if (use_fma) {
c += " " + R + " = fma(" + w_val + ", " + S + "." +
channels[ch] + ", " + R + ");\n";
} else {
c += " " + R + " += " + w_val + " * " + S + "." +
channels[ch] + ";\n";
}
} else {
c += " " + R + "." + channels[ch] + " += dot(" + w_val +
", " + S + ");\n";
}
}
}
}
}
}
} else {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string R = "r" + generate_id_full(xind, yind, zind, sind);
std::string S = "src" + generate_id(xind, yind, zind);
std::vector<std::string> F(4);
for (int i = 0; i < 4; ++i) {
std::string weight_id =
std::to_string(s * 4 + i + shared_offset);
if (conv_params.AreWeightsBuffer()) {
if (need_local_mem || gpu_info.SupportsPointersInKernels()) {
F[i] = "weights_cache[" + weight_id + "]";
} else {
F[i] =
"args.weights.Read(filters_offset + " + weight_id + ")";
}
} else {
F[i] = "f" + weight_id;
}
}
if (GetWeightsDescription().IsI4O4()) {
c += " " + R + " += TO_ACCUM_TYPE(" + S + ".x * " + F[0] +
" + " + S + ".y * " + F[1] + " + " + S + ".z * " + F[2] +
" + " + S + ".w * " + F[3] + ");\n";
} else {
c += " " + R + ".x += dot(" + S + ", " + F[0] + ");\n";
c += " " + R + ".y += dot(" + S + ", " + F[1] + ");\n";
c += " " + R + ".z += dot(" + S + ", " + F[2] + ");\n";
c += " " + R + ".w += dot(" + S + ", " + F[3] + ");\n";
}
}
}
}
}
}
};
c += " int s = " + src_group_start_slice + ";\n";
if (conv_params.need_src_loop) {
c += " do {\n";
}
declare_src();
const int total_work_items =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string type_conversion = GetTypeConversion(
gpu_info, conv_params.weights_data_type, summable_data_type, 4);
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP) {
    c += GenerateAsyncUpload("weights_cache", "filters_loc",
                             /*global_offset_name=*/"", local_mem_size);
} else if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
if (gpu_info.IsApiMetal() && wg_total_size == 32 &&
gpu_info.IsWaveSizeEqualTo32()) {
c += " SIMDGROUP_BARRIER(mem_flags::mem_none);\n";
} else {
c += " " + barrier + ";\n";
}
if (gpu_info.SupportsPointersInKernels()) {
c += GenerateUploadByThreads("weights_cache", "filters_loc",
true,
"", type_conversion,
"lid", total_work_items, local_mem_size);
} else {
c += GenerateUploadByThreads("weights_cache", "args.weights",
false, "filters_offset",
type_conversion, "lid", total_work_items,
local_mem_size);
}
} else if (use_simd_broadcast) {
int parts = local_mem_size / simd_size;
int reminder = local_mem_size % simd_size;
const std::string read_start = gpu_info.SupportsPointersInKernels()
? "filters_loc["
: "args.weights.Read(filters_offset + ";
const std::string read_end =
gpu_info.SupportsPointersInKernels() ? "]" : ")";
for (int i = 0; i < parts; ++i) {
const std::string weights_index =
"simd_id + " + std::to_string(i * simd_size);
c += " FLT4 simd_w" + std::to_string(i) + " = " + read_start +
weights_index + read_end + ";\n";
}
if (reminder) {
const std::string weights_index =
"simd_id + " + std::to_string(parts * simd_size);
c += " FLT4 simd_w" + std::to_string(parts) + ";\n";
c += " if (simd_id < " + std::to_string(reminder) + ") {\n";
c += " simd_w" + std::to_string(parts) + " = " + read_start +
weights_index + read_end + ";\n";
c += " }\n";
}
} else if (conv_params.AreWeightsBuffer()) {
if (gpu_info.SupportsPointersInKernels()) {
c += " weights_cache = filters_loc;\n";
}
} else {
for (int dst_s = 0; dst_s < block_size.w; ++dst_s) {
std::string f_y = trivial_kernel_size ? "s" : "filter_offset";
if (trivial_kernel_size && conv_params.groups_support) {
f_y = "s - src_start_slice";
}
if (conv_params.different_weights_for_height) {
f_y = "DST_Y * args.src_tensor.Slices() + s";
}
c += absl::Substitute(
R"( FLT4 f$2 = args.weights0.Read(DST_S + $0, $1);
FLT4 f$3 = args.weights1.Read(DST_S + $0, $1);
FLT4 f$4 = args.weights2.Read(DST_S + $0, $1);
FLT4 f$5 = args.weights3.Read(DST_S + $0, $1);
)",
dst_s, f_y, dst_s * 4 + 0, dst_s * 4 + 1, dst_s * 4 + 2,
dst_s * 4 + 3);
}
if (!trivial_kernel_size) {
c += " filter_offset++;\n";
}
}
read_src();
c += " s += 1;\n";
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " " + barrier + ";\n";
}
conv_core(0);
for (int i = 1; i < conv_params.src_depth_loop_size; ++i) {
read_src();
conv_core(i * block_size.w * 4);
c += " s += 1;\n";
}
if (conv_params.AreWeightsBuffer()) {
if (gpu_info.SupportsPointersInKernels()) {
c += " filters_loc += " + std::to_string(local_mem_size) + ";\n";
} else {
c += " filters_offset += " + std::to_string(local_mem_size) + ";\n";
}
}
if (conv_params.need_src_loop) {
c += " } while (s < " + src_group_end_slice + ");\n";
}
if (!conv_params.x_kernel_is_1) {
c += " };\n";
}
if (!conv_params.y_kernel_is_1) {
c += " };\n";
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
c += " };\n";
}
if (conv_params.AreWeightsBuffer()) {
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP) {
c += GenerateAsyncUpload("weights_cache", "args.biases.GetPtr()", "DST_S",
block_size.w);
} else if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " " + barrier + ";\n";
c += GenerateUploadByThreads("weights_cache", "args.biases",
false, "DST_S", type_conversion,
"lid", total_work_items, block_size.w);
c += " " + barrier + ";\n";
} else if (gpu_info.SupportsPointersInKernels()) {
c += " weights_cache = args.biases.GetPtr() + DST_S;\n";
}
}
if (late_oob_check) {
c += " if (" + dst_oob_check + ") {\n";
c += " return;\n";
c += " }\n";
}
auto generate_dst_check = [&](int x, int y, int z) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH};
const std::vector<std::string> names{"Width()", "Height()", "Depth()"};
std::vector<std::string> coords(3);
coords[0] = "DST_X + " + std::to_string(x);
coords[1] = "DST_Y + " + std::to_string(y);
coords[2] = "DST_Z + " + std::to_string(z);
const std::vector<int> ids{x, y, z};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_def.HasAxis(axis) && ids[i] != 0) {
if (!check.empty()) {
check += " && ";
}
check += coords[i] + " < args.dst_tensor." + names[i];
}
}
return check;
};
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
c += " if (DST_S + " + sind + " >= args.dst_tensor.Slices()) return;\n";
c += " {\n";
if (conv_params.AreWeightsBuffer() &&
(need_local_mem || gpu_info.SupportsPointersInKernels())) {
c += " FLT4 bias_val = TO_FLT4(weights_cache[" + sind + "]);\n";
} else {
c += " FLT4 bias_val = args.biases.Read(DST_S + " + sind + ");\n";
}
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id_full(xind, yind, zind, sind);
const std::string check = generate_dst_check(x, y, z);
std::string coords = "DST_X + " + xind + ", DST_Y + " + yind;
if (src_def.HasAxis(Axis::DEPTH)) {
coords += ", DST_Z + " + zind;
}
coords += ", DST_S + " + sind;
if (!check.empty()) {
c += " if (" + check + ") {\n";
} else {
c += " {\n";
}
c += " FLT4 res = TO_FLT4(r" + id + ") + bias_val;\n";
c += " args.dst_tensor.Write(res, " + coords + ");\n";
c += " }\n";
}
}
}
c += " }\n";
}
c += "}\n";
return c;
}
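// Work-group-count helpers used by the Apple tuning heuristics below, one
// per thread mapping (default grid, linearized W*H, linearized W*H*S).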
int GetGroupsCount(const BHWC& dst_shape, const int3& wg_size,
const int4& block_size) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int grid_x = DivideRoundUp(dst_shape.w, block_size.x) * dst_shape.b;
int grid_y = DivideRoundUp(dst_shape.h, block_size.y);
int grid_z = DivideRoundUp(dst_slices, block_size.w);
return DivideRoundUp(grid_x, wg_size.x) * DivideRoundUp(grid_y, wg_size.y) *
DivideRoundUp(grid_z, wg_size.z);
}
int GetGroupsCountForLinearWH(const BHWC& dst_shape, const int3& wg_size,
const int4& block_size) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int grid_x = DivideRoundUp(dst_shape.w, block_size.x) * dst_shape.b;
int grid_y = DivideRoundUp(dst_shape.h, block_size.y);
int grid_z = DivideRoundUp(dst_slices, block_size.w);
return DivideRoundUp(grid_x * grid_y, wg_size.x) *
DivideRoundUp(grid_z, wg_size.y);
}
int GetGroupsCountForLinearWHS(const BHWC& dst_shape, const int3& wg_size,
const int4& block_size) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int grid_x = DivideRoundUp(dst_shape.w, block_size.x) * dst_shape.b;
int grid_y = DivideRoundUp(dst_shape.h, block_size.y);
int grid_z = DivideRoundUp(dst_slices, block_size.w);
return DivideRoundUp(grid_x * grid_y * grid_z, wg_size.x);
}
bool IsKernelXIs1(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 1 && attr.strides.w == 1 &&
attr.dilations.w == 1 && attr.padding.prepended.w == 0 &&
attr.padding.appended.w == 0;
}
bool IsKernelYIs1(const Convolution2DAttributes& attr) {
return attr.weights.shape.h == 1 && attr.strides.h == 1 &&
attr.dilations.h == 1 && attr.padding.prepended.h == 0 &&
attr.padding.appended.h == 0;
}
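// Estimates how many 32-wide waves the problem produces and turns the
// waves-per-compute-unit ratio into a recommended total block size
// (8/4/2/1): bigger blocks only pay off when there is enough work to keep
// every compute unit busy.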
int GetMaximumPossibleWavesCount(const AppleInfo& apple_info,
const BHWC& dst_shape) {
if (apple_info.IsLocalMemoryPreferredOverGlobal()) {
return GetGroupsCountForLinearWH(dst_shape, {32, 1, 1}, int4(1, 1, 1, 1));
} else {
return GetGroupsCountForLinearWHS(dst_shape, {32, 1, 1}, int4(1, 1, 1, 1));
}
}
int GetRecommendedBlockSize(const AppleInfo& apple_info,
const BHWC& dst_shape) {
const int max_waves = GetMaximumPossibleWavesCount(apple_info, dst_shape);
const int cu_count = apple_info.GetComputeUnitsCount();
if (max_waves >= cu_count * 64) {
return 8;
} else if (max_waves >= cu_count * 32) {
return 4;
} else if (max_waves >= cu_count * 16) {
return 2;
} else {
return 1;
}
}
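// A candidate work group configuration with a penalty factor; in
// GetConvParamsForA7A8 the option with the smallest penalized total thread
// count wins.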
struct WorkGroupSizeOption {
enum class ThreadMapping { kDefault, kLinearSpatial, kLinearAll };
int3 work_group_size;
int work_groups_count;
ThreadMapping thread_mapping;
float penalty = 1.0f;
};
WorkGroupSizeOption CreateWorkGroupSizeOption(
const int3& work_group_size,
WorkGroupSizeOption::ThreadMapping mapping_type, float penalty,
const BHWC& dst_shape, const int4& block_size) {
WorkGroupSizeOption wg;
wg.work_group_size = work_group_size;
wg.thread_mapping = mapping_type;
wg.penalty = penalty;
if (mapping_type == WorkGroupSizeOption::ThreadMapping::kDefault) {
wg.work_groups_count =
GetGroupsCount(dst_shape, work_group_size, block_size);
} else if (mapping_type ==
WorkGroupSizeOption::ThreadMapping::kLinearSpatial) {
wg.work_groups_count =
GetGroupsCountForLinearWH(dst_shape, work_group_size, block_size);
} else if (mapping_type == WorkGroupSizeOption::ThreadMapping::kLinearAll) {
wg.work_groups_count =
GetGroupsCountForLinearWHS(dst_shape, work_group_size, block_size);
}
return wg;
}
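// Tuning for Apple GPUs that prefer local over global memory (A7/A8 class):
// weights are staged through threadgroup memory by the threads themselves,
// and several work group sizes / thread mappings are scored by penalized
// thread count. Fully linear options carry an extra 3.1x penalty and switch
// the upload to plain global memory reads. When neither a source nor a
// destination loop is needed and the kernel is 1x1, the filters move to
// constant memory.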
ConvGeneric::ConvParams GetConvParamsForA7A8(const AppleInfo& apple_info,
bool x_kernel_is_1,
bool y_kernel_is_1, int src_slices,
const BHWC& dst_shape) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int blk_total_size = GetRecommendedBlockSize(apple_info, dst_shape);
int3 block_size = int3(1, 1, 1);
if (blk_total_size >= 4 && (dst_slices % 4 == 0 || dst_slices >= 16)) {
block_size.z = 4;
blk_total_size /= 4;
} else if (blk_total_size >= 2 && (dst_slices % 2 == 0 || dst_slices >= 4)) {
block_size.z = 2;
blk_total_size /= 2;
}
if (blk_total_size >= 4) {
block_size.x = 2;
block_size.y = 2;
blk_total_size /= 4;
} else if (blk_total_size >= 2) {
if (dst_shape.w % 2 != 0 && dst_shape.h % 2 == 0) {
block_size.y = 2;
} else {
block_size.x = 2;
}
blk_total_size /= 2;
}
ConvGeneric::ConvParams params;
params.weights_upload_type =
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS;
params.x_kernel_is_1 = x_kernel_is_1;
params.y_kernel_is_1 = y_kernel_is_1;
params.src_depth_loop_size = 1;
params.block_size.x = block_size.x;
params.block_size.y = block_size.y;
params.block_size.z = 1;
params.block_size.w = block_size.z;
params.weights_layout = WeightsLayout::kOSpatialIOGroupO4I4;
std::vector<WorkGroupSizeOption> options;
options.push_back(CreateWorkGroupSizeOption(
{8, 4, 1}, WorkGroupSizeOption::ThreadMapping::kDefault, 1.0f, dst_shape,
params.block_size));
if (!apple_info.IsFamilyApple1()) {
options.push_back(CreateWorkGroupSizeOption(
{4, 4, 1}, WorkGroupSizeOption::ThreadMapping::kDefault, 1.01f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{4, 2, 1}, WorkGroupSizeOption::ThreadMapping::kDefault, 1.25f,
dst_shape, params.block_size));
}
options.push_back(CreateWorkGroupSizeOption(
{32, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearSpatial, 1.0f,
dst_shape, params.block_size));
if (!apple_info.IsFamilyApple1()) {
options.push_back(CreateWorkGroupSizeOption(
{16, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearSpatial, 1.01f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{8, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearSpatial, 1.25f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{32, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearAll, 3.1 * 1.0f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{16, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearAll, 3.1 * 1.01f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{8, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearAll, 3.1 * 1.25f,
dst_shape, params.block_size));
}
float optimum = options[0].work_groups_count * options[0].penalty *
options[0].work_group_size.x * options[0].work_group_size.y *
options[0].work_group_size.z;
int optimum_index = 0;
for (int i = 1; i < options.size(); ++i) {
float local_optimum = options[i].work_groups_count * options[i].penalty *
options[i].work_group_size.x *
options[i].work_group_size.y *
options[i].work_group_size.z;
if (local_optimum < optimum) {
optimum = local_optimum;
optimum_index = i;
}
}
WorkGroupSizeOption optimum_wg = options[optimum_index];
if (optimum_wg.thread_mapping ==
WorkGroupSizeOption::ThreadMapping::kLinearSpatial) {
params.linear_spatial = true;
params.linear_all = false;
params.work_group_size = optimum_wg.work_group_size;
params.work_group_launch_order = int3(1, 0, 2);
} else if (optimum_wg.thread_mapping ==
WorkGroupSizeOption::ThreadMapping::kLinearAll) {
params.linear_spatial = false;
params.linear_all = true;
params.work_group_size = optimum_wg.work_group_size;
params.work_group_launch_order = int3(0, 1, 2);
params.weights_upload_type = ConvGeneric::WeightsUploadType::GLOBAL_MEM;
} else {
params.linear_spatial = false;
params.linear_all = false;
params.work_group_size = optimum_wg.work_group_size;
params.work_group_launch_order = int3(2, 0, 1);
}
int total_elements = params.block_size.x * params.block_size.y *
params.block_size.z * params.block_size.w;
if (total_elements == 1) {
if (src_slices % 4 == 0) {
params.src_depth_loop_size = 4;
} else if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
} else if (total_elements == 2) {
if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
}
if (params.src_depth_loop_size == src_slices) {
params.need_src_loop = false;
}
if (params.block_size.w == dst_slices) {
params.need_dst_loop = false;
}
const bool use_filters_constants =
!params.need_dst_loop && !params.need_src_loop && params.x_kernel_is_1 &&
params.y_kernel_is_1;
if (use_filters_constants) {
params.weights_upload_type = ConvGeneric::WeightsUploadType::CONSTANT_MEM;
}
return params;
}
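// Tuning for Apple A9 and newer: weights are read directly from global
// memory, and the plain / linear-spatial / fully-linear mappings are chosen
// by comparing their work group counts, with a slightly higher threshold for
// going fully linear on pre-Bionic chips. As above, constant memory is used
// when no source or destination loop is required.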
ConvGeneric::ConvParams GetConvParamsForA9AndHigher(const AppleInfo& apple_info,
bool x_kernel_is_1,
bool y_kernel_is_1,
int src_slices,
const BHWC& dst_shape) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int blk_total_size = GetRecommendedBlockSize(apple_info, dst_shape);
int3 block_size = int3(1, 1, 1);
if (blk_total_size >= 2 && apple_info.IsBionic()) {
if (dst_shape.h % 2 != 0 && dst_shape.w % 2 == 0) {
block_size.x = 2;
} else {
block_size.y = 2;
}
blk_total_size /= 2;
}
if (blk_total_size >= 4 && (dst_slices % 4 == 0 || dst_slices >= 16)) {
block_size.z = 4;
blk_total_size /= 4;
} else if (blk_total_size >= 2 && (dst_slices % 2 == 0 || dst_slices >= 4)) {
block_size.z = 2;
blk_total_size /= 2;
}
if (blk_total_size >= 4 && dst_slices == 3) {
block_size.z = 3;
blk_total_size /= 4;
}
ConvGeneric::ConvParams params;
params.weights_upload_type = ConvGeneric::WeightsUploadType::GLOBAL_MEM;
params.x_kernel_is_1 = x_kernel_is_1;
params.y_kernel_is_1 = y_kernel_is_1;
params.src_depth_loop_size = 1;
params.block_size.x = block_size.x;
params.block_size.y = block_size.y;
params.block_size.z = 1;
params.block_size.w = block_size.z;
params.linear_spatial = false;
params.linear_all = false;
params.work_group_size = int3(8, 4, 1);
params.work_group_launch_order = int3(2, 0, 1);
params.weights_layout = WeightsLayout::kOSpatialIOGroupO4I4;
int g1 = GetGroupsCount(dst_shape, params.work_group_size, params.block_size);
int g2 = GetGroupsCountForLinearWH(dst_shape, {32, 1, 1}, params.block_size);
int g3 = GetGroupsCountForLinearWHS(dst_shape, {32, 1, 1}, params.block_size);
if (g2 < g1) {
params.linear_spatial = true;
params.work_group_size = int3(32, 1, 1);
params.work_group_launch_order = int3(0, 1, 2);
}
float precise_threshold = apple_info.IsBionic() ? 1.0f : 1.04f;
float precise_ratio = static_cast<float>(g2) / static_cast<float>(g3);
if (precise_ratio > precise_threshold) {
params.linear_spatial = false;
params.linear_all = true;
params.work_group_size = int3(32, 1, 1);
}
int total_elements = params.block_size.x * params.block_size.y *
params.block_size.z * params.block_size.w;
if (total_elements == 1) {
if (src_slices % 4 == 0) {
params.src_depth_loop_size = 4;
} else if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
} else if (total_elements == 2) {
if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
}
if (params.src_depth_loop_size == src_slices) {
params.need_src_loop = false;
}
if (params.block_size.w == dst_slices) {
params.need_dst_loop = false;
}
const bool use_filters_constants =
!params.need_dst_loop && !params.need_src_loop && params.x_kernel_is_1 &&
params.y_kernel_is_1;
if (use_filters_constants) {
params.weights_upload_type = ConvGeneric::WeightsUploadType::CONSTANT_MEM;
}
return params;
}
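// Apple dispatch: A7/A8-class chips prefer staging weights in threadgroup
// memory, newer chips read them from global memory.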
ConvGeneric::ConvParams ConvGeneric::GuessBestParamsApple(
const GpuInfo& gpu_info, const OperationDef& definition, int src_depth,
int dst_depth, bool x_kernel_is_1, bool y_kernel_is_1,
bool different_weights_for_height, const BHWC& dst_shape) {
if (gpu_info.apple_info.IsLocalMemoryPreferredOverGlobal()) {
return GetConvParamsForA7A8(gpu_info.apple_info, x_kernel_is_1,
y_kernel_is_1, src_depth, dst_shape);
} else {
return GetConvParamsForA9AndHigher(gpu_info.apple_info, x_kernel_is_1,
y_kernel_is_1, src_depth, dst_shape);
}
}
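// Per-vendor heuristics. Each branch selects the thread mapping, work group
// size and launch order, block size, source-depth unrolling and weights
// upload strategy (local memory, constant memory, textures, global memory or
// subgroup broadcast) for NVIDIA, PowerVR, AMD, Mali, Adreno, Intel and
// Apple; the final else is a conservative default with weights in global
// memory. The weights layout is derived at the end from whether the weights
// end up in a buffer (kOSpatialIOGroup*) or in 2D textures (k2DX4...).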
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition, int src_depth,
int dst_depth, bool x_kernel_is_1, bool y_kernel_is_1,
bool different_weights_for_height, const BHWC* dst_shape) {
ConvParams conv_params;
conv_params.linear_spatial = false;
conv_params.linear_all = false;
conv_params.block_size = int4(1, 1, 1, 1);
conv_params.weights_data_type =
DeduceDataTypeFromPrecision(definition.precision);
conv_params.x_kernel_is_1 = x_kernel_is_1;
conv_params.y_kernel_is_1 = y_kernel_is_1;
conv_params.different_weights_for_height = different_weights_for_height;
if (gpu_info.IsNvidia()) {
if (different_weights_for_height) {
work_group_size_ = int3(32, 1, 1);
work_group_launch_order_ = int3(2, 0, 1);
conv_params.fixed_work_group_size = true;
} else {
conv_params.linear_spatial = true;
work_group_size_ = int3(32, 1, 1);
work_group_launch_order_ = int3(1, 0, 2);
conv_params.fixed_work_group_size = true;
}
conv_params.block_size = int4(2, 1, 1, 4);
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::LOCAL_MEM_BY_THREADS;
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (dst_shape) {
int task_size = dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
float task_size_per_cu =
static_cast<float>(task_size) / gpu_info.GetComputeUnitsCount();
int block_size = conv_params.block_size.x * conv_params.block_size.y *
conv_params.block_size.w;
float threads_per_cu = task_size_per_cu / block_size;
      float warps_per_cu = threads_per_cu / 32;  // 32 = warp size on NVIDIA.
if (warps_per_cu < 8.0f) {
conv_params.block_size.x = 1;
}
if (warps_per_cu < 4.0f && conv_params.block_size.w >= 4) {
conv_params.block_size.w /= 2;
}
if (warps_per_cu < 2.0f && conv_params.block_size.w >= 2) {
conv_params.block_size.w /= 2;
}
}
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
} else if (gpu_info.IsPowerVR()) {
if (gpu_info.IsCL30OrHigher()) {
work_group_size_ =
int3(gpu_info.opencl_info.preferred_work_group_size_multiple, 1, 1);
} else {
work_group_size_ = int3(32, 1, 1);
}
if (different_weights_for_height) {
work_group_launch_order_ = int3(2, 0, 1);
conv_params.fixed_work_group_size = true;
} else {
conv_params.linear_spatial = true;
work_group_launch_order_ = int3(1, 0, 2);
conv_params.fixed_work_group_size = true;
}
conv_params.block_size = int4(1, 1, 1, 4);
conv_params.src_depth_loop_size = 1;
if (!gpu_info.IsApiOpenCl() ||
(gpu_info.IsApiOpenCl() &&
gpu_info.opencl_info.dedicated_local_memory)) {
if (definition.precision == CalculationsPrecision::F32_F16) {
conv_params.weights_upload_type =
WeightsUploadType::LOCAL_MEM_BY_THREADS;
} else {
conv_params.weights_upload_type =
WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP;
}
} else {
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
}
if (dst_depth % 8 == 0 || dst_depth >= 32) {
conv_params.block_size.w = 8;
} else if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (definition.precision == CalculationsPrecision::F16) {
conv_params.block_size.w = std::min(4, conv_params.block_size.w);
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
if (conv_params.block_size.w == 1) {
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0) {
conv_params.src_depth_loop_size = 4;
}
if (src_depth <= 8) {
conv_params.src_depth_loop_size = src_depth;
}
}
conv_params.block_size.x = 2;
}
} else if (gpu_info.IsAMD()) {
work_group_size_ = int3(8, 4, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
if (gpu_info.IsApiOpenCl()) {
conv_params.weights_upload_type = WeightsUploadType::CONSTANT_MEM;
} else {
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
}
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size = int4(2, 2, 1, 4);
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size = int4(4, 2, 1, 2);
} else {
conv_params.block_size = int4(4, 4, 1, 1);
}
auto reduce_block_size_wzyx = [](int4* block_size) {
if (block_size->w % 2 == 0) {
block_size->w /= 2;
} else if (block_size->z % 2 == 0) {
block_size->z /= 2;
} else if (block_size->y % 2 == 0) {
block_size->y /= 2;
} else if (block_size->x % 2 == 0) {
block_size->x /= 2;
}
};
if (definition_.precision != CalculationsPrecision::F16) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (dst_shape) {
int task_size = dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
float task_size_per_cu =
static_cast<float>(task_size) / gpu_info.GetComputeUnitsCount();
int block_size = conv_params.block_size.x * conv_params.block_size.y *
conv_params.block_size.w;
float threads_per_cu = task_size_per_cu / block_size;
float warps_per_cu = threads_per_cu / 64;
if (warps_per_cu < 4.0f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (warps_per_cu < 2.0f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (warps_per_cu < 1.0f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (warps_per_cu < 0.5f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
}
int block_size = conv_params.block_size.x * conv_params.block_size.y *
conv_params.block_size.w;
conv_params.src_depth_loop_size = 1;
if (block_size <= 4 && src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (block_size <= 2 && src_depth % 4 == 0) {
conv_params.src_depth_loop_size = 4;
}
if (block_size <= 1 && src_depth % 8 == 0) {
conv_params.src_depth_loop_size = 8;
}
} else if (gpu_info.IsMali()) {
int block_size = 2;
if (dst_shape) {
int task_size = dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
block_size = GetRecommendedBlockSizeForConv(
gpu_info, definition.precision, task_size);
}
if (!x_kernel_is_1 || !y_kernel_is_1) {
if (gpu_info.mali_info.IsMidgard() || gpu_info.mali_info.IsBifrost()) {
block_size = std::min(block_size, 4);
}
}
if (block_size == 8) {
if (dst_depth == 1 || dst_depth == 3) {
conv_params.block_size = int4(2, 2, 1, 1);
} else {
conv_params.block_size = int4(2, 2, 1, 2);
}
} else if (block_size == 4) {
if (dst_depth == 1 || dst_depth == 3) {
conv_params.block_size = int4(2, 2, 1, 1);
} else {
conv_params.block_size = int4(2, 1, 1, 1);
if (definition.precision == CalculationsPrecision::F32 &&
gpu_info.mali_info.IsValhall()) {
conv_params.block_size.y = 2;
} else {
conv_params.block_size.w = 2;
}
}
} else if (block_size == 2) {
conv_params.block_size = int4(2, 1, 1, 1);
} else {
conv_params.block_size = int4(1, 1, 1, 1);
}
if (dst_shape) {
if (dst_shape->w == 1) {
conv_params.block_size.y *= conv_params.block_size.x;
conv_params.block_size.x = 1;
}
if (dst_shape->h == 1) {
conv_params.block_size.x *= conv_params.block_size.y;
conv_params.block_size.y = 1;
}
}
conv_params.src_depth_loop_size = 1;
MaliInfo mali_info = gpu_info.mali_info;
if (src_depth % 2 == 0 && block_size <= 2 && !mali_info.IsMidgard()) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && block_size == 1 && !mali_info.IsMidgard() &&
definition.precision == CalculationsPrecision::F16) {
conv_params.src_depth_loop_size = 4;
}
work_group_size_ = int3(4, 4, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
} else if (gpu_info.IsAdreno()) {
if (dst_shape) {
const int wave_size = gpu_info.adreno_info.GetWaveSize(
definition.precision == CalculationsPrecision::F16);
const double task_size =
1.0 * dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
const double waves =
task_size / gpu_info.GetComputeUnitsCount() / wave_size;
if (waves <= 6.0f) {
conv_params.block_size = int4(1, 1, 1, 1);
} else if (waves <= 12.0f) {
conv_params.block_size = int4(2, 1, 1, 1);
} else if (waves <= 24.0f) {
conv_params.block_size = int4(2, 1, 1, 2);
} else {
conv_params.block_size = int4(2, 2, 1, 2);
}
} else {
conv_params.block_size = int4(2, 2, 1, 2);
}
if (gpu_info.adreno_info.IsAdreno3xx()) {
if (definition.precision == CalculationsPrecision::F16) {
conv_params.block_size = int4(2, 2, 1, 2);
} else if (definition.precision == CalculationsPrecision::F32_F16) {
conv_params.block_size = int4(2, 1, 1, 2);
} else {
conv_params.block_size = int4(2, 2, 1, 1);
}
}
work_group_size_ = int3(8, 2, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::TEXTURES_MEM_X4;
} else if (gpu_info.IsIntel()) {
if (different_weights_for_height) {
work_group_size_ = int3(16, 1, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = true;
} else {
conv_params.linear_spatial = true;
work_group_size_ = int3(16, 1, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = true;
}
conv_params.block_size = int4(1, 1, 1, 4);
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::LOCAL_MEM_BY_THREADS;
if (gpu_info.IsApiMetal() &&
definition.precision != CalculationsPrecision::F32_F16 &&
gpu_info.metal_info.IsMslVersionEqualOrHigher(2)) {
conv_params.weights_upload_type =
WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST;
conv_params.simd_size = 8;
}
if (gpu_info.IsApiOpenCl() &&
definition.precision != CalculationsPrecision::F32_F16) {
const bool supports_subgroups =
gpu_info.SupportsExtension("cl_khr_subgroups") ||
gpu_info.SupportsExtension("cl_intel_subgroups") ||
gpu_info.opencl_info.IsCLVK();
if (supports_subgroups) {
const int kSubGroupSize = 16;
const bool supports_subgroup_size_control =
gpu_info.SupportsExtension("cl_intel_required_subgroup_size");
int min_subgroup_size;
auto min_subgroup_size_status =
gpu_info.GetMinSubGroupSize(min_subgroup_size);
if (supports_subgroup_size_control &&
gpu_info.SupportsSubGroupWithSize(kSubGroupSize)) {
conv_params.weights_upload_type =
WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST;
conv_params.simd_size = kSubGroupSize;
} else if (supports_subgroup_size_control &&
min_subgroup_size_status.ok()) {
conv_params.weights_upload_type =
WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST;
conv_params.simd_size = min_subgroup_size;
work_group_size_ = int3(min_subgroup_size, 1, 1);
} else {
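          // Neither a fixed nor a queryable minimum subgroup size is
          // available; keep the LOCAL_MEM_BY_THREADS upload chosen above.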
}
}
}
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
} else if (gpu_info.IsApple()) {
BHWC output_shape = BHWC(1, 32, 32, 128);
if (dst_shape) {
output_shape = *dst_shape;
}
conv_params = GuessBestParamsApple(
gpu_info, definition, src_depth, dst_depth, x_kernel_is_1,
y_kernel_is_1, different_weights_for_height, output_shape);
conv_params.fixed_work_group_size = true;
work_group_size_ = conv_params.work_group_size;
work_group_launch_order_ = conv_params.work_group_launch_order;
conv_params.weights_data_type =
DeduceDataTypeFromPrecision(definition.precision);
conv_params.x_kernel_is_1 = x_kernel_is_1;
conv_params.y_kernel_is_1 = y_kernel_is_1;
conv_params.different_weights_for_height = different_weights_for_height;
} else {
conv_params.block_size = int4(1, 1, 1, 4);
work_group_size_ = int3(8, 2, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
}
if (conv_params.AreWeightsBuffer()) {
if (gpu_info.IsApple()) {
conv_params.weights_layout = WeightsLayout::kOSpatialIOGroupO4I4;
} else {
conv_params.weights_layout = WeightsLayout::kOSpatialIOGroupI4O4;
}
} else {
if (gpu_info.IsApple()) {
conv_params.weights_layout =
WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4;
} else {
conv_params.weights_layout =
WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4;
}
}
return conv_params;
}
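// Convenience overloads. They derive src/dst slice counts and the
// "kernel is 1" flags from the attributes and defer to the heuristic above:
// the 3D variant folds depth into height for shape-based tuning, while the
// fully connected and pointwise variants flatten the Y block and work group
// dimensions into X.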
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const Convolution2DAttributes& attr, const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
const bool x_kernel_is_1 = attr.weights.shape.w == 1 && attr.strides.w == 1 &&
attr.dilations.w == 1 &&
attr.padding.prepended.w == 0 &&
attr.padding.appended.w == 0;
const bool y_kernel_is_1 = attr.weights.shape.h == 1 && attr.strides.h == 1 &&
attr.dilations.h == 1 &&
attr.padding.prepended.h == 0 &&
attr.padding.appended.h == 0;
return GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, dst_shape);
}
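// Convolution3DAttributes overload: the depth dimension is folded into the
// height of a temporary BHWC shape before reusing the 2D heuristic, and the
// z_kernel_is_1 flag is attached to the result afterwards.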
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const Convolution3DAttributes& attr, const BHWDC* dst_shape) {
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
const bool x_kernel_is_1 = attr.weights.shape.w == 1 && attr.strides.w == 1 &&
attr.dilations.w == 1 &&
attr.padding.prepended.w == 0 &&
attr.padding.appended.w == 0;
const bool y_kernel_is_1 = attr.weights.shape.h == 1 && attr.strides.h == 1 &&
attr.dilations.h == 1 &&
attr.padding.prepended.h == 0 &&
attr.padding.appended.h == 0;
const bool z_kernel_is_1 = attr.weights.shape.d == 1 && attr.strides.d == 1 &&
attr.dilations.d == 1 &&
attr.padding.prepended.d == 0 &&
attr.padding.appended.d == 0;
ConvGeneric::ConvParams result;
BHWC shape;
if (dst_shape) {
shape.b = dst_shape->b;
shape.h = dst_shape->h * dst_shape->d;
shape.w = dst_shape->w;
shape.c = dst_shape->c;
result = GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, &shape);
} else {
result = GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, nullptr);
}
result.z_kernel_is_1 = z_kernel_is_1;
return result;
}
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const Convolution2DAttributes& attr, const BHWC& weights_shape,
const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(weights_shape.b, 4);
const int src_depth = DivideRoundUp(weights_shape.c, 4);
const bool x_kernel_is_1 =
weights_shape.w == 1 && attr.strides.w == 1 && attr.dilations.w == 1 &&
attr.padding.prepended.w == 0 && attr.padding.appended.w == 0;
const bool y_kernel_is_1 =
weights_shape.h == 1 && attr.strides.h == 1 && attr.dilations.h == 1 &&
attr.padding.prepended.h == 0 && attr.padding.appended.h == 0;
return GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, dst_shape);
}
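// FullyConnectedAttributes overload: a fully connected layer is handled as a
// 1x1 convolution over a single spatial position, so the Y block size and the
// Y work group dimension are folded into X.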
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const FullyConnectedAttributes& attr, const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
ConvGeneric::ConvParams params = GuessBestParams(
gpu_info, definition, src_depth, dst_depth, true, true, false, dst_shape);
work_group_size_.x *= work_group_size_.y;
work_group_size_.y = 1;
params.block_size.x *= params.block_size.y;
params.block_size.y = 1;
return params;
}
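// Pointwise variant used by the batched-matmul and Winograd entry points
// below: the kernel is treated as 1x1 and different_weights_for_height is
// passed as true, allowing a separate set of weights per row of the
// intermediate tensor.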
ConvGeneric::ConvParams ConvGeneric::GuessBestParamsPointwise(
const GpuInfo& gpu_info, const OperationDef& definition,
const OHWI& weights_shape, const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(weights_shape.o, 4);
const int src_depth = DivideRoundUp(weights_shape.i, 4);
ConvGeneric::ConvParams params = GuessBestParams(
gpu_info, definition, src_depth, dst_depth, true, true, true, dst_shape);
params.block_size.x *= params.block_size.y;
params.block_size.y = 1;
work_group_size_.x *= work_group_size_.y;
work_group_size_.y = 1;
return params;
}
ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC* dst_shape) {
ConvGeneric result(definition, attr, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadData(attr.weights, attr.bias);
return result;
}
ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info,
const OperationDef& definition,
const FullyConnectedAttributes& attr,
const BHWC* dst_shape) {
ConvGeneric result(definition, attr, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadData(attr.weights, attr.bias);
return result;
}
ConvGeneric CreateConvGenericDynamicWeights(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC& weights_shape,
const BHWC* dst_shape) {
ConvGeneric result(definition, attr, weights_shape, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadBias(attr.bias);
return result;
}
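// Batched matmul is expressed as a pointwise convolution; since a matmul has
// no bias term, a zero-filled bias tensor is uploaded to satisfy the kernel.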
ConvGeneric CreateConvGenericBatchedMatMul(const GpuInfo& gpu_info,
const OperationDef& definition,
const OHWI& weights_shape,
const BHWC* dst_shape) {
ConvGeneric result(definition);
result.conv_params_ = result.GuessBestParamsPointwise(
gpu_info, definition, weights_shape, dst_shape);
result.GenerateCode(gpu_info);
tflite::gpu::Tensor<Linear, DataType::FLOAT32> biases;
biases.shape = Linear(weights_shape.o);
biases.data.resize(weights_shape.o, 0.0f);
result.UploadBias(biases);
return result;
}
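// Winograd 4x4-to-6x6 path: the convolution weights are repacked via
// UploadDataForWinograd4x4To6x6 and the operation runs as a pointwise
// convolution over the transformed tiles.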
ConvGeneric CreateConvGenericWino4x4To6x6(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC* dst_shape) {
ConvGeneric result(definition);
result.conv_params_ = result.GuessBestParamsPointwise(
gpu_info, definition, attr.weights.shape, dst_shape);
result.GenerateCode(gpu_info);
result.UploadDataForWinograd4x4To6x6(attr.weights);
return result;
}
ConvGeneric CreateConvGeneric3D(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution3DAttributes& attr,
const BHWDC* dst_shape) {
ConvGeneric result(definition, attr, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadWeights(attr.weights);
result.UploadBias(attr.bias);
return result;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_generic_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvGeneric1x1SimpleWeights) {
const auto status = ConvGeneric1x1SimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGeneric1x1) {
const auto status = ConvGeneric1x1Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGenericSimpleWeights) {
const auto status = ConvGenericSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGeneric) {
const auto status = ConvGenericTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGenericGrouped) {
const auto status = ConvGenericGroupedTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/conv_generic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
986cfe0f-77b0-4b65-9c58-58db0f70470c | cpp | tensorflow/tensorflow | softmax1x1 | tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.cc | tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
namespace {
std::string MakeAccOp(OperationType op_type, const std::string& a,
const std::string& b) {
if (op_type == OperationType::ADD) {
return a + " = " + a + " + " + b;
} else if (op_type == OperationType::MAXIMUM) {
return a + " = max(" + a + ", " + b + ")";
} else {
return a;
}
}
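// Emits a staged tree reduction over local memory. The stage sizes multiply to
// the reduction (work group) size; after the last stage every invocation reads
// the combined value back from loc_mem[0].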
std::string GetReduceCode(const std::string& value, OperationType op_type,
int group_reduction_size) {
std::vector<int> stages;
if (group_reduction_size == 1024) {
stages = {8, 8, 4, 4};
} else if (group_reduction_size == 512) {
stages = {8, 8, 8};
} else if (group_reduction_size == 256) {
stages = {8, 8, 4};
} else if (group_reduction_size == 128) {
stages = {8, 4, 4};
} else if (group_reduction_size == 64) {
stages = {8, 8};
} else if (group_reduction_size == 32) {
stages = {8, 4};
} else if (group_reduction_size == 16) {
stages = {4, 4};
} else if (group_reduction_size <= 8) {
stages = {group_reduction_size};
}
std::string c;
c += " LOCAL_MEM_BARRIER;\n";
c += " loc_mem[tid] = " + value + ";\n";
int stride = 1;
for (int i = 0; i < stages.size(); ++i) {
const bool last_stage = i == stages.size() - 1;
const std::string condition =
last_stage ? "tid == 0"
: "tid % " + std::to_string(stride * stages[i]) + " == 0";
const std::string location = last_stage ? "loc_mem[0]" : "loc_mem[tid]";
c += " LOCAL_MEM_BARRIER;\n";
c += " if (" + condition + ") {\n";
for (int j = 1; j < stages[i]; ++j) {
c += " " +
MakeAccOp(op_type, value,
"loc_mem[tid + " + std::to_string(stride * j) + "]") +
";\n";
}
c += " " + location + " = " + value + ";\n";
c += " }\n";
stride *= stages[i];
}
c += " LOCAL_MEM_BARRIER;\n";
c += " " + value + " = loc_mem[0];\n";
return c;
}
}
Softmax1x1::Softmax1x1(const OperationDef& definition, const GpuInfo& gpu_info,
const BHWC& shape)
: GPUOperation(definition) {
if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno7xx()) {
work_group_size_ = int3(512, 1, 1);
} else if (gpu_info.IsMali()) {
work_group_size_ = int3(1024, 1, 1);
} else {
work_group_size_ = int3(128, 1, 1);
}
const int slices = DivideRoundUp(shape.c, 4);
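  // Halve the reduction width while it is at least twice the slice count, and
  // again while it reaches the device limit for the X dimension.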
while (work_group_size_.x >= slices * 2) {
work_group_size_.x /= 2;
}
while (work_group_size_.x >= gpu_info.GetMaxWorkGroupSizeForX()) {
work_group_size_.x /= 2;
}
code_ = GetSoftmaxKernelCode(definition_);
}
Softmax1x1::Softmax1x1(Softmax1x1&& kernel) : GPUOperation(std::move(kernel)) {}
Softmax1x1& Softmax1x1::operator=(Softmax1x1&& kernel) {
if (this != &kernel) {
GPUOperation::operator=(std::move(kernel));
}
return *this;
}
std::string Softmax1x1::GetSoftmaxKernelCode(const OperationDef& op_def) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddFloat("mask_x");
args_.AddFloat("mask_y");
args_.AddFloat("mask_z");
args_.AddFloat("mask_w");
std::string c;
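  // The generated kernel uses one work group per output (X, Y) position:
  // phase 1 finds the channel maximum, phase 2 accumulates sum(exp(x - max)),
  // and phase 3 writes exp(x - max) / sum for this invocation's slice.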
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GROUP_ID_1;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " if (B >= args.dst_tensor.Batch()) return;\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GROUP_ID_1;\n";
}
c += " int Y = GROUP_ID_2;\n";
c += " if (X >= args.dst_tensor.Width()) return;\n";
c += " if (Y >= args.dst_tensor.Height()) return;\n";
c += " float4 mask = INIT_FLOAT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c +=
" float4 maxx4 = INIT_FLOAT4(args.src_tensor.Read<float>(X, Y, 0).x);\n";
c += " int tid = LOCAL_ID_0;\n";
const int group_reduction_size = work_group_size_.x;
c += " for (int s = tid; s < args.src_tensor.Slices(); s += " +
std::to_string(group_reduction_size) + ") {\n";
c += " float4 mask_a = s == args.src_tensor.Slices() - 1 ? mask : "
"INIT_FLOAT4(1.0f);\n";
c += " float4 mask_b = INIT_FLOAT4(1.0f) - mask_a;\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, s);\n";
c += " src = src * mask_a + mask_b * src.x;\n";
c += " maxx4 = max(maxx4, src);\n";
c += " }\n";
c += " float maximum = max(maxx4.x, maxx4.y);\n";
c += " maximum = max(maximum, maxx4.z);\n";
c += " maximum = max(maximum, maxx4.w);\n";
c += " __local float loc_mem[" + std::to_string(group_reduction_size) +
"];\n";
c += GetReduceCode("maximum", OperationType::MAXIMUM, group_reduction_size);
c += " float sum = 0.0f;\n";
c += " for (int s = tid; s < args.src_tensor.Slices(); s += " +
std::to_string(group_reduction_size) + ") {\n";
c += " float4 mask_temp = s == args.src_tensor.Slices() - 1 ? mask : "
"INIT_FLOAT4(1.0f);\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, s) - "
"INIT_FLOAT4(maximum);\n";
c += " sum += dot(mask_temp, exp(src));\n";
c += " }\n";
c += GetReduceCode("sum", OperationType::ADD, group_reduction_size);
c += " sum = 1.0f / sum;\n";
c += " int dst_s = GLOBAL_ID_0;\n";
c += " if (dst_s < args.dst_tensor.Slices()) {\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, dst_s) - "
"INIT_FLOAT4(maximum);\n";
c += " FLT4 res = TO_FLT4(exp(src) * sum);\n";
c += " args.dst_tensor.Write(res, X, Y, dst_s);\n";
c += " }\n";
c += "}\n";
return c;
}
absl::Status Softmax1x1::BindArguments(ArgumentsBinder* args) {
float4 mask = GetMaskForLastPlane(src_[0]->Channels());
RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x));
RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y));
RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z));
RETURN_IF_ERROR(args->SetFloat("mask_w", mask.w));
return absl::OkStatus();
}
int3 Softmax1x1::GetGridSize() const {
return int3(dst_[0]->Slices(), dst_[0]->Width() * dst_[0]->Batch(),
dst_[0]->Height());
}
Softmax1x1 CreateSoftmax1x1(const OperationDef& definition,
const GpuInfo& gpu_info, const BHWC& shape) {
return Softmax1x1(definition, gpu_info, shape);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/softmax_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Softmax1x1) {
auto status = Softmax1x1Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Softmax1x1BigNumber) {
auto status = Softmax1x1BigNumberTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
529b53b2-4cd1-449f-bb4f-ad35f78a6e09 | cpp | tensorflow/tensorflow | select_v2 | tensorflow/lite/delegates/gpu/common/tasks/select_v2.cc | tensorflow/lite/delegates/gpu/cl/kernels/select_v2_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/select_v2.h"
#include <string>
#include <utility>
namespace tflite {
namespace gpu {
std::string GetSelectV2Code(const OperationDef& op_def,
const SelectV2Attributes& attr, GPUOperation* op) {
op->AddSrcTensor("cond_tensor", op_def.src_tensors[0]);
op->AddSrcTensor("true_tensor", op_def.src_tensors[1]);
op->AddSrcTensor("else_tensor", op_def.src_tensors[2]);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.cond_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += attr.broadcast_true ? "" : " args.true_tensor.SetBatchRef(B);\n";
c += attr.broadcast_false ? "" : " args.else_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " FLT4 true_val, else_val;\n";
if (!attr.broadcast_true) {
c += " true_val = args.true_tensor.Read(X, Y, Z);\n";
} else {
c += " true_val = INIT_FLT4(args.true_tensor.Read(0, 0, 0, 0).x);\n";
}
if (!attr.broadcast_false) {
c += " else_val = args.else_tensor.Read(X, Y, Z);\n";
} else {
c += " else_val = INIT_FLT4(args.else_tensor.Read(0, 0, 0, 0).x);\n";
}
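  // should_gather_rows: when the condition tensor's slice count differs from
  // the destination's (or both values are broadcast scalars), the condition is
  // read once per row, indexed by X, instead of per element.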
c += " bool should_gather_rows = \n";
if (attr.broadcast_true && attr.broadcast_false) {
c += " true;\n";
} else {
c += " args.dst_tensor.Slices() != args.cond_tensor.Slices();\n";
}
c += " FLT4 res;\n";
if (attr.scalar_cond) {
c += " bool cond = args.cond_tensor.Read<bool>(0, 0, 0).x;\n";
c += " res = cond ? true_val : else_val;\n";
} else {
c += " if (should_gather_rows) {\n";
c += " bool cond = args.cond_tensor.Read<bool>(X, 0, 0).x;\n";
c += " res = cond ? true_val : else_val;\n";
c += " } else {\n";
c += " bool4 cond = args.cond_tensor.Read<bool>(0, Y, Z);\n";
c += " res = true_val;\n";
c += " res.x = cond.x ? true_val.x : else_val.x;\n";
c += " res.y = cond.y ? true_val.y : else_val.y;\n";
c += " res.z = cond.z ? true_val.z : else_val.z;\n";
c += " res.w = cond.w ? true_val.w : else_val.w;\n";
    c += " }\n";
}
c += " args.dst_tensor.Write(res, X, Y, Z);\n";
c += "}\n";
return c;
}
GPUOperation CreateSelectV2(const OperationDef& definition,
const SelectV2Attributes& attr) {
GPUOperation op(definition);
op.code_ = GetSelectV2Code(definition, attr, &op);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
op.args_.AddInt("broadcast_true", attr.broadcast_true);
op.args_.AddInt("broadcast_else", attr.broadcast_false);
return op;
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/select_v2_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, SelectV2) {
auto status = SelectV2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2Batch) {
auto status = SelectV2BatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2Channels) {
auto status = SelectV2ChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2ChannelsBatch) {
auto status = SelectV2ChannelsBatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2BroadcastTrue) {
auto status = SelectV2BroadcastTrueTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2BroadcastFalse) {
auto status = SelectV2BroadcastFalseTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2BroadcastBoth) {
auto status = SelectV2BroadcastBothTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2ChannelsBroadcastFalse) {
auto status = SelectV2ChannelsBroadcastFalseTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/select_v2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/select_v2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e2c616a-8376-4f45-a80a-b75bf4ec95f6 | cpp | tensorflow/tensorflow | conv_weights_converter | tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.cc | tensorflow/lite/delegates/gpu/cl/kernels/conv_weights_converter_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.h"
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
ConverterToConvWeights::ConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout)
: GPUOperation(definition),
weights_desc_(weights_desc),
input_layout_(input_layout) {
code_ = GetConverterToConvWeightsCode();
}
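// Builds a kernel that repacks convolution weights from the source layout
// (OHWI or HWIO) into the layout requested by weights_desc_. Each invocation
// handles a 4x4 block of output-by-input channels at one spatial position.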
std::string ConverterToConvWeights::GetConverterToConvWeightsCode() {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
args_.AddFloat("mask_x");
args_.AddFloat("mask_y");
args_.AddFloat("mask_z");
args_.AddFloat("mask_w");
args_.AddInt("out_ch");
args_.AddInt("out_ch_x4_groups");
args_.AddInt("in_ch");
args_.AddInt("in_ch_x4_groups");
args_.AddInt("kernel_width");
args_.AddInt("kernel_height");
args_.AddInt("kernel_spatial_size");
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
std::vector<int32_t> remap(weights_desc_.spatial_remap.size());
for (int i = 0; i < remap.size(); ++i) {
remap[i] = weights_desc_.spatial_remap[i];
}
BufferDescriptor desc;
desc.element_type = DataType::INT32;
desc.element_size = 1;
desc.memory_type = MemoryType::GLOBAL;
desc.size = remap.size() * sizeof(int32_t);
desc.data.resize(desc.size);
std::memcpy(desc.data.data(), remap.data(), desc.size);
args_.AddObject("spatial_remap",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
std::string c;
c += "MAIN_FUNCTION($0) {\n";
c += " int O = GLOBAL_ID_0;\n";
c += " int I = GLOBAL_ID_1;\n";
c += " int spatial_linear = GLOBAL_ID_2;\n";
c += " if (O >= args.out_ch_x4_groups) return;\n";
c += " if (I >= args.in_ch_x4_groups) return;\n";
c += " if (spatial_linear >= args.kernel_spatial_size) return;\n";
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
c += " int linear_remap = args.spatial_remap.Read(spatial_linear);\n";
c += " int W = linear_remap % args.kernel_width;\n";
c += " int H = linear_remap / args.kernel_width;\n";
} else {
c += " int W = spatial_linear % args.kernel_width;\n";
c += " int H = spatial_linear / args.kernel_width;\n";
}
c += " FLT4 v0 = INIT_FLT4(0.0f);\n";
c += " FLT4 v1 = INIT_FLT4(0.0f);\n";
c += " FLT4 v2 = INIT_FLT4(0.0f);\n";
c += " FLT4 v3 = INIT_FLT4(0.0f);\n";
if (input_layout_ == Layout::OHWI) {
c += " if (O * 4 < args.out_ch) {\n";
c += " v0 = args.src_tensor.Read(W, H, I, O * 4);\n";
c += " }\n";
c += " if (O * 4 + 1 < args.out_ch) {\n";
c += " v1 = args.src_tensor.Read(W, H, I, O * 4 + 1);\n";
c += " }\n";
c += " if (O * 4 + 2 < args.out_ch) {\n";
c += " v2 = args.src_tensor.Read(W, H, I, O * 4 + 2);\n";
c += " }\n";
c += " if (O * 4 + 3 < args.out_ch) {\n";
c += " v3 = args.src_tensor.Read(W, H, I, O * 4 + 3);\n";
c += " }\n";
c += " if (I == args.src_tensor.Slices() - 1) {\n";
c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c += " v0 *= mask;\n";
c += " v1 *= mask;\n";
c += " v2 *= mask;\n";
c += " v3 *= mask;\n";
c += " }\n";
} else if (input_layout_ == Layout::HWIO) {
c += " if (I * 4 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v0 = args.src_tensor.Read(I * 4, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 1 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v1 = args.src_tensor.Read(I * 4 + 1, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 2 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v2 = args.src_tensor.Read(I * 4 + 2, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 3 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v3 = args.src_tensor.Read(I * 4 + 3, W, O, H);\n";
c += " }\n";
c += " if (O == args.src_tensor.Slices() - 1) {\n";
c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c += " v0 *= mask;\n";
c += " v1 *= mask;\n";
c += " v2 *= mask;\n";
c += " v3 *= mask;\n";
c += " }\n";
}
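  // Transpose the 4x4 channel micro-block when the source layout and the
  // requested weights layout disagree on whether input or output channels are
  // packed innermost.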
const bool need_transpose =
(input_layout_ == Layout::HWIO && weights_desc_.IsO4I4()) ||
(input_layout_ == Layout::OHWI && weights_desc_.IsI4O4());
if (need_transpose) {
c += " FLT4 r0 = INIT_FLT4v4(v0.x, v1.x, v2.x, v3.x);\n";
c += " FLT4 r1 = INIT_FLT4v4(v0.y, v1.y, v2.y, v3.y);\n";
c += " FLT4 r2 = INIT_FLT4v4(v0.z, v1.z, v2.z, v3.z);\n";
c += " FLT4 r3 = INIT_FLT4v4(v0.w, v1.w, v2.w, v3.w);\n";
} else {
c += " FLT4 r0 = v0;\n";
c += " FLT4 r1 = v1;\n";
c += " FLT4 r2 = v2;\n";
c += " FLT4 r3 = v3;\n";
}
if (weights_desc_.layout ==
WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4 ||
weights_desc_.layout ==
WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4) {
AddDstTensor("dst_tensor0", definition_.dst_tensors[0]);
AddDstTensor("dst_tensor1", definition_.dst_tensors[1]);
AddDstTensor("dst_tensor2", definition_.dst_tensors[2]);
AddDstTensor("dst_tensor3", definition_.dst_tensors[3]);
c += " int yc = spatial_linear * args.in_ch_x4_groups + I;\n";
c += " args.dst_tensor0.Write2D(r0, O, yc);\n";
c += " args.dst_tensor1.Write2D(r1, O, yc);\n";
c += " args.dst_tensor2.Write2D(r2, O, yc);\n";
c += " args.dst_tensor3.Write2D(r3, O, yc);\n";
c += "}\n";
} else {
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
c += " int OUTPUT_GROUP_SIZE = " +
std::to_string(weights_desc_.GetOutputGroupSize()) + ";\n";
c += " int d_index = (O * 4) / (OUTPUT_GROUP_SIZE * 4);\n";
c += " int k_index = ((O * 4) % (OUTPUT_GROUP_SIZE * 4)) / 4;\n";
std::string index;
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
index =
"(d_index * args.in_ch_x4_groups + I) * args.kernel_spatial_size + "
"spatial_linear";
} else if (weights_desc_.layout == WeightsLayout::kOSpatialIOGroupI4O4 ||
weights_desc_.layout == WeightsLayout::kOSpatialIOGroupO4I4) {
index =
"(d_index * args.kernel_spatial_size + spatial_linear) * "
"args.in_ch_x4_groups + I";
}
c += " int dst_offset = (" + index + ") * OUTPUT_GROUP_SIZE + k_index;\n";
c += " args.dst_tensor.WriteLinear(r0, dst_offset * 4 + 0);\n";
c += " args.dst_tensor.WriteLinear(r1, dst_offset * 4 + 1);\n";
c += " args.dst_tensor.WriteLinear(r2, dst_offset * 4 + 2);\n";
c += " args.dst_tensor.WriteLinear(r3, dst_offset * 4 + 3);\n";
c += "}\n";
}
return c;
}
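// For HWIO sources the tensor axes are reinterpreted: batch holds the kernel
// height, height the kernel width, width the input channels and channels the
// output channels. OHWI sources map directly.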
OHWI ConverterToConvWeights::GetWeightsSize() const {
int output_channels = 0;
int input_channels = 0;
int kernel_width = 0;
int kernel_height = 0;
if (input_layout_ == Layout::HWIO) {
output_channels = src_[0]->Channels();
input_channels = src_[0]->Width();
kernel_width = src_[0]->Height();
kernel_height = src_[0]->Batch();
} else if (input_layout_ == Layout::OHWI) {
output_channels = src_[0]->Batch();
input_channels = src_[0]->Channels();
kernel_width = src_[0]->Width();
kernel_height = src_[0]->Height();
}
return OHWI(output_channels, kernel_height, kernel_width, input_channels);
}
absl::Status ConverterToConvWeights::BindArguments(ArgumentsBinder* args) {
const auto& weights_shape = GetWeightsSize();
const int output_channels_x4_groups = DivideRoundUp(
AlignByN(weights_shape.o, 4 * weights_desc_.GetOutputGroupSize()), 4);
RETURN_IF_ERROR(args->SetInt("out_ch", weights_shape.o));
RETURN_IF_ERROR(args->SetInt("out_ch_x4_groups", output_channels_x4_groups));
RETURN_IF_ERROR(args->SetInt("in_ch", weights_shape.i));
RETURN_IF_ERROR(
args->SetInt("in_ch_x4_groups", DivideRoundUp(weights_shape.i, 4)));
RETURN_IF_ERROR(args->SetInt("kernel_width", weights_shape.w));
RETURN_IF_ERROR(args->SetInt("kernel_height", weights_shape.h));
RETURN_IF_ERROR(
args->SetInt("kernel_spatial_size", weights_shape.w * weights_shape.h));
float4 mask = GetMaskForLastPlane(src_[0]->Channels());
RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x));
RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y));
RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z));
return args->SetFloat("mask_w", mask.w);
}
int3 ConverterToConvWeights::GetGridSize() const {
const auto& weights_shape = GetWeightsSize();
const int out_group_size = weights_desc_.GetOutputGroupSize();
const int grid_x =
DivideRoundUp(AlignByN(weights_shape.o, 4 * out_group_size), 4);
const int grid_y = DivideRoundUp(weights_shape.i, 4);
const int grid_z = weights_shape.w * weights_shape.h;
return int3(grid_x, grid_y, grid_z);
}
ConverterToConvWeights CreateConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout) {
return ConverterToConvWeights(definition, weights_desc, input_layout);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4) {
const auto status = ConverterToConvWeights1x1OutX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4Unaligned) {
const auto status = ConverterToConvWeights1x1OutX4UnalignedTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX2) {
const auto status = ConverterToConvWeights1x1OutX2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeightsOutX2) {
const auto status = ConverterToConvWeightsOutX2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvTransposedWeights4x4) {
const auto status = ConverterToConvTransposedWeights4x4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights4xTextures) {
const auto status = ConverterToConvWeights4xTexturesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/conv_weights_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a2ad4db8-8d26-408c-a118-7996be926c80 | cpp | tensorflow/tensorflow | winograd | tensorflow/lite/delegates/gpu/common/tasks/winograd.cc | tensorflow/lite/delegates/gpu/cl/kernels/winograd_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/winograd.h"
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
#include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
namespace tflite {
namespace gpu {
namespace {
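// Packs a transform matrix into a constant kernel buffer, converting the
// values to half precision when the requested data type is not FLOAT32.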
void VectorToKernelBufferDesc(const std::vector<float>& data,
DataType data_type,
BufferDescriptor* buffer_desc) {
buffer_desc->element_type = data_type;
buffer_desc->element_size = 1;
buffer_desc->memory_type = MemoryType::CONSTANT;
buffer_desc->attributes.push_back("kernel_global_space");
buffer_desc->size = SizeOf(data_type) * data.size();
buffer_desc->data.resize(buffer_desc->size);
if (data_type == DataType::FLOAT32) {
memcpy(buffer_desc->data.data(), data.data(), buffer_desc->size);
} else {
half* hf_ptr = reinterpret_cast<half*>(buffer_desc->data.data());
for (int i = 0; i < data.size(); ++i) {
hf_ptr[i] = data[i];
}
}
}
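// Input transform of the Winograd 4x4-to-6x6 scheme: each 6x6 input patch
// (one 4x4 output tile plus halo) is multiplied by Bt along both axes and the
// resulting 36 values are written out per tile.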
std::string GetKernelWinograd4x4To36(const GpuInfo& gpu_info,
const OperationDef& op_def) {
std::string c;
const auto src_desc = op_def.src_tensors[0];
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = (linear_id / args.dst_tensor.Batch()) * 4;\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0 * 4;\n";
}
c += R"(
int Y = GLOBAL_ID_1 * 4;
int S = GLOBAL_ID_2;
if (X / 4 >= args.tiles_x || Y / 4 >= args.tiles_y) return;
FLT4 I[6][6];
for (int y = 0; y < 6; ++y) {
for (int x = 0; x < 6; ++x) {
I[y][x] = INIT_FLT4(0.0f);
}
}
)";
for (int y = 0; y < 6; ++y) {
const std::string s_y = std::to_string(y);
c += " {\n";
c += " int coord_y = Y + " + s_y + " + args.padding_y;\n";
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y = coord_y >= 0 && coord_y < "
"args.src_tensor.Height();\n";
c += " coord_y = clamp(coord_y, 0, args.src_tensor.Height() - 1);\n";
}
for (int x = 0; x < 6; ++x) {
const std::string s_x = std::to_string(x);
c += " {\n";
c += " int coord_x = X + " + s_x + " + args.padding_x;\n";
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x = coord_x >= 0 && coord_x < "
"args.src_tensor.Width();\n";
c += " coord_x = clamp(coord_x, 0, args.src_tensor.Width()-1);\n";
}
std::string multiplier;
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info) &&
!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
multiplier = " * INIT_FLT(in_y && in_x)";
} else if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
multiplier = " * INIT_FLT(in_x)";
} else if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
multiplier = " * INIT_FLT(in_y)";
}
c += " FLT4 src = args.src_tensor.Read(coord_x, coord_y, S)" +
multiplier + ";\n";
c += " I[0][" + s_x + "] += args.Bt.Read(" + std::to_string(y) +
") * src;\n";
c += " I[1][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 6) +
") * src;\n";
c += " I[2][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 12) +
") * src;\n";
c += " I[3][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 18) +
") * src;\n";
c += " I[4][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 24) +
") * src;\n";
c += " I[5][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 30) +
") * src;\n";
c += " }\n";
}
c += " }\n";
}
c += R"(
int dst_x = Y / 4 * args.tiles_x + X / 4;
for (int y = 0; y < 6; ++y) {
FLT4 value = I[y][0] + args.Bt.Read(2) * I[y][2] + args.Bt.Read(4) * I[y][4];
args.dst_tensor.Write(value, dst_x, y * 6 + 0, S);
value = args.Bt.Read(7) * I[y][1] + args.Bt.Read(8) * I[y][2] + args.Bt.Read(9) * I[y][3] + args.Bt.Read(10) * I[y][4];
args.dst_tensor.Write(value, dst_x, y * 6 + 1, S);
value = args.Bt.Read(13) * I[y][1] + args.Bt.Read(14) * I[y][2] + args.Bt.Read(15) * I[y][3] + args.Bt.Read(16) * I[y][4];
args.dst_tensor.Write(value, dst_x, y * 6 + 2, S);
value = args.Bt.Read(19) * I[y][1] + args.Bt.Read(20) * I[y][2] + args.Bt.Read(21) * I[y][3] + args.Bt.Read(22) * I[y][4];
args.dst_tensor.Write(value, dst_x, y * 6 + 3, S);
value = args.Bt.Read(25) * I[y][1] + args.Bt.Read(26) * I[y][2] + args.Bt.Read(27) * I[y][3] + args.Bt.Read(28) * I[y][4];
args.dst_tensor.Write(value, dst_x, y * 6 + 4, S);
value = args.Bt.Read(31) * I[y][1] + args.Bt.Read(33) * I[y][3] + I[y][5];
args.dst_tensor.Write(value, dst_x, y * 6 + 5, S);
}
}
)";
return c;
}
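// Output transform: the 36 values per tile are reduced with At, the bias is
// added and up to a 4x4 block of output pixels is written with per-pixel
// bounds checks.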
std::string GetKernelWinograd36To4x4(const OperationDef& op_def) {
std::string c;
const auto src_desc = op_def.src_tensors[0];
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int tile_id = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int tile_id = GLOBAL_ID_0;\n";
}
c += R"(
int Z = GLOBAL_ID_2;
int tiles_count_x = (args.dst_tensor.Width() + 3) / 4;
int tile_x = (tile_id % tiles_count_x) * 4;
int tile_y = (tile_id / tiles_count_x) * 4;
if (tile_x >= args.dst_tensor.Width() || tile_y >= args.dst_tensor.Height()) return;
FLT4 I[4][6];
for (int y = 0; y < 4; ++y) {
for (int x = 0; x < 6; ++x) {
I[y][x] = INIT_FLT4(0.0f);
}
}
for (int y = 0; y < 6; ++y) {
for (int x = 0; x < 6; ++x) {
FLT4 src = args.src_tensor.Read(tile_id, y * 6 + x, Z);
I[0][x] += src * args.At.Read(y);
I[1][x] += src * args.At.Read(y + 6);
I[2][x] += src * args.At.Read(y + 12);
I[3][x] += src * args.At.Read(y + 18);
}
}
FLT4 bias_val = args.biases.Read(Z);
for (int y = 0; y < 4; ++y) {
FLT4 t0 = I[y][1] + I[y][2];
FLT4 t1 = I[y][3] + I[y][4];
if (tile_x < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) {
FLT4 value = I[y][0] + t0 + t1 + bias_val;
args.dst_tensor.Write(value, tile_x, tile_y + y, Z);
}
FLT4 t2 = I[y][1] - I[y][2];
FLT4 t3 = I[y][3] - I[y][4];
if (tile_x + 1 < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) {
FLT4 value = t2 * args.At.Read(7) + t3 * args.At.Read(9) + bias_val;
args.dst_tensor.Write(value, tile_x + 1, tile_y + y, Z);
}
if (tile_x + 2 < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) {
FLT4 value = t0 * args.At.Read(13) + t1 * args.At.Read(15) + bias_val;
args.dst_tensor.Write(value, tile_x + 2, tile_y + y, Z);
}
if (tile_x + 3 < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) {
FLT4 value = t2 * args.At.Read(19) + t3 * args.At.Read(21) + I[y][5] + bias_val;
args.dst_tensor.Write(value, tile_x + 3, tile_y + y, Z);
}
}
}
)";
return c;
}
}
int3 Winograd4x4To36::GetGridSize() const {
int new_width =
src_[0]->Width() + padding_.prepended.w + padding_.appended.w - 2;
int new_height =
src_[0]->Height() + padding_.prepended.h + padding_.appended.h - 2;
int tiles_x = DivideRoundUp(new_width, 4);
int tiles_y = DivideRoundUp(new_height, 4);
return int3(tiles_x * dst_[0]->Batch(), tiles_y, src_[0]->Slices());
}
absl::Status Winograd4x4To36::BindArguments(ArgumentsBinder* args) {
int new_width =
src_[0]->Width() + padding_.prepended.w + padding_.appended.w - 2;
int new_height =
src_[0]->Height() + padding_.prepended.h + padding_.appended.h - 2;
int tiles_x = DivideRoundUp(new_width, 4);
int tiles_y = DivideRoundUp(new_height, 4);
RETURN_IF_ERROR(args->SetInt("tiles_x", tiles_x));
RETURN_IF_ERROR(args->SetInt("tiles_y", tiles_y));
return absl::OkStatus();
}
Winograd4x4To36 CreateWinograd4x4To36(const OperationDef& definition,
const Padding2D& padding,
const GpuInfo& gpu_info) {
Winograd4x4To36 desc(definition, padding);
desc.code_ = GetKernelWinograd4x4To36(gpu_info, definition);
desc.AddSrcTensor("src_tensor", definition.src_tensors[0]);
desc.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
desc.args_.AddInt("padding_x", -padding.prepended.w);
desc.args_.AddInt("padding_y", -padding.prepended.h);
desc.args_.AddInt("tiles_x");
desc.args_.AddInt("tiles_y");
BufferDescriptor buffer_desc;
VectorToKernelBufferDesc(BtMatrixForWinograd4x4To6x6(),
definition.GetDataType(), &buffer_desc);
desc.args_.AddObject(
"Bt", std::make_unique<BufferDescriptor>(std::move(buffer_desc)));
desc.work_group_size_ = int3(8, 4, 1);
return desc;
}
Winograd4x4To36TileX6::Winograd4x4To36TileX6(const OperationDef& definition,
const Padding2D& padding,
const GpuInfo& gpu_info)
: GPUOperation(definition), padding_(padding) {
work_group_size_ = int3(32, 1, 1);
code_ = GetWinograd4x4To36TileX6Code(definition_, gpu_info);
if (gpu_info.IsAdreno()) {
compiler_options_.push_back(CompilerOptions::kAdrenoMoreWaves);
}
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
}
std::string Winograd4x4To36TileX6::GetWinograd4x4To36TileX6Code(
const OperationDef& op_def, const GpuInfo& gpu_info) {
std::string c;
const auto& src_desc = op_def.src_tensors[0];
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddInt("padding_x");
args_.AddInt("padding_y");
args_.AddInt("tiles_total");
args_.AddInt("tiles_x");
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int DST_X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int DST_X = GLOBAL_ID_0;\n";
}
c += " int DST_Y = GLOBAL_ID_1;\n";
c += " int DST_Z = GLOBAL_ID_2;\n";
c += " if (DST_X >= args.tiles_total || DST_Y >= 6 || DST_Z >= "
"args.dst_tensor.Slices()) {\n";
c += " return; \n";
c += " }\n";
c += " int tile_x = (DST_X % args.tiles_x) * 4;\n";
c += " int tile_y = (DST_X / args.tiles_x) * 4;\n";
c += " FLT4 I0, I1, I2, I3, I4, I5;\n";
c += " FLT bt_ar[6];\n";
c += " FLT4 t0 = args.bt_non_uniform.Read(DST_Y * 2 + 0);\n";
c += " FLT4 t1 = args.bt_non_uniform.Read(DST_Y * 2 + 1);\n";
c += " DST_Y *= 6;\n";
c += " bt_ar[0] = t0.x;\n";
c += " bt_ar[1] = t0.y;\n";
c += " bt_ar[2] = t0.z;\n";
c += " bt_ar[3] = t0.w;\n";
c += " bt_ar[4] = t1.x;\n";
c += " bt_ar[5] = t1.y;\n";
auto read_src = [&](const std::string& src, const std::string& xs) {
std::string read_statement;
read_statement = "args.src_tensor.Read(xc" + xs + ", yc, DST_Z)";
std::string multiplier;
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
multiplier += " * m" + xs + "_x";
}
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
multiplier += " * INIT_FLT(iny)";
}
c += " FLT4 " + src + " = " + read_statement + multiplier + ";\n";
};
for (int x = 0; x < 6; ++x) {
const std::string xs = std::to_string(x);
c += " int xc" + xs + " = tile_x + args.padding_x + " + xs + ";\n";
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool inx" + xs + " = (xc" + xs + " >= 0 && xc" + xs +
" < args.src_tensor.Width());\n";
c += " FLT m" + xs + "_x = INIT_FLT(inx" + xs + ");\n";
c += " xc" + xs + " = clamp(xc" + xs +
", 0, args.src_tensor.Width() - 1);\n";
}
}
const bool manual_unroll =
!(op_def.precision == CalculationsPrecision::F32 && gpu_info.IsMali());
if (manual_unroll) {
c += " {\n";
c += " int yc = tile_y + args.padding_y;\n";
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool iny = (yc >= 0 && yc < args.src_tensor.Height());\n";
c += " yc = clamp(yc, 0, args.src_tensor.Height() - 1);\n";
c += " FLT bt = bt_ar[0] * INIT_FLT(iny);\n";
} else {
c += " FLT bt = bt_ar[0];\n";
}
for (int x = 0; x < 6; ++x) {
const std::string xs = std::to_string(x);
const std::string src = "src" + xs;
read_src(src, xs);
c += " I" + xs + " = bt * " + src + ";\n";
}
c += " }\n";
for (int y = 1; y < 6; ++y) {
const std::string ys = std::to_string(y);
c += " {\n";
c += " int yc = tile_y + args.padding_y + (" + ys + ");\n";
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool iny = (yc >= 0 && yc < args.src_tensor.Height());\n";
c += " yc = clamp(yc, 0, args.src_tensor.Height() - 1);\n";
c += " FLT bt = bt_ar[" + ys + "] * INIT_FLT(iny);\n";
} else {
c += " FLT bt = bt_ar[" + ys + "];\n";
}
for (int x = 0; x < 6; ++x) {
const std::string xs = std::to_string(x);
const std::string src = "src" + xs;
read_src(src, xs);
c += " I" + xs + " += bt * " + src + ";\n";
}
c += " }\n";
}
} else {
c += " I0 = INIT_FLT4(0.0f);\n";
c += " I1 = INIT_FLT4(0.0f);\n";
c += " I2 = INIT_FLT4(0.0f);\n";
c += " I3 = INIT_FLT4(0.0f);\n";
c += " I4 = INIT_FLT4(0.0f);\n";
c += " I5 = INIT_FLT4(0.0f);\n";
c += " for (int y = 0; y < 6; ++y) {\n";
c += " int yc = tile_y + args.padding_y + y;\n";
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool iny = (yc >= 0 && yc < args.src_tensor.Height());\n";
c += " yc = clamp(yc, 0, args.src_tensor.Height() - 1);\n";
c += " FLT bt = bt_ar[y] * INIT_FLT(iny);\n";
} else {
c += " FLT bt = bt_ar[y];\n";
}
for (int x = 0; x < 6; ++x) {
const std::string xs = std::to_string(x);
const std::string src = "src" + xs;
read_src(src, xs);
c += " I" + xs + " += bt * " + src + ";\n";
}
c += " }\n";
}
c += " {\n";
c += " FLT4 r0 = I0 + args.Bt.Read(2) * I2 + args.Bt.Read(4) * I4;\n";
c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n";
c += " DST_Y++;\n";
c += " }\n";
c += " {\n";
c += " FLT4 r0 = args.Bt.Read(7) * I1 + args.Bt.Read(8) * I2 + "
"args.Bt.Read(9) * I3 + args.Bt.Read(10) * I4;\n";
c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n";
c += " DST_Y++;\n";
c += " }\n";
c += " {\n";
c += " FLT4 r0 = args.Bt.Read(13) * I1 + args.Bt.Read(14) * I2 + "
"args.Bt.Read(15) * I3 + args.Bt.Read(16) * I4;\n";
c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n";
c += " DST_Y++;\n";
c += " }\n";
c += " {\n";
c += " FLT4 r0 = args.Bt.Read(19) * I1 + args.Bt.Read(20) * I2 + "
"args.Bt.Read(21) * I3 + args.Bt.Read(22) * I4;\n";
c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n";
c += " DST_Y++;\n";
c += " }\n";
c += " {\n";
c += " FLT4 r0 = args.Bt.Read(25) * I1 + args.Bt.Read(26) * I2 + "
"args.Bt.Read(27) * I3 + args.Bt.Read(28) * I4;\n";
c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n";
c += " DST_Y++;\n";
c += " }\n";
c += " {\n";
c += " FLT4 r0 = args.Bt.Read(31) * I1 + args.Bt.Read(33) * I3 + I5;\n";
c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n";
c += " DST_Y++;\n";
c += " }\n";
c += "}\n";
return c;
}
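// Uploads Bt in two forms: a row-padded 6x8 linear tensor for the per-row
// reads in the generated kernel and the raw 6x6 matrix as a constant buffer.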
void Winograd4x4To36TileX6::UploadBt() {
tflite::gpu::Tensor<Linear, DataType::FLOAT32> bt_aligned;
bt_aligned.shape = Linear(6 * 8);
bt_aligned.data.resize(6 * 8);
auto bt_mat = BtMatrixForWinograd4x4To6x6();
for (int y = 0; y < 6; ++y) {
for (int x = 0; x < 6; ++x) {
bt_aligned.data[y * 8 + x] = bt_mat[y * 6 + x];
}
bt_aligned.data[y * 8 + 6] = 0.0f;
bt_aligned.data[y * 8 + 7] = 0.0f;
}
TensorDescriptor bt_tensor_desc = CreateConstantLinearTensorDescriptor(
definition_.src_tensors[0].GetDataType(),
definition_.src_tensors[0].GetStorageType(), bt_aligned);
args_.AddObject("bt_non_uniform", std::make_unique<TensorDescriptor>(
std::move(bt_tensor_desc)));
BufferDescriptor buffer_desc;
VectorToKernelBufferDesc(bt_mat, definition_.GetDataType(), &buffer_desc);
args_.AddObject("Bt",
std::make_unique<BufferDescriptor>(std::move(buffer_desc)));
}
int3 Winograd4x4To36TileX6::SelectBestWorkGroup(
const KernelInfo& kernel_info) const {
const std::vector<int3> wgs = {{8, 6, 4}, {8, 6, 2}, {4, 6, 2},
{4, 6, 2}, {2, 6, 2}, {2, 6, 1},
{1, 6, 1}, {1, 3, 1}, {1, 1, 1}};
return GetFirstSuitableWorkGroup(wgs, kernel_info.max_work_group_size);
}
absl::Status Winograd4x4To36TileX6::BindArguments(ArgumentsBinder* args) {
const int tiles_x = DivideRoundUp(
src_[0]->Width() + padding_.prepended.w + padding_.appended.w - 2, 4);
const int tiles_y = DivideRoundUp(
src_[0]->Height() + padding_.prepended.h + padding_.appended.h - 2, 4);
const int tiles_total = tiles_x * tiles_y;
RETURN_IF_ERROR(args->SetInt("padding_x", -padding_.prepended.w));
RETURN_IF_ERROR(args->SetInt("padding_y", -padding_.prepended.h));
RETURN_IF_ERROR(args->SetInt("tiles_total", tiles_total));
RETURN_IF_ERROR(args->SetInt("tiles_x", tiles_x));
return absl::OkStatus();
}
int3 Winograd4x4To36TileX6::GetGridSize() const {
const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
const int grid_y = 6;
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
void Winograd4x4To36TileX6::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
if (gpu_info.IsIntel()) {
work_groups->push_back(int3(4, 6, 1));
return;
}
switch (tuning_type) {
case TuningType::kExhaustive:
GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
return;
case TuningType::kFast:
default:
work_groups->push_back(SelectBestWorkGroup(kernel_info));
return;
}
}
Winograd4x4To36TileX6 CreateWinograd4x4To36TileX6(
const GpuInfo& gpu_info, const OperationDef& definition,
const Padding2D& padding) {
Winograd4x4To36TileX6 result(definition, padding, gpu_info);
result.UploadBt();
return result;
}
int3 Winograd36To4x4::GetGridSize() const {
return int3(src_[0]->Width() * dst_[0]->Batch(), 1, src_[0]->Slices());
}
Winograd36To4x4 CreateWinograd36To4x4(
const OperationDef& definition,
const tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases) {
Winograd36To4x4 desc(definition);
desc.code_ = GetKernelWinograd36To4x4(definition);
desc.AddSrcTensor("src_tensor", definition.src_tensors[0]);
desc.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
definition.src_tensors[0].GetDataType(),
definition.src_tensors[0].GetStorageType(), biases);
desc.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
BufferDescriptor buffer_desc;
VectorToKernelBufferDesc(AtMatrixForWinograd4x4To6x6(),
definition.GetDataType(), &buffer_desc);
desc.args_.AddObject(
"At", std::make_unique<BufferDescriptor>(std::move(buffer_desc)));
desc.work_group_size_ = int3(32, 1, 1);
return desc;
}
Winograd36To4x4Tile4x1::Winograd36To4x4Tile4x1(const OperationDef& definition,
const GpuInfo& gpu_info)
: GPUOperation(definition) {
work_group_size_ = int3(32, 1, 1);
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
code_ = GetWinograd36To4x4Tile4x1Code(definition_, gpu_info);
}
std::string Winograd36To4x4Tile4x1::GetWinograd36To4x4Tile4x1Code(
const OperationDef& op_def, const GpuInfo& gpu_info) {
std::string c;
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddInt("tiles_x");
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int tile_id = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int tile_id = GLOBAL_ID_0;\n";
}
c += " int DST_Y = GLOBAL_ID_1;\n";
c += " int DST_Z = GLOBAL_ID_2;\n";
c += " int tile_x = (tile_id % args.tiles_x) * 4;\n";
c += " int tile_y = (tile_id / args.tiles_x) * 4 + DST_Y;\n";
c += " if (tile_x >= args.dst_tensor.Width() || tile_y >= "
"args.dst_tensor.Height() || DST_Z >= args.dst_tensor.Slices()) {\n";
c += " return; \n";
c += " }\n";
c += " FLT4 I0, I1, I2, I3, I4, I5;\n";
c += " FLT at_ar[6];\n";
c += " FLT4 t00 = args.at_non_uniform.Read(DST_Y * 2 + 0);\n";
c += " FLT4 t01 = args.at_non_uniform.Read(DST_Y * 2 + 1);\n";
c += " at_ar[0] = t00.x;\n";
c += " at_ar[1] = t00.y;\n";
c += " at_ar[2] = t00.z;\n";
c += " at_ar[3] = t00.w;\n";
c += " at_ar[4] = t01.x;\n";
c += " at_ar[5] = t01.y;\n";
const bool manual_unroll =
!(op_def.precision == CalculationsPrecision::F32 && gpu_info.IsMali());
if (manual_unroll) {
c += " {\n";
c += " FLT at = at_ar[0];\n";
for (int x = 0; x < 6; ++x) {
const std::string yc = std::to_string(x);
const std::string src = "src" + std::to_string(x);
c += " FLT4 " + src + " = args.src_tensor.Read(tile_id, " + yc +
", DST_Z);\n";
c += " I" + std::to_string(x) + " = at * " + src + ";\n";
}
c += " }\n";
for (int y = 1; y < 6; ++y) {
c += " {\n";
c += " FLT at = at_ar[" + std::to_string(y) + "];\n";
for (int x = 0; x < 6; ++x) {
const std::string yc = std::to_string(y * 6 + x);
const std::string src = "src" + std::to_string(x);
c += " FLT4 " + src + " = args.src_tensor.Read(tile_id, " + yc +
", DST_Z);\n";
c += " I" + std::to_string(x) + " += at * " + src + ";\n";
}
c += " }\n";
}
} else {
c += " I0 = INIT_FLT4(0.0f);\n";
c += " I1 = INIT_FLT4(0.0f);\n";
c += " I2 = INIT_FLT4(0.0f);\n";
c += " I3 = INIT_FLT4(0.0f);\n";
c += " I4 = INIT_FLT4(0.0f);\n";
c += " I5 = INIT_FLT4(0.0f);\n";
c += " for (int y = 0; y < 6; ++y) {\n";
c += " FLT at = at_ar[y];\n";
for (int x = 0; x < 6; ++x) {
const std::string src = "src" + std::to_string(x);
c += " FLT4 " + src + " = args.src_tensor.Read(tile_id, y * 6 + " +
std::to_string(x) + ", DST_Z);\n";
c += " I" + std::to_string(x) + " += at * " + src + ";\n";
}
c += " }\n";
}
c += " FLT4 t0 = I1 + I2;\n";
c += " FLT4 t1 = I3 + I4;\n";
c += " FLT4 bias_val = args.biases.Read(DST_Z);\n";
c += " {\n";
c += " FLT4 r0 = I0 + t0 + t1 + bias_val;\n";
c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n";
c += " tile_x++;\n";
c += " }\n";
c += " FLT4 t2 = I1 - I2;\n";
c += " FLT4 t3 = I3 - I4;\n";
c += " if (tile_x < args.dst_tensor.Width()) {\n";
c +=
" FLT4 r0 = t2 * args.At.Read(7) + t3 * args.At.Read(9) + bias_val;\n";
c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n";
c += " tile_x++;\n";
c += " }\n";
c += " if (tile_x < args.dst_tensor.Width()) {\n";
c += " FLT4 r0 = t0 * args.At.Read(13) + t1 * args.At.Read(15) + "
"bias_val;\n";
c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n";
c += " tile_x++;\n";
c += " }\n";
c += " if (tile_x < args.dst_tensor.Width()) {\n";
c += " FLT4 r0 = t2 * args.At.Read(19) + t3 * args.At.Read(21) + I5 + "
"bias_val;\n";
c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n";
c += " tile_x++;\n";
c += " }\n";
c += "}\n";
return c;
}
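// Uploads At the same way: a row-padded 4x8 linear tensor for per-row reads
// plus the raw matrix as a constant buffer.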
void Winograd36To4x4Tile4x1::UploadAt() {
tflite::gpu::Tensor<Linear, DataType::FLOAT32> at_aligned;
at_aligned.shape = Linear(4 * 8);
at_aligned.data.resize(4 * 8);
auto at_mat = AtMatrixForWinograd4x4To6x6();
for (int y = 0; y < 4; ++y) {
for (int x = 0; x < 6; ++x) {
at_aligned.data[y * 8 + x] = at_mat[y * 6 + x];
}
at_aligned.data[y * 8 + 6] = 0.0f;
at_aligned.data[y * 8 + 7] = 0.0f;
}
TensorDescriptor at_tensor_desc = CreateConstantLinearTensorDescriptor(
definition_.src_tensors[0].GetDataType(),
definition_.src_tensors[0].GetStorageType(), at_aligned);
args_.AddObject("at_non_uniform", std::make_unique<TensorDescriptor>(
std::move(at_tensor_desc)));
BufferDescriptor buffer_desc;
VectorToKernelBufferDesc(at_mat, definition_.GetDataType(), &buffer_desc);
args_.AddObject("At",
std::make_unique<BufferDescriptor>(std::move(buffer_desc)));
}
int3 Winograd36To4x4Tile4x1::SelectBestWorkGroup(
const KernelInfo& kernel_info) const {
const std::vector<int3> wgs = {{32, 4, 2}, {16, 4, 2}, {16, 4, 1},
{8, 4, 1}, {4, 4, 1}, {2, 4, 1},
{1, 4, 1}, {1, 2, 1}, {1, 1, 1}};
return GetFirstSuitableWorkGroup(wgs, kernel_info.max_work_group_size);
}
absl::Status Winograd36To4x4Tile4x1::BindArguments(ArgumentsBinder* args) {
const int tiles_x = DivideRoundUp(dst_[0]->Width(), 4);
RETURN_IF_ERROR(args->SetInt("tiles_x", tiles_x));
return absl::OkStatus();
}
int3 Winograd36To4x4Tile4x1::GetGridSize() const {
const int tiles_x = DivideRoundUp(dst_[0]->Width(), 4);
const int tiles_y = DivideRoundUp(dst_[0]->Height(), 4);
const int grid_x = tiles_x * tiles_y * dst_[0]->Batch();
const int grid_y = 4;
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
void Winograd36To4x4Tile4x1::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
if (gpu_info.IsIntel()) {
work_groups->push_back(int3(8, 4, 1));
return;
}
switch (tuning_type) {
case TuningType::kExhaustive:
GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
return;
case TuningType::kFast:
default:
work_groups->push_back(SelectBestWorkGroup(kernel_info));
return;
}
}
Winograd36To4x4Tile4x1 CreateWinograd36To4x4Tile4x1(
const GpuInfo& gpu_info, const OperationDef& definition,
const tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases) {
Winograd36To4x4Tile4x1 result(definition, gpu_info);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), biases);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
result.UploadAt();
return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/winograd_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, Winograd4x4To36TileX6) {
auto status = Winograd4x4To36TileX6Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Winograd36To4x4Tile4x1) {
auto status = Winograd36To4x4Tile4x1Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Winograd4x4To36) {
auto status = Winograd4x4To36Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Winograd4x4To36Batch) {
auto status = Winograd4x4To36BatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Winograd36To4x4) {
auto status = Winograd36To4x4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/winograd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/winograd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10166913-c035-4b72-883c-e0ecb0e36c34 | cpp | tensorflow/tensorflow | depthwise_conv_3x3 | tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.cc | tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_3x3_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.h"
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
DepthwiseConv3x3::DepthwiseConv3x3(const OperationDef& definition,
bool weights_are_buffer,
bool local_mem_uploads,
const GpuInfo& gpu_info)
: GPUOperation(definition), local_mem_uploads_(local_mem_uploads) {
work_group_size_ = int3(8, 4, 1);
code_ = GenerateDepthwiseConvCode(gpu_info, definition_, weights_are_buffer,
local_mem_uploads_);
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
}
DepthwiseConv3x3::DepthwiseConv3x3(DepthwiseConv3x3&& operation)
: GPUOperation(std::move(operation)),
local_mem_uploads_(operation.local_mem_uploads_) {}
DepthwiseConv3x3& DepthwiseConv3x3::operator=(DepthwiseConv3x3&& operation) {
if (this != &operation) {
std::swap(local_mem_uploads_, operation.local_mem_uploads_);
GPUOperation::operator=(std::move(operation));
}
return *this;
}
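// Emits a kernel that computes a 2x2 block of output pixels per work item for
// a 3x3 depthwise filter with stride 1 and dilation 1.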
std::string DepthwiseConv3x3::GenerateDepthwiseConvCode(
const GpuInfo& gpu_info, const OperationDef& op_def,
bool weights_are_buffer, bool local_mem_uploads) {
auto src_desc = op_def.src_tensors[0];
AddSrcTensor("src_tensor", src_desc);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::string c;
if (local_mem_uploads && gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
}
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = (linear_id / args.dst_tensor.Batch()) * 2;\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0 * 2;\n";
}
c += " int Y = GLOBAL_ID_1 * 2;\n";
c += " int S = GLOBAL_ID_2;\n";
c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
if (!local_mem_uploads) {
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() "
"|| S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
}
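  // With local-memory uploads, each slice's 10 FLT4 values (9 filter taps
  // followed by the bias) are staged in shared memory by the work group.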
if (local_mem_uploads) {
c += " __local FLT4 f[10];\n";
if (gpu_info.IsApiOpenCl() && gpu_info.IsPowerVR()) {
c += " event_t e = async_work_group_copy(f, args.weights.GetPtr() + S * "
"10, 10, 0);\n";
c += " wait_group_events(1, &e);\n";
} else {
c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n";
c += " if (local_id < 10) {\n";
c += " f[local_id] = args.weights.Read(S * 10 + local_id);\n";
c += " }\n";
c += " LOCAL_MEM_BARRIER;\n";
}
} else if (weights_are_buffer && gpu_info.SupportsPointersInKernels()) {
c += " __global FLT4* f = args.weights.GetPtr() + S * 10;\n";
}
c += " FLT4 s0;\n";
c += " FLT4 s1;\n";
c += " FLT4 s2;\n";
c += " FLT4 s3;\n";
std::string W[9] = {"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"};
std::string bias = "bias";
std::string xc[4] = {"X - 1", "X", "X + 1", "X + 2"};
std::string yc[4] = {"Y - 1", "Y", "Y + 1", "Y + 2"};
if (!weights_are_buffer) {
c += " FLT4 f0 = args.weights.Read(0, S);\n";
c += " FLT4 f1 = args.weights.Read(1, S);\n";
c += " FLT4 f2 = args.weights.Read(2, S);\n";
c += " FLT4 f3 = args.weights.Read(3, S);\n";
c += " FLT4 f4 = args.weights.Read(4, S);\n";
c += " FLT4 f5 = args.weights.Read(5, S);\n";
c += " FLT4 f6 = args.weights.Read(6, S);\n";
c += " FLT4 f7 = args.weights.Read(7, S);\n";
c += " FLT4 f8 = args.weights.Read(8, S);\n";
}
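  // If the tensor storage cannot zero-clamp out-of-bounds reads, clamp the
  // coordinates manually and mask the loaded values with in-bounds flags below.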
if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " int x0 = X - 1;\n";
c += " int x1 = X;\n";
c += " int x2 = X + 1;\n";
c += " int x3 = X + 2;\n";
c += " bool x0_in = x0 >= 0 && x0 < args.dst_tensor.Width();\n";
c += " bool x1_in = x1 >= 0 && x1 < args.dst_tensor.Width();\n";
c += " bool x2_in = x2 >= 0 && x2 < args.dst_tensor.Width();\n";
c += " bool x3_in = x3 >= 0 && x3 < args.dst_tensor.Width();\n";
c += " x0 = clamp(x0, 0, args.dst_tensor.Width() - 1);\n";
c += " x1 = clamp(x1, 0, args.dst_tensor.Width() - 1);\n";
c += " x2 = clamp(x2, 0, args.dst_tensor.Width() - 1);\n";
c += " x3 = clamp(x3, 0, args.dst_tensor.Width() - 1);\n";
xc[0] = "x0";
xc[1] = "x1";
xc[2] = "x2";
xc[3] = "x3";
}
if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " int y0 = Y - 1;\n";
c += " int y1 = Y;\n";
c += " int y2 = Y + 1;\n";
c += " int y3 = Y + 2;\n";
c += " bool y0_in = y0 >= 0 && y0 < args.dst_tensor.Height();\n";
c += " bool y1_in = y1 >= 0 && y1 < args.dst_tensor.Height();\n";
c += " bool y2_in = y2 >= 0 && y2 < args.dst_tensor.Height();\n";
c += " bool y3_in = y3 >= 0 && y3 < args.dst_tensor.Height();\n";
c += " y0 = clamp(y0, 0, args.dst_tensor.Height() - 1);\n";
c += " y1 = clamp(y1, 0, args.dst_tensor.Height() - 1);\n";
c += " y2 = clamp(y2, 0, args.dst_tensor.Height() - 1);\n";
c += " y3 = clamp(y3, 0, args.dst_tensor.Height() - 1);\n";
yc[0] = "y0";
yc[1] = "y1";
yc[2] = "y2";
yc[3] = "y3";
}
if (local_mem_uploads || weights_are_buffer) {
const bool use_direct_buffer =
!local_mem_uploads && !gpu_info.SupportsPointersInKernels();
const std::string fetch_start =
use_direct_buffer ? "args.weights.Read(S * 10 + " : "f[";
const std::string fetch_end = use_direct_buffer ? ")" : "]";
W[0] = fetch_start + "0" + fetch_end;
W[1] = fetch_start + "1" + fetch_end;
W[2] = fetch_start + "2" + fetch_end;
W[3] = fetch_start + "3" + fetch_end;
W[4] = fetch_start + "4" + fetch_end;
W[5] = fetch_start + "5" + fetch_end;
W[6] = fetch_start + "6" + fetch_end;
W[7] = fetch_start + "7" + fetch_end;
W[8] = fetch_start + "8" + fetch_end;
bias = fetch_start + "9" + fetch_end;
}
auto read_4x_line = [&](int y) {
std::string s0_check, s1_check, s2_check, s3_check;
if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
s0_check += "x0_in";
s1_check += "x1_in";
s2_check += "x2_in";
s3_check += "x3_in";
}
if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
const std::string y_in = "y" + std::to_string(y) + "_in";
s0_check += s0_check.empty() ? y_in : (" && " + y_in);
s1_check += s1_check.empty() ? y_in : (" && " + y_in);
s2_check += s2_check.empty() ? y_in : (" && " + y_in);
s3_check += s3_check.empty() ? y_in : (" && " + y_in);
}
if (!s0_check.empty()) {
s0_check = " * INIT_FLT(" + s0_check + ")";
}
if (!s1_check.empty()) {
s1_check = " * INIT_FLT(" + s1_check + ")";
}
if (!s2_check.empty()) {
s2_check = " * INIT_FLT(" + s2_check + ")";
}
if (!s3_check.empty()) {
s3_check = " * INIT_FLT(" + s3_check + ")";
}
c += " s0 = args.src_tensor.Read(" + xc[0] + ", " + yc[y] + ", S)" +
s0_check + ";\n";
c += " s1 = args.src_tensor.Read(" + xc[1] + ", " + yc[y] + ", S)" +
s1_check + ";\n";
c += " s2 = args.src_tensor.Read(" + xc[2] + ", " + yc[y] + ", S)" +
s2_check + ";\n";
c += " s3 = args.src_tensor.Read(" + xc[3] + ", " + yc[y] + ", S)" +
s3_check + ";\n";
};
c += " {\n";
read_4x_line(0);
c += " r0 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n";
c += " r0 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n";
c += " r0 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n";
c += " }\n";
c += " {\n";
read_4x_line(1);
c += " r0 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n";
c += " r0 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n";
c += " r0 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n";
c += " }\n";
c += " {\n";
read_4x_line(2);
c += " r0 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n";
c += " r0 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n";
c += " r0 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n";
c += " r1 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n";
c += " }\n";
c += " {\n";
read_4x_line(3);
c += " r2 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n";
c += " r2 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n";
c += " r3 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n";
c += " }\n";
if (!weights_are_buffer) {
c += " FLT4 bias = args.weights.Read(9, S);\n";
}
c += " r0 += TO_ACCUM_TYPE(" + bias + ");\n";
c += " r1 += TO_ACCUM_TYPE(" + bias + ");\n";
c += " r2 += TO_ACCUM_TYPE(" + bias + ");\n";
c += " r3 += TO_ACCUM_TYPE(" + bias + ");\n";
if (local_mem_uploads) {
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() "
"|| S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
}
c += " if(X + 0 < args.dst_tensor.Width() && Y + 0 < "
"args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r0);\n";
c += " args.dst_tensor.Write(result, X + 0, Y + 0, S);\n";
c += " }\n";
c += " if(X + 1 < args.dst_tensor.Width() && Y + 0 < "
"args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r1);\n";
c += " args.dst_tensor.Write(result, X + 1, Y + 0, S);\n";
c += " }\n";
c += " if(X + 0 < args.dst_tensor.Width() && Y + 1 < "
"args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r2);\n";
c += " args.dst_tensor.Write(result, X + 0, Y + 1, S);\n";
c += " }\n";
c += " if(X + 1 < args.dst_tensor.Width() && Y + 1 < "
"args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r3);\n";
c += " args.dst_tensor.Write(result, X + 1, Y + 1, S);\n";
c += " }\n";
c += "}\n";
return c;
}
int3 DepthwiseConv3x3::GetGridSize() const {
const int grid_x = DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch();
const int grid_y = DivideRoundUp(dst_[0]->Height(), 2);
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
void DepthwiseConv3x3::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
if (local_mem_uploads_) {
work_groups->push_back(work_group_size_);
} else {
GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
}
}
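// The specialization covers depth-multiplier-1 3x3 filters with unit stride
// and dilation and symmetric padding of 1; one known-bad Adreno driver is
// excluded.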
bool IsDepthwiseConv3x3Supported(const GpuInfo& gpu_info,
const DepthwiseConvolution2DAttributes& attr) {
if (gpu_info.IsApiOpenCl() && gpu_info.IsAdreno()) {
const std::string kBadDriver =
"OpenCL 2.0 QUALCOMM build: commit #7daed58 changeid #I7ece6fe30d "
"Date: 10/19/16";
if (absl::StrContains(gpu_info.opencl_info.platform_version, kBadDriver)) {
return false;
}
}
return attr.weights.shape.o == 1 && attr.dilations.w == 1 &&
attr.dilations.h == 1 && attr.weights.shape.w == 3 &&
attr.weights.shape.h == 3 && attr.strides.w == 1 &&
attr.strides.h == 1 && attr.padding.prepended.w == 1 &&
attr.padding.prepended.h == 1 && attr.padding.appended.w == 1 &&
attr.padding.appended.h == 1;
}
DepthwiseConv3x3 CreateDepthwiseConv3x3(
const GpuInfo& gpu_info, const OperationDef& definition,
const DepthwiseConvolution2DAttributes& attr) {
bool weights_are_buffer = !gpu_info.SupportsImages() ||
gpu_info.IsPowerVR() || gpu_info.IsMali() ||
gpu_info.IsApple();
bool local_mem_uploads =
(weights_are_buffer && gpu_info.IsPowerVR() && gpu_info.IsApiOpenCl() &&
gpu_info.opencl_info.dedicated_local_memory) ||
(gpu_info.IsApple() &&
gpu_info.apple_info.IsLocalMemoryPreferredOverGlobal());
DepthwiseConv3x3 result(definition, weights_are_buffer, local_mem_uploads,
gpu_info);
result.UploadWeightsAndBiases(attr.weights, attr.bias, weights_are_buffer);
return result;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3_stride_h2_test_util.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, DepthwiseConv3x3SimpleWeights) {
auto status = DepthwiseConv3x3SimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, DepthwiseConv3x3) {
auto status = DepthwiseConv3x3Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, DepthWiseConv3x3StrideH2SimpleWeights) {
auto status = DepthWiseConv3x3StrideH2SimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_3x3_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee0b0fcd-8545-49e5-8b69-2c9a4f14e780 | cpp | tensorflow/tensorflow | conv_constants | tensorflow/lite/delegates/gpu/common/tasks/conv_constants.cc | tensorflow/lite/delegates/gpu/cl/kernels/conv_constants_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/conv_constants.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
namespace tflite {
namespace gpu {
namespace {
int GetAdrenoOptimalMaxConstantSize(const AdrenoInfo& adreno_info) {
if (adreno_info.IsAdreno3xx() || adreno_info.IsAdreno4xx() ||
adreno_info.IsAdreno5xx()) {
return 256 * 10;
} else {
return 256 * 14;
}
}
int GetOptimalMaxConstantSize(const GpuInfo& gpu_info) {
if (gpu_info.IsAdreno()) {
return GetAdrenoOptimalMaxConstantSize(gpu_info.adreno_info);
} else if (gpu_info.IsAMD()) {
return 4096;
} else {
return 1024;
}
}
void AppendConditionally(const std::string& value, const std::string& delimiter,
std::string* result) {
if (!result->empty()) {
    *result += delimiter;
}
*result += value;
}
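// Emits the inner multiply-accumulate: either one dot() per output channel
// (dot-conv layout) or one multiply-add per input channel (mad layout).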
std::string GenerateConv(int src_size, int dst_size, bool use_dot_conv,
int const_mem_offset, CalculationsPrecision precision,
const std::string& dst, const std::string& src) {
std::string result;
const std::string postfixes[] = {".x", ".y", ".z", ".w"};
if (use_dot_conv) {
const std::string src_postfixes[] = {".x", ".xy", ".xyz", ""};
const std::string src_postfix = src_postfixes[src_size - 1];
for (int i = 0; i < dst_size; ++i) {
result += " " + dst + postfixes[i] + " += dot(" + src +
", args.weights.Read(" + std::to_string(const_mem_offset + i) +
")" + src_postfix + ");\n";
}
} else {
const std::string dst_postfixes[] = {".x", ".xy", ".xyz", ""};
const std::string dst_postfix = dst_postfixes[dst_size - 1];
if (precision == CalculationsPrecision::F32_F16) {
for (int i = 0; i < src_size; ++i) {
if (i != 0) {
result += " + ";
}
std::string src_name = src;
if (src_size != 1) {
src_name += postfixes[i];
}
result += src_name + " * args.weights.Read(" +
std::to_string(const_mem_offset + i) + ")" + dst_postfix;
}
std::string size = dst_size == 1 ? "" : std::to_string(dst_size);
result = " " + dst + dst_postfix + " += TO_ACCUM_FLT" + size + "(" +
result + ");\n";
} else {
for (int i = 0; i < src_size; ++i) {
std::string src_name = src;
if (src_size != 1) {
src_name += postfixes[i];
}
result += " " + dst + dst_postfix + " += " + src_name +
" * args.weights.Read(" +
std::to_string(const_mem_offset + i) + ")" + dst_postfix +
";\n";
}
}
}
return result;
}
std::string GenerateConvolutionConstantCode(const GpuInfo& gpu_info,
const OperationDef& op_def,
const OHWI& weights_shape,
bool x_oob_reads, bool y_oob_reads,
bool use_dot_conv,
GPUOperation* op) {
auto src_desc = op_def.src_tensors[0];
op->AddSrcTensor("src_tensor", src_desc);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
const int out_z = DivideRoundUp(weights_shape.o, 4);
const std::string kOutZ = std::to_string(out_z);
const int src_depth = DivideRoundUp(weights_shape.i, 4);
const std::string postfixes[] = {".x", ".xy", ".xyz", ""};
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (src_desc.HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height()) "
"return;\n";
c += " int start_x = X * args.stride_x + args.padding_x;\n";
c += " int start_y = Y * args.stride_y + args.padding_y;\n";
for (int i = 0; i < out_z; ++i) {
c += " ACCUM_FLT4 r" + std::to_string(i) + " = INIT_ACCUM_FLT4(0.0f);\n";
}
std::string check;
if (y_oob_reads && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
AppendConditionally("inside_y", " && ", &check);
}
if (x_oob_reads && !src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
AppendConditionally("inside_x", " && ", &check);
}
int filters_counter = 0;
for (int s = 0; s < src_depth; ++s) {
const int src_ch_count = std::min(4, weights_shape.i - s * 4);
const std::string s_count =
src_ch_count == 1 ? "" : std::to_string(src_ch_count);
const std::string s_type = absl::StrCat("FLT", s_count);
const std::string s_postfix = postfixes[src_ch_count - 1];
for (int ky = 0; ky < weights_shape.h; ++ky) {
std::string s_y = absl::StrCat("(start_y + ", ky, " * args.dilation_y)");
c += " {\n";
c += " int y_c = start_y + " + std::to_string(ky) +
" * args.dilation_y;\n";
if (y_oob_reads && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c +=
" bool inside_y = y_c >= 0 && y_c < args.src_tensor.Height();\n";
c += " y_c = clamp(y_c, 0, args.src_tensor.Height() - 1);\n";
}
for (int kx = 0; kx < weights_shape.w; ++kx) {
c += " {\n";
c += " int x_c = start_x + " + std::to_string(kx) +
" * args.dilation_x;\n";
if (x_oob_reads && !src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool inside_x = x_c >= 0 && x_c < "
"args.src_tensor.Width();\n";
c += " x_c = clamp(x_c, 0, args.src_tensor.Width() - 1);\n";
}
c += " " + s_type + " src = args.src_tensor.Read(x_c, y_c, " +
std::to_string(s) + ")" + s_postfix + ";\n";
if (!check.empty()) {
c += " src *= INIT_FLT(" + check + ");\n";
}
for (int d = 0; d < out_z; ++d) {
const int dst_ch_count = std::min(4, weights_shape.o - d * 4);
c += GenerateConv(src_ch_count, dst_ch_count, use_dot_conv,
filters_counter, op_def.precision,
"r" + std::to_string(d), "src");
filters_counter += use_dot_conv ? dst_ch_count : src_ch_count;
}
c += " }\n";
}
c += " }\n";
}
}
for (int i = 0; i < out_z; ++i) {
std::string s_i = std::to_string(i);
c += " {\n";
c += " FLT4 res = TO_FLT4(r" + s_i + ") + args.biases.Read(" + s_i +
");\n";
c += " args.dst_tensor.Write(res, X, Y, " + s_i + ");\n";
c += " }\n";
}
c += "}\n";
return c;
}
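// Prefers the weights layout that stores fewer padded values: dot-conv pads
// the source depth, mad-conv pads the destination depth.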
bool IsDotConvBetter(int src_channels, int dst_channels) {
if (dst_channels % 4 == 0) {
return false;
}
if (src_channels % 4 == 0) {
return true;
}
const int src_depth = DivideRoundUp(src_channels, 4);
const int dst_depth = DivideRoundUp(dst_channels, 4);
return dst_channels * src_depth < src_channels * dst_depth;
}
}
bool IsConvConstantsSupported(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr) {
if (gpu_info.IsApiOpenCl() && gpu_info.IsAdreno()) {
const std::string kBadDriver =
"OpenCL 2.0 QUALCOMM build: commit #7ff4f54 changeid #I4460aa6217 "
"Date: 12/30/18";
if (absl::StrContains(gpu_info.opencl_info.platform_version, kBadDriver)) {
return false;
}
}
if (attr.groups != 1) {
return false;
}
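  // The padded weights must fit in the device's constant-memory budget, and
  // the output depth must fit in at most 8 FLT4 accumulators.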
const bool use_dot_conv =
IsDotConvBetter(attr.weights.shape.i, attr.weights.shape.o);
const auto& w_shape = attr.weights.shape;
const int src_depth = DivideRoundUp(w_shape.i, 4);
const int dst_depth = DivideRoundUp(w_shape.o, 4);
const int aligned_ch_count =
use_dot_conv ? w_shape.o * src_depth * 4 : w_shape.i * dst_depth * 4;
const int filters_count = aligned_ch_count * w_shape.h * w_shape.w;
const int float_size = definition.precision == CalculationsPrecision::F32
? sizeof(float)
: sizeof(half);
const int filters_buffer_size = filters_count * float_size;
const int kConstantMaxSize = GetOptimalMaxConstantSize(gpu_info);
const int flt4_registers = DivideRoundUp(w_shape.o, 4);
return filters_buffer_size <= kConstantMaxSize && flt4_registers <= 8;
}
GPUOperation CreateConvConstants(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr) {
const bool use_dot_conv =
IsDotConvBetter(attr.weights.shape.i, attr.weights.shape.o);
GPUOperation op(definition);
UploadWeightsForConvConstants(attr.weights, gpu_info, definition.precision,
use_dot_conv, &op);
op.args_.AddInt("stride_x", attr.strides.w);
op.args_.AddInt("stride_y", attr.strides.h);
op.args_.AddInt("padding_x", -attr.padding.prepended.w);
op.args_.AddInt("padding_y", -attr.padding.prepended.h);
op.args_.AddInt("dilation_x", attr.dilations.w);
op.args_.AddInt("dilation_y", attr.dilations.h);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_ZIs1;
bool x_oob_reads =
attr.padding.appended.w != 0 || attr.padding.prepended.w != 0;
bool y_oob_reads =
attr.padding.appended.h != 0 || attr.padding.prepended.h != 0;
op.code_ = GenerateConvolutionConstantCode(gpu_info, definition,
attr.weights.shape, x_oob_reads,
y_oob_reads, use_dot_conv, &op);
if (definition.precision == CalculationsPrecision::F16 &&
gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
op.compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
}
if (definition.precision != CalculationsPrecision::F32 &&
gpu_info.IsPowerVR()) {
op.compiler_options_.push_back(CompilerOptions::kClDisableOptimizations);
}
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
op.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return op;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_constants_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvConstantsSimpleWeights) {
const auto status = ConvConstantsSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvConstants) {
const auto status = ConvConstantsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/conv_constants.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7bd23866-5039-481b-92b9-5f3198d60d37 | cpp | tensorflow/tensorflow | reshapex4 | tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc | tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/reshapex4.h"
#include <string>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
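// The kernel flattens the destination element's (batch, y, x, slice) index and
// decomposes it back into source coordinates; the x4 variant assumes channel
// counts divisible by 4 on both tensors.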
std::string GetReshapeCode(const OperationDef& op_def) {
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int dst_bhwc4 = B;\n";
} else {
c += " int dst_bhwc4 = 0;\n";
}
c += " dst_bhwc4 = ((dst_bhwc4 * args.dst_tensor.Height() + Y) * "
"args.dst_tensor.Width() + X) * args.dst_tensor.Slices() + Z;\n";
c += " int src_z = dst_bhwc4 % args.src_tensor.Slices();\n";
c += " dst_bhwc4 = dst_bhwc4 / args.src_tensor.Slices();\n";
c += " int src_x = dst_bhwc4 % args.src_tensor.Width();\n";
c += " dst_bhwc4 = dst_bhwc4 / args.src_tensor.Width();\n";
c += " int src_y = dst_bhwc4 % args.src_tensor.Height();\n";
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int src_b = dst_bhwc4 / args.src_tensor.Height();\n";
c += " args.src_tensor.SetBatchRef(src_b);\n";
}
c += " args.src_tensor::type result = args.src_tensor.Read(src_x, src_y, "
"src_z);\n";
c += " args.dst_tensor.Write(result, X, Y, Z);\n";
c += "}\n";
return c;
}
}
GPUOperation CreateReshapex4(const OperationDef& definition) {
GPUOperation op(definition);
op.AddSrcTensor("src_tensor", definition.src_tensors[0]);
op.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
op.code_ = GetReshapeCode(definition);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
return op;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/reshape_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Reshapex4) {
auto status = Reshapex4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db90c9df-2043-44fd-a8f2-b28813345dea | cpp | tensorflow/tensorflow | quantize_and_dequantize | tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc | tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
#include <any>
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class QuantizeAndDequantize : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
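    // Fake quantization: clamp to [quant_min, quant_max], snap to the nearest
    // quant_scale step, then map back to the float range.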
std::string code = R"(
value_0 = clamp(value_0, vec4($quant_min$), vec4($quant_max$));
value_0 = (value_0 - vec4($quant_min$)) / vec4($quant_scale$);
value_0 = floor(value_0 + vec4(0.5));
value_0 = value_0 * vec4($quant_scale$) + vec4($quant_min$);
)";
const auto& attr =
std::any_cast<const QuantizeAndDequantizeAttributes&>(ctx.op_attr);
*generated_code = {
{{"quant_min", attr.min},
{"quant_max", attr.max},
{"quant_scale", attr.scale}},
{},
{},
uint3(),
uint3(),
code,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader() {
return std::make_unique<QuantizeAndDequantize>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/quantize_and_dequantize_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim2Bits8) {
auto status = QuantAndDequant_Dim2Bits8Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim3Bits8_NegativeRange) {
auto status = QuantAndDequant_Dim3Bits8_NegativeRangeTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim3Bits16) {
auto status = QuantAndDequant_Dim3Bits16Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim2Bits16_NegativeRange) {
auto status = QuantAndDequant_Dim2Bits16_NegativeRangeTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23361921-0139-49e4-8795-9f4dd027c06f | cpp | tensorflow/tensorflow | conv_pointwise | tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.cc | tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.h"
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace {
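// The generated kernel reads four (x, y) offsets per output slice from two
// int4 entries, dot-multiplies the source pixel with the weights tensor
// shifted by those offsets, masks the last source slice to the real channel
// count, and optionally averages the sum.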
std::string GenerateCode(const ConvPointwiseAttributes& attr) {
std::string c = R"(
MAIN_FUNCTION($0) {
int linear_id = GLOBAL_ID_0;
int X = linear_id / args.dst_tensor.Batch();
int B = linear_id % args.dst_tensor.Batch();
args.weights_tensor.SetBatchRef(B);
args.src_tensor.SetBatchRef(B);
args.dst_tensor.SetBatchRef(B);
int Y = GLOBAL_ID_1;
int S = GLOBAL_ID_2;
if (X >= args.dst_tensor.Width() ||
Y >= args.dst_tensor.Height() ||
S >= args.dst_tensor.Slices()) return;
int4 offset0 = args.offsets.Read(S * 2 + 0, 0);
int4 offset1 = args.offsets.Read(S * 2 + 1, 0);
ACCUM_FLT4 res = INIT_ACCUM_FLT4(0.0f);
FLT4 last_mask;
int last_src_ch = (args.src_tensor.Slices() - 1) * 4;
last_mask.x = INIT_FLT(1.0f);
last_mask.y = last_src_ch + 1 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f);
last_mask.z = last_src_ch + 2 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f);
last_mask.w = last_src_ch + 3 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f);
for (int s = 0; s < args.src_tensor.Slices(); ++s) {
FLT4 src = args.src_tensor.Read(X, Y, s);
FLT4 w0 = args.weights_tensor.Read(X + offset0.x, Y + offset0.y, s);
FLT4 w1 = args.weights_tensor.Read(X + offset0.z, Y + offset0.w, s);
FLT4 w2 = args.weights_tensor.Read(X + offset1.x, Y + offset1.y, s);
FLT4 w3 = args.weights_tensor.Read(X + offset1.z, Y + offset1.w, s);
FLT4 mask = INIT_FLT4(1.0f);
if (s == (args.src_tensor.Slices() - 1)) {
mask = last_mask;
}
src *= mask;
res.x += dot(src, w0);
res.y += dot(src, w1);
res.z += dot(src, w2);
res.w += dot(src, w3);
}
FLT4 result = TO_FLT4(res);
)";
if (attr.mean) {
c += " result = result / INIT_FLT(args.src_tensor.Channels());\n";
}
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
struct NodeContext {
Node* node;
std::vector<Value*> inputs;
std::vector<Value*> outputs;
};
absl::Status IsNode(const GraphFloat32& graph, OperationType op_type,
int inputs_count, int outputs_count, Node* node,
NodeContext* node_context) {
const std::string op_desc = ToString(op_type);
node_context->node = node;
if (node_context->node == nullptr) {
return absl::NotFoundError(absl::StrCat("Invalid ", op_desc, " node."));
}
if (OperationTypeFromString(node_context->node->operation.type) != op_type) {
return absl::InternalError(
absl::StrCat("Not correct node type. Expected ", op_desc, ", received ",
node_context->node->operation.type));
}
node_context->inputs = graph.FindInputs(node_context->node->id);
node_context->outputs = graph.FindOutputs(node_context->node->id);
if (inputs_count != -1) {
if (node_context->inputs.size() != inputs_count) {
return absl::InternalError(
absl::StrCat("Expected ", inputs_count, " input in a ", op_desc,
" node. Node has ", node_context->inputs.size()));
}
}
if (node_context->outputs.size() != outputs_count) {
return absl::InternalError(
absl::StrCat("Expected ", outputs_count, " output in a ", op_desc,
" node. Node has ", node_context->outputs.size()));
}
return absl::OkStatus();
}
absl::Status IsMeanNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(IsNode(graph, OperationType::MEAN, 1, 1, node, node_context));
auto mean_attr =
absl::any_cast<MeanAttributes>(node_context->node->operation.attributes);
if (mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) {
return absl::InternalError("Expected mean node with channels reduction.");
}
return absl::OkStatus();
}
absl::Status IsReduceSumNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(
IsNode(graph, OperationType::REDUCE_SUM, 1, 1, node, node_context));
auto reduce_attr =
std::any_cast<ReduceAttributes>(node_context->node->operation.attributes);
if (reduce_attr.dims != std::set<Axis>{Axis::CHANNELS}) {
return absl::InternalError(
"Expected reduce_sum node with channels reduction.");
}
return absl::OkStatus();
}
absl::Status IsMulNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(IsNode(graph, OperationType::MUL, 2, 1, node, node_context));
if (node_context->inputs[0]->tensor.shape !=
node_context->inputs[1]->tensor.shape) {
return absl::InternalError("Expected mul node with 2 equal tensors.");
}
return absl::OkStatus();
}
absl::Status IsSliceNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(
IsNode(graph, OperationType::SLICE, 1, 1, node, node_context));
auto slice_attr =
absl::any_cast<SliceAttributes>(node_context->node->operation.attributes);
if (slice_attr.strides != BHWC(1, 1, 1, 1)) {
return absl::InternalError("Not valid attributes in slice node.");
}
return absl::OkStatus();
}
absl::Status IsConcatNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(
IsNode(graph, OperationType::CONCAT, -1, 1, node, node_context));
auto concat_attr = absl::any_cast<ConcatAttributes>(
node_context->node->operation.attributes);
if (concat_attr.axis != Axis::CHANNELS) {
return absl::InternalError("Not valid attributes in concat node.");
}
return absl::OkStatus();
}
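// Traces one concat input back through MEAN/REDUCE_SUM -> MUL -> SLICE to
// recover the (x, y) offset encoded in the slice starts.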
absl::Status GetOffset(const GraphFloat32& graph, NodeId concat_input_node,
                       NodeId second_common_input_id, int* offset_x,
int* offset_y, std::set<NodeId>* consumed_nodes) {
NodeContext reduce_node, mul_node, slice_node;
absl::Status status =
IsMeanNode(graph, graph.FindProducer(concat_input_node), &reduce_node);
if (!status.ok()) {
RETURN_IF_ERROR(IsReduceSumNode(
graph, graph.FindProducer(concat_input_node), &reduce_node));
}
RETURN_IF_ERROR(IsMulNode(
graph, graph.FindProducer(reduce_node.inputs[0]->id), &mul_node));
const ValueId slice_output_id =
      mul_node.inputs[0]->id == second_common_input_id ? mul_node.inputs[1]->id
: mul_node.inputs[0]->id;
RETURN_IF_ERROR(
IsSliceNode(graph, graph.FindProducer(slice_output_id), &slice_node));
auto slice_attr =
absl::any_cast<SliceAttributes>(slice_node.node->operation.attributes);
*offset_x = slice_attr.starts.w;
*offset_y = slice_attr.starts.h;
consumed_nodes->insert(reduce_node.node->id);
consumed_nodes->insert(mul_node.node->id);
consumed_nodes->insert(slice_node.node->id);
return absl::OkStatus();
}
}
GPUOperation CreateConvPointwise(const OperationDef& definition,
const ConvPointwiseAttributes& attr) {
const int dst_channels = attr.offsets.size();
const int dst_depth = DivideRoundUp(dst_channels, 4);
std::vector<int32_t> offsets_data(dst_depth * 2 * 4, 0);
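  // Each int4 in the offsets texture holds two (x, y) pairs; unused tail
  // entries repeat the last real offset.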
for (int i = 0; i < attr.offsets.size(); ++i) {
offsets_data[i * 2 + 0] = attr.offsets[i].x;
offsets_data[i * 2 + 1] = attr.offsets[i].y;
}
for (int i = attr.offsets.size(); i < offsets_data.size() / 2; ++i) {
offsets_data[i * 2 + 0] = attr.offsets.back().x;
offsets_data[i * 2 + 1] = attr.offsets.back().y;
}
GPUOperation op(definition);
op.AddSrcTensor("src_tensor", definition.src_tensors[0]);
op.AddSrcTensor("weights_tensor", definition.src_tensors[1]);
op.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
op.code_ = GenerateCode(attr);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor(
DataType::INT32, TensorStorageType::TEXTURE_2D, dst_depth * 2, 1,
reinterpret_cast<uint8_t*>(offsets_data.data()));
op.args_.AddObject("offsets", std::make_unique<TensorDescriptor>(desc));
return op;
}
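// Pattern matcher: recognizes SLICE -> MUL -> {MEAN | REDUCE_SUM over
// channels} -> CONCAT subgraphs and replaces them with a single ConvPointwise
// operation.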
absl::Status TryFusedPointwiseConv(
const GraphFloat32& graph, NodeId first_node_id,
CalculationsPrecision precision,
const std::map<ValueId, TensorDescriptor>& tensor_descriptors,
std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph) {
NodeContext slice_node;
RETURN_IF_ERROR(
IsSliceNode(graph, graph.GetNode(first_node_id), &slice_node));
  const auto& first_common_input = slice_node.inputs[0];
auto slice_consumers = graph.FindConsumers(slice_node.outputs[0]->id);
if (slice_consumers.size() != 1) {
return absl::NotFoundError("FusedPointwiseConv not suitable.");
}
NodeContext mul_node;
RETURN_IF_ERROR(IsMulNode(graph, slice_consumers[0], &mul_node));
  const auto& second_common_input =
mul_node.inputs[0]->id == slice_node.outputs[0]->id ? mul_node.inputs[1]
: mul_node.inputs[0];
auto mul_consumers = graph.FindConsumers(mul_node.outputs[0]->id);
if (mul_consumers.size() != 1) {
return absl::NotFoundError("FusedPointwiseConv not suitable.");
}
NodeContext reduce_node;
bool mean = true;
absl::Status status = IsMeanNode(graph, mul_consumers[0], &reduce_node);
if (!status.ok()) {
RETURN_IF_ERROR(IsReduceSumNode(graph, mul_consumers[0], &reduce_node));
mean = false;
}
auto reduce_consumers = graph.FindConsumers(reduce_node.outputs[0]->id);
if (reduce_consumers.size() != 1) {
return absl::NotFoundError("FusedPointwiseConv not suitable.");
}
NodeContext concat_node;
RETURN_IF_ERROR(IsConcatNode(graph, reduce_consumers[0], &concat_node));
ConvPointwiseAttributes op_attr;
op_attr.mean = mean;
std::set<NodeId> temp_consumed_nodes;
for (const auto& concat_input : concat_node.inputs) {
int offset_x, offset_y;
    RETURN_IF_ERROR(GetOffset(graph, concat_input->id, second_common_input->id,
&offset_x, &offset_y, &temp_consumed_nodes));
op_attr.offsets.push_back(int2(offset_x, offset_y));
}
consumed_nodes->insert(temp_consumed_nodes.begin(),
temp_consumed_nodes.end());
consumed_nodes->insert(concat_node.node->id);
OperationDef op_def;
op_def.precision = precision;
  auto it = tensor_descriptors.find(second_common_input->id);
if (it != tensor_descriptors.end()) {
op_def.src_tensors.push_back(it->second);
}
  it = tensor_descriptors.find(first_common_input->id);
if (it != tensor_descriptors.end()) {
op_def.src_tensors.push_back(it->second);
}
it = tensor_descriptors.find(concat_node.outputs[0]->id);
if (it != tensor_descriptors.end()) {
op_def.dst_tensors.push_back(it->second);
}
std::unique_ptr<GPUOperation>* gpu_op =
      InitSingleOpSubgraph({second_common_input, first_common_input},
{concat_node.outputs[0]}, gpu_subgraph);
auto operation = CreateConvPointwise(op_def, op_attr);
*gpu_op = std::make_unique<GPUOperation>(std::move(operation));
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/testing_util.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, SliceMulMeanConcat) {
TestExecutionEnvironment* env = &exec_env_;
TensorFloat32 src_tensor;
src_tensor.shape = BHWC(1, 2, 1, 2);
src_tensor.data = {3.0f, 4.0f, 5.0f, 6.0f};
TensorFloat32 weights_tensor;
weights_tensor.shape = BHWC(1, 2, 1, 2);
weights_tensor.data = {1.0f, 2.0f, 1.0f, 2.0f};
ConvPointwiseAttributes op_attr;
op_attr.mean = true;
op_attr.offsets.push_back(int2(0, 0));
for (auto precision : env->GetSupportedPrecisions()) {
auto data_type = DeduceDataTypeFromPrecision(precision);
for (auto storage : env->GetSupportedStorages(data_type)) {
const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
OperationDef op_def;
op_def.precision = precision;
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
TensorFloat32 dst_tensor;
GPUOperation operation = CreateConvPointwise(op_def, op_attr);
ASSERT_OK(env->ExecuteGPUOperation(
{src_tensor, weights_tensor},
std::make_unique<GPUOperation>(std::move(operation)),
BHWC(1, 2, 1, 2), &dst_tensor));
ASSERT_OK(PointWiseNear({5.5f, 5.5f, 8.5f, 8.5f}, dst_tensor.data, eps));
}
}
}
TEST_F(OpenCLOperationTest, SliceMulSumConcat) {
TestExecutionEnvironment* env = &exec_env_;
TensorFloat32 src_tensor;
src_tensor.shape = BHWC(1, 2, 1, 2);
src_tensor.data = {3.0f, 4.0f, 5.0f, 6.0f};
TensorFloat32 weights_tensor;
weights_tensor.shape = BHWC(1, 2, 1, 2);
weights_tensor.data = {1.0f, 2.0f, 1.0f, 2.0f};
ConvPointwiseAttributes op_attr;
op_attr.mean = false;
op_attr.offsets.push_back(int2(0, 0));
for (auto precision : env->GetSupportedPrecisions()) {
auto data_type = DeduceDataTypeFromPrecision(precision);
for (auto storage : env->GetSupportedStorages(data_type)) {
const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
OperationDef op_def;
op_def.precision = precision;
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
TensorFloat32 dst_tensor;
GPUOperation operation = CreateConvPointwise(op_def, op_attr);
ASSERT_OK(env->ExecuteGPUOperation(
{src_tensor, weights_tensor},
std::make_unique<GPUOperation>(std::move(operation)),
BHWC(1, 2, 1, 2), &dst_tensor));
ASSERT_OK(
PointWiseNear({11.0f, 11.0f, 17.0f, 17.0f}, dst_tensor.data, eps));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7695cc84-31be-478b-a72f-9f410a73ea54 | cpp | tensorflow/tensorflow | remove_noop | tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc | tensorflow/lite/delegates/gpu/common/transformations/remove_noop_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h"
#include <algorithm>
#include <any>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
using ShouldRemoveOperation = std::function<bool(GraphFloat32* graph, Node*)>;
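// Removes the second node of a matched two-node sequence when the predicate
// accepts it, keeping the preceding node's output.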
class RemoveOperation : public SequenceTransformation {
public:
explicit RemoveOperation(ShouldRemoveOperation remove_predicate)
: remove_predicate_(std::move(remove_predicate)) {}
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
Node* prev_op_node = sequence.front();
Node* op_node = sequence.back();
if (!remove_predicate_(graph, op_node)) {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemoveFollowingNode(graph, op_node, prev_op_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
private:
ShouldRemoveOperation remove_predicate_;
};
}
std::unique_ptr<SequenceTransformation> NewRemoveSingleInputConcat() {
auto type = ToString(OperationType::CONCAT);
return absl::make_unique<RemoveOperation>(
[type](GraphFloat32* graph, Node* node) {
return type == node->operation.type;
});
}
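// A single-input ADD with no constant operand (no tensor, channel vector, or
// scalar parameter) adds nothing and can be dropped.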
std::unique_ptr<SequenceTransformation> NewRemoveSingleInputAdd() {
auto type = ToString(OperationType::ADD);
return absl::make_unique<RemoveOperation>(
[type](GraphFloat32* graph, Node* node) {
if (node->operation.type != type) {
return false;
}
auto& attr = absl::any_cast<const ElementwiseAttributes&>(
node->operation.attributes);
return !absl::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(
attr.param) &&
!absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
attr.param) &&
!absl::holds_alternative<float>(attr.param);
});
}
std::unique_ptr<SequenceTransformation> NewRemoveDegenerateUpsampling() {
auto type = ToString(OperationType::RESIZE);
return absl::make_unique<RemoveOperation>(
[type](GraphFloat32* graph, Node* node) {
if (node->operation.type != type) {
return false;
}
auto inputs = graph->FindInputs(node->id);
auto outputs = graph->FindOutputs(node->id);
return inputs.size() == 1 && outputs.size() == 1 &&
inputs[0]->tensor.shape == outputs[0]->tensor.shape;
});
}
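// Drops RESHAPE nodes whose new_shape equals the input shape, unless the
// reshape produces a graph output.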
class RemoveIdentityReshape : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::RESHAPE)) {
return {TransformStatus::SKIPPED, ""};
}
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
const auto& reshape_attr =
absl::any_cast<const ReshapeAttributes&>(node->operation.attributes);
if (input_shape != reshape_attr.new_shape) {
return {TransformStatus::SKIPPED, ""};
}
auto output = graph->FindOutputs(node->id)[0];
const auto& graph_outputs = graph->outputs();
if (std::find(graph_outputs.begin(), graph_outputs.end(), output) !=
graph_outputs.end()) {
return {TransformStatus::SKIPPED,
"Can not apply transformation when node output is graph output"};
}
absl::Status status = RemoveSimpleNodeKeepInput(graph, node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED,
"Removed reshape with input_shape == output_shape."};
}
};
std::unique_ptr<NodeTransformation> NewRemoveIdentityReshape() {
return absl::make_unique<RemoveIdentityReshape>();
}
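// Drops SLICE nodes that copy the whole tensor (zero starts, unit strides,
// ends equal to the shape), with care for slices touching graph inputs or
// outputs.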
class RemoveIdentityStridedSlice : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::SLICE)) {
return {TransformStatus::SKIPPED, ""};
}
auto input = graph->FindInputs(node->id)[0];
auto output = graph->FindOutputs(node->id)[0];
const auto& slice_attr =
absl::any_cast<const SliceAttributes&>(node->operation.attributes);
if (input->tensor.shape != output->tensor.shape) {
return {TransformStatus::SKIPPED, ""};
}
if (slice_attr.starts != BHWC(0, 0, 0, 0)) {
return {TransformStatus::SKIPPED, ""};
}
if (slice_attr.strides != BHWC(1, 1, 1, 1)) {
return {TransformStatus::SKIPPED, ""};
}
if (slice_attr.ends != output->tensor.shape) {
return {TransformStatus::SKIPPED, ""};
}
const auto& graph_outputs = graph->outputs();
const auto& graph_inputs = graph->inputs();
const bool input_is_graph_input =
std::find(graph_inputs.begin(), graph_inputs.end(), input) !=
graph_inputs.end();
const bool output_is_graph_output =
std::find(graph_outputs.begin(), graph_outputs.end(), output) !=
graph_outputs.end();
if (input_is_graph_input && output_is_graph_output) {
return {TransformStatus::SKIPPED,
"Can not apply transformation when node input is graph input and "
"node output is graph output"};
}
if (output_is_graph_output) {
if (graph->FindConsumers(input->id).size() != 1) {
return {TransformStatus::SKIPPED,
"Can not apply transformation when node output is graph output "
"and input consumed by other nodes."};
}
absl::Status status = RemoveSimpleNodeKeepOutput(graph, node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED, "Removed identity strided slice."};
}
absl::Status status = RemoveSimpleNodeKeepInput(graph, node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED, "Removed identity strided slice."};
}
};
std::unique_ptr<NodeTransformation> NewRemoveIdentityStridedSlice() {
return absl::make_unique<RemoveIdentityStridedSlice>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::UnorderedElementsAre;
TEST(RemoveSingleInputAdd, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = ElementwiseAttributes();
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(first_node, graph.nodes()[0]);
ASSERT_EQ(input, graph.values()[0]);
ASSERT_EQ(output, graph.values()[1]);
}
TEST(RemoveSingleInputAdd, DoNotTrigger_TensorHWC) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
ElementwiseAttributes attr;
attr.param = Tensor<HWC, DataType::FLOAT32>();
add_node->operation.attributes = attr;
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
}
TEST(RemoveSingleInputAdd, DoNotTrigger_LinearTensor) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
ElementwiseAttributes attr;
attr.param = Tensor<Linear, DataType::FLOAT32>();
add_node->operation.attributes = attr;
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
}
TEST(RemoveSingleInputAdd, DoNotTrigger_Scalar) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
ElementwiseAttributes attr;
attr.param = 0.5f;
add_node->operation.attributes = attr;
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
}
TEST(RemoveSingleInputAdd, DoNotTrigger_Multiple) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto node_a = graph.NewNode();
auto node_b = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(node_a->id, input->id).ok());
ASSERT_TRUE(graph.AddConsumer(node_b->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
Value* temp_a = nullptr;
Value* temp_b = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, node_a, add_node, &temp_a).ok());
ASSERT_TRUE(ConnectTwoNodes(&graph, node_b, add_node, &temp_b).ok());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
}
TEST(RemoveDegenerateUpsampling, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto node_to_remove = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, node_to_remove, &output).ok());
output->tensor.shape = BHWC(1, 5, 5, 1);
node_to_remove->operation.type = ToString(OperationType::RESIZE);
Resize2DAttributes attr;
attr.new_shape = HW(5, 5);
attr.type = SamplingType::BILINEAR;
node_to_remove->operation.attributes = attr;
Value* link = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, node_to_remove, &link).ok());
link->tensor.shape = output->tensor.shape;
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveDegenerateUpsampling();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
EXPECT_EQ(first_node, graph.nodes()[0]);
EXPECT_EQ(input, graph.values()[0]);
EXPECT_EQ(output, graph.values()[1]);
}
TEST(RemoveIdentityReshape, Smoke) {
GraphFloat32 graph;
Node* simple_node = graph.NewNode();
Node* producer_node = graph.NewNode();
Node* consumer_node = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* value0 = graph.NewValue();
Value* value1 = graph.NewValue();
value0->tensor.shape = BHWC(1, 1, 1, 11);
simple_node->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes attr;
attr.new_shape = BHWC(1, 1, 1, 11);
simple_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(producer_node->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(producer_node->id, value0->id).ok());
ASSERT_TRUE(graph.AddConsumer(simple_node->id, value0->id).ok());
ASSERT_TRUE(graph.SetProducer(simple_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(consumer_node->id, value1->id).ok());
ASSERT_TRUE(graph.SetProducer(consumer_node->id, graph_output->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(simple_node, producer_node, consumer_node));
auto transformation = NewRemoveIdentityReshape();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(producer_node, consumer_node));
EXPECT_THAT(graph.values(),
UnorderedElementsAre(graph_input, graph_output, value0));
}
TEST(RemoveIdentityStridedSlice, Smoke) {
GraphFloat32 graph;
Node* simple_node = graph.NewNode();
Node* producer_node = graph.NewNode();
Node* consumer_node = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* value0 = graph.NewValue();
Value* value1 = graph.NewValue();
value0->tensor.shape = BHWC(1, 1, 1, 11);
value1->tensor.shape = BHWC(1, 1, 1, 11);
simple_node->operation.type = ToString(OperationType::SLICE);
SliceAttributes attr;
attr.starts = BHWC(0, 0, 0, 0);
attr.strides = BHWC(1, 1, 1, 1);
attr.ends = BHWC(1, 1, 1, 11);
simple_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(producer_node->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(producer_node->id, value0->id).ok());
ASSERT_TRUE(graph.AddConsumer(simple_node->id, value0->id).ok());
ASSERT_TRUE(graph.SetProducer(simple_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(consumer_node->id, value1->id).ok());
ASSERT_TRUE(graph.SetProducer(consumer_node->id, graph_output->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(simple_node, producer_node, consumer_node));
auto transformation = NewRemoveIdentityStridedSlice();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(producer_node, consumer_node));
EXPECT_THAT(graph.values(),
UnorderedElementsAre(graph_input, graph_output, value0));
}
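// The identity SLICE is kept in this case: its output is a graph output and
// its input also feeds another consumer, so the transformation leaves the
// graph unchanged.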
TEST(RemoveIdentityStridedSlice, OutputIsGraphOutputInputConsumedByFewNodes) {
GraphFloat32 graph;
Node* first_node = graph.NewNode();
Node* slice_node = graph.NewNode();
Node* second_node = graph.NewNode();
Value* value0 = graph.NewValue();
Value* value1 = graph.NewValue();
Value* value2 = graph.NewValue();
Value* value3 = graph.NewValue();
value0->tensor.shape = BHWC(1, 1, 1, 11);
value1->tensor.shape = BHWC(1, 1, 1, 11);
value2->tensor.shape = BHWC(1, 1, 1, 11);
value3->tensor.shape = BHWC(1, 1, 1, 11);
slice_node->operation.type = ToString(OperationType::SLICE);
SliceAttributes attr;
attr.starts = BHWC(0, 0, 0, 0);
attr.strides = BHWC(1, 1, 1, 1);
attr.ends = BHWC(1, 1, 1, 11);
slice_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(first_node->id, value0->id).ok());
ASSERT_TRUE(graph.SetProducer(first_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(slice_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(second_node->id, value1->id).ok());
ASSERT_TRUE(graph.SetProducer(slice_node->id, value2->id).ok());
ASSERT_TRUE(graph.SetProducer(second_node->id, value3->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(value0));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(value2, value3));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(first_node, slice_node, second_node));
auto transformation = NewRemoveIdentityStridedSlice();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(value0));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(value2, value3));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(first_node, slice_node, second_node));
EXPECT_THAT(graph.values(),
UnorderedElementsAre(value0, value1, value2, value3));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/remove_noop_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8caf7b1e-1994-4627-83bd-4a5595f8f5cf | cpp | tensorflow/tensorflow | global_pooling_to_reduce_op | tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.cc | tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
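// Pooling is "global" when the output is 1x1 spatially, the kernel covers the
// whole input plane, and no padding is prepended or appended.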
bool IsGlobalPooling(const Pooling2DAttributes& attr, const BHWC& src_shape,
const BHWC& dst_shape) {
return dst_shape.w == 1 && dst_shape.h == 1 && attr.kernel.w == src_shape.w &&
attr.kernel.h == src_shape.h && attr.padding.appended.w == 0 &&
attr.padding.appended.h == 0 && attr.padding.prepended.w == 0 &&
attr.padding.prepended.h == 0;
}
bool IsGlobalAveragePooling(const Pooling2DAttributes& attr,
const BHWC& src_shape, const BHWC& dst_shape) {
return attr.type == tflite::gpu::PoolingType::AVERAGE &&
attr.output_indices == false &&
IsGlobalPooling(attr, src_shape, dst_shape);
}
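// Rewrites a POOLING_2D node that performs global average pooling (without
// output indices) into a MEAN reduction over the WIDTH and HEIGHT axes.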
class GlobalPoolingToReduceOp : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::POOLING_2D)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
auto outputs = graph->FindOutputs(node->id);
const auto& pool_attr =
std::any_cast<const Pooling2DAttributes&>(node->operation.attributes);
if (!IsGlobalAveragePooling(pool_attr, inputs[0]->tensor.shape,
outputs[0]->tensor.shape)) {
return {TransformStatus::SKIPPED, ""};
}
MeanAttributes mean_attr;
mean_attr.dims = {Axis::WIDTH, Axis::HEIGHT};
node->operation.attributes = mean_attr;
node->operation.type = ToString(OperationType::MEAN);
return {TransformStatus::APPLIED,
"Replaced global average pooling with mean."};
}
};
}
std::unique_ptr<NodeTransformation> NewGlobalPoolingToReduceOp() {
return std::make_unique<GlobalPoolingToReduceOp>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakeMeanFromGlobalAveragePooling, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Pooling2DAttributes attr;
attr.padding.prepended = tflite::gpu::HW(0, 0);
attr.padding.appended = tflite::gpu::HW(0, 0);
attr.strides = tflite::gpu::HW(4, 4);
attr.kernel = tflite::gpu::HW(4, 4);
attr.type = tflite::gpu::PoolingType::AVERAGE;
attr.output_indices = false;
auto pool_node = graph.NewNode();
pool_node->operation.type = ToString(OperationType::POOLING_2D);
pool_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(pool_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, pool_node, &output).ok());
output->tensor.shape = BHWC(1, 1, 1, 8);
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
auto transformation = NewGlobalPoolingToReduceOp();
ModelTransformer transformer(&graph);
transformer.Apply("global_average_pooling_to_mean", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(ToString(OperationType::MEAN), graph.nodes()[0]->operation.type);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ccf41526-44d8-4845-b376-164154cfd8fb | cpp | tensorflow/tensorflow | fuse_mul_to_conv | tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc | tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h"
#include <any>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
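// Matches a [convolution-like op, MUL] sequence where the MUL carries a
// constant per-channel vector or scalar, folds the multiplier into the op's
// weights and bias, and removes the MUL node. Applies to CONVOLUTION_2D,
// CONVOLUTION_TRANSPOSED, DEPTHWISE_CONVOLUTION and FULLY_CONNECTED nodes
// with a single runtime input.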
class MergeConvolutionWithMul : public SequenceTransformation {
public:
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
auto& conv_node = *sequence[0];
if (graph->FindInputs(conv_node.id).size() != 1) {
return {TransformStatus::DECLINED,
"This fusion is only applicable to ops with one runtime input."};
}
auto& mul_node = *sequence[1];
if (mul_node.operation.type != ToString(OperationType::MUL) ||
!mul_node.operation.attributes.has_value()) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes mul_attr =
std::any_cast<ElementwiseAttributes>(mul_node.operation.attributes);
if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
mul_attr.param) &&
!std::holds_alternative<float>(mul_attr.param)) {
return {
TransformStatus::DECLINED,
"This fuse applicable only for broadcast or scalar multiplication."};
}
if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) {
Convolution2DAttributes* conv_attr =
std::any_cast<Convolution2DAttributes>(
&conv_node.operation.attributes);
FuseConvolution2DWithMultiply(mul_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::CONVOLUTION_TRANSPOSED)) {
ConvolutionTransposedAttributes* conv_attr =
std::any_cast<ConvolutionTransposedAttributes>(
&conv_node.operation.attributes);
FuseConvolutionTransposedWithMultiply(mul_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::DEPTHWISE_CONVOLUTION)) {
DepthwiseConvolution2DAttributes* conv_attr =
std::any_cast<DepthwiseConvolution2DAttributes>(
&conv_node.operation.attributes);
FuseDepthwiseConvolution2DWithMultiply(mul_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::FULLY_CONNECTED)) {
FullyConnectedAttributes* conv_attr =
std::any_cast<FullyConnectedAttributes>(
&conv_node.operation.attributes);
FuseFullyConnectedWithMultiply(mul_attr, conv_attr);
} else {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemoveFollowingNode(graph, &mul_node, &conv_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove mul node after convolution: " +
std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
};
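// Mirror of MergeConvolutionWithMul for a [MUL, convolution-like op] sequence:
// the constant multiplier scales the op's input, so it is folded into the
// weights along the input-channel axis and the bias is left untouched.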
class MergeMulWithConvolution : public SequenceTransformation {
public:
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
auto& conv_node = *sequence[1];
if (graph->FindInputs(conv_node.id).size() != 1) {
return {TransformStatus::DECLINED,
"This fusion is only applicable to ops with one runtime input."};
}
auto& mul_node = *sequence[0];
if (mul_node.operation.type != ToString(OperationType::MUL) ||
!mul_node.operation.attributes.has_value()) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes mul_attr =
std::any_cast<ElementwiseAttributes>(mul_node.operation.attributes);
if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
mul_attr.param) &&
!std::holds_alternative<float>(mul_attr.param)) {
return {
TransformStatus::DECLINED,
"This fuse applicable only for broadcast or scalar multiplication."};
}
if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) {
Convolution2DAttributes* conv_attr =
std::any_cast<Convolution2DAttributes>(
&conv_node.operation.attributes);
FuseMultiplyWithConvolution2D(mul_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::CONVOLUTION_TRANSPOSED)) {
ConvolutionTransposedAttributes* conv_attr =
std::any_cast<ConvolutionTransposedAttributes>(
&conv_node.operation.attributes);
FuseMultiplyWithConvolutionTransposed(mul_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::DEPTHWISE_CONVOLUTION)) {
DepthwiseConvolution2DAttributes* conv_attr =
std::any_cast<DepthwiseConvolution2DAttributes>(
&conv_node.operation.attributes);
FuseMultiplyWithDepthwiseConvolution2D(mul_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::FULLY_CONNECTED)) {
FullyConnectedAttributes* conv_attr =
std::any_cast<FullyConnectedAttributes>(
&conv_node.operation.attributes);
FuseMultiplyWithFullyConnected(mul_attr, conv_attr);
} else {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemovePrecedingNode(graph, &mul_node, &conv_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove mul node after convolution: " +
std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
};
}
std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithMul() {
return std::make_unique<MergeConvolutionWithMul>();
}
std::unique_ptr<SequenceTransformation> NewMergeMulWithConvolution() {
return std::make_unique<MergeMulWithConvolution>();
}
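// Output-side fusion helpers: the multiplier for output channel d scales every
// weight that produces channel d and the matching bias entry (when a bias is
// present); a scalar multiplier applies the same factor everywhere.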
void FuseConvolution2DWithMultiply(const ElementwiseAttributes& mul_attr,
Convolution2DAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int d = 0; d < attr->weights.shape.o; ++d) {
const float multiplier = mul ? mul->data[d] : *mul_scalar;
for (int s = 0; s < attr->weights.shape.i; ++s) {
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}});
attr->weights.data[index] *= multiplier;
}
}
}
if (!attr->bias.data.empty()) {
attr->bias.data[d] *= multiplier;
}
}
}
void FuseDepthwiseConvolution2DWithMultiply(
const ElementwiseAttributes& mul_attr,
DepthwiseConvolution2DAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int g = 0; g < attr->weights.shape.o; ++g) {
for (int s = 0; s < attr->weights.shape.i; ++s) {
const int d = s * attr->weights.shape.o + g;
const float multiplier = mul ? mul->data[d] : *mul_scalar;
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{g, k_y, k_x, s}});
attr->weights.data[index] *= multiplier;
}
}
if (!attr->bias.data.empty()) {
attr->bias.data[d] *= multiplier;
}
}
}
}
void FuseConvolutionTransposedWithMultiply(
const ElementwiseAttributes& mul_attr,
ConvolutionTransposedAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int d = 0; d < attr->weights.shape.o; ++d) {
const float multiplier = mul ? mul->data[d] : *mul_scalar;
for (int s = 0; s < attr->weights.shape.i; ++s) {
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}});
attr->weights.data[index] *= multiplier;
}
}
}
if (!attr->bias.data.empty()) {
attr->bias.data[d] *= multiplier;
}
}
}
void FuseFullyConnectedWithMultiply(const ElementwiseAttributes& mul_attr,
FullyConnectedAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int d = 0; d < attr->weights.shape.o; ++d) {
const float multiplier = mul ? mul->data[d] : *mul_scalar;
for (int s = 0; s < attr->weights.shape.i; ++s) {
const int index = attr->weights.shape.LinearIndex({{d, 0, 0, s}});
attr->weights.data[index] *= multiplier;
}
if (!attr->bias.data.empty()) {
attr->bias.data[d] *= multiplier;
}
}
}
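// Input-side fusion helpers: the multiplier for input channel s scales every
// weight that reads channel s. Biases are not modified because the multiplier
// acts on the op's input rather than its output.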
void FuseMultiplyWithConvolution2D(const ElementwiseAttributes& mul_attr,
Convolution2DAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int s = 0; s < attr->weights.shape.i; ++s) {
const float multiplier = mul ? mul->data[s] : *mul_scalar;
for (int d = 0; d < attr->weights.shape.o; ++d) {
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}});
attr->weights.data[index] *= multiplier;
}
}
}
}
}
void FuseMultiplyWithDepthwiseConvolution2D(
const ElementwiseAttributes& mul_attr,
DepthwiseConvolution2DAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int s = 0; s < attr->weights.shape.i; ++s) {
const float multiplier = mul ? mul->data[s] : *mul_scalar;
for (int g = 0; g < attr->weights.shape.o; ++g) {
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{g, k_y, k_x, s}});
attr->weights.data[index] *= multiplier;
}
}
}
}
}
void FuseMultiplyWithConvolutionTransposed(
const ElementwiseAttributes& mul_attr,
ConvolutionTransposedAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int s = 0; s < attr->weights.shape.i; ++s) {
const float multiplier = mul ? mul->data[s] : *mul_scalar;
for (int d = 0; d < attr->weights.shape.o; ++d) {
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}});
attr->weights.data[index] *= multiplier;
}
}
}
}
}
void FuseMultiplyWithFullyConnected(const ElementwiseAttributes& mul_attr,
FullyConnectedAttributes* attr) {
auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param);
auto mul_scalar = std::get_if<float>(&mul_attr.param);
for (int s = 0; s < attr->weights.shape.i; ++s) {
const float multiplier = mul ? mul->data[s] : *mul_scalar;
for (int d = 0; d < attr->weights.shape.o; ++d) {
const int index = attr->weights.shape.LinearIndex({{d, 0, 0, s}});
attr->weights.data[index] *= multiplier;
}
}
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace {
TEST(MergeConvolutionWithMulTest, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Convolution2DAttributes conv_attr;
conv_attr.padding.prepended = HW(0, 0);
conv_attr.padding.appended = HW(0, 0);
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.weights.shape = OHWI(16, 3, 2, 8);
conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct());
conv_attr.bias.shape = Linear(16);
conv_attr.bias.data.resize(16);
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(16);
mul_tensor.data.resize(16);
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
auto conv_node = graph.NewNode();
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_node->operation.attributes = conv_attr;
auto mul_node = graph.NewNode();
mul_node->operation.type = ToString(OperationType::MUL);
mul_node->operation.attributes = mul_attr;
ASSERT_TRUE(graph.AddConsumer(conv_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, mul_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 4, 16);
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, conv_node, mul_node, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewMergeConvolutionWithMul();
ModelTransformer transformer(&graph);
transformer.Apply("merge_convolution_with_mul", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
}
TEST(MergeMulWithConvolutionTest, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(8);
mul_tensor.data.resize(8);
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
Convolution2DAttributes conv_attr;
conv_attr.padding.prepended = HW(0, 0);
conv_attr.padding.appended = HW(0, 0);
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.weights.shape = OHWI(16, 3, 2, 8);
conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct());
conv_attr.bias.shape = Linear(16);
conv_attr.bias.data.resize(16);
auto conv_node = graph.NewNode();
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_node->operation.attributes = conv_attr;
auto mul_node = graph.NewNode();
mul_node->operation.type = ToString(OperationType::MUL);
mul_node->operation.attributes = mul_attr;
ASSERT_TRUE(graph.AddConsumer(mul_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, conv_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 4, 16);
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, mul_node, conv_node, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewMergeMulWithConvolution();
ModelTransformer transformer(&graph);
transformer.Apply("merge_mul_with_convolution", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
}
TEST(FuseMulAfterConvolution2DTest, Smoke) {
Convolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.5f, 2.5f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(2);
mul_tensor.data = {0.5f, 2.0f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseConvolution2DWithMultiply(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.05f, 0.1f, 0.15f, 0.2f, 1.0f, 1.2f, 1.4f, 1.6f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f}));
}
TEST(FuseMulAfterDepthwiseConvolution2DTest, Smoke) {
DepthwiseConvolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(4);
attr.bias.data = {1.5f, 2.5f, 1.0f, 2.0f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(4);
mul_tensor.data = {0.5f, 2.0f, 4.0f, 0.25f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseDepthwiseConvolution2DWithMultiply(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.05f, 0.8f, 0.15f, 1.6f, 1.0f, 0.15f, 1.4f, 0.2f}));
EXPECT_THAT(attr.bias.data,
Pointwise(FloatNear(1e-6), {0.75f, 5.0f, 4.0f, 0.5f}));
}
TEST(FuseMulAfterConvolutionTransposedTest, Smoke) {
ConvolutionTransposedAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.5f, 2.5f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(2);
mul_tensor.data = {0.5f, 2.0f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseConvolutionTransposedWithMultiply(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.05f, 0.1f, 0.15f, 0.2f, 1.0f, 1.2f, 1.4f, 1.6f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f}));
}
TEST(FuseMulAfterFullyConnectedTest, Smoke) {
FullyConnectedAttributes attr;
attr.weights.shape = OHWI(2, 1, 1, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.5f, 2.5f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(2);
mul_tensor.data = {0.5f, 2.0f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseFullyConnectedWithMultiply(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.6f, 0.8f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f}));
}
TEST(FuseMulBeforeConvolution2DTest, Smoke) {
Convolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.5f, 2.5f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(2);
mul_tensor.data = {0.5f, 2.0f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseMultiplyWithConvolution2D(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f}));
}
TEST(FuseMulBeforeDepthwiseConvolution2DTest, Smoke) {
DepthwiseConvolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(4);
attr.bias.data = {1.5f, 2.5f, 1.0f, 2.0f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(4);
mul_tensor.data = {0.5f, 2.0f, 4.0f, 0.25f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseMultiplyWithDepthwiseConvolution2D(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f}));
EXPECT_THAT(attr.bias.data,
Pointwise(FloatNear(1e-6), {1.5f, 2.5f, 1.0f, 2.0f}));
}
TEST(FuseMulBeforeConvolutionTransposedTest, Smoke) {
ConvolutionTransposedAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.5f, 2.5f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(2);
mul_tensor.data = {0.5f, 2.0f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseMultiplyWithConvolutionTransposed(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f}));
}
TEST(FuseMulBeforeFullyConnectedTest, Smoke) {
FullyConnectedAttributes attr;
attr.weights.shape = OHWI(2, 1, 1, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.5f, 2.5f};
Tensor<Linear, DataType::FLOAT32> mul_tensor;
mul_tensor.shape = Linear(2);
mul_tensor.data = {0.5f, 2.0f};
ElementwiseAttributes mul_attr;
mul_attr.param = mul_tensor;
FuseMultiplyWithFullyConnected(mul_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cee23eed-0600-448d-a489-0e80e9774145 | cpp | tensorflow/tensorflow | make_fully_connected | tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.cc | tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
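// A convolution with a 1x1 kernel, unit strides and dilations, and no padding
// reduces to a matrix multiply; together with the 1x1 spatial input checked in
// the transformation below it is exactly a fully connected layer.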
bool IsConvEquivalentToFullyConnected(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 1 &&
attr.weights.shape.h == 1 &&
attr.strides == HW(1, 1) &&
attr.dilations == HW(1, 1) &&
attr.padding.prepended == HW(0, 0) &&
attr.padding.appended == HW(0, 0);
}
class MakeFullyConnectedFromConvolution : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::CONVOLUTION_2D)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
if (inputs.size() != 1) {
return {TransformStatus::SKIPPED, ""};
}
const auto& input_shape = inputs[0]->tensor.shape;
if (input_shape.w != 1 || input_shape.h != 1) {
return {TransformStatus::SKIPPED, ""};
}
const auto& conv_attr = std::any_cast<const Convolution2DAttributes&>(
node->operation.attributes);
if (!IsConvEquivalentToFullyConnected(conv_attr)) {
return {TransformStatus::SKIPPED, ""};
}
FullyConnectedAttributes fc_attr;
fc_attr.weights = conv_attr.weights;
fc_attr.bias = conv_attr.bias;
node->operation.attributes = fc_attr;
node->operation.type = ToString(OperationType::FULLY_CONNECTED);
return {TransformStatus::APPLIED,
"Replaced convolution with fully connected."};
}
};
}
std::unique_ptr<NodeTransformation> NewMakeFullyConnectedFromConvolution() {
return std::make_unique<MakeFullyConnectedFromConvolution>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakeFullyConnected, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Convolution2DAttributes attr0;
attr0.padding.prepended = HW(0, 0);
attr0.padding.appended = HW(0, 0);
attr0.strides = HW(1, 1);
attr0.dilations = HW(1, 1);
attr0.weights.shape = OHWI(16, 1, 1, 8);
attr0.bias.shape = Linear(16);
Convolution2DAttributes attr1;
attr1.padding.prepended = HW(0, 0);
attr1.padding.appended = HW(0, 0);
attr1.strides = HW(4, 4);
attr1.dilations = HW(1, 1);
attr1.weights.shape = OHWI(16, 4, 4, 16);
attr1.bias.shape = Linear(16);
Convolution2DAttributes attr2;
attr2.padding.prepended = HW(0, 0);
attr2.padding.appended = HW(0, 0);
attr2.strides = HW(1, 1);
attr2.dilations = HW(1, 1);
attr2.weights.shape = OHWI(32, 1, 1, 16);
attr2.bias.shape = Linear(32);
auto conv1x1_node0 = graph.NewNode();
conv1x1_node0->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv1x1_node0->operation.attributes = attr0;
auto conv4x4_node1 = graph.NewNode();
conv4x4_node1->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv4x4_node1->operation.attributes = attr1;
auto conv1x1_node2 = graph.NewNode();
conv1x1_node2->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv1x1_node2->operation.attributes = attr2;
ASSERT_TRUE(graph.AddConsumer(conv1x1_node0->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, conv1x1_node2, &output).ok());
output->tensor.shape = BHWC(1, 1, 1, 32);
Value* link1 = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, conv1x1_node0, conv4x4_node1, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
Value* link2 = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, conv4x4_node1, conv1x1_node2, &link2).ok());
link2->tensor.shape = BHWC(1, 1, 1, 16);
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMakeFullyConnectedFromConvolution();
ModelTransformer transformer(&graph);
transformer.Apply("make_fully_connected", transformation.get());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[1]->operation.type);
ASSERT_EQ(ToString(OperationType::FULLY_CONNECTED),
graph.nodes()[2]->operation.type);
auto fc_attr = std::any_cast<FullyConnectedAttributes>(
graph.nodes()[2]->operation.attributes);
EXPECT_EQ(OHWI(32, 1, 1, 16), fc_attr.weights.shape);
EXPECT_EQ(Linear(32), fc_attr.bias.shape);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95ca6da9-8737-4ac5-baea-d3ecf04420f1 | cpp | tensorflow/tensorflow | merge_padding_with | tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc | tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/matching.h"
namespace tflite {
namespace gpu {
namespace {
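// Folds a preceding PAD with zero content into the explicit padding attributes
// of the following 2D operation (pooling, convolution or depthwise
// convolution). Padding on the batch or channel axes is declined.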
template <typename Attr>
class MergePaddingWith2DOperation : public SequenceTransformation {
public:
explicit MergePaddingWith2DOperation(OperationType operation_type)
: operations_to_match_(
{ToString(OperationType::PAD), ToString(operation_type)}) {}
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
if (!MatchesByOperationType(sequence, operations_to_match_)) {
return {TransformStatus::SKIPPED, ""};
}
Node* pad_node = sequence.front();
Node* op_node = sequence.back();
PadAttributes pad_attr =
absl::any_cast<PadAttributes>(pad_node->operation.attributes);
if (pad_attr.type != PaddingContentType::ZEROS) {
return {TransformStatus::DECLINED, "Only Zero padding is supported."};
}
if (pad_attr.appended.c != 0 || pad_attr.prepended.c != 0 ||
pad_attr.appended.b != 0 || pad_attr.prepended.b != 0) {
return {TransformStatus::DECLINED,
"Pad has non-zero padding on non HW axis."};
}
Attr* node_attr = absl::any_cast<Attr>(&op_node->operation.attributes);
absl::Status status = RemovePrecedingNode(graph, pad_node, op_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove Pad node with Operation node: " +
std::string(status.message())};
}
node_attr->padding.appended.h += pad_attr.appended.h;
node_attr->padding.appended.w += pad_attr.appended.w;
node_attr->padding.prepended.h += pad_attr.prepended.h;
node_attr->padding.prepended.w += pad_attr.prepended.w;
return {
TransformStatus::APPLIED,
absl::StrCat("Added padding: prepended = {h = ", pad_attr.prepended.h,
", w = ", pad_attr.prepended.w, "}, appended = { h = ",
pad_attr.appended.h, ", w = ", pad_attr.appended.w, "}")};
}
private:
const std::vector<std::string> operations_to_match_;
};
}
std::unique_ptr<SequenceTransformation> NewMergePaddingWithPooling() {
return absl::make_unique<MergePaddingWith2DOperation<Pooling2DAttributes>>(
OperationType::POOLING_2D);
}
std::unique_ptr<SequenceTransformation> NewMergePaddingWithConvolution2D() {
return absl::make_unique<
MergePaddingWith2DOperation<Convolution2DAttributes>>(
OperationType::CONVOLUTION_2D);
}
std::unique_ptr<SequenceTransformation>
NewMergePaddingWithDepthwiseConvolution() {
return absl::make_unique<
MergePaddingWith2DOperation<DepthwiseConvolution2DAttributes>>(
OperationType::DEPTHWISE_CONVOLUTION);
}
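// Removes a PAD that only appends zero channels in front of a two-input ADD.
// This appears to rely on the GPU tensor layout keeping channels in slices of
// four, so it is only applied when the unpadded channel count is a multiple of
// four and the ADD has no constant argument.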
class MergePaddingWithAddOperation : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::PAD)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
if (inputs.size() != 1) {
return {TransformStatus::SKIPPED, ""};
}
const auto& input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
if (input_shape.c % 4 != 0) {
return {TransformStatus::DECLINED,
"Pad with input where src_channels % 4 != 0"};
}
PadAttributes pad_attr =
absl::any_cast<PadAttributes>(node->operation.attributes);
if (pad_attr.type != PaddingContentType::ZEROS) {
return {TransformStatus::DECLINED, "Only Zero padding is supported."};
}
if (pad_attr.prepended != BHWC(0, 0, 0, 0) || pad_attr.appended.h != 0 ||
pad_attr.appended.w != 0 || pad_attr.appended.b != 0) {
return {TransformStatus::DECLINED,
"Pad has padding not only in appended channels axis."};
}
auto pad_output = graph->FindOutputs(node->id)[0];
auto consumer_nodes = graph->FindConsumers(pad_output->id);
if (consumer_nodes.size() != 1) {
return {TransformStatus::SKIPPED, ""};
}
auto add_node = consumer_nodes[0];
auto consumer_type = OperationTypeFromString(add_node->operation.type);
if (consumer_type != OperationType::ADD) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes add_attr =
absl::any_cast<ElementwiseAttributes>(add_node->operation.attributes);
const bool is_add_hwc =
absl::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(add_attr.param);
const bool is_add_linear =
absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
add_attr.param);
const bool is_add_scalar = absl::holds_alternative<float>(add_attr.param);
if (is_add_hwc || is_add_linear || is_add_scalar) {
return {TransformStatus::SKIPPED,
"Cannot remove padding when ADD has constant argument."};
}
absl::Status status = RemovePrecedingNode(graph, node, add_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove Pad node " + std::string(status.message())};
}
return {TransformStatus::APPLIED,
"Removed padding with zeroes in appended channels dimension"};
}
};
std::unique_ptr<NodeTransformation> NewMergePaddingWithAdd() {
return absl::make_unique<MergePaddingWithAddOperation>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MergePaddingWith, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto pad_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(pad_node->id, input->id).ok());
pad_node->operation.type = ToString(OperationType::PAD);
PadAttributes attr;
attr.prepended = BHWC(0, 1, 1, 0);
attr.appended = BHWC(0, 2, 2, 0);
pad_node->operation.attributes = attr;
auto conv_node = graph.NewNode();
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node, conv_node, &temp).ok());
ASSERT_TRUE(AddOutput(&graph, conv_node, &temp).ok());
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
Convolution2DAttributes conv_attr;
conv_attr.padding.appended = HW(0, 0);
conv_attr.padding.prepended = HW(0, 0);
conv_node->operation.attributes = conv_attr;
ASSERT_EQ(2, graph.nodes().size());
auto transformation = NewMergePaddingWithConvolution2D();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(conv_node, graph.nodes()[0]);
conv_attr =
absl::any_cast<Convolution2DAttributes>(conv_node->operation.attributes);
EXPECT_EQ(HW(1, 1), conv_attr.padding.prepended);
EXPECT_EQ(HW(2, 2), conv_attr.padding.appended);
}
TEST(MergePaddingWith, MergeTwo) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto pad_node1 = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(pad_node1->id, input->id).ok());
pad_node1->operation.type = ToString(OperationType::PAD);
PadAttributes attr;
attr.prepended = BHWC(0, 1, 1, 0);
attr.appended = BHWC(0, 0, 0, 0);
pad_node1->operation.attributes = attr;
auto pad_node2 = graph.NewNode();
Value* temp1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node1, pad_node2, &temp1).ok());
pad_node2->operation.type = ToString(OperationType::PAD);
attr.prepended = BHWC(0, 0, 0, 0);
attr.appended = BHWC(0, 2, 2, 0);
pad_node2->operation.attributes = attr;
auto conv_node = graph.NewNode();
Value* temp2 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node2, conv_node, &temp2).ok());
ASSERT_TRUE(AddOutput(&graph, conv_node, &temp2).ok());
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
Convolution2DAttributes conv_attr;
conv_attr.padding.appended = HW(0, 0);
conv_attr.padding.prepended = HW(0, 0);
conv_node->operation.attributes = conv_attr;
ASSERT_EQ(3, graph.nodes().size());
auto transformation = NewMergePaddingWithConvolution2D();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(conv_node, graph.nodes()[0]);
conv_attr =
absl::any_cast<Convolution2DAttributes>(conv_node->operation.attributes);
EXPECT_EQ(HW(1, 1), conv_attr.padding.prepended);
EXPECT_EQ(HW(2, 2), conv_attr.padding.appended);
}
TEST(MergePaddingWithAdd, MergeAlignedPadding) {
GraphFloat32 graph;
auto input0 = graph.NewValue();
input0->tensor.shape = BHWC(1, 4, 4, 8);
auto input1 = graph.NewValue();
auto padded = graph.NewValue();
auto output = graph.NewValue();
auto pad_node = graph.NewNode();
pad_node->operation.type = ToString(OperationType::PAD);
PadAttributes pad_attr;
pad_attr.prepended = BHWC(0, 0, 0, 0);
pad_attr.appended = BHWC(0, 0, 0, 32);
pad_node->operation.attributes = pad_attr;
ASSERT_TRUE(graph.AddConsumer(pad_node->id, input0->id).ok());
ASSERT_TRUE(graph.SetProducer(pad_node->id, padded->id).ok());
auto add_node = graph.NewNode();
ElementwiseAttributes add_attr;
ASSERT_TRUE(graph.AddConsumer(add_node->id, padded->id).ok());
ASSERT_TRUE(graph.AddConsumer(add_node->id, input1->id).ok());
ASSERT_TRUE(graph.SetProducer(add_node->id, output->id).ok());
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMergePaddingWithAdd();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
EXPECT_EQ(add_node, graph.nodes()[0]);
}
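// The transformation must not trigger when the ADD has a constant argument;
// the PAD node and all values stay in the graph.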
TEST(MergePaddingWithAdd, DoNotTrigger_AddWithAttributes) {
GraphFloat32 graph;
auto input0 = graph.NewValue();
input0->tensor.shape = BHWC(1, 4, 4, 8);
auto input1 = graph.NewValue();
auto padded = graph.NewValue();
auto output = graph.NewValue();
auto pad_node = graph.NewNode();
pad_node->operation.type = ToString(OperationType::PAD);
PadAttributes pad_attr;
pad_attr.prepended = BHWC(0, 0, 0, 0);
pad_attr.appended = BHWC(0, 0, 0, 32);
pad_node->operation.attributes = pad_attr;
ASSERT_TRUE(graph.AddConsumer(pad_node->id, input0->id).ok());
ASSERT_TRUE(graph.SetProducer(pad_node->id, padded->id).ok());
auto add_node = graph.NewNode();
ElementwiseAttributes add_attr;
add_attr.param = Tensor<HWC, DataType::FLOAT32>();
ASSERT_TRUE(graph.AddConsumer(add_node->id, padded->id).ok());
ASSERT_TRUE(graph.AddConsumer(add_node->id, input1->id).ok());
ASSERT_TRUE(graph.SetProducer(add_node->id, output->id).ok());
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMergePaddingWithAdd();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
EXPECT_EQ(pad_node, graph.nodes()[0]);
EXPECT_EQ(add_node, graph.nodes()[1]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35a86109-ab28-4382-a184-1d795c9d4f32 | cpp | tensorflow/tensorflow | make_padding | tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc | tensorflow/lite/delegates/gpu/common/transformations/make_padding_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/make_padding.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
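// Returns true for a CONSTANT node whose tensor contains only zeros.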
bool IsConstZeros(const Node& node) {
if (node.operation.type != ToString(OperationType::CONSTANT)) {
return false;
}
auto& attr =
std::any_cast<const ConstTensorAttributes&>(node.operation.attributes);
for (auto f : attr.tensor.data) {
if (f != 0) {
return false;
}
}
return true;
}
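// Replaces a two-input CONCAT whose other input is an all-zero constant with a
// PAD on the remaining input: the constant's extent along the concat axis
// (HEIGHT, WIDTH or CHANNELS) becomes prepended padding when the constant is
// the first input and appended padding otherwise.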
class MakePaddingFromZerosConcat : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::CONCAT)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
if (inputs.size() != 2) {
return {TransformStatus::SKIPPED, ""};
}
bool first = true;
for (auto input : inputs) {
auto dep = graph->FindProducer(input->id);
if (dep != nullptr && IsConstZeros(*dep)) {
auto& concat_attr =
std::any_cast<const ConcatAttributes&>(node->operation.attributes);
PadAttributes pad_attr;
pad_attr.type = PaddingContentType::ZEROS;
pad_attr.appended = BHWC(0, 0, 0, 0);
pad_attr.prepended = BHWC(0, 0, 0, 0);
BHWC* p = first ? &pad_attr.prepended : &pad_attr.appended;
switch (concat_attr.axis) {
case Axis::HEIGHT:
p->h = input->tensor.shape.h;
break;
case Axis::WIDTH:
p->w = input->tensor.shape.w;
break;
case Axis::CHANNELS:
p->c = input->tensor.shape.c;
break;
default:
return {TransformStatus::DECLINED,
"Padding for concat axis is unsupported: " +
ToString(concat_attr.axis)};
}
absl::Status status = RemovePrecedingNode(graph, dep, node);
if (!status.ok()) {
return {TransformStatus::INVALID, "Unable to remove const node: " +
std::string(status.message())};
}
node->operation.attributes = pad_attr;
node->operation.type = ToString(OperationType::PAD);
return {TransformStatus::APPLIED, "Replaced concat with padding"};
}
first = false;
}
return {TransformStatus::SKIPPED, ""};
}
};
}
std::unique_ptr<NodeTransformation> NewMakePaddingFromConcat() {
return std::make_unique<MakePaddingFromZerosConcat>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_padding.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakePadding, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 2, 3, 5);
auto concat_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(concat_node->id, input->id).ok());
concat_node->operation.type = ToString(OperationType::CONCAT);
ConcatAttributes attr;
attr.axis = Axis::HEIGHT;
concat_node->operation.attributes = attr;
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, concat_node, &output).ok());
output->tensor.shape = BHWC(1, 7, 3, 5);
auto const_node = graph.NewNode();
const_node->operation.type = ToString(OperationType::CONSTANT);
ConstTensorAttributes const_attr;
const_attr.tensor.shape = BHWC(1, 5, 3, 5);
const_attr.tensor.data =
std::vector<float>(const_attr.tensor.shape.DimensionsProduct(), 0);
const_node->operation.attributes = const_attr;
Value* const_link = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, const_node, concat_node, &const_link).ok());
const_link->tensor.shape = const_attr.tensor.shape;
ASSERT_EQ(2, graph.nodes().size());
auto transformation = NewMakePaddingFromConcat();
ModelTransformer transformer(&graph);
transformer.Apply("make_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
auto pad_node = graph.nodes()[0];
ASSERT_EQ(ToString(OperationType::PAD), pad_node->operation.type);
auto pad_attr = std::any_cast<PadAttributes>(pad_node->operation.attributes);
EXPECT_EQ(BHWC(0, 0, 0, 0), pad_attr.prepended);
EXPECT_EQ(BHWC(0, 5, 0, 0), pad_attr.appended);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/make_padding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf520f33-15b9-46ba-bc71-c18155f70b5e | cpp | tensorflow/tensorflow | add_quant_adjustments | tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc | tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.h"
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
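// For every node output that carries quantization parameters and has at least
// one consumer, inserts a QUANTIZE_AND_DEQUANTIZE node after the producer,
// reroutes the consumers to the adjusted value and clears the original value's
// quant params, so intermediate results are re-quantized as they would be on a
// quantized backend. QUANTIZE_AND_DEQUANTIZE nodes themselves are skipped.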
class AddQuantAdjustments : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type ==
ToString(OperationType::QUANTIZE_AND_DEQUANTIZE)) {
return {TransformStatus::SKIPPED, ""};
}
bool transform_applied = false;
auto node_outputs = graph->FindOutputs(node->id);
for (auto output_value : node_outputs) {
if (!output_value->quant_params) continue;
auto consumers = graph->FindConsumers(output_value->id);
if (consumers.empty()) {
continue;
}
Node* quant_and_dequant_node;
absl::Status status =
graph->InsertNodeAfter(node->id, &quant_and_dequant_node);
if (!status.ok()) {
return {TransformStatus::INVALID, "Could not insert new node."};
}
quant_and_dequant_node->operation.type =
ToString(OperationType::QUANTIZE_AND_DEQUANTIZE);
QuantizeAndDequantizeAttributes attr;
attr.min = output_value->quant_params.value().min;
attr.max = output_value->quant_params.value().max;
attr.scale = output_value->quant_params.value().scale;
quant_and_dequant_node->operation.attributes = attr;
Value* adjusted_value = graph->NewValue();
adjusted_value->tensor = output_value->tensor;
status =
graph->SetProducer(quant_and_dequant_node->id, adjusted_value->id);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Could not create QuantizeAndDequantize node."};
}
for (auto& consumer : consumers) {
status = graph->ReplaceInput(consumer->id, output_value->id,
adjusted_value->id);
if (!status.ok()) {
return {TransformStatus::INVALID,
absl::StrCat(
"Failed to associate quant-adjusted value for consumer: ",
status.message())};
}
}
status = graph->AddConsumer(quant_and_dequant_node->id, output_value->id);
if (!status.ok()) {
return {TransformStatus::INVALID,
absl::StrCat(
"Could not associate output to QuantizeAndDequantize: ",
status.message())};
}
output_value->quant_params.reset();
transform_applied = true;
}
if (transform_applied) {
return {TransformStatus::APPLIED, ""};
}
return {TransformStatus::SKIPPED, ""};
}
};
std::unique_ptr<NodeTransformation> NewAddQuantAdjustments() {
return std::make_unique<AddQuantAdjustments>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.h"
#include <any>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "absl/types/optional.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
void AddQuantParams(std::optional<QuantizationParams>* params, float min,
float max, float scale) {
params->emplace();
params->value().min = min;
params->value().max = max;
params->value().scale = scale;
}
TEST(AddQuantAdjustments, OneNode) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
AddQuantParams(&input->quant_params, 0.0, 1.0,
0.004);
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(8);
add_tensor.data.resize(8);
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
auto add_node = graph.NewNode();
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_TRUE(graph.AddConsumer(add_node->id, input->id).ok());
Value* output = nullptr;
AddQuantParams(&input->quant_params, 0.0, 2.0,
0.008);
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 4, 8);
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
auto transformation = NewAddQuantAdjustments();
ModelTransformer transformer(&graph);
transformer.Apply("add_quant_adjustments", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
}
TEST(AddQuantAdjustments, GeneralCase) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
AddQuantParams(&input->quant_params, 0.0, 1.0,
0.004);
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(8);
add_tensor.data.resize(8);
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
auto add1_node = graph.NewNode();
add1_node->operation.type = ToString(OperationType::ADD);
add1_node->operation.attributes = add_attr;
QuantizeAndDequantizeAttributes quant_attr;
quant_attr.min = -1.0;
quant_attr.max = 1.0;
quant_attr.scale = 0.008;
auto quant_node = graph.NewNode();
quant_node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE);
quant_node->operation.attributes = quant_attr;
auto add2_node = graph.NewNode();
add2_node->operation.type = ToString(OperationType::ADD);
ASSERT_TRUE(graph.AddConsumer(add1_node->id, input->id).ok());
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, add1_node, quant_node, &link1).ok());
AddQuantParams(&link1->quant_params, 0.0, 2.0,
0.008);
link1->tensor.shape = BHWC(1, 4, 4, 8);
ASSERT_TRUE(graph.AddConsumer(add2_node->id, link1->id).ok());
Value* link2 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, quant_node, add2_node, &link2).ok());
AddQuantParams(&link2->quant_params, -1.0, 1.0,
0.008);
link2->tensor.shape = BHWC(1, 4, 4, 8);
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add2_node, &output).ok());
AddQuantParams(&output->quant_params, -1.0, 1.0,
0.008);
output->tensor.shape = BHWC(1, 4, 4, 8);
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewAddQuantAdjustments();
ModelTransformer transformer(&graph);
transformer.Apply("add_quant_adjustments", transformation.get());
EXPECT_EQ(4, graph.nodes().size());
EXPECT_EQ(5, graph.values().size());
EXPECT_EQ(ToString(OperationType::ADD), graph.nodes()[0]->operation.type);
EXPECT_EQ(ToString(OperationType::QUANTIZE_AND_DEQUANTIZE),
graph.nodes()[1]->operation.type);
EXPECT_EQ(ToString(OperationType::QUANTIZE_AND_DEQUANTIZE),
graph.nodes()[2]->operation.type);
EXPECT_EQ(quant_node->id, graph.nodes()[2]->id);
EXPECT_EQ(ToString(OperationType::ADD), graph.nodes()[3]->operation.type);
auto new_quant_attr = std::any_cast<QuantizeAndDequantizeAttributes>(
graph.nodes()[1]->operation.attributes);
EXPECT_EQ(0.0, new_quant_attr.min);
EXPECT_EQ(2.0, new_quant_attr.max);
const auto& new_quant_consumers = graph.FindConsumers(graph.values()[4]->id);
EXPECT_EQ(2, new_quant_consumers.size());
EXPECT_EQ(quant_node, new_quant_consumers[0]);
EXPECT_EQ(add2_node, new_quant_consumers[1]);
transformer.Apply("add_quant_adjustments", transformation.get());
EXPECT_EQ(4, graph.nodes().size());
EXPECT_EQ(5, graph.values().size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db830d9b-54d6-4e33-a5a4-45840555a2d1 | cpp | tensorflow/tensorflow | fuse_add_to_conv | tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc | tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.h"
#include <any>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
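// Folds a broadcast (per-channel) or scalar addend into the given bias
// tensor, allocating a zero-initialized bias first if it is empty.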
void FuseBiasWithAddAttributes(const ElementwiseAttributes& add_attr,
const int channels,
Tensor<Linear, DataType::FLOAT32>* bias) {
auto add = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&add_attr.param);
auto add_scalar = std::get_if<float>(&add_attr.param);
if (bias->data.empty()) {
*bias = MakeZeroTensor<Linear, DataType::FLOAT32>(Linear(channels));
}
for (int d = 0; d < channels; ++d) {
bias->data[d] += add ? add->data[d] : *add_scalar;
}
}
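// Sequence transformation that merges a convolution-like node
// (CONVOLUTION_2D, CONVOLUTION_TRANSPOSED, DEPTHWISE_CONVOLUTION or
// FULLY_CONNECTED) followed by an ADD node by folding the addend into the
// convolution bias and removing the ADD node.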
class MergeConvolutionWithAdd : public SequenceTransformation {
public:
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
auto& conv_node = *sequence[0];
if (graph->FindInputs(conv_node.id).size() != 1) {
return {TransformStatus::DECLINED,
"This fusion is only applicable to ops with one runtime input."};
}
auto& add_node = *sequence[1];
if (add_node.operation.type != ToString(OperationType::ADD)) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes add_attr =
std::any_cast<ElementwiseAttributes>(add_node.operation.attributes);
if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
add_attr.param) &&
!std::holds_alternative<float>(add_attr.param)) {
return {TransformStatus::DECLINED,
"This fuse applicable only for broadcast or scalar addition."};
}
if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) {
Convolution2DAttributes* conv_attr =
std::any_cast<Convolution2DAttributes>(
&conv_node.operation.attributes);
FuseConvolution2DWithAdd(add_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::CONVOLUTION_TRANSPOSED)) {
ConvolutionTransposedAttributes* conv_attr =
std::any_cast<ConvolutionTransposedAttributes>(
&conv_node.operation.attributes);
FuseConvolutionTransposedWithAdd(add_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::DEPTHWISE_CONVOLUTION)) {
DepthwiseConvolution2DAttributes* conv_attr =
std::any_cast<DepthwiseConvolution2DAttributes>(
&conv_node.operation.attributes);
FuseDepthwiseConvolution2DWithAdd(add_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::FULLY_CONNECTED)) {
FullyConnectedAttributes* conv_attr =
std::any_cast<FullyConnectedAttributes>(
&conv_node.operation.attributes);
FuseFullyConnectedWithAdd(add_attr, conv_attr);
} else {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemoveFollowingNode(graph, &add_node, &conv_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove add node after convolution: " +
std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
};
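// Folds an ADD that precedes a CONVOLUTION_2D into the convolution bias:
// each output-channel bias is incremented by the addend weighted by the
// corresponding filter taps.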
void FuseAddWithConvolution2D(const ElementwiseAttributes& add_attr,
Convolution2DAttributes* attr) {
auto add = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&add_attr.param);
auto add_scalar = std::get_if<float>(&add_attr.param);
if (attr->bias.data.empty()) {
attr->bias = MakeZeroTensor<Linear, DataType::FLOAT32>(
Linear(attr->weights.shape.o));
}
for (int d = 0; d < attr->weights.shape.o; ++d) {
float sum = 0.0f;
for (int s = 0; s < attr->weights.shape.i; ++s) {
const float add_value = add ? add->data[s] : *add_scalar;
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}});
sum += add_value * attr->weights.data[index];
}
}
}
attr->bias.data[d] += sum;
}
}
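// Sequence transformation that merges an ADD node followed by a
// CONVOLUTION_2D node, provided the convolution is ungrouped, unpadded and
// has a single runtime input, then removes the preceding ADD node.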
class MergeAddWithConvolution : public SequenceTransformation {
public:
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
auto& conv_node = *sequence[1];
if (graph->FindInputs(conv_node.id).size() != 1) {
return {TransformStatus::DECLINED,
"This fusion is only applicable to ops with one runtime input."};
}
auto& add_node = *sequence[0];
if (add_node.operation.type != ToString(OperationType::ADD)) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes add_attr =
std::any_cast<ElementwiseAttributes>(add_node.operation.attributes);
if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
add_attr.param) &&
!std::holds_alternative<float>(add_attr.param)) {
return {TransformStatus::DECLINED,
"This fuse applicable only for broadcast or scalar addition."};
}
if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) {
Convolution2DAttributes* conv_attr =
std::any_cast<Convolution2DAttributes>(
&conv_node.operation.attributes);
if (conv_attr->groups != 1) {
return {TransformStatus::DECLINED,
"This fuse not applicable for grouped convolution."};
}
if (conv_attr->padding.appended.w != 0 ||
conv_attr->padding.appended.h != 0 ||
conv_attr->padding.prepended.w != 0 ||
conv_attr->padding.prepended.h != 0) {
return {TransformStatus::DECLINED,
"This fuse applicable only for convolution that do not read "
"out of bound elements."};
}
FuseAddWithConvolution2D(add_attr, conv_attr);
} else {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemovePrecedingNode(graph, &add_node, &conv_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove mul node after convolution: " +
std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
};
}
std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithAdd() {
return std::make_unique<MergeConvolutionWithAdd>();
}
std::unique_ptr<SequenceTransformation> NewMergeAddWithConvolution() {
return std::make_unique<MergeAddWithConvolution>();
}
void FuseConvolution2DWithAdd(const ElementwiseAttributes& add_attr,
Convolution2DAttributes* attr) {
FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias);
}
void FuseDepthwiseConvolution2DWithAdd(const ElementwiseAttributes& add_attr,
DepthwiseConvolution2DAttributes* attr) {
FuseBiasWithAddAttributes(
add_attr, attr->weights.shape.o * attr->weights.shape.i, &attr->bias);
}
void FuseConvolutionTransposedWithAdd(const ElementwiseAttributes& add_attr,
ConvolutionTransposedAttributes* attr) {
FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias);
}
void FuseFullyConnectedWithAdd(const ElementwiseAttributes& add_attr,
FullyConnectedAttributes* attr) {
FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias);
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace {
TEST(MergeConvolutionWithAddTest, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Convolution2DAttributes conv_attr;
conv_attr.padding.prepended = HW(0, 0);
conv_attr.padding.appended = HW(0, 0);
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.weights.shape = OHWI(16, 3, 2, 8);
conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct());
conv_attr.bias.shape = Linear(16);
conv_attr.bias.data.resize(16);
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(16);
add_tensor.data.resize(16);
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
auto conv_node = graph.NewNode();
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_node->operation.attributes = conv_attr;
auto add_node = graph.NewNode();
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_TRUE(graph.AddConsumer(conv_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 4, 16);
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, conv_node, add_node, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewMergeConvolutionWithAdd();
ModelTransformer transformer(&graph);
transformer.Apply("merge_convolution_with_add", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
}
TEST(FuseAddAfterConvolution2DTest, Smoke) {
Convolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.1f, 1.2f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {0.3f, 0.7f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseConvolution2DWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f}));
}
TEST(FuseAddAfterDepthwiseConvolution2DTest, Smoke) {
DepthwiseConvolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(4);
attr.bias.data = {1.1f, 1.2f, 1.3f, 1.4f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(4);
add_tensor.data = {0.3f, 0.7f, 0.5f, 0.1f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseDepthwiseConvolution2DWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}));
EXPECT_THAT(attr.bias.data,
Pointwise(FloatNear(1e-6), {1.4f, 1.9f, 1.8f, 1.5f}));
}
TEST(FuseAddAfterConvolutionTransposedTest, Smoke) {
ConvolutionTransposedAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.1f, 1.2f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {0.3f, 0.7f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseConvolutionTransposedWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f}));
}
TEST(FuseAddAfterFullyConnectedTest, Smoke) {
FullyConnectedAttributes attr;
attr.weights.shape = OHWI(2, 1, 1, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.1f, 1.2f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {0.3f, 0.7f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseFullyConnectedWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6), {0.1f, 0.2f, 0.3f, 0.4f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f}));
}
TEST(MergeAddWithConvolutionTest, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 2);
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {1.0f, 2.0f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
Convolution2DAttributes conv_attr;
conv_attr.padding.prepended = HW(0, 0);
conv_attr.padding.appended = HW(0, 0);
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.weights.shape = OHWI(2, 1, 2, 2);
conv_attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
conv_attr.bias.shape = Linear(2);
conv_attr.bias.data = {1.1f, 1.2f};
auto conv_node = graph.NewNode();
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_node->operation.attributes = conv_attr;
auto add_node = graph.NewNode();
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_TRUE(graph.AddConsumer(add_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, conv_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 3, 2);
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, add_node, conv_node, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 2);
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewMergeAddWithConvolution();
ModelTransformer transformer(&graph);
transformer.Apply("merge_add_with_convolution", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
Convolution2DAttributes* conv_attr_new =
std::any_cast<Convolution2DAttributes>(
&graph.nodes()[0]->operation.attributes);
EXPECT_THAT(conv_attr_new->bias.data,
Pointwise(FloatNear(1e-6), {2.7f, 5.2f}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d31ea08a-dfb2-4f54-8dcb-90625354d274 | cpp | tensorflow/tensorflow | internal | tensorflow/lite/delegates/gpu/common/memory_management/internal.cc | tensorflow/lite/delegates/gpu/common/memory_management/internal_test.cc | #include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
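// Orders tensor usage records by decreasing tensor size.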
bool CompareBySize(const TensorUsageWithIndex<size_t>& first,
const TensorUsageWithIndex<size_t>& second) {
return first.usage_record->tensor_size > second.usage_record->tensor_size;
}
bool IsCoveringObject(const uint2& first_object, const uint2& second_object) {
return first_object.x >= second_object.x && first_object.y >= second_object.y;
}
bool IsCoveringObject(const uint3& first_object, const uint3& second_object) {
return first_object.x >= second_object.x &&
first_object.y >= second_object.y && first_object.z >= second_object.z;
}
size_t AbsDiffInElements(const size_t first_size, const size_t second_size) {
return first_size >= second_size ? first_size - second_size
: second_size - first_size;
}
size_t AbsDiffInElements(const uint2& first_size, const uint2& second_size) {
const size_t first_elements_cnt = first_size.y * first_size.x;
const size_t second_elements_cnt = second_size.y * second_size.x;
return first_elements_cnt >= second_elements_cnt
? first_elements_cnt - second_elements_cnt
: second_elements_cnt - first_elements_cnt;
}
size_t AbsDiffInElements(const uint3& first_size, const uint3& second_size) {
const size_t first_elements_cnt = first_size.z * first_size.y * first_size.x;
const size_t second_elements_cnt =
second_size.z * second_size.y * second_size.x;
return first_elements_cnt >= second_elements_cnt
? first_elements_cnt - second_elements_cnt
: second_elements_cnt - first_elements_cnt;
}
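// Builds, for each task, the list of usage records of all tensors that are
// alive during that task, sorted by decreasing tensor size.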
std::vector<TaskProfile> CalculateTaskProfiles(
const std::vector<TensorUsageRecord<size_t>>& usage_records) {
TaskId num_tasks = 0;
for (size_t i = 0; i < usage_records.size(); ++i) {
num_tasks = std::max(num_tasks, usage_records[i].last_task + 1);
}
std::vector<TaskProfile> task_profiles(num_tasks);
for (size_t rec_id = 0; rec_id < usage_records.size(); ++rec_id) {
for (TaskId task_id = usage_records[rec_id].first_task;
task_id <= usage_records[rec_id].last_task; ++task_id) {
task_profiles[task_id].emplace_back(&usage_records[rec_id], rec_id);
}
}
for (auto& task_profile : task_profiles) {
std::stable_sort(task_profile.begin(), task_profile.end(), CompareBySize);
}
return task_profiles;
}
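// For every position in the size-sorted task profiles, computes the maximum
// tensor size taken over all tasks.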
std::vector<size_t> CalculatePositionalMaximums(
const std::vector<TensorUsageRecord<size_t>>& usage_records) {
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
std::vector<size_t> positional_max;
for (const auto& task_profile : task_profiles) {
size_t i = 0;
for (; i < task_profile.size() && i < positional_max.size(); ++i) {
positional_max[i] = std::max(positional_max[i],
task_profile[i].usage_record->tensor_size);
}
for (; i < task_profile.size(); ++i) {
positional_max.push_back(task_profile[i].usage_record->tensor_size);
}
}
return positional_max;
}
}
} | #include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h"
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(TaskProfileTest, EmptyRecords) {
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles({});
EXPECT_TRUE(task_profiles.empty());
std::vector<size_t> positional_max = CalculatePositionalMaximums({});
EXPECT_TRUE(positional_max.empty());
}
TEST(TaskProfileTest, OneRecord) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1}};
const std::vector<std::vector<size_t>> correct_idx = {{0}, {0}};
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
ASSERT_EQ(task_profiles.size(), correct_idx.size());
for (size_t i = 0; i < task_profiles.size(); ++i) {
ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size());
for (size_t j = 0; j < task_profiles[i].size(); ++j) {
ASSERT_EQ(task_profiles[i][j].usage_record,
&usage_records[correct_idx[i][j]]);
ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]);
}
}
std::vector<size_t> positional_max =
CalculatePositionalMaximums(usage_records);
EXPECT_THAT(positional_max, ElementsAre(16));
}
TEST(TaskProfileTest, ChainRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1},
{8, 1, 2},
{64, 2, 3},
{32, 3, 4},
{8, 4, 5},
};
const std::vector<std::vector<size_t>> correct_idx = {{0}, {0, 1}, {2, 1},
{2, 3}, {3, 4}, {4}};
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
ASSERT_EQ(task_profiles.size(), correct_idx.size());
for (size_t i = 0; i < task_profiles.size(); ++i) {
ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size());
for (size_t j = 0; j < task_profiles[i].size(); ++j) {
ASSERT_EQ(task_profiles[i][j].usage_record,
&usage_records[correct_idx[i][j]]);
ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]);
}
}
std::vector<size_t> positional_max =
CalculatePositionalMaximums(usage_records);
EXPECT_THAT(positional_max, ElementsAre(64, 32));
}
TEST(TaskProfileTest, ComplexRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{32, 0, 1},
{32, 1, 4},
{8, 2, 5},
{16, 3, 5},
{8, 4, 5},
{64, 5, 7},
{8, 6, 8},
{8, 7, 8},
{16, 8, 9}};
const std::vector<std::vector<size_t>> correct_idx = {
{0}, {0, 1}, {1, 2}, {1, 3, 2}, {1, 3, 2, 4},
{5, 3, 2, 4}, {5, 6}, {5, 6, 7}, {8, 6, 7}, {8}};
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
ASSERT_EQ(task_profiles.size(), correct_idx.size());
for (size_t i = 0; i < task_profiles.size(); ++i) {
ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size());
for (size_t j = 0; j < task_profiles[i].size(); ++j) {
ASSERT_EQ(task_profiles[i][j].usage_record,
&usage_records[correct_idx[i][j]]);
ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]);
}
}
std::vector<size_t> positional_max =
CalculatePositionalMaximums(usage_records);
EXPECT_THAT(positional_max, ElementsAre(64, 32, 8, 8));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management/internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management/internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8bba7f0f-f871-4066-bcbc-e3cd8d4e8d1d | cpp | tensorflow/tensorflow | android_sync | tensorflow/lite/delegates/gpu/gl/android_sync.cc | tensorflow/lite/delegates/gpu/gl/android_sync_test.cc | #include "tensorflow/lite/delegates/gpu/gl/android_sync.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <EGL/eglplatform.h>
#include <GLES2/gl2.h>
#include <unistd.h>
namespace {
PFNEGLDUPNATIVEFENCEFDANDROIDPROC eglDupNativeFenceFDANDROID;
PFNEGLCREATESYNCKHRPROC eglCreateSyncKHR;
PFNEGLWAITSYNCKHRPROC eglWaitSyncKHR;
PFNEGLDESTROYSYNCKHRPROC eglDestroySyncKHR;
bool IsGlSupported() {
static const bool extensions_allowed = [] {
eglDupNativeFenceFDANDROID =
reinterpret_cast<PFNEGLDUPNATIVEFENCEFDANDROIDPROC>(
eglGetProcAddress("eglDupNativeFenceFDANDROID"));
eglCreateSyncKHR = reinterpret_cast<PFNEGLCREATESYNCKHRPROC>(
eglGetProcAddress("eglCreateSyncKHR"));
eglWaitSyncKHR = reinterpret_cast<PFNEGLWAITSYNCKHRPROC>(
eglGetProcAddress("eglWaitSyncKHR"));
eglDestroySyncKHR = reinterpret_cast<PFNEGLDESTROYSYNCKHRPROC>(
eglGetProcAddress("eglDestroySyncKHR"));
return eglWaitSyncKHR && eglCreateSyncKHR && eglDupNativeFenceFDANDROID &&
eglDestroySyncKHR;
}();
return extensions_allowed;
}
}
namespace tflite::gpu::gl {
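// Makes the GPU wait on the given Android native fence fd by wrapping it in
// an EGL sync object; returns false if the fd is invalid or the required EGL
// extensions or display are unavailable.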
bool WaitFdGpu(int fence_fd) {
if (fence_fd == -1) {
return false;
}
if (!IsGlSupported()) {
return false;
}
EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if (egl_display == EGL_NO_DISPLAY) return false;
int fd_for_egl = dup(fence_fd);
EGLint sync_attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID, (EGLint)fd_for_egl,
EGL_NONE};
EGLSync fence_sync = eglCreateSyncKHR(
egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, sync_attribs);
if (fence_sync != EGL_NO_SYNC_KHR) {
eglWaitSyncKHR(egl_display, fence_sync, 0);
return true;
} else {
close(fd_for_egl);
return false;
}
}
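// Creates an EGL native fence on the current GL command stream and returns
// its fd; falls back to glFinish() and returns -1 if that is not possible.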
int CreateFdGpu() {
if (IsGlSupported()) {
EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if (egl_display != EGL_NO_DISPLAY) {
EGLSync fence_sync =
eglCreateSyncKHR(egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
if (fence_sync != EGL_NO_SYNC_KHR) {
int fence_fd = eglDupNativeFenceFDANDROID(egl_display, fence_sync);
if (fence_fd == -1) {
eglDestroySyncKHR(egl_display, fence_sync);
} else {
return fence_fd;
}
}
}
}
glFinish();
return -1;
}
} | #include "tensorflow/lite/delegates/gpu/gl/android_sync.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
namespace tflite::gpu::gl {
TEST(AsyncBufferTest, FenceTest) {
EXPECT_EQ(CreateFdGpu(), -1);
EXPECT_FALSE(WaitFdGpu(1));
std::unique_ptr<EglEnvironment> env;
EXPECT_OK(EglEnvironment::NewEglEnvironment(&env));
int gpu_fd = CreateFdGpu();
EXPECT_GE(gpu_fd, 0);
EXPECT_TRUE(WaitFdGpu(gpu_fd));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/android_sync.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/android_sync_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c60fc57-1a59-4d6f-851c-314f43c18020 | cpp | tensorflow/tensorflow | gl_buffer | tensorflow/lite/delegates/gpu/gl/gl_buffer.cc | tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc | #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace gl {
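// Copies the contents of one GL buffer into another of identical size using
// glCopyBufferSubData.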
absl::Status CopyBuffer(const GlBuffer& read_buffer,
const GlBuffer& write_buffer) {
if (read_buffer.bytes_size() != write_buffer.bytes_size()) {
return absl::InvalidArgumentError(
"Read buffer does not match write buffer size.");
}
gl_buffer_internal::BufferBinder read_buffer_binder(GL_COPY_READ_BUFFER,
read_buffer.id());
gl_buffer_internal::BufferBinder write_buffer_binder(GL_COPY_WRITE_BUFFER,
write_buffer.id());
return TFLITE_GPU_CALL_GL(glCopyBufferSubData, GL_COPY_READ_BUFFER,
GL_COPY_WRITE_BUFFER, read_buffer.offset(),
write_buffer.offset(), read_buffer.bytes_size());
}
absl::Status GetSSBOSize(GLuint id, int64_t* size_bytes) {
GLuint prev_id;
RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glGetIntegerv,
GL_SHADER_STORAGE_BUFFER_BINDING,
reinterpret_cast<GLint*>(&prev_id)));
gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id,
prev_id);
return TFLITE_GPU_CALL_GL(glGetBufferParameteri64v, GL_SHADER_STORAGE_BUFFER,
GL_BUFFER_SIZE, size_bytes);
}
GlBuffer::GlBuffer(GlBuffer&& buffer)
: GlBuffer(buffer.target_, buffer.id_, buffer.bytes_size_, buffer.offset_,
buffer.has_ownership_) {
buffer.has_ownership_ = false;
}
GlBuffer& GlBuffer::operator=(GlBuffer&& buffer) {
if (this != &buffer) {
Invalidate();
target_ = buffer.target_;
bytes_size_ = buffer.bytes_size_;
offset_ = buffer.offset_;
has_ownership_ = buffer.has_ownership_;
id_ = buffer.id_;
buffer.has_ownership_ = false;
}
return *this;
}
GlBuffer::~GlBuffer() { Invalidate(); }
void GlBuffer::Invalidate() {
if (has_ownership_ && id_ != GL_INVALID_INDEX) {
TFLITE_GPU_CALL_GL(glDeleteBuffers, 1, &id_).IgnoreError();
id_ = GL_INVALID_INDEX;
}
}
absl::Status GlBuffer::BindToIndex(uint32_t index) const {
return TFLITE_GPU_CALL_GL(glBindBufferRange, target_, index, id_, offset_,
bytes_size_);
}
absl::Status GlBuffer::MakeView(size_t offset, size_t bytes_size,
GlBuffer* gl_buffer) {
if (offset + bytes_size > bytes_size_) {
return absl::OutOfRangeError("GlBuffer view is out of range.");
}
*gl_buffer = GlBuffer(target_, id_, bytes_size, offset_ + offset,
false);
return absl::OkStatus();
}
GlBuffer GlBuffer::MakeRef() {
return GlBuffer(target_, id_, bytes_size_, offset_,
false);
}
GlPersistentBuffer::GlPersistentBuffer(GLenum target, GLuint id,
size_t bytes_size, size_t offset,
bool has_ownership, void* data)
: GlBuffer(target, id, bytes_size, offset, has_ownership), data_(data) {}
GlPersistentBuffer::GlPersistentBuffer()
: GlPersistentBuffer(GL_INVALID_ENUM, GL_INVALID_INDEX, 0, 0, false,
nullptr) {}
GlPersistentBuffer::GlPersistentBuffer(GlPersistentBuffer&& buffer)
: GlBuffer(std::move(buffer)), data_(buffer.data_) {}
GlPersistentBuffer& GlPersistentBuffer::operator=(GlPersistentBuffer&& buffer) {
if (this != &buffer) {
data_ = buffer.data_;
GlBuffer::operator=(std::move(buffer));
}
return *this;
}
GlPersistentBuffer::~GlPersistentBuffer() {
if (!data_) return;
gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id());
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
}
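// Allocates a coherent, persistently mapped shader storage buffer through
// glBufferStorageEXT and keeps the mapped pointer for the buffer's lifetime.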
absl::Status CreatePersistentBuffer(size_t size,
GlPersistentBuffer* gl_buffer) {
PFNGLBUFFERSTORAGEEXTPROC glBufferStorageEXT = nullptr;
glBufferStorageEXT = reinterpret_cast<PFNGLBUFFERSTORAGEEXTPROC>(
eglGetProcAddress("glBufferStorageEXT"));
if (!glBufferStorageEXT) {
return absl::UnavailableError("glBufferStorageEXT is not supported");
}
gl_buffer_internal::BufferId id;
gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id());
RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(
glBufferStorageEXT, GL_SHADER_STORAGE_BUFFER, size, nullptr,
GL_MAP_COHERENT_BIT_EXT | GL_MAP_READ_BIT | GL_MAP_WRITE_BIT |
GL_MAP_PERSISTENT_BIT_EXT));
void* data = nullptr;
RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(
glMapBufferRange, &data, GL_SHADER_STORAGE_BUFFER, 0, size,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT_EXT));
*gl_buffer = GlPersistentBuffer{
GL_SHADER_STORAGE_BUFFER, id.Release(), size, 0, true, data};
return absl::OkStatus();
}
namespace gl_buffer_internal {
BufferMapper::BufferMapper(GLenum target, size_t offset, size_t bytes,
GLbitfield access)
: target_(target),
data_(glMapBufferRange(target_, offset, bytes, access)) {}
BufferMapper::~BufferMapper() {
TFLITE_GPU_CALL_GL(glUnmapBuffer, target_).IgnoreError();
}
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(Buffer, CreateReadWrite) {
std::unique_ptr<EglEnvironment> env;
ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok());
GlBuffer buffer;
ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(4, &buffer).ok());
std::vector<float> from_buffer;
ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok());
EXPECT_THAT(from_buffer, testing::ElementsAre(0, 0, 0, 0));
}
TEST(Buffer, Read) {
std::unique_ptr<EglEnvironment> env;
ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok());
std::vector<float> test = {0, 1, 2, 3};
GlBuffer buffer;
ASSERT_TRUE(CreateReadOnlyShaderStorageBuffer<float>(test, &buffer).ok());
std::vector<float> from_buffer;
ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok());
EXPECT_EQ(test, from_buffer);
}
TEST(Buffer, Write) {
std::unique_ptr<EglEnvironment> env;
ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok());
GlBuffer buffer;
ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(4, &buffer).ok());
std::vector<float> test = {0, 1, 2, 3};
ASSERT_TRUE(buffer.Write<float>(test).ok());
std::vector<float> from_buffer;
ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok());
EXPECT_EQ(test, from_buffer);
}
TEST(Buffer, View) {
std::unique_ptr<EglEnvironment> env;
ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok());
GlBuffer buffer;
ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(6, &buffer).ok());
EXPECT_TRUE(buffer.has_ownership());
EXPECT_EQ(24, buffer.bytes_size());
EXPECT_EQ(0, buffer.offset());
GlBuffer view;
ASSERT_TRUE(buffer.MakeView(4, 16, &view).ok());
EXPECT_FALSE(view.has_ownership());
EXPECT_EQ(16, view.bytes_size());
EXPECT_EQ(4, view.offset());
std::vector<float> test = {1, 2, 3, 4};
ASSERT_TRUE(view.Write<float>(test).ok());
std::vector<float> from_buffer;
ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok());
EXPECT_THAT(from_buffer, testing::ElementsAre(0, 1, 2, 3, 4, 0));
std::vector<float> from_view;
ASSERT_TRUE(AppendFromBuffer(view, &from_view).ok());
EXPECT_THAT(from_view, testing::ElementsAre(1, 2, 3, 4));
}
TEST(Buffer, SubView) {
std::unique_ptr<EglEnvironment> env;
ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok());
GlBuffer buffer;
ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(6, &buffer).ok());
GlBuffer view1;
ASSERT_TRUE(buffer.MakeView(4, 16, &view1).ok());
GlBuffer view2;
EXPECT_FALSE(view1.MakeView(1, 16, &view2).ok());
ASSERT_TRUE(view1.MakeView(2, 2, &view2).ok());
EXPECT_FALSE(view2.has_ownership());
EXPECT_EQ(2, view2.bytes_size());
EXPECT_EQ(6, view2.offset());
}
TEST(Buffer, Copy) {
std::unique_ptr<EglEnvironment> env;
ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok());
GlBuffer buffer;
ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(4, &buffer).ok());
GlBuffer view1;
ASSERT_TRUE(buffer.MakeView(4, 4, &view1).ok());
GlBuffer view2;
ASSERT_TRUE(buffer.MakeView(8, 4, &view2).ok());
ASSERT_TRUE(view1.Write<float>({1}).ok());
ASSERT_TRUE(CopyBuffer(view1, view2).ok());
std::vector<float> from_buffer;
ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok());
EXPECT_THAT(from_buffer, testing::ElementsAre(0, 1, 1, 0));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/gl_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
65b81fe1-98d0-44ae-8c4a-5403dc3066f5 | cpp | tensorflow/tensorflow | mean | tensorflow/lite/delegates/gpu/gl/kernels/mean.cc | tensorflow/lite/delegates/xnnpack/mean_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/mean.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
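// The subgroup-based reduction is used only on Vulkan 1.1+ backends that
// support subgroup arithmetic with a subgroup size of at least 32.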
bool UseSubgroupBasedImpl(const GpuInfo& gpu_info) {
return gpu_info.IsApiVulkan() &&
(gpu_info.vulkan_info.api_version_major > 1 ||
gpu_info.vulkan_info.api_version_minor >= 1) &&
gpu_info.vulkan_info.subgroup_size >= 32 &&
gpu_info.vulkan_info.supports_subgroup_arithmetic;
}
void GenerateSubgroupBasedMean(const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
int height = ctx.input_shapes[0][1];
int width = ctx.input_shapes[0][2];
int depth = ctx.input_shapes[0][3];
std::vector<Variable> parameters = {
{"input_data_0_h", height},
{"input_data_0_w", width},
{"output_data_0_h", 1},
{"output_data_0_w", 1},
};
std::string source = R"(
const uint columns_per_invocation =
($input_data_0_w$ + (gl_WorkGroupSize.x - 1))/gl_WorkGroupSize.x;
const uint rows_per_invocation =
($input_data_0_h$ + (gl_WorkGroupSize.y - 1))/gl_WorkGroupSize.y;
const uint first_row = gl_GlobalInvocationID.y*rows_per_invocation;
const uint first_col = gl_GlobalInvocationID.x*columns_per_invocation;
const uint last_row_exclusive =
min(first_row+rows_per_invocation, $input_data_0_h$);
const uint last_column_exclusive =
min(first_col+columns_per_invocation, $input_data_0_w$);
vec4 value = vec4(0);
for (uint h = first_row; h < last_row_exclusive; ++h) {
for (uint w = first_col; w < last_column_exclusive; ++w) {
value += $input_data_0[w, h, gid.z]$;
}
}
highp vec4 subgroup_sum = subgroupAdd(value);
if(subgroupElect()) {
subgroup_sums[gl_SubgroupID] = subgroup_sum;
}
memoryBarrierShared();
barrier();
if(gl_SubgroupID == 0) {
highp vec4 subtotal = vec4(0);
if (gl_SubgroupInvocationID < gl_NumSubgroups) {
subtotal = subgroup_sums[gl_SubgroupInvocationID];
}
highp vec4 grand_total = subgroupAdd(subtotal);
if(subgroupElect()) {
highp vec4 result = grand_total / $input_data_0_w$ / $input_data_0_h$;
$output_data_0[0, 0, gid.z] = result$;
}
}
)";
const uint32_t subgroup_size = ctx.gpu_info->vulkan_info.subgroup_size;
const uint32_t max_wg_size_x = ctx.gpu_info->GetMaxWorkGroupSizeForX();
const uint32_t max_wg_size_y = ctx.gpu_info->GetMaxWorkGroupSizeForY();
const uint32_t max_wg_size =
std::min(static_cast<uint32_t>(ctx.gpu_info->GetMaxWorkGroupTotalSize()),
subgroup_size * subgroup_size);
const uint32_t max_number_of_subgroups = max_wg_size / subgroup_size;
uint32_t wg_size_x = 0;
uint32_t wg_size_y = 0;
if (width * height <= max_wg_size && width <= max_wg_size_x &&
height <= max_wg_size_y) {
wg_size_x = width;
wg_size_y = height;
} else {
wg_size_x = std::min({static_cast<uint32_t>(std::sqrt(max_wg_size)),
max_wg_size_x, static_cast<uint32_t>(width)});
wg_size_y = std::min({max_wg_size / wg_size_x, max_wg_size_y,
static_cast<uint32_t>(height)});
}
std::vector<Variable> shared_variables = {
{"subgroup_sums", std::vector<float4>(max_number_of_subgroups)},
};
*generated_code = {
std::move(parameters),
{},
{std::move(shared_variables)},
uint3(wg_size_x, wg_size_y, uint32_t(DivideRoundUp(depth, 4))),
uint3(wg_size_x, wg_size_y, 1u),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::ONLY_DEFINITIONS,
};
}
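// Fallback reduction: each invocation accumulates the whole HxW plane of its
// channel slice sequentially.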
void GenerateTrivialMean(const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])}};
std::string source = R"(
highp vec4 sum = vec4(0.0);
highp float size = float($input_data_0_w$ * $input_data_0_h$);
for (int w = 0; w < $input_data_0_w$; w++) {
for (int h = 0; h < $input_data_0_h$; h++) {
sum += $input_data_0[w, h, gid.z]$;
}
}
value_0 = sum / size;
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(1, 1, 4),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
}
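// Tiled reduction: each 8x8 tile is summed by one invocation into shared
// memory, then invocation (0, 0) of the workgroup combines the per-tile
// partial sums. Used only when H and W are multiples of the tile size, the
// channel count is a multiple of 4, and the partial sums fit in 32 KiB.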
constexpr uint3 kTileSize = {8, 8, 1};
inline bool UseTiledImpl(const NodeShader::GenerationContext& ctx) {
const int h = ctx.input_shapes[0][1];
const int w = ctx.input_shapes[0][2];
const int c = ctx.input_shapes[0][3];
return h % kTileSize.y == 0 && w % kTileSize.x == 0 && c % 4 == 0 &&
(h / kTileSize.y) * (w / kTileSize.x) * c * sizeof(float) <=
32768;
}
void GenerateTiledMean(const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
const int h = ctx.input_shapes[0][1];
const int w = ctx.input_shapes[0][2];
const int s = DivideRoundUp(ctx.input_shapes[0][3], 4);
std::vector<Variable> parameters = {
{"input_data_0_h", h},
{"input_data_0_w", w},
{"tile_size_h", kTileSize.y},
{"tile_size_w", kTileSize.x},
};
std::vector<Variable> shared_variables = {
{"tile_sum",
std::vector<float4>((w / kTileSize.x) * (h / kTileSize.y) * s)}};
std::string source = R"(
ivec2 tile_size = ivec2($tile_size_w$, $tile_size_h$);
ivec2 num_tiles = ivec2($input_data_0_w$, $input_data_0_h$) / tile_size;
highp vec4 partial_sum = vec4(0.0);
for (int x = gid.x * tile_size.x; x < (gid.x + 1) * tile_size.x; ++x) {
for (int y = gid.y * tile_size.y; y < (gid.y + 1) * tile_size.y; ++y) {
partial_sum += $input_data_0[x, y, gid.z]$;
}
}
$tile_sum$[num_tiles.x * num_tiles.y * gid.z + num_tiles.x * gid.y + gid.x] = partial_sum;
memoryBarrierShared(); barrier();
if (gid.x == 0 && gid.y == 0) {
highp vec4 sum = vec4(0.0);
for (int i = 0; i < num_tiles.x * num_tiles.y; ++i) {
sum += $tile_sum$[num_tiles.x * num_tiles.y * gid.z + i];
}
highp vec4 mean = sum / float($input_data_0_w$ * $input_data_0_h$);
$output_data_0[0, 0, gid.z] = mean$;
}
)";
*generated_code = {
std::move(parameters),
{},
std::move(shared_variables),
uint3(kTileSize.x, kTileSize.y, static_cast<uint32_t>(s)),
kTileSize,
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::ONLY_DEFINITIONS,
};
}
class Mean : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const MeanAttributes&>(ctx.op_attr);
if (attr.dims != std::set<Axis>({Axis::HEIGHT, Axis::WIDTH})) {
return absl::InvalidArgumentError(
"Mean calculation is supported only for height and width.");
}
if (!(ctx.input_shapes.size() == 1 && ctx.output_shapes.size() == 1 &&
ctx.output_shapes[0][1] == 1 && ctx.output_shapes[0][2] == 1 &&
ctx.output_shapes[0][3] == ctx.input_shapes[0][3])) {
return absl::InvalidArgumentError(
"Mean calculation is supported for one input and one 1x1 output with "
"the same channel count.");
}
if (UseSubgroupBasedImpl(*ctx.gpu_info)) {
GenerateSubgroupBasedMean(ctx, generated_code);
} else if (UseTiledImpl(ctx)) {
GenerateTiledMean(ctx, generated_code);
} else {
GenerateTrivialMean(ctx, generated_code);
}
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewMeanNodeShader() {
return std::make_unique<Mean>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/reduce_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Mean, 4DReduceBatchSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({0})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceBatchKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({0})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceWidthSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceWidthKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightWidthSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1, 2})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2, 1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightWidthKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1, 2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2, 1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceChannelsSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({3})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceChannelsKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({3})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceBatchSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({0})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceBatchKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({0})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceWidthSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceWidthKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceChannelsSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({2})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceChannelsKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceBatchSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({0})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceBatchKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({0})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceChannelsSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceChannelsKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 1DSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
ReduceTester().InputShape({batch}).Axes({0}).KeepDims(false).Test(
BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 1DKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
ReduceTester().InputShape({batch}).Axes({0}).KeepDims(true).Test(
BuiltinOperator_MEAN, xnnpack_delegate.get());
}
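// Runs MEAN through an XNNPACK delegate configured with a two-thread
// threadpool to exercise the multi-threaded dispatch path.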
TEST(Mean, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1, 2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/mean.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/mean_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
453427bf-9eea-4191-ab2c-2632dbdaa00c | cpp | tensorflow/tensorflow | converter | tensorflow/lite/delegates/gpu/cl/kernels/converter.cc | tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc | #include "tensorflow/lite/delegates/gpu/cl/kernels/converter.h"
#include <algorithm>
#include <array>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_arguments.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_command_queue.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_errors.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor_type_util.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/task/arguments.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conversion.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
class OpenClConverterImpl : public TensorObjectConverter {
public:
virtual absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) = 0;
void SetGpuInfo(const GpuInfo& info) { gpu_info_ = info; }
protected:
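  // Binds the buffer and tensor kernel arguments, picks a work group size via
  // the fast tuning heuristic, and dispatches the kernel on the command queue.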
absl::Status DispatchKernel(Buffer* buffer, Tensor* tensor) {
RETURN_IF_ERROR(cl_args_.SetObjectRef("buffer", buffer));
RETURN_IF_ERROR(cl_args_.SetObjectRef("tensor", tensor));
RETURN_IF_ERROR(cl_args_.Bind(kernel_.kernel()));
const int3 grid = int3(tensor->Width() * tensor->Batch(), tensor->Height(),
tensor->Slices());
std::vector<int3> work_groups;
GetPossibleWorkGroupsConv(TuningType::kFast, gpu_info_, kernel_.info_, grid,
&work_groups);
const int3 work_group_size = work_groups[0];
const int3 work_groups_count = GetWorkGroupsCount(grid, work_group_size);
return queue_->Dispatch(kernel_, work_groups_count, work_group_size);
}
CLArguments cl_args_;
BHWC shape_;
CLKernel kernel_;
TensorDescriptor tensor_descriptor_;
GpuInfo gpu_info_;
CLCommandQueue* queue_ = nullptr;
const CLContext* context_ = nullptr;
};
bool IsSupportedDataType(DataType type) {
return type == DataType::FLOAT16 || type == DataType::FLOAT32 ||
type == DataType::INT32 || type == DataType::BOOL;
}
bool IsBHWCOpenCLBuffer(const ObjectDef& def) {
return IsSupportedDataType(def.data_type) &&
def.object_type == ObjectType::OPENCL_BUFFER &&
def.data_layout == DataLayout::BHWC;
}
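// A GPU tensor may be backed by a DHWC4 buffer, an HDWC4 image2d, a DHWC4
// image2d array, or a single BHWC image, all with a supported data type.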
bool IsOpenCLTensor(const ObjectDef& def) {
const bool is_buffer_tensor = def.object_type == ObjectType::OPENCL_BUFFER &&
def.data_layout == DataLayout::DHWC4;
const bool is_image2d_tensor =
def.object_type == ObjectType::OPENCL_TEXTURE &&
def.data_layout == DataLayout::HDWC4;
const bool is_image2d_array_tensor =
def.object_type == ObjectType::OPENCL_TEXTURE &&
def.data_layout == DataLayout::DHWC4;
const bool is_single_image_tensor =
def.object_type == ObjectType::OPENCL_TEXTURE &&
def.data_layout == DataLayout::BHWC;
return IsSupportedDataType(def.data_type) &&
(is_buffer_tensor || is_image2d_tensor || is_image2d_array_tensor ||
is_single_image_tensor);
}
absl::Status GetOpenCLMemory(const TensorObject& obj, cl_mem* memory) {
auto texture = std::get_if<OpenClTexture>(&obj);
auto buffer = std::get_if<OpenClBuffer>(&obj);
if (texture && texture->memobj) {
*memory = texture->memobj;
} else if (buffer && buffer->memobj) {
*memory = buffer->memobj;
} else {
return absl::InvalidArgumentError("Missing OpenCL object.");
}
return absl::OkStatus();
}
class TensorToTensorConverter : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsOpenCLTensor(input) && IsOpenCLTensor(output);
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
src_tensor_descriptor_ =
TensorDescriptor(input_def.object_def.data_type,
ToTensorStorageType(input_def.object_def.object_type,
input_def.object_def.data_layout),
Layout::BHWC);
dst_tensor_descriptor_ =
TensorDescriptor(output_def.object_def.data_type,
ToTensorStorageType(output_def.object_def.object_type,
output_def.object_def.data_layout),
Layout::BHWC);
GPUOperation gpu_op =
CreateTensorToTensorOp(environment->GetDevicePtr()->GetInfo(),
src_tensor_descriptor_, dst_tensor_descriptor_);
gpu_op.code_ =
"#define MAIN_FUNCTION __kernel void tensor_to_tensor\n" + gpu_op.code_;
const bool need_fp16_support =
input_def.object_def.data_type == DataType::FLOAT16 ||
output_def.object_def.data_type == DataType::FLOAT16;
if (need_fp16_support) {
gpu_op.code_ =
"#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" + gpu_op.code_;
}
queue_ = environment->queue();
context_ = &environment->context();
shape_ = BHWC(input_def.dimensions.b, input_def.dimensions.h,
input_def.dimensions.w, input_def.dimensions.c);
RETURN_IF_ERROR(gpu_op.AssembleCode(environment->device().GetInfo()));
RETURN_IF_ERROR(cl_args_.Init(environment->device().GetInfo(), nullptr,
&gpu_op.args_, &gpu_op.code_));
return environment->program_cache()->GetOrCreateCLKernel(
gpu_op.code_, "tensor_to_tensor", environment->context(),
environment->device(), &kernel_);
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
cl_mem in_memory;
RETURN_IF_ERROR(GetOpenCLMemory(input_obj, &in_memory));
cl_mem out_memory;
RETURN_IF_ERROR(GetOpenCLMemory(output_obj, &out_memory));
Tensor src_tensor;
TensorDescriptor descriptor_with_shape = src_tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, in_memory,
descriptor_with_shape, &src_tensor));
Tensor dst_tensor;
descriptor_with_shape = dst_tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, out_memory,
descriptor_with_shape, &dst_tensor));
RETURN_IF_ERROR(cl_args_.SetObjectRef("src_tensor", &src_tensor));
RETURN_IF_ERROR(cl_args_.SetObjectRef("dst_tensor", &dst_tensor));
RETURN_IF_ERROR(cl_args_.Bind(kernel_.kernel()));
const int3 grid = int3(dst_tensor.Width() * dst_tensor.Batch(),
dst_tensor.Height(), dst_tensor.Slices());
const int3 work_group_size = {16, 8, 1};
const int3 work_groups_count = GetWorkGroupsCount(grid, work_group_size);
return queue_->Dispatch(kernel_, work_groups_count, work_group_size);
}
private:
TensorDescriptor src_tensor_descriptor_;
TensorDescriptor dst_tensor_descriptor_;
};
class TensorToBHWCBufferConverter : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsOpenCLTensor(input) && IsBHWCOpenCLBuffer(output);
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
TensorStorageType src_tensor_type = ToTensorStorageType(
input_def.object_def.object_type, input_def.object_def.data_layout);
tensor_descriptor_ = TensorDescriptor(input_def.object_def.data_type,
src_tensor_type, Layout::BHWC);
BufferDescriptor buffer_desc;
buffer_desc.element_type = output_def.object_def.data_type;
buffer_desc.element_size = 1;
buffer_desc.memory_type = MemoryType::GLOBAL;
GPUOperation gpu_op =
CreateTensorToBhwcBufferOp(environment->GetDevicePtr()->GetInfo(),
tensor_descriptor_, buffer_desc);
gpu_op.code_ =
"#define MAIN_FUNCTION __kernel void tensor_to_bhwc\n" + gpu_op.code_;
if (output_def.object_def.data_type == DataType::BOOL ||
input_def.object_def.data_type == DataType::BOOL) {
gpu_op.code_ =
"#define convert_bool4(value) (convert_uchar4((value) != 0) & "
"(uchar4) 1)\n#define bool4 uchar4\n" +
gpu_op.code_;
}
const bool need_fp16_support =
input_def.object_def.data_type == DataType::FLOAT16 ||
output_def.object_def.data_type == DataType::FLOAT16;
if (need_fp16_support) {
gpu_op.code_ =
"#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" + gpu_op.code_;
}
queue_ = environment->queue();
context_ = &environment->context();
shape_ = BHWC(input_def.dimensions.b, input_def.dimensions.h,
input_def.dimensions.w, input_def.dimensions.c);
RETURN_IF_ERROR(gpu_op.AssembleCode(environment->device().GetInfo()));
RETURN_IF_ERROR(cl_args_.Init(environment->device().GetInfo(), nullptr,
&gpu_op.args_, &gpu_op.code_));
return environment->program_cache()->GetOrCreateCLKernel(
gpu_op.code_, "tensor_to_bhwc", environment->context(),
environment->device(), &kernel_);
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto output = std::get_if<OpenClBuffer>(&output_obj);
if (!output || !output->memobj) {
return absl::InvalidArgumentError(
"Missing output in tensor_to_bhwc converter");
}
cl_mem in_memory;
RETURN_IF_ERROR(GetOpenCLMemory(input_obj, &in_memory));
Tensor tensor;
TensorDescriptor descriptor_with_shape = tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, in_memory,
descriptor_with_shape, &tensor));
Buffer buffer = CreateBufferShared(output->memobj);
return DispatchKernel(&buffer, &tensor);
}
};
class BHWCBufferToTensorConverter : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsBHWCOpenCLBuffer(input) && IsOpenCLTensor(output);
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
TensorStorageType dst_tensor_type = ToTensorStorageType(
output_def.object_def.object_type, output_def.object_def.data_layout);
tensor_descriptor_ = TensorDescriptor(output_def.object_def.data_type,
dst_tensor_type, Layout::BHWC);
BufferDescriptor buffer_desc;
buffer_desc.element_type = input_def.object_def.data_type;
buffer_desc.element_size = 1;
buffer_desc.memory_type = MemoryType::GLOBAL;
GPUOperation gpu_op =
CreateBhwcBufferToTensorOp(environment->GetDevicePtr()->GetInfo(),
buffer_desc, tensor_descriptor_);
gpu_op.code_ =
"#define MAIN_FUNCTION __kernel void bhwc_to_tensor\n" + gpu_op.code_;
if (output_def.object_def.data_type == DataType::BOOL ||
input_def.object_def.data_type == DataType::BOOL) {
gpu_op.code_ =
"#define convert_bool4(value) (convert_uchar4((value) != 0) & "
"(uchar4) 1)\n#define bool4 uchar4\n" +
gpu_op.code_;
}
const bool need_fp16_support =
input_def.object_def.data_type == DataType::FLOAT16 ||
output_def.object_def.data_type == DataType::FLOAT16;
if (need_fp16_support) {
gpu_op.code_ =
"#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" + gpu_op.code_;
}
queue_ = environment->queue();
context_ = &environment->context();
shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,
output_def.dimensions.w, output_def.dimensions.c);
RETURN_IF_ERROR(gpu_op.AssembleCode(environment->device().GetInfo()));
RETURN_IF_ERROR(cl_args_.Init(environment->device().GetInfo(), nullptr,
&gpu_op.args_, &gpu_op.code_));
return environment->program_cache()->GetOrCreateCLKernel(
gpu_op.code_, "bhwc_to_tensor", environment->context(),
environment->device(), &kernel_);
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto input = std::get_if<OpenClBuffer>(&input_obj);
if (!input || !input->memobj) {
return absl::InvalidArgumentError(
"Missing input in bhwc_to_tensor converter");
}
cl_mem out_memory;
RETURN_IF_ERROR(GetOpenCLMemory(output_obj, &out_memory));
Tensor tensor;
TensorDescriptor descriptor_with_shape = tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, out_memory,
descriptor_with_shape, &tensor));
Buffer buffer = CreateBufferShared(input->memobj);
return DispatchKernel(&buffer, &tensor);
}
};
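// Returns the {width, height, depth} image region used for image reads,
// writes, and copies, based on how the storage type lays out the tensor.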
std::array<size_t, 3> CalculateTextureRegion(const TensorObjectDef& def) {
const auto& dims = def.dimensions;
std::array<size_t, 3> region = {0, 0, 1};
switch (ToTensorStorageType(def.object_def.object_type,
def.object_def.data_layout)) {
case TensorStorageType::SINGLE_TEXTURE_2D:
region[0] = static_cast<size_t>(dims.w * dims.b);
region[1] = static_cast<size_t>(dims.h);
break;
case TensorStorageType::TEXTURE_2D:
region[0] = static_cast<size_t>(dims.w * dims.b);
region[1] = static_cast<size_t>(dims.h * dims.d());
break;
case TensorStorageType::TEXTURE_ARRAY:
region[0] = static_cast<size_t>(dims.w * dims.b);
region[1] = static_cast<size_t>(dims.h);
region[2] = static_cast<size_t>(dims.d());
break;
default:
break;
}
return region;
}
bool IsOpenClTextureOrBuffer(ObjectType type) {
return type == ObjectType::OPENCL_BUFFER ||
type == ObjectType::OPENCL_TEXTURE;
}
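// Copies between two OpenCL objects that already share the same data type,
// object type, and layout, so no repacking is required.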
class TrivialCopier : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsOpenClTextureOrBuffer(input.object_type) &&
input.data_type == output.data_type &&
input.object_type == output.object_type &&
input.data_layout == output.data_layout;
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
shape_ = BHWC(input_def.dimensions.b, input_def.dimensions.h,
input_def.dimensions.w, input_def.dimensions.c);
data_type_ = input_def.object_def.data_type;
queue_ = environment->queue();
region_ = CalculateTextureRegion(output_def);
return absl::OkStatus();
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto texture_input = std::get_if<OpenClTexture>(&input_obj);
auto texture_output = std::get_if<OpenClTexture>(&output_obj);
if (texture_input && texture_output) {
return Copy(*texture_input, *texture_output);
}
auto buffer_input = std::get_if<OpenClBuffer>(&input_obj);
auto buffer_output = std::get_if<OpenClBuffer>(&output_obj);
if (buffer_input && buffer_output) {
return Copy(*buffer_input, *buffer_output);
}
return absl::InternalError("Unexpected object");
}
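  // Buffer-to-buffer copy; the byte size assumes a DHWC4 layout, i.e. the
  // channel dimension padded up to a multiple of 4.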
absl::Status Copy(const OpenClBuffer& input, const OpenClBuffer& output) {
if (input.memobj == output.memobj) {
return absl::OkStatus();
}
return GetOpenCLError(
clEnqueueCopyBuffer(queue_->queue(), input.memobj, output.memobj, 0, 0,
SizeOf(data_type_) * shape_.w * shape_.h *
AlignByN(shape_.c, 4) * shape_.b,
0, nullptr, nullptr));
}
absl::Status Copy(const OpenClTexture& input, const OpenClTexture& output) {
if (input.memobj == output.memobj) {
return absl::OkStatus();
}
size_t origin[3] = {0, 0, 0};
return GetOpenCLError(
clEnqueueCopyImage(queue_->queue(), input.memobj, output.memobj, origin,
origin, region_.data(), 0, nullptr, nullptr));
}
private:
DataType data_type_ = DataType::UNKNOWN;
std::array<size_t, 3> region_;
};
class CpuCopier : public OpenClConverterImpl {
public:
explicit CpuCopier(bool asynchronous = false) : async_(asynchronous) {}
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return input.data_type == output.data_type &&
input.data_layout == output.data_layout &&
((input.object_type == ObjectType::CPU_MEMORY &&
IsOpenClTextureOrBuffer(output.object_type)) ||
(output.object_type == ObjectType::CPU_MEMORY &&
IsOpenClTextureOrBuffer(input.object_type)));
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
region_ = CalculateTextureRegion(
input_def.object_def.object_type == ObjectType::CPU_MEMORY ? output_def
: input_def);
input_data_type_ = input_def.object_def.data_type;
output_data_type_ = output_def.object_def.data_type;
queue_ = environment->queue();
return absl::OkStatus();
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto cpu_input = std::get_if<CpuMemory>(&input_obj);
auto cpu_output = std::get_if<CpuMemory>(&output_obj);
if (cpu_input) {
if (output_data_type_ == DataType::BOOL) {
return CopyFromBoolCpu(cpu_input, output_obj);
}
auto texture_output = std::get_if<OpenClTexture>(&output_obj);
if (texture_output) {
return queue_->EnqueueWriteImage(
texture_output->memobj, int3(region_[0], region_[1], region_[2]),
cpu_input->data, async_);
}
auto buffer_output = std::get_if<OpenClBuffer>(&output_obj);
if (buffer_output) {
return queue_->EnqueueWriteBuffer(buffer_output->memobj,
cpu_input->size_bytes,
cpu_input->data, async_);
}
} else if (cpu_output) {
if (input_data_type_ == DataType::BOOL) {
return CopyToBoolCpu(input_obj, cpu_output);
}
auto texture_input = std::get_if<OpenClTexture>(&input_obj);
if (texture_input) {
return queue_->EnqueueReadImage(
texture_input->memobj, int3(region_[0], region_[1], region_[2]),
cpu_output->data, async_);
}
auto buffer_input = std::get_if<OpenClBuffer>(&input_obj);
if (buffer_input) {
return queue_->EnqueueReadBuffer(buffer_input->memobj,
cpu_output->size_bytes,
cpu_output->data, async_);
}
}
return absl::InternalError("Unexpected object");
}
private:
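  // Bool tensors are stored as one byte per element on the GPU, so transfers
  // are staged through a temporary uint8 buffer and converted to C++ bool.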
absl::Status CopyToBoolCpu(const TensorObject& tensor_obj,
const CpuMemory* cpu_memory) {
const size_t num_elements = cpu_memory->size_bytes;
std::vector<uint8_t> tmp_data(num_elements);
auto texture_input = std::get_if<OpenClTexture>(&tensor_obj);
if (texture_input) {
RETURN_IF_ERROR(queue_->EnqueueReadImage(
texture_input->memobj, int3(region_[0], region_[1], region_[2]),
tmp_data.data(), false));
} else {
auto buffer_input = std::get_if<OpenClBuffer>(&tensor_obj);
if (!buffer_input) {
return absl::InternalError("Unexpected object");
}
RETURN_IF_ERROR(queue_->EnqueueReadBuffer(
buffer_input->memobj, tmp_data.size(), tmp_data.data(), false));
}
bool* output_data = reinterpret_cast<bool*>(cpu_memory->data);
for (int i = 0; i < num_elements; ++i) {
output_data[i] = tmp_data[i];
}
return absl::OkStatus();
}
absl::Status CopyFromBoolCpu(const CpuMemory* cpu_memory,
const TensorObject& tensor_obj) {
const size_t num_elements = cpu_memory->size_bytes;
const bool* bool_data = reinterpret_cast<bool*>(cpu_memory->data);
tmp_bool_data_ = std::make_unique<std::vector<uint8_t>>();
tmp_bool_data_->reserve(num_elements);
for (int i = 0; i < num_elements; ++i) {
tmp_bool_data_->push_back(bool_data[i]);
}
auto texture_output = std::get_if<OpenClTexture>(&tensor_obj);
if (texture_output) {
return queue_->EnqueueWriteImage(texture_output->memobj,
int3(region_[0], region_[1], region_[2]),
tmp_bool_data_->data(), async_);
}
auto buffer_output = std::get_if<OpenClBuffer>(&tensor_obj);
if (buffer_output) {
return queue_->EnqueueWriteBuffer(buffer_output->memobj,
tmp_bool_data_->size(),
tmp_bool_data_->data(), async_);
}
return absl::InternalError("Unexpected object");
}
std::array<size_t, 3> region_;
bool async_;
DataType input_data_type_;
DataType output_data_type_;
std::unique_ptr<std::vector<uint8_t>> tmp_bool_data_;
};
class OpenClTensorConverterBuilder : public TensorObjectConverterBuilder {
public:
explicit OpenClTensorConverterBuilder(Environment* environment)
: environment_(environment) {}
bool IsSupported(const TensorObjectDef& input,
const TensorObjectDef& output) const final {
const auto& input_def = input.object_def;
const auto& output_def = output.object_def;
return input.dimensions == output.dimensions &&
(TrivialCopier::IsSupported(input_def, output_def) ||
TensorToTensorConverter::IsSupported(input_def, output_def) ||
CpuCopier::IsSupported(input_def, output_def) ||
TensorToBHWCBufferConverter::IsSupported(input_def, output_def) ||
BHWCBufferToTensorConverter::IsSupported(input_def, output_def));
}
absl::Status MakeConverter(
const TensorObjectDef& input, const TensorObjectDef& output,
std::unique_ptr<TensorObjectConverter>* converter) final {
std::unique_ptr<OpenClConverterImpl> impl;
const auto& input_def = input.object_def;
const auto& output_def = output.object_def;
if (TrivialCopier::IsSupported(input_def, output_def)) {
impl = std::make_unique<TrivialCopier>();
} else if (TensorToTensorConverter::IsSupported(input_def, output_def)) {
impl = std::make_unique<TensorToTensorConverter>();
} else if (CpuCopier::IsSupported(input_def, output_def)) {
      impl = std::make_unique<CpuCopier>(/*asynchronous=*/true);
} else if (TensorToBHWCBufferConverter::IsSupported(input_def,
output_def)) {
impl = std::make_unique<TensorToBHWCBufferConverter>();
} else if (BHWCBufferToTensorConverter::IsSupported(input_def,
output_def)) {
impl = std::make_unique<BHWCBufferToTensorConverter>();
} else {
return absl::UnimplementedError("Unsupported conversion");
}
RETURN_IF_ERROR(impl->Init(input, output, environment_));
impl->SetGpuInfo(environment_->GetDevicePtr()->GetInfo());
*converter = std::move(impl);
return absl::OkStatus();
}
Environment* environment_;
};
}
std::unique_ptr<TensorObjectConverterBuilder> NewConverterBuilder(
Environment* environment) {
return std::make_unique<OpenClTensorConverterBuilder>(environment);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/converter.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
inline std::vector<float> GenerateFloats(float multiplier, int size) {
std::vector<float> v(size);
for (int i = 0; i < size; ++i) {
v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1);
}
return v;
}
Dimensions ToDimensions(const BHWC& shape) {
return Dimensions(shape.b, shape.h, shape.w, shape.c);
}
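// Converts PHWC4 data back to BHWC through the OpenGL converter and compares
// the result against the CPU reference conversion.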
absl::Status RunFromTensorTest(const BHWC& shape) {
std::vector<float> input =
GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
std::vector<float> output(shape.DimensionsProduct(), 0);
RETURN_IF_ERROR(
ConvertFromPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
shape.DimensionsProduct(), &output_buffer));
auto builder = NewConverterBuilder(nullptr);
TensorObjectDef input_def;
input_def.object_def.data_type = DataType::FLOAT32;
input_def.object_def.data_layout = DataLayout::DHWC4;
input_def.object_def.object_type = ObjectType::OPENGL_SSBO;
input_def.dimensions = ToDimensions(shape);
TensorObjectDef output_def = input_def;
output_def.object_def.data_layout = DataLayout::BHWC;
std::unique_ptr<TensorObjectConverter> converter;
RETURN_IF_ERROR(builder->MakeConverter(input_def, output_def, &converter));
RETURN_IF_ERROR(converter->Convert(OpenGlBuffer{input_buffer.id()},
OpenGlBuffer{output_buffer.id()}));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(FromTensor, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
auto status = RunFromTensorTest(shape);
EXPECT_TRUE(status.ok()) << status << ", shape = " << shape.h << " "
<< shape.w << " " << shape.c;
}
}
}
}
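// Converts BHWC data to PHWC4 through the OpenGL converter and compares the
// result against the CPU reference conversion.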
absl::Status RunToTensorTest(const BHWC& shape) {
std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
RETURN_IF_ERROR(
ConvertToPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
GetElementsSizeForPHWC4(shape), &output_buffer));
auto builder = NewConverterBuilder(nullptr);
TensorObjectDef input_def;
input_def.object_def.data_type = DataType::FLOAT32;
input_def.object_def.data_layout = DataLayout::BHWC;
input_def.object_def.object_type = ObjectType::OPENGL_SSBO;
input_def.dimensions = ToDimensions(shape);
TensorObjectDef output_def = input_def;
output_def.object_def.data_layout = DataLayout::DHWC4;
std::unique_ptr<TensorObjectConverter> converter;
RETURN_IF_ERROR(builder->MakeConverter(input_def, output_def, &converter));
RETURN_IF_ERROR(converter->Convert(OpenGlBuffer{input_buffer.id()},
OpenGlBuffer{output_buffer.id()}));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(ToTensor, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
auto status = RunToTensorTest(shape);
EXPECT_TRUE(status.ok()) << status << ", shape = " << shape.h << " "
<< shape.w << " " << shape.c;
}
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
599d7b4e-302c-4d83-ba96-bccd81216d8d | cpp | tensorflow/tensorflow | concat | tensorflow/lite/delegates/gpu/gl/kernels/concat.cc | tensorflow/lite/delegates/hexagon/builders/tests/concat_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/concat.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
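// Fast path for concatenating exactly two tensors along the channel axis when
// both inputs share the same spatial size and have channel counts divisible
// by 4.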
class AlignedConcatByChannels : public NodeShader {
public:
static bool IsSupported(const GenerationContext& ctx) {
const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr);
if (attr.axis != Axis::CHANNELS) return false;
if (ctx.input_shapes.size() != 2) return false;
for (int i = 1; i < ctx.input_shapes.size(); i++) {
if (ctx.input_shapes[0][1] != ctx.input_shapes[i][1] ||
ctx.input_shapes[0][2] != ctx.input_shapes[i][2]) {
return false;
}
}
for (const auto& shape : ctx.input_shapes) {
if (shape[3] % 4 != 0) return false;
}
return true;
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (!IsSupported(ctx)) {
return absl::InvalidArgumentError(
"This case is not supported by aligned concat");
}
std::string source = R"(
if (gid.z < $border$) {
value_0 = $input_data_0[gid.x, gid.y, gid.z]$;
} else {
int z = gid.z - $border$;
value_0 = $input_data_1[gid.x, gid.y, z]$;
}
)";
    *generated_code = {
        /*parameters=*/{
            {"border", static_cast<int>(ctx.input_shapes[0][3]) / 4}},
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        /*source_code=*/std::move(source),
        /*input=*/IOStructure::ONLY_DEFINITIONS,
        /*output=*/IOStructure::AUTO,
    };
return absl::OkStatus();
}
};
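// General channel-axis concatenation: handles any number of inputs with
// arbitrary channel counts by stitching together vec4 slices in the output.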
class ConcatByAnyChannel : public NodeShader {
public:
static bool IsSupported(const GenerationContext& ctx) {
const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr);
if (attr.axis != Axis::CHANNELS) return false;
if (ctx.input_shapes.size() <= 1) return false;
for (int i = 1; i < ctx.input_shapes.size(); i++) {
if (ctx.input_shapes[0][1] != ctx.input_shapes[i][1] ||
ctx.input_shapes[0][2] != ctx.input_shapes[i][2]) {
return false;
}
}
return true;
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (!IsSupported(ctx)) {
return absl::UnimplementedError("This case is not supported by concat");
}
std::string code = DeclareVariables();
int already_written = 0;
int t = 0;
for (int current_input_id = 0; current_input_id < ctx.input_shapes.size();
current_input_id++) {
int in_ch = ctx.input_shapes[current_input_id][3];
code += PrintStartMessage(current_input_id, in_ch, already_written);
std::string input = "input_data_" + std::to_string(current_input_id);
int reminder = already_written % 4;
if (reminder == 0) {
code += AlignedCase(in_ch, input);
} else {
code += UnalignedCase(reminder, in_ch, input, &t);
}
already_written += in_ch;
}
    *generated_code = {
        /*parameters=*/{},
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/
        uint3(static_cast<int>(ctx.output_shapes[0][2]),
              static_cast<int>(ctx.output_shapes[0][1]), 1),
        /*workgroup=*/uint3(),
        /*source_code=*/std::move(code),
        /*input=*/IOStructure::ONLY_DEFINITIONS,
        /*output=*/IOStructure::ONLY_DEFINITIONS,
    };
return absl::OkStatus();
}
private:
std::string temp(int t) const { return "temp" + std::to_string(t); }
std::string DeclareVariables() const {
return R"(
int z = gid.z;
vec4 val = vec4(0.0f);
)";
}
  // Emits a GLSL comment describing the input tensor about to be appended.
  std::string PrintStartMessage(int current_input_id, int in_ch,
                                int already_written) const {
    return "// Appending input_data_" + std::to_string(current_input_id) +
           " tensor with " + std::to_string(in_ch) +
           " channels\n// Already written " +
           std::to_string(already_written) + " elements\n\n";
  }
std::string AlignedCase(int in_ch, const std::string& input) const {
std::string code;
int blocks_amount = DivideRoundUp<int>(in_ch, 4);
    code += "// Aligned case\n";
    code += "// Emitting " + std::to_string(blocks_amount) +
            " aligned vec4 write(s)\n\n";
for (int block = 0; block < blocks_amount; block++) {
code += "val = $" + input + "[gid.x, gid.y, " + std::to_string(block) +
"]$;\n" +
"$output_data_0[gid.x, gid.y, z] = val$;\n"
+ "z++; \n\n";
}
return code;
}
std::string UnalignedCase(int reminder, int in_ch, const std::string& input,
int* t) const {
    std::string code = "// Unaligned case\n";
int shift = 4 - reminder;
if (shift > in_ch) {
shift = in_ch;
}
    code += "\n// Fill the tail of the partially written vec4\n";
code += "vec4 " + temp(*t) + " = $" + input + "[gid.x, gid.y, 0]$;\n";
for (int i = 0; i < shift; i++) {
code += "val[" + std::to_string(reminder + i) + "] = " + temp(*t) + "[" +
std::to_string(i) + "];\n";
}
code += "$output_data_0[gid.x, gid.y, z - 1] = val$;\n";
(*t)++;
int left_blocks = (in_ch - shift) / 4;
if ((in_ch - shift) % 4 != 0) {
left_blocks++;
}
if (left_blocks) {
      code += "\n// Copy the remaining blocks\n";
for (int block = 0; block < left_blocks; block++) {
for (int elem = 0; elem < 4; elem++) {
if (shift % 4 == 0) {
code += "vec4 " + temp(*t) + " = $" + input + "[gid.x, gid.y, " +
std::to_string(block + 1) + "]$;\n";
(*t)++;
}
code += "val[" + std::to_string(elem) + "] = " + temp(*t - 1) + "[" +
std::to_string(shift % 4) + "];\n";
if (shift == in_ch) {
break;
}
shift++;
}
code += "$output_data_0[gid.x, gid.y, z] = val$;\n";
code += "z++;\n";
}
} else {
      code += "// No remaining blocks to copy\n";
}
return code;
}
};
class FlatConcatByHeight : public NodeShader {
public:
static bool IsSupported(const GenerationContext& ctx) {
const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr);
if (attr.axis != Axis::HEIGHT) return false;
if (ctx.input_shapes.size() <= 1) return false;
for (int i = 1; i < ctx.input_shapes.size(); i++) {
if (ctx.input_shapes[0][3] != ctx.input_shapes[i][3] ||
ctx.input_shapes[0][2] != ctx.input_shapes[i][2]) {
return false;
}
}
return true;
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
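    // Emit an if / else-if chain that selects the input tensor covering the
    // current output row and reads it with the corresponding vertical offset.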
std::string code;
std::vector<Variable> params;
for (int i = 0, shift = 0; i < ctx.input_shapes.size();
shift += ctx.input_shapes[i][1], i++) {
code += "if (";
if (i != 0) {
code += "$input_data_" + std::to_string(i - 1) + "_h$ <= gid.y && ";
}
code +=
"gid.y < " + std::to_string(shift + ctx.input_shapes[i][1]) + ") {\n";
code += "if (gid.y - " + std::to_string(shift) + " >= $input_data_" +
std::to_string(i) + "_h$) return;\n";
code += "value_0 = $input_data_" + std::to_string(i) +
"[gid.x, gid.y - " + std::to_string(shift) + ", gid.z]$;\n}\n";
if (i != ctx.input_shapes.size() - 1) {
code += " else ";
}
params.push_back({"input_data_" + std::to_string(i) + "_h",
static_cast<int>(ctx.input_shapes[i][1])});
}
    *generated_code = {
        /*parameters=*/std::move(params),
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        /*source_code=*/std::move(code),
        /*input=*/IOStructure::ONLY_DEFINITIONS,
        /*output=*/IOStructure::AUTO,
    };
return absl::OkStatus();
}
};
class FlatConcatByWidth : public NodeShader {
public:
static bool IsSupported(const GenerationContext& ctx) {
const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr);
if (attr.axis != Axis::WIDTH) return false;
if (ctx.input_shapes.size() <= 1) return false;
for (int i = 1; i < ctx.input_shapes.size(); i++) {
if (ctx.input_shapes[0][3] != ctx.input_shapes[i][3] ||
ctx.input_shapes[0][1] != ctx.input_shapes[i][1]) {
return false;
}
}
return true;
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::string code;
std::vector<Variable> params;
for (int i = 0, shift = 0; i < ctx.input_shapes.size();
shift += ctx.input_shapes[i][2], i++) {
code += "if (";
if (i != 0) {
code += "$input_data_" + std::to_string(i - 1) + "_w$ <= gid.x && ";
}
code +=
"gid.x < " + std::to_string(shift + ctx.input_shapes[i][2]) + ") {\n";
code += "if (gid.x - " + std::to_string(shift) + " >= $input_data_" +
std::to_string(i) + "_w$) return;\n";
code += "value_0 = $input_data_" + std::to_string(i) + "[gid.x - " +
std::to_string(shift) + ", gid.y, gid.z]$;\n}\n";
if (i != ctx.input_shapes.size() - 1) {
code += " else ";
}
params.push_back({"input_data_" + std::to_string(i) + "_w",
static_cast<int>(ctx.input_shapes[i][2])});
}
    *generated_code = {
        /*parameters=*/std::move(params),
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        /*source_code=*/std::move(code),
        /*input=*/IOStructure::ONLY_DEFINITIONS,
        /*output=*/IOStructure::AUTO,
    };
return absl::OkStatus();
}
};
class FlatConcat : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (FlatConcatByHeight::IsSupported(ctx)) {
return flat_concat_by_height_.GenerateCode(ctx, generated_code);
}
if (FlatConcatByWidth::IsSupported(ctx)) {
return flat_concat_by_width_.GenerateCode(ctx, generated_code);
}
return absl::InvalidArgumentError(
"This case is not supported by flat concat");
}
private:
FlatConcatByHeight flat_concat_by_height_;
FlatConcatByWidth flat_concat_by_width_;
};
}
std::unique_ptr<NodeShader> NewAlignedConcatNodeShader() {
return std::make_unique<AlignedConcatByChannels>();
}
std::unique_ptr<NodeShader> NewConcatNodeShader() {
return std::make_unique<ConcatByAnyChannel>();
}
std::unique_ptr<NodeShader> NewFlatConcatNodeShader() {
return std::make_unique<FlatConcat>();
}
}
}
} | #include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
namespace {
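// Fills `result` with `size` values drawn uniformly from [min, max), first
// scaling the raw std::minstd_rand output into [0, 1).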
void GenerateUniformRandomVector(int size, float min, float max,
std::minstd_rand* random_engine,
std::vector<float>* result) {
result->resize(size);
for (int i = 0; i < size; i++) {
float random_value_scaled_0_1 =
(*random_engine)() *
(1.0f / static_cast<float>(std::minstd_rand::modulus));
(*result)[i] = min + (max - min) * random_value_scaled_0_1;
}
}
}
class QuantizedConcatenationOpModel : public SingleOpModelWithHexagon {
public:
QuantizedConcatenationOpModel(const std::vector<TensorData>& input_template,
int axis, const TensorData& output_template) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < input_template.size(); ++i) {
all_input_shapes.push_back(input_template[i].shape);
AddInput(input_template[i]);
}
    output_ = AddOutput({output_template.type, /*shape=*/{},
                         output_template.min, output_template.max});
SetBuiltinOp(
BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions,
CreateConcatenationOptions(builder_, axis, ActivationFunctionType_NONE)
.Union());
BuildInterpreter(all_input_shapes);
}
template <typename T>
void SetInput(int index, std::vector<float> data) {
QuantizeAndPopulate<T>(index, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
private:
int output_;
};
template <typename integer_type, TensorType tensor_dtype>
void FourInputsQuantizedSameRangeImpl() {
QuantizedConcatenationOpModel m0({{tensor_dtype, {2, 1, 1, 2}, -12.7, 12.8},
{tensor_dtype, {2, 1, 1, 2}, -12.7, 12.8},
{tensor_dtype, {2, 1, 1, 2}, -12.7, 12.8},
{tensor_dtype, {2, 1, 1, 2}, -12.7, 12.8}},
                                   /*axis=*/3, {tensor_dtype, {}, -12.7, 12.8});
m0.SetInput<integer_type>(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput<integer_type>(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput<integer_type>(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput<integer_type>(3, {1.3f, 3.3f, 4.3f, 7.3f});
m0.ApplyDelegateAndInvoke();
EXPECT_THAT(m0.GetDequantizedOutput<integer_type>(),
ElementsAreArray(ArrayFloatNear(
{
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
},
0.2)));
}
TEST(QuantizedConcatenationOpModel, FourInputsQuantizedSameRange_UInt8) {
FourInputsQuantizedSameRangeImpl<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedConcatenationOpModel, FourInputsQuantizedSameRange_Int8) {
FourInputsQuantizedSameRangeImpl<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void TwoInputsNegativeAxisImpl() {
auto tensor0 = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
auto tensor1 = {7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f};
QuantizedConcatenationOpModel m0({{tensor_dtype,
{2, 3},
std::numeric_limits<integer_type>::min(),
std::numeric_limits<integer_type>::max()},
{tensor_dtype,
{2, 3},
std::numeric_limits<integer_type>::min(),
std::numeric_limits<integer_type>::max()}},
                                   /*axis=*/-2,
{tensor_dtype,
{},
std::numeric_limits<integer_type>::min(),
std::numeric_limits<integer_type>::max()});
m0.SetInput<integer_type>(0, tensor0);
m0.SetInput<integer_type>(1, tensor1);
m0.ApplyDelegateAndInvoke();
EXPECT_THAT(m0.GetOutput<integer_type>(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}));
}
TEST(QuantizedConcatenationOpModel, TwoInputsNegativeAxis_UInt8) {
TwoInputsNegativeAxisImpl<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedConcatenationOpModel, TwoInputsNegativeAxis_Int8) {
TwoInputsNegativeAxisImpl<int8_t, TensorType_INT8>();
}
TEST(QuantizedConcatenationOpModel, FourInputsQuantizedMixedRange) {
QuantizedConcatenationOpModel m0(
{{TensorType_UINT8, {2, 1, 1, 2}, -10.7, 10.8},
{TensorType_UINT8, {2, 1, 1, 2}, 0, 12.8},
{TensorType_UINT8, {2, 1, 1, 2}, -11, 11.8},
{TensorType_UINT8, {2, 1, 1, 2}, 0, 7.4}},
      /*axis=*/3, {TensorType_UINT8, {}, -12.7, 12.8});
m0.SetInput<uint8_t>(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput<uint8_t>(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput<uint8_t>(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput<uint8_t>(3, {1.3f, 3.3f, 4.3f, 7.3f});
m0.ApplyDelegateAndInvoke();
EXPECT_THAT(m0.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
},
0.2)));
}
TEST(QuantizedConcatenationOpModel, FourInputsAxis2_UInt8) {
QuantizedConcatenationOpModel m0({{TensorType_UINT8, {2, 1, 2}, -10.7, 10.8},
{TensorType_UINT8, {2, 1, 2}, 0, 12.8},
{TensorType_UINT8, {2, 1, 2}, -11, 11.8},
{TensorType_UINT8, {2, 1, 2}, 0, 7.4}},
                                   /*axis=*/2,
{TensorType_UINT8, {2, 1, 2}, -1., 1.});
m0.SetInput<uint8_t>(0, {1.0f, -3.0f, -4.0f, -7.0f});
m0.SetInput<uint8_t>(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput<uint8_t>(2, {1.2f, -3.2f, -4.2f, 7.2f});
m0.SetInput<uint8_t>(3, {1.3f, 3.3f, 4.3f, 7.3f});
m0.ApplyDelegateAndInvoke();
EXPECT_THAT(m0.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
1.0f, -1.0f, 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f,
},
0.2)));
}
TEST(QuantizedConcatenationOpModel, FourInputsQuantizedMixedRange_LargeData) {
std::vector<float> params1 = {0, 11.30514f};
std::vector<float> params2 = {0, 10.38416f};
std::vector<float> params3 = {0, 13.52495f};
std::vector<float> params4 = {0, 5.883808f};
std::vector<float> params_output = {0, 13.52495f};
QuantizedConcatenationOpModel m0(
{{TensorType_UINT8, {1, 35, 35, 64}, params1[0], params1[1]},
{TensorType_UINT8, {1, 35, 35, 64}, params2[0], params2[1]},
{TensorType_UINT8, {1, 35, 35, 96}, params3[0], params3[1]},
{TensorType_UINT8, {1, 35, 35, 32}, params4[0], params4[1]}},
      /*axis=*/3, {TensorType_UINT8, {}, params_output[0], params_output[1]});
std::minstd_rand random_engine;
std::vector<float> data1, data2, data3, data4;
int num_elements_multiplier = 1 * 35 * 35;
GenerateUniformRandomVector(num_elements_multiplier * 64, params1[0],
params1[1], &random_engine, &data1);
GenerateUniformRandomVector(num_elements_multiplier * 64, params2[0],
params2[1], &random_engine, &data2);
GenerateUniformRandomVector(num_elements_multiplier * 96, params3[0],
params3[1], &random_engine, &data3);
GenerateUniformRandomVector(num_elements_multiplier * 32, params4[0],
params4[1], &random_engine, &data4);
m0.SetInput<uint8_t>(0, data1);
m0.SetInput<uint8_t>(1, data2);
m0.SetInput<uint8_t>(2, data3);
m0.SetInput<uint8_t>(3, data4);
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
std::vector<float> reference_output = m0.GetDequantizedOutput<uint8_t>();
m0.ApplyDelegateAndInvoke();
EXPECT_THAT(m0.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output,
0.1)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/concat.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/concat_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d9deaec4-1a4f-4ebb-9b2a-34857a5e3323 | cpp | tensorflow/tensorflow | phwc4_to_bhwc | tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc | tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc | #include "tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/converters/util.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
absl::Status ConverterPhwc4ToBhwc::Create(ConverterPhwc4ToBhwc* converter) {
uint3 workgroup_size = uint3(4, 4, 4);
std::string shader_source = GetShaderHeader(workgroup_size) + R"(
layout(std430) buffer;
precision highp float;
layout(binding = 0) readonly buffer B0 {
vec4 elements[];
} input_data;
layout(binding = 1) writeonly buffer B1 {
float elements[];
} output_data;
uniform ivec4 sizes_;
void main() {
ivec3 gid = ivec3(gl_GlobalInvocationID.xyz);
if (gid.x >= sizes_.x || gid.y >= sizes_.y || gid.z >= sizes_.z) {
return;
}
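  // PHWC4 packs channels in groups of 4: slice gid.z/4 at (x, y) holds the
  // vec4 whose component gid.z%4 is this channel; write it to the planar
  // BHWC output at ((y * W + x) * C + c).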
output_data.elements[(gid.y * sizes_.x + gid.x) * sizes_.z + gid.z] = input_data.elements[(gid.z / 4 * sizes_.y + gid.y) * sizes_.x + gid.x][gid.z % 4];
})";
GlShader shader;
RETURN_IF_ERROR(
GlShader::CompileShader(GL_COMPUTE_SHADER, shader_source, &shader));
GlProgram program;
RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
*converter = ConverterPhwc4ToBhwc(std::move(program), workgroup_size);
return absl::OkStatus();
}
absl::Status ConverterPhwc4ToBhwc::Convert(const BHWC& shape,
const GlBuffer& source,
CommandQueue* command_queue,
GlBuffer* destination) {
if (source.bytes_size() < BytesForPHWC4(shape)) {
return absl::InvalidArgumentError(
"Phwc4ToBhwc: Input data size does not match expected size.");
}
if (destination->bytes_size() < BytesForBHWC(shape)) {
return absl::InvalidArgumentError(
"Phwc4ToBhwc: output data size does not match expected size.");
}
if (shape.b != 1) {
return absl::UnimplementedError(
"Phwc4ToBhwc: Batch size is not equal to 1.");
}
uint3 workload = uint3(shape.w, shape.h, shape.c);
uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_);
RETURN_IF_ERROR(program_.SetParameter(
{"sizes_",
int4(static_cast<int32_t>(workload.x), static_cast<int32_t>(workload.y),
static_cast<int32_t>(workload.z), 0)}));
RETURN_IF_ERROR(source.BindToIndex(0));
RETURN_IF_ERROR(destination->BindToIndex(1));
if (command_queue) {
return command_queue->Dispatch(program_, num_workgroups);
}
return program_.Dispatch(num_workgroups);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
inline std::vector<float> GenerateFloats(float multiplier, int size) {
std::vector<float> v(size);
for (int i = 0; i < size; ++i) {
v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1);
}
return v;
}
absl::Status RunTest(const BHWC& shape) {
std::vector<float> input =
GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
std::vector<float> output(shape.DimensionsProduct(), 0);
RETURN_IF_ERROR(
ConvertFromPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
shape.DimensionsProduct(), &output_buffer));
ConverterPhwc4ToBhwc converter;
RETURN_IF_ERROR(ConverterPhwc4ToBhwc::Create(&converter));
RETURN_IF_ERROR(
converter.Convert(shape, input_buffer, nullptr, &output_buffer));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(Phwc4ToHwc, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
EXPECT_TRUE(RunTest(shape).ok())
<< shape.h << " " << shape.w << " " << shape.c;
}
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99489848-76b4-4607-b069-058084f822dd | cpp | tensorflow/tensorflow | bhwc_to_phwc4 | tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc | tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc | #include "tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/converters/util.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
absl::Status ConverterBhwcToPhwc4::Create(ConverterBhwcToPhwc4* converter) {
uint3 workgroup_size = uint3(4, 4, 4);
std::string shader_source = GetShaderHeader(workgroup_size) + R"(
layout(std430) buffer;
precision highp float;
layout(binding = 0) readonly buffer B0 {
float elements[];
} input_data;
layout(binding = 1) writeonly buffer B1 {
vec4 elements[];
} output_data;
uniform ivec4 sizes_;
void main() {
ivec3 gid = ivec3(gl_GlobalInvocationID.xyz);
if (gid.x >= sizes_.x || gid.y >= sizes_.y || gid.z >= sizes_.z) {
return;
}
vec4 v = vec4(0);
int dst_channel = gid.z * 4;
int index = (gid.y * sizes_.x + gid.x) * sizes_.w + dst_channel;
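  // Gather up to 4 consecutive channels into one vec4 slice; channels past
  // sizes_.w (the real channel count) stay zero-padded.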
for (int i = 0; i < 4; ++i, ++index, ++dst_channel) {
if (dst_channel >= sizes_.w) break;
v[i] = input_data.elements[index];
}
output_data.elements[(gid.z * sizes_.y + gid.y) * sizes_.x + gid.x] = v;
})";
GlShader shader;
RETURN_IF_ERROR(
GlShader::CompileShader(GL_COMPUTE_SHADER, shader_source, &shader));
GlProgram program;
RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
*converter = ConverterBhwcToPhwc4(std::move(program), workgroup_size);
return absl::OkStatus();
}
absl::Status ConverterBhwcToPhwc4::Convert(const BHWC& shape,
const GlBuffer& source,
CommandQueue* command_queue,
GlBuffer* destination) {
if (source.bytes_size() < BytesForBHWC(shape)) {
return absl::InvalidArgumentError(
"BhwcToPhwc4: Input data size does not match expected size.");
}
if (destination->bytes_size() < BytesForPHWC4(shape)) {
return absl::InvalidArgumentError(
"BhwcToPhwc4: output data size does not match expected size.");
}
if (shape.b != 1) {
return absl::UnimplementedError(
"BhwcToPhwc4: Batch size is not equal to 1.");
}
uint3 workload = uint3(shape.w, shape.h, DivideRoundUp(shape.c, 4));
uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_);
RETURN_IF_ERROR(program_.SetParameter(
{"sizes_",
int4(static_cast<int32_t>(workload.x), static_cast<int32_t>(workload.y),
static_cast<int32_t>(workload.z), static_cast<int32_t>(shape.c))}));
RETURN_IF_ERROR(source.BindToIndex(0));
RETURN_IF_ERROR(destination->BindToIndex(1));
if (command_queue) {
return command_queue->Dispatch(program_, num_workgroups);
}
return program_.Dispatch(num_workgroups);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
inline std::vector<float> GenerateFloats(float multiplier, int size) {
std::vector<float> v(size);
for (int i = 0; i < size; ++i) {
v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1);
}
return v;
}
absl::Status RunTest(const BHWC& shape) {
std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
RETURN_IF_ERROR(
ConvertToPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
GetElementsSizeForPHWC4(shape), &output_buffer));
ConverterBhwcToPhwc4 converter;
RETURN_IF_ERROR(ConverterBhwcToPhwc4::Create(&converter));
RETURN_IF_ERROR(
converter.Convert(shape, input_buffer, nullptr, &output_buffer));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(HwcToPhwc4, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
EXPECT_TRUE(RunTest(shape).ok())
<< shape.h << " " << shape.w << " " << shape.c;
}
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
00ced7c9-ea4d-4995-adb2-b9b085a86dfe | cpp | tensorflow/tensorflow | compiled_node | tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc | tensorflow/lite/delegates/gpu/gl/compiler/compiled_node_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include <algorithm>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/rename.h"
namespace tflite {
namespace gpu {
namespace gl {
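// Appends attr's generated code to merged_attr, renaming any parameter or
// object whose name collides with one already present in the merged code.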
absl::Status MergeCode(CompiledNodeAttributes* attr,
CompiledNodeAttributes* merged_attr) {
absl::flat_hash_set<std::string> known_names;
for (const auto& parameter : merged_attr->code.parameters) {
known_names.insert(parameter.name);
}
for (const auto& object : merged_attr->code.objects) {
known_names.insert(object.first);
}
int index =
merged_attr->code.parameters.size() + merged_attr->code.objects.size();
RETURN_IF_ERROR(Rename(
[&](absl::string_view name) -> std::string {
std::string n(name.begin(), name.end());
std::string ret = n;
while (known_names.find(ret) != known_names.end()) {
ret = absl::StrCat(n, index++);
}
known_names.insert(ret);
return ret;
},
&attr->code));
std::move(attr->code.objects.begin(), attr->code.objects.end(),
std::back_inserter(merged_attr->code.objects));
std::move(attr->code.parameters.begin(), attr->code.parameters.end(),
std::back_inserter(merged_attr->code.parameters));
std::move(attr->node_indices.begin(), attr->node_indices.end(),
std::back_inserter(merged_attr->node_indices));
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
bool VariableDuplicates(std::vector<Variable> variables) {
std::sort(
std::begin(variables), std::end(variables),
[](const auto& lhs, const auto& rhs) { return lhs.name < rhs.name; });
for (int i = 0; i < variables.size() - 1; ++i) {
if (variables[i].name == variables[i + 1].name) return true;
}
return false;
}
TEST(CompiledNodeTest, NoDuplicates) {
Variable scalar;
scalar.name = "scalar";
Variable scalar1;
scalar1.name = "scalar1";
CompiledNodeAttributes attr;
CompiledNodeAttributes merged_attr;
attr.code.parameters = {scalar, scalar1};
merged_attr.code.parameters = {scalar};
ASSERT_OK(MergeCode(&attr, &merged_attr));
EXPECT_FALSE(VariableDuplicates(merged_attr.code.parameters));
}
TEST(CompiledNodeTest, NameConvergenceConflict) {
Variable scalar;
scalar.name = "scalar";
Variable scalar1;
scalar1.name = "scalar1";
CompiledNodeAttributes attr;
CompiledNodeAttributes merged_attr;
attr.code.parameters = {scalar1, scalar};
merged_attr.code.parameters = {scalar};
ASSERT_OK(MergeCode(&attr, &merged_attr));
EXPECT_FALSE(VariableDuplicates(merged_attr.code.parameters));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10b07a94-816f-47d6-ab5a-942410b3ce6e | cpp | tensorflow/tensorflow | fuse_auto_input | tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc | tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include <any>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
std::pair<std::string, std::string> MakeValueReplacement(int n, int k) {
return {absl::StrCat("value_", n), absl::StrCat("value_", k)};
}
std::pair<std::string, std::string> MakeDataReplacement(int n, int k) {
return {absl::StrCat("input_data_", n), absl::StrCat("input_data_", k)};
}
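// Example: MakeValueReplacement(0, 2) -> {"value_0", "value_2"} and
// MakeDataReplacement(1, 0) -> {"input_data_1", "input_data_0"}. These pairs
// are passed to absl::StrReplaceAll below to remap the fused producer's
// identifiers into the consumer's numbering.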
}
TransformResult FuseAutoInput::ApplyToNode(Node* node, GraphFloat32* graph) {
auto& node_attr =
std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
auto& node_code = node_attr.code;
if (node_code.input != IOStructure::AUTO) {
return {TransformStatus::SKIPPED, ""};
}
uint3 workgroup = node_code.workgroup;
auto node_outputs = graph->FindOutputs(node->id);
std::vector<std::pair<Node*, int>> nodes_to_fuse;
std::vector<std::pair<ValueId, int>> input_values;
int input_num = -1;
for (auto input_value : graph->FindInputs(node->id)) {
input_num++;
const ValueId input_id = input_value->id;
input_values.push_back({input_id, input_num});
if (graph->FindConsumers(input_id).size() > 1) {
continue;
}
Node* input_producer = graph->FindProducer(input_id);
if (input_producer == nullptr) {
continue;
}
if (graph->FindOutputs(input_producer->id).size() != 1) {
continue;
}
auto& input_producer_attr = std::any_cast<const CompiledNodeAttributes&>(
input_producer->operation.attributes);
if (input_producer_attr.code.output != IOStructure::AUTO) {
continue;
}
if (input_producer_attr.code.workload != node_code.workload &&
uint3() != input_producer_attr.code.workload) {
continue;
}
if (input_producer_attr.code.workgroup != uint3()) {
if (workgroup != uint3()) {
continue;
}
workgroup = input_producer_attr.code.workgroup;
}
nodes_to_fuse.push_back({input_producer, input_num});
input_values.pop_back();
}
if (nodes_to_fuse.empty()) {
return {TransformStatus::SKIPPED, ""};
}
{
absl::flat_hash_set<ValueId> all_inputs;
for (const auto& node_to_fuse : nodes_to_fuse) {
for (const auto& input : graph->FindInputs(node_to_fuse.first->id)) {
if (all_inputs.find(input->id) != all_inputs.end()) {
return {TransformStatus::SKIPPED, ""};
}
all_inputs.insert(input->id);
}
}
for (const auto& input : graph->FindInputs(node->id)) {
if (all_inputs.find(input->id) != all_inputs.end()) {
return {TransformStatus::SKIPPED, ""};
}
all_inputs.insert(input->id);
}
}
for (auto value : graph->FindInputs(node->id)) {
if (!graph->RemoveConsumer(node->id, value->id).ok()) {
return {TransformStatus::INVALID, ""};
}
}
std::string operation_type;
std::string source_code;
std::string values;
std::swap(source_code, node_code.source_code);
int extra_input_num = input_num;
input_num = 0;
for (auto input_and_num : nodes_to_fuse) {
auto& input = input_and_num.first;
auto& attr =
std::any_cast<CompiledNodeAttributes&>(input->operation.attributes);
auto super_inputs = graph->FindInputs(input->id);
std::vector<std::pair<std::string, std::string>> replacements;
for (int i = 0; i < super_inputs.size(); ++i) {
int value_index = i == 0 ? input_and_num.second : ++extra_input_num;
replacements.push_back(MakeValueReplacement(i, value_index));
replacements.push_back(MakeDataReplacement(i, input_num));
if (attr.code.input == IOStructure::AUTO) {
absl::StrAppend(&values, " value_", value_index, " = $input_data_",
input_num, "[gid.x, gid.y, gid.z]$;\n");
}
if (!graph->AddConsumer(node->id, super_inputs[i]->id).ok()) {
return {TransformStatus::INVALID, ""};
}
input_num++;
}
for (auto& param : attr.code.parameters) {
param.name = absl::StrReplaceAll(param.name, replacements);
}
attr.code.source_code =
absl::StrReplaceAll(attr.code.source_code, replacements);
if (!MergeCode(&attr, &node_attr).ok()) {
return {TransformStatus::INVALID, "Unable to merge the code"};
}
absl::StrAppend(&node_attr.code.source_code, "{\n", attr.code.source_code,
"\n}");
if (!operation_type.empty()) {
operation_type += ",";
}
operation_type += input->operation.type;
if (!graph->DeleteNode(input->id).ok()) {
return {TransformStatus::INVALID, ""};
}
}
for (int i = 0; i < input_values.size(); i++) {
if (node_code.input == IOStructure::AUTO) {
absl::StrAppend(&values, " value_", input_values[i].second,
" = $input_data_", input_num,
"[gid.x, gid.y, gid.z]$;\n");
}
if (!graph->AddConsumer(node->id, input_values[i].first).ok()) {
return {TransformStatus::INVALID, ""};
}
input_num++;
}
node_code.input = IOStructure::ONLY_DEFINITIONS;
absl::StrAppend(&node->operation.type, "(", operation_type, ")");
node_code.source_code =
      absl::StrCat(values, node_code.source_code, "{//",
                   node->operation.type, "\n", source_code, "\n}");
return {TransformStatus::APPLIED, ""};
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(FuseAutoInputTest, SkipsDiamond) {
GraphFloat32 graph;
auto* v0 = graph.NewValue();
auto* v1 = graph.NewValue();
auto* v2 = graph.NewValue();
auto* v3 = graph.NewValue();
auto* n1 = graph.NewNode();
CompiledNodeAttributes a1;
a1.code.output = IOStructure::AUTO;
n1->operation.attributes = std::move(a1);
ASSERT_OK(graph.AddConsumer(n1->id, v0->id));
ASSERT_OK(graph.SetProducer(n1->id, v1->id));
auto* n2 = graph.NewNode();
CompiledNodeAttributes a2;
a2.code.output = IOStructure::AUTO;
n2->operation.attributes = std::move(a2);
ASSERT_OK(graph.AddConsumer(n2->id, v0->id));
ASSERT_OK(graph.SetProducer(n2->id, v2->id));
auto* n3 = graph.NewNode();
CompiledNodeAttributes a3;
a3.code.input = IOStructure::AUTO;
n3->operation.attributes = std::move(a3);
ASSERT_OK(graph.AddConsumer(n3->id, v1->id));
ASSERT_OK(graph.AddConsumer(n3->id, v2->id));
ASSERT_OK(graph.SetProducer(n3->id, v3->id));
FuseAutoInput fuse_auto_input;
EXPECT_EQ(fuse_auto_input.ApplyToNode(n3, &graph).status,
TransformStatus::SKIPPED);
}
TEST(FuseAutoInputTest, SkipsTriangle) {
GraphFloat32 graph;
auto* v0 = graph.NewValue();
auto* v1 = graph.NewValue();
auto* v2 = graph.NewValue();
auto* n1 = graph.NewNode();
CompiledNodeAttributes a1;
a1.code.output = IOStructure::AUTO;
n1->operation.attributes = std::move(a1);
ASSERT_OK(graph.AddConsumer(n1->id, v0->id));
ASSERT_OK(graph.SetProducer(n1->id, v1->id));
auto* n2 = graph.NewNode();
CompiledNodeAttributes a2;
a2.code.input = IOStructure::AUTO;
n2->operation.attributes = std::move(a2);
ASSERT_OK(graph.AddConsumer(n2->id, v0->id));
ASSERT_OK(graph.AddConsumer(n2->id, v1->id));
ASSERT_OK(graph.SetProducer(n2->id, v2->id));
FuseAutoInput fuse_auto_input;
EXPECT_EQ(fuse_auto_input.ApplyToNode(n2, &graph).status,
TransformStatus::SKIPPED);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99fe9fde-b924-4870-ba69-05afb3b8d966 | cpp | tensorflow/tensorflow | variable_accessor | tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.cc | tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h"
#include <array>
#include <cstdint>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace variable_accessor_internal {
VariableReference Parse(absl::string_view input) {
VariableReference ref;
auto start_index = input.find('[');
if (start_index != std::string::npos) {
auto end_index = input.rfind(']');
if (end_index == std::string::npos) {
return ref;
}
ref.index = input.substr(start_index + 1, end_index - start_index - 1);
ref.name = input.substr(0, start_index);
ref.field = input.substr(end_index + 1);
} else {
auto dot = input.find('.');
if (dot != std::string::npos) {
ref.name = input.substr(0, dot);
ref.field = input.substr(dot);
} else {
ref.name = input;
}
}
return ref;
}
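// Example: Parse("var[i].y") yields {name: "var", index: "i", field: ".y"},
// and Parse("param.x") yields {name: "param", field: ".x"}; a '[' without a
// closing ']' returns an empty (unnamed) reference.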
}
namespace {
struct VariableTypeGetter {
std::string operator()(int) const { return "int"; }
std::string operator()(const int2&) const { return "ivec2"; }
std::string operator()(const std::vector<int2>&) const { return "ivec2"; }
std::string operator()(const int4&) const { return "ivec4"; }
std::string operator()(unsigned int) const { return "uint"; }
std::string operator()(const uint4&) const { return "uvec4"; }
std::string operator()(float) const { return "float"; }
std::string operator()(const float2&) const { return "vec2"; }
std::string operator()(const float4&) const { return "vec4"; }
std::string operator()(const std::vector<float4>&) const { return "vec4"; }
};
std::string GetVariableType(const Variable::ValueType& value) {
return std::visit(VariableTypeGetter(), value);
}
struct LengthGetter {
template <typename T>
int operator()(const T& param) const {
return 1;
}
template <typename T>
int operator()(const std::vector<T>& param) const {
return param.size();
}
};
int GetLength(const Variable::ValueType& value) {
return std::visit(LengthGetter(), value);
}
template <typename T>
void FormatValue(std::string* result, T t) {
absl::StrAppend(result, t);
}
template <>
void FormatValue(std::string* result, float t) {
absl::StrAppend(result, absl::StrFormat("%.9ff", t));
}
template <typename T, int N>
std::vector<std::string> ToString(const std::array<T, N>& data) {
std::vector<std::string> result(N);
for (int i = 0; i < N; ++i) {
FormatValue(&result[i], data[i]);
}
return result;
}
struct ConstGenerator {
template <typename T>
void operator()(T t) const {
FormatValue(result, t);
}
template <typename T>
void operator()(const Vec2<T>& v) const {
absl::StrAppend(result, VariableTypeGetter()(v), "(",
absl::StrJoin(ToString<T, 2>(v.data_), ","), ")");
}
template <typename T>
void operator()(const Vec3<T>& v) const {
absl::StrAppend(result, VariableTypeGetter()(v), "(",
absl::StrJoin(ToString<T, 3>(v.data_), ","), ")");
}
template <typename T>
void operator()(const Vec4<T>& v) const {
absl::StrAppend(result, VariableTypeGetter()(v), "(",
absl::StrJoin(ToString<T, 4>(v.data_), ","), ")");
}
template <typename T>
void operator()(const std::vector<T>& v) const {
std::string type = VariableTypeGetter()(v);
absl::StrAppend(result, type, "[", v.size(), "](");
bool first = true;
for (const auto& i : v) {
if (first) {
first = false;
} else {
absl::StrAppend(result, ",");
}
(*this)(i);
}
absl::StrAppend(result, ")");
}
std::string* result;
};
void GetValue(const Variable::ValueType& value, std::string* result) {
std::visit(ConstGenerator{result}, value);
}
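// Example: an int2(1, 2) value is emitted as "ivec2(1,2)", a float uses the
// "%.9ff" format from FormatValue above, and a std::vector<int2> holding a
// single int2(1, 2) becomes "ivec2[1](ivec2(1,2))".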
struct SharedVariableDeclarationGenerator {
template <typename T>
void operator()(const T&) const {
absl::StrAppend(result, "shared highp ", GetVariableType(variable.value),
" ", variable.name, ";\n");
}
template <typename T>
void operator()(const std::vector<T>& v) const {
absl::StrAppend(result, "shared highp ", GetVariableType(variable.value),
" ", variable.name);
if (v.empty()) {
absl::StrAppend(
result,
"[gl_WorkGroupSize.z * gl_WorkGroupSize.y * gl_WorkGroupSize.x];\n");
} else {
absl::StrAppend(result, "[", v.size(), "];\n");
}
}
const Variable& variable;
std::string* result;
};
void GenerateSharedVariableDeclaration(const Variable& variable,
std::string* result) {
std::visit(SharedVariableDeclarationGenerator{variable, result},
variable.value);
}
struct UniformParameterDeclarationGenerator {
template <typename T>
void operator()(const T&) const {
absl::StrAppend(result, "uniform ", GetVariableType(variable.value), " ",
variable.name, ";\n");
}
template <typename T>
void operator()(const std::vector<T>& v) const {
absl::StrAppend(result, "uniform ", GetVariableType(variable.value), " ",
variable.name, "[", v.size(), "];\n");
}
const Variable& variable;
std::string* result;
};
void GenerateUniformParameterDeclaration(const Variable& variable,
std::string* result) {
std::visit(UniformParameterDeclarationGenerator{variable, result},
variable.value);
}
struct VulkanPushConstantGenerator {
template <typename T>
void operator()(const T&) const {
absl::StrAppend(result, " ", GetVariableType(variable.value), " ",
variable.name, ";\n");
}
template <typename T>
void operator()(const std::vector<T>& v) const {
absl::StrAppend(result, " ", GetVariableType(variable.value), " ",
variable.name, "[", v.size(), "];\n");
}
const Variable& variable;
std::string* result;
};
void GenerateVulkanPushConstant(const Variable& variable, std::string* result) {
std::visit(VulkanPushConstantGenerator{variable, result}, variable.value);
}
struct VariableLengthGetter {
template <typename T>
bool operator()(const T&) const {
return false;
}
template <typename T>
bool operator()(const std::vector<T>&) const {
return true;
}
};
struct VulkanConstantGenerator {
template <typename T>
void operator()(const T&) const {
const std::string variable_type = GetVariableType(variable.value);
if (variable_type == "int" || variable_type == "uint" ||
variable_type == "float") {
absl::StrAppend(result, "layout(constant_id = ", *constant_id, ") const ",
variable_type, " ", variable.name, " = ");
absl::StrAppend(result, (variable_type == "float" ? "0.0" : "0"), ";\n");
(*constant_id)++;
} else {
non_scalar_variables->push_back(variable);
}
}
template <typename T>
void operator()(const std::vector<T>& v) const {
non_scalar_variables->push_back(variable);
}
const Variable& variable;
int* const constant_id;
std::vector<Variable>* non_scalar_variables;
std::string* result;
};
void GenerateVulkanConstant(const Variable& variable, int* constant_id,
std::vector<Variable>* non_scalar_variables,
std::string* result) {
std::visit(VulkanConstantGenerator{variable, constant_id,
non_scalar_variables, result},
variable.value);
}
class VulkanConstantsProcessor {
public:
void ProcessVulkanConstant(const Variable& variable, std::string* result) {
GenerateVulkanConstant(variable, &constant_id_, &non_scalar_variables_,
result);
}
void GeneratePushConstantsDeclarations(std::string* result) {
if (!non_scalar_variables_.empty()) {
*result += "\nlayout(push_constant) uniform pushConstants {\n";
for (const auto& variable : non_scalar_variables_) {
GenerateVulkanPushConstant(variable, result);
}
*result += "};\n";
}
}
protected:
int constant_id_ = 3;
std::vector<Variable> non_scalar_variables_;
};
bool IsVariableLength(const Variable::ValueType& value) {
return std::visit(VariableLengthGetter(), value);
}
enum Field : uint8_t { UNKNOWN = 4, X = 0, Y = 1, Z = 2, W = 3 };
Field ToField(absl::string_view field_name) {
if (field_name.size() == 2 && field_name[0] == '.') {
switch (field_name[1]) {
case 'x':
return Field::X;
case 'y':
return Field::Y;
case 'z':
return Field::Z;
case 'w':
return Field::W;
}
}
return Field::UNKNOWN;
}
struct FieldAccessor {
template <typename T>
void operator()(const T&) const {}
template <typename T>
void operator()(const Vec2<T>& v) const {
FormatValue(result, v[field]);
}
template <typename T>
void operator()(const Vec3<T>& v) const {
FormatValue(result, v[field]);
}
template <typename T>
void operator()(const Vec4<T>& v) const {
FormatValue(result, v[field]);
}
Field field;
std::string* result;
};
void GetValue(const Variable::ValueType& value, Field field,
std::string* result) {
std::visit(FieldAccessor{field, result}, value);
}
struct FieldChecker {
template <typename T>
bool operator()(const T&) const {
return false;
}
template <typename T>
bool operator()(const Vec2<T>& v) const {
return field < v.size();
}
template <typename T>
bool operator()(const Vec3<T>& v) const {
return field < v.size();
}
template <typename T>
bool operator()(const Vec4<T>& v) const {
return field < v.size();
}
template <typename T>
bool operator()(const std::vector<T>&) const {
T t;
return (*this)(t);
}
Field field;
};
bool HasField(const Variable::ValueType& value, Field field) {
return std::visit(FieldChecker{field}, value);
}
void AssembleAccessor(absl::string_view name, absl::string_view index,
absl::string_view field, std::string* result) {
if (index.empty()) {
absl::StrAppend(result, name, field);
} else {
absl::StrAppend(result, name, "[", index, "]", field);
}
}
}
RewriteStatus VariableAccessor::Rewrite(absl::string_view input,
std::string* output) {
auto ref = variable_accessor_internal::Parse(input);
if (ref.name.empty()) {
absl::StrAppend(output, "INVALID_SYNTAX");
return RewriteStatus::ERROR;
}
auto it =
name_to_variable_.find(std::string(ref.name.data(), ref.name.size()));
if (it == name_to_variable_.end()) {
return RewriteStatus::NOT_RECOGNIZED;
}
const auto& value = it->second.value;
if (!ref.index.empty() && !IsVariableLength(value)) {
absl::StrAppend(output, "INVALID_ACCESS_BY_INDEX");
return RewriteStatus::ERROR;
}
Field f = ToField(ref.field);
if (!ref.field.empty() && !HasField(value, f)) {
absl::StrAppend(output, "INVALID_ACCESS_BY_FIELD");
return RewriteStatus::ERROR;
}
if (!inline_values_ || IsVariableLength(value)) {
AssembleAccessor(it->second.name, ref.index, ref.field, output);
} else {
if (f != Field::UNKNOWN) {
GetValue(value, f, output);
} else {
GetValue(value, output);
}
}
return RewriteStatus::SUCCESS;
}
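// Illustrative usage (mirrors variable_accessor_test.cc):
//   VariableAccessor accessor(/*inline_values=*/true);
//   accessor.AddUniformParameter({"var", int2(1, 2)});
//   std::string out;
//   accessor.Rewrite("var.y", &out);   // out == "2"
// With inline values disabled the same reference is emitted as "var.y" and
// the parameter is later declared as a uniform instead.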
bool VariableAccessor::AddSharedVariable(Variable&& variable) {
const std::string name = variable.name;
if (!name_to_variable_.insert({name, std::move(variable)}).second) {
return false;
}
shared_variables_.insert(name);
return true;
}
bool VariableAccessor::AddUniformParameter(Variable&& variable) {
const std::string name = variable.name;
if (!name_to_variable_.insert({name, std::move(variable)}).second) {
return false;
}
uniform_parameters_.insert(name);
return true;
}
bool VariableAccessor::IsEmptyVariableLength(const Variable& variable) const {
const auto& value = variable.value;
return IsVariableLength(value) && GetLength(value) == 0;
}
std::string VariableAccessor::GetConstDeclarations() const {
std::string declarations;
for (const auto& variable : name_to_variable_) {
const std::string& variable_name = variable.second.name;
if (shared_variables_.find(variable_name) != shared_variables_.end()) {
continue;
}
const auto& value = variable.second.value;
if (IsVariableLength(value)) {
absl::StrAppend(&declarations, "const ", GetVariableType(value), " ",
variable_name, "[] = ");
GetValue(value, &declarations);
absl::StrAppend(&declarations, ";\n");
}
}
return declarations;
}
std::string VariableAccessor::GetSharedVariableDeclarations() const {
std::string declarations;
for (const auto& name : shared_variables_) {
const auto& variable = name_to_variable_.at(name);
GenerateSharedVariableDeclaration(variable, &declarations);
}
return declarations;
}
std::string VariableAccessor::GetUniformParameterDeclarations() const {
std::string declarations;
if (!inline_values_) {
if (vulkan_support_) {
VulkanConstantsProcessor processor;
for (const auto& name : uniform_parameters_) {
const auto& variable = name_to_variable_.at(name);
processor.ProcessVulkanConstant(variable, &declarations);
}
processor.GeneratePushConstantsDeclarations(&declarations);
} else {
for (const auto& name : uniform_parameters_) {
const auto& variable = name_to_variable_.at(name);
GenerateUniformParameterDeclaration(variable, &declarations);
}
}
}
return declarations;
}
std::vector<Variable> VariableAccessor::GetUniformParameters() const {
std::vector<Variable> variables;
if (!inline_values_) {
variables.reserve(name_to_variable_.size());
for (const auto& name : uniform_parameters_) {
const auto& variable = name_to_variable_.at(name);
variables.push_back(variable);
}
}
return variables;
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(PreprocessorTest, CornerCases) {
VariableAccessor variable_accessor(true);
std::string result;
EXPECT_EQ(variable_accessor.Rewrite("unknown", &result),
RewriteStatus::NOT_RECOGNIZED);
}
TEST(PreprocessorTest, Value) {
VariableAccessor variable_accessor(true);
ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", int32_t(1)}));
std::string result;
ASSERT_EQ(variable_accessor.Rewrite("var", &result), RewriteStatus::SUCCESS);
EXPECT_EQ(result, "1");
}
TEST(PreprocessorTest, ValueVec) {
VariableAccessor variable_accessor(true);
ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", int2(1, 2)}));
std::string result;
ASSERT_EQ(variable_accessor.Rewrite("var", &result), RewriteStatus::SUCCESS);
EXPECT_EQ(result, "ivec2(1,2)");
}
TEST(PreprocessorTest, Field) {
VariableAccessor variable_accessor(true);
ASSERT_TRUE(
variable_accessor.AddUniformParameter({"var", float2(1.0, 2.1234567)}));
std::string result;
ASSERT_EQ(variable_accessor.Rewrite("var.y", &result),
RewriteStatus::SUCCESS);
EXPECT_EQ(result, "2.123456717f");
}
TEST(PreprocessorTest, FieldFail) {
VariableAccessor variable_accessor(true);
ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", 1.0f}));
ASSERT_TRUE(variable_accessor.AddUniformParameter({"vec", float2(1.0, 1.0)}));
std::string result;
ASSERT_EQ(variable_accessor.Rewrite("var.y", &result), RewriteStatus::ERROR);
EXPECT_EQ(result, "INVALID_ACCESS_BY_FIELD");
result.clear();
ASSERT_EQ(variable_accessor.Rewrite("vec.z", &result), RewriteStatus::ERROR);
EXPECT_EQ(result, "INVALID_ACCESS_BY_FIELD");
}
TEST(PreprocessorTest, Variable) {
VariableAccessor variable_accessor(true);
std::vector<int2> v;
v.push_back(int2(1, 2));
ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", v}));
std::string result;
ASSERT_EQ(variable_accessor.Rewrite("var[i].y", &result),
RewriteStatus::SUCCESS);
ASSERT_EQ(result, "var[i].y");
EXPECT_EQ(variable_accessor.GetConstDeclarations(),
"const ivec2 var[] = ivec2[1](ivec2(1,2));\n");
}
TEST(PreprocessorTest, InlineVariableFail) {
VariableAccessor variable_accessor(true);
ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", 1}));
std::string result;
ASSERT_EQ(variable_accessor.Rewrite("var[i]", &result), RewriteStatus::ERROR);
EXPECT_EQ(result, "INVALID_ACCESS_BY_INDEX");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8fd7778-aef1-4e13-95a9-3baf8f3db331 | cpp | tensorflow/tensorflow | preprocessor | tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc | tensorflow/lite/delegates/gpu/gl/compiler/preprocessor_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
absl::string_view FindInlineBlock(absl::string_view s, char delimiter) {
size_t start = s.find(delimiter);
if (start != absl::string_view::npos) {
size_t end = s.find(delimiter, start + 1);
if (end != std::string::npos) {
return s.substr(start, end - start + 1);
}
return s.substr(start, 1);
}
return s.substr(s.size(), 0);
}
absl::string_view PastSubstr(absl::string_view s, absl::string_view subs) {
return s.substr(subs.data() + subs.size() - s.data());
}
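// Example: FindInlineBlock("a $x$ b", '$') returns the "$x$" view (delimiters
// included); an unmatched delimiter yields a single-character view, which
// Rewrite below reports as "Unable to find end of inline block". PastSubstr
// then advances the cursor past the returned block.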
}
absl::Status TextPreprocessor::Rewrite(const std::string& input,
std::string* output) {
absl::string_view s = input;
std::string result;
while (true) {
absl::string_view inline_block = FindInlineBlock(s, inline_delimiter_);
result.append(s.data(), inline_block.data() - s.data());
if (inline_block.empty()) {
break;
}
if (inline_block.size() == 1) {
return absl::NotFoundError("Unable to find end of inline block");
}
s = PastSubstr(s, inline_block);
bool processed = false;
for (auto& rewrite : inline_rewrites_) {
if (processed) {
break;
}
switch (rewrite->Rewrite(inline_block.substr(1, inline_block.size() - 2),
&result)) {
case RewriteStatus::NOT_RECOGNIZED:
break;
case RewriteStatus::SUCCESS:
processed = true;
break;
case RewriteStatus::ERROR:
return absl::InternalError(absl::StrCat("Error while rewriting '",
inline_block, "': ", result));
}
}
if (!processed) {
if (!keep_unknown_rewrites_) {
return absl::NotFoundError(absl::StrCat(
"Didn't find inline rewrite for '", inline_block, "'"));
}
absl::StrAppend(&result, inline_block);
}
}
*output = std::move(result);
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class AccuInlineRewrite : public InlineRewrite {
public:
explicit AccuInlineRewrite(std::vector<std::string>* blocks)
: blocks_(blocks) {}
RewriteStatus Rewrite(absl::string_view input, std::string* output) final {
blocks_->push_back(std::string(input.data(), input.size()));
output->append("r:");
output->append(input.data(), input.size());
return RewriteStatus::SUCCESS;
}
std::vector<std::string>* blocks_;
};
std::vector<std::string> ParseInlines(const std::string& text) {
std::vector<std::string> blocks;
TextPreprocessor preprocessor('$', false);
AccuInlineRewrite rewrite(&blocks);
preprocessor.AddRewrite(&rewrite);
std::string discard;
preprocessor.Rewrite(text, &discard).IgnoreError();
return blocks;
}
TEST(Preprocessor, CornerCases) {
EXPECT_THAT(ParseInlines(""), testing::ElementsAre());
EXPECT_THAT(ParseInlines("text text"), testing::ElementsAre());
EXPECT_THAT(ParseInlines("$$"), testing::ElementsAre(""));
}
TEST(Preprocessor, One) {
EXPECT_THAT(ParseInlines("$text$"), testing::ElementsAre("text"));
EXPECT_THAT(ParseInlines(" $text$ "), testing::ElementsAre("text"));
}
TEST(Preprocessor, More) {
EXPECT_THAT(ParseInlines("Test $inline1$\n$inline2$ test $inline3$ "),
testing::ElementsAre("inline1", "inline2", "inline3"));
}
std::string RewriteInlines(const std::string& text) {
std::vector<std::string> blocks;
TextPreprocessor preprocessor('$', false);
AccuInlineRewrite rewrite(&blocks);
preprocessor.AddRewrite(&rewrite);
std::string out;
preprocessor.Rewrite(text, &out).IgnoreError();
return out;
}
TEST(Preprocessor, RewriteCornerCases) {
EXPECT_EQ(RewriteInlines(""), "");
EXPECT_EQ(RewriteInlines("text text"), "text text");
EXPECT_EQ(RewriteInlines("$$"), "r:");
}
TEST(Preprocessor, RewriteOne) {
EXPECT_EQ(RewriteInlines("$text$"), "r:text");
EXPECT_EQ(RewriteInlines(" $text$ "), " r:text ");
}
TEST(Preprocessor, RewriteMore) {
EXPECT_EQ(RewriteInlines("Test $inline1$\n$inline2$ test $inline3$ "),
"Test r:inline1\nr:inline2 test r:inline3 ");
}
class SingleRewrite : public InlineRewrite {
public:
RewriteStatus Rewrite(absl::string_view input, std::string* output) final {
if (input == "foo") {
output->append("bla");
return RewriteStatus::SUCCESS;
}
return RewriteStatus::NOT_RECOGNIZED;
}
std::vector<std::string>* blocks_;
};
TEST(Preprocessor, KeepUnknownRewrites) {
TextPreprocessor preprocessor('$', true);
SingleRewrite rewrite;
preprocessor.AddRewrite(&rewrite);
std::string out;
ASSERT_TRUE(preprocessor.Rewrite("Good morning, $name$! $foo$", &out).ok());
EXPECT_EQ("Good morning, $name$! bla", out);
}
TEST(Preprocessor, KeepUnknownRewrites_Fail) {
TextPreprocessor preprocessor('$', false);
SingleRewrite rewrite;
preprocessor.AddRewrite(&rewrite);
std::string out;
EXPECT_FALSE(preprocessor.Rewrite("Good morning, $name$! $foo$", &out).ok());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe31defb-0fd1-4d9a-820f-8eb2460a1483 | cpp | tensorflow/tensorflow | object_accessor | tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.cc | tensorflow/lite/delegates/gpu/gl/compiler/object_accessor_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.h"
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/access_type.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace object_accessor_internal {
IndexedElement ParseElement(absl::string_view input) {
auto i = input.find('[');
if (i == std::string::npos || input.back() != ']') {
return {};
}
return {input.substr(0, i),
absl::StrSplit(input.substr(i + 1, input.size() - i - 2), ',',
absl::SkipWhitespace())};
}
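// Example: ParseElement("obj[x,y + 5,z]") yields object_name "obj" and
// indices {"x", "y + 5", "z"}; input without a trailing ']' produces an
// empty element.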
}
namespace {
void MaybeConvertToHalf(DataType data_type, absl::string_view value,
std::string* output) {
if (data_type == DataType::FLOAT16) {
absl::StrAppend(output, "Vec4ToHalf(", value, ")");
} else {
absl::StrAppend(output, value);
}
}
void MaybeConvertFromHalf(DataType data_type, absl::string_view value,
std::string* output) {
if (data_type == DataType::FLOAT16) {
absl::StrAppend(output, "Vec4FromHalf(", value, ")");
} else {
absl::StrAppend(output, value);
}
}
struct ReadFromTextureGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
if (sampler_textures) {
absl::StrAppend(result, "texelFetch(", element.object_name, ", ivec2(",
element.indices[0], ", 0), 0)");
} else {
absl::StrAppend(result, "imageLoad(", element.object_name, ", ivec2(",
element.indices[0], ", 0))");
}
return RewriteStatus::SUCCESS;
}
template <typename Shape>
RewriteStatus operator()(const Shape&) const {
if (element.indices.size() != Shape::size()) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
if (sampler_textures) {
absl::StrAppend(result, "texelFetch(", element.object_name, ", ivec",
Shape::size(), "(", absl::StrJoin(element.indices, ", "),
"), 0)");
} else {
absl::StrAppend(result, "imageLoad(", element.object_name, ", ivec",
Shape::size(), "(", absl::StrJoin(element.indices, ", "),
"))");
}
return RewriteStatus::SUCCESS;
}
const object_accessor_internal::IndexedElement& element;
const bool sampler_textures;
std::string* result;
};
struct ReadFromBufferGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
MaybeConvertFromHalf(
data_type,
absl::StrCat(element.object_name, ".data[", element.indices[0], "]"),
result);
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint2& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 2) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
MaybeConvertFromHalf(
data_type,
absl::StrCat(element.object_name, ".data[", element.indices[0], " + $",
element.object_name, "_w$ * (", element.indices[1], ")]"),
result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint3& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 3) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
MaybeConvertFromHalf(
data_type,
absl::StrCat(element.object_name, ".data[", element.indices[0], " + $",
element.object_name, "_w$ * (", element.indices[1], " + $",
element.object_name, "_h$ * (", element.indices[2], "))]"),
result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
DataType data_type;
const object_accessor_internal::IndexedElement& element;
std::string* result;
bool* requires_sizes;
};
RewriteStatus GenerateReadAccessor(
const Object& object,
const object_accessor_internal::IndexedElement& element,
bool sampler_textures, std::string* result, bool* requires_sizes) {
switch (object.object_type) {
case ObjectType::BUFFER:
return std::visit(ReadFromBufferGenerator{object.data_type, element,
result, requires_sizes},
object.size);
case ObjectType::TEXTURE:
return std::visit(
ReadFromTextureGenerator{element, sampler_textures, result},
object.size);
case ObjectType::UNKNOWN:
return RewriteStatus::ERROR;
}
}
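// Example: reading "obj[x,y,z]" from a BUFFER with a uint3 size expands to
// "obj.data[x + $obj_w$ * (y + $obj_h$ * (z))]" and sets *requires_sizes so
// the obj_w/obj_h uniforms get added; a TEXTURE read becomes imageLoad(...)
// or, with sampler_textures, texelFetch(...). FLOAT16 buffer loads are
// wrapped in Vec4FromHalf(...).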
struct WriteToBufferGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, element.object_name, ".data[", element.indices[0],
"] = ");
MaybeConvertToHalf(data_type, value, result);
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint2& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 2) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, element.object_name, ".data[", element.indices[0],
" + $", element.object_name, "_w$ * (", element.indices[1],
")] = ");
MaybeConvertToHalf(data_type, value, result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint3& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 3) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, element.object_name, ".data[", element.indices[0],
" + $", element.object_name, "_w$ * (", element.indices[1],
" + $", element.object_name, "_h$ * (", element.indices[2],
"))] = ");
MaybeConvertToHalf(data_type, value, result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
DataType data_type;
const object_accessor_internal::IndexedElement& element;
absl::string_view value;
std::string* result;
bool* requires_sizes;
};
struct WriteToTextureGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, "imageStore(", element.object_name, ", ivec2(",
element.indices[0], ", 0), ", value, ")");
return RewriteStatus::SUCCESS;
}
template <typename Shape>
RewriteStatus operator()(const Shape&) const {
if (element.indices.size() != Shape::size()) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, "imageStore(", element.object_name, ", ivec",
Shape::size(), "(", absl::StrJoin(element.indices, ", "),
"), ", value, ")");
return RewriteStatus::SUCCESS;
}
const object_accessor_internal::IndexedElement& element;
absl::string_view value;
std::string* result;
};
RewriteStatus GenerateWriteAccessor(
const Object& object,
const object_accessor_internal::IndexedElement& element,
absl::string_view value, std::string* result, bool* requires_sizes) {
switch (object.object_type) {
case ObjectType::BUFFER:
return std::visit(WriteToBufferGenerator{object.data_type, element, value,
result, requires_sizes},
object.size);
case ObjectType::TEXTURE:
return std::visit(WriteToTextureGenerator{element, value, result},
object.size);
case ObjectType::UNKNOWN:
return RewriteStatus::ERROR;
}
}
std::string ToAccessModifier(AccessType access, bool use_readonly_modifier) {
switch (access) {
case AccessType::READ:
return use_readonly_modifier ? " readonly" : "";
case AccessType::WRITE:
return " writeonly";
case AccessType::READ_WRITE:
return " restrict";
}
return " unknown_access";
}
std::string ToBufferType(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
return "uvec4";
case DataType::UINT64:
return "u64vec4_not_available_in_glsl";
case DataType::INT8:
case DataType::INT16:
case DataType::INT32:
return "ivec4";
case DataType::INT64:
return "i64vec4_not_available_in_glsl";
case DataType::FLOAT16:
return "uvec2";
case DataType::BOOL:
case DataType::FLOAT32:
return "vec4";
case DataType::FLOAT64:
return "dvec4";
case DataType::UNKNOWN:
return "unknown_buffer_type";
}
}
struct TextureImageTypeGetter {
std::string operator()(size_t) const {
return (*this)(uint2());
}
std::string operator()(const uint2&) const {
switch (type) {
case DataType::UINT16:
case DataType::UINT32:
return "uimage2D";
case DataType::INT16:
case DataType::INT32:
return "iimage2D";
case DataType::FLOAT16:
case DataType::FLOAT32:
return "image2D";
default:
return "unknown_image_2d";
}
}
std::string operator()(const uint3&) const {
switch (type) {
case DataType::UINT16:
case DataType::UINT32:
return "uimage2DArray";
case DataType::INT16:
case DataType::INT32:
return "iimage2DArray";
case DataType::FLOAT16:
case DataType::FLOAT32:
return "image2DArray";
default:
return "unknown_image_2d_array";
}
}
DataType type;
};
struct TextureSamplerTypeGetter {
std::string operator()(size_t) const {
return (*this)(uint2());
}
std::string operator()(const uint2&) const {
switch (type) {
case DataType::FLOAT16:
case DataType::FLOAT32:
return "sampler2D";
case DataType::INT32:
case DataType::INT16:
return "isampler2D";
case DataType::UINT32:
case DataType::UINT16:
return "usampler2D";
default:
return "unknown_sampler2D";
}
}
std::string operator()(const uint3&) const {
switch (type) {
case DataType::FLOAT16:
case DataType::FLOAT32:
return "sampler2DArray";
case DataType::INT32:
case DataType::INT16:
return "isampler2DArray";
case DataType::UINT32:
case DataType::UINT16:
return "usampler2DArray";
default:
return "unknown_sampler2DArray";
}
}
DataType type;
};
std::string ToImageType(const Object& object, bool sampler_textures) {
if (sampler_textures && (object.access == AccessType::READ)) {
return std::visit(TextureSamplerTypeGetter{object.data_type}, object.size);
} else {
return std::visit(TextureImageTypeGetter{object.data_type}, object.size);
}
}
std::string ToImageLayoutQualifier(DataType type) {
switch (type) {
case DataType::UINT16:
return "rgba16ui";
case DataType::UINT32:
return "rgba32ui";
case DataType::INT16:
return "rgba16i";
case DataType::INT32:
return "rgba32i";
case DataType::FLOAT16:
return "rgba16f";
case DataType::FLOAT32:
return "rgba32f";
default:
return "unknown_image_layout";
}
}
std::string ToImagePrecision(DataType type) {
switch (type) {
case DataType::UINT16:
case DataType::INT16:
case DataType::FLOAT16:
return "mediump";
case DataType::UINT32:
case DataType::INT32:
case DataType::FLOAT32:
return "highp";
default:
return "unknown_image_precision";
}
}
struct SizeParametersAdder {
void operator()(size_t) const {}
void operator()(const uint2& size) const {
variable_accessor->AddUniformParameter(
{absl::StrCat(object_name, "_w"), static_cast<int32_t>(size.x)});
}
void operator()(const uint3& size) const {
variable_accessor->AddUniformParameter(
{absl::StrCat(object_name, "_w"), static_cast<int32_t>(size.x)});
variable_accessor->AddUniformParameter(
{absl::StrCat(object_name, "_h"), static_cast<int32_t>(size.y)});
}
absl::string_view object_name;
VariableAccessor* variable_accessor;
};
void AddSizeParameters(absl::string_view object_name, const Object& object,
VariableAccessor* parameters) {
std::visit(SizeParametersAdder{object_name, parameters}, object.size);
}
void GenerateObjectDeclaration(absl::string_view name, const Object& object,
std::string* declaration, bool is_mali,
bool sampler_textures) {
switch (object.object_type) {
case ObjectType::BUFFER:
absl::StrAppend(declaration, "layout(binding = ", object.binding, ")",
ToAccessModifier(object.access, !is_mali), " buffer B",
object.binding, " { ", ToBufferType(object.data_type),
" data[]; } ", name, ";\n");
break;
case ObjectType::TEXTURE:
if (sampler_textures && (object.access == AccessType::READ)) {
absl::StrAppend(declaration, "layout(binding = ", object.binding,
") uniform ", ToImagePrecision(object.data_type), " ",
ToImageType(object, sampler_textures), " ", name,
";\n");
} else {
absl::StrAppend(
declaration, "layout(", ToImageLayoutQualifier(object.data_type),
", binding = ", object.binding, ")",
ToAccessModifier(object.access, true), " uniform ",
ToImagePrecision(object.data_type), " ",
ToImageType(object, sampler_textures), " ", name, ";\n");
}
break;
case ObjectType::UNKNOWN:
break;
}
}
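// Example: a read-only float buffer named "obj" is declared roughly as
// "layout(binding = 0) readonly buffer B0 { vec4 data[]; } obj;" (the
// "readonly" qualifier is dropped on Mali), while a read-only FLOAT32
// 2D-array texture becomes
// "layout(rgba32f, binding = 0) readonly uniform highp image2DArray obj;"
// (see object_accessor_test.cc).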
}
RewriteStatus ObjectAccessor::Rewrite(absl::string_view input,
std::string* output) {
std::pair<absl::string_view, absl::string_view> n =
absl::StrSplit(input, absl::MaxSplits('=', 1), absl::SkipWhitespace());
if (n.first.empty()) {
return RewriteStatus::NOT_RECOGNIZED;
}
if (n.second.empty()) {
return RewriteRead(absl::StripAsciiWhitespace(n.first), output);
}
return RewriteWrite(absl::StripAsciiWhitespace(n.first),
absl::StripAsciiWhitespace(n.second), output);
}
RewriteStatus ObjectAccessor::RewriteRead(absl::string_view location,
std::string* output) {
auto element = object_accessor_internal::ParseElement(location);
if (element.object_name.empty()) {
return RewriteStatus::NOT_RECOGNIZED;
}
auto it = name_to_object_.find(
std::string(element.object_name.data(), element.object_name.size()));
if (it == name_to_object_.end()) {
return RewriteStatus::NOT_RECOGNIZED;
}
bool requires_sizes = false;
auto status = GenerateReadAccessor(it->second, element, sampler_textures_,
output, &requires_sizes);
if (requires_sizes) {
AddSizeParameters(it->first, it->second, variable_accessor_);
}
return status;
}
RewriteStatus ObjectAccessor::RewriteWrite(absl::string_view location,
absl::string_view value,
std::string* output) {
auto element = object_accessor_internal::ParseElement(location);
if (element.object_name.empty()) {
return RewriteStatus::NOT_RECOGNIZED;
}
auto it = name_to_object_.find(
std::string(element.object_name.data(), element.object_name.size()));
if (it == name_to_object_.end()) {
return RewriteStatus::NOT_RECOGNIZED;
}
bool requires_sizes = false;
auto status = GenerateWriteAccessor(it->second, element, value, output,
&requires_sizes);
if (requires_sizes) {
AddSizeParameters(it->first, it->second, variable_accessor_);
}
return status;
}
bool ObjectAccessor::AddObject(const std::string& name, Object object) {
if (object.object_type == ObjectType::UNKNOWN) {
return false;
}
return name_to_object_.insert({name, std::move(object)}).second;
}
std::string ObjectAccessor::GetObjectDeclarations() const {
std::string declarations;
for (auto& o : name_to_object_) {
GenerateObjectDeclaration(o.first, o.second, &declarations, is_mali_,
sampler_textures_);
}
return declarations;
}
std::string ObjectAccessor::GetFunctionsDeclarations() const {
for (const auto& o : name_to_object_) {
if (o.second.data_type == DataType::FLOAT16 &&
o.second.object_type == ObjectType::BUFFER) {
return absl::StrCat(
"#define Vec4FromHalf(v) vec4(unpackHalf2x16(v.x), "
"unpackHalf2x16(v.y))\n",
"#define Vec4ToHalf(v) uvec2(packHalf2x16(v.xy), "
"packHalf2x16(v.zw))");
}
}
return "";
}
std::vector<Object> ObjectAccessor::GetObjects() const {
std::vector<Object> objects;
objects.reserve(name_to_object_.size());
for (auto& o : name_to_object_) {
objects.push_back(o.second);
}
return objects;
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.h"
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
struct ParameterComparator {
template <typename T>
bool operator()(const T& t) const {
const T* v = std::get_if<T>(&p.value);
return v && t == *v;
}
const Variable& p;
};
bool operator==(const Variable& l, const Variable& r) {
return l.name == r.name && std::visit(ParameterComparator{l}, r.value);
}
namespace {
TEST(Preprocessor, CornerCases) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
std::string result;
ASSERT_EQ(accessor.Rewrite("", &result), RewriteStatus::NOT_RECOGNIZED);
ASSERT_EQ(accessor.Rewrite("=", &result), RewriteStatus::NOT_RECOGNIZED);
}
TEST(Preprocessor, ReadFromBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "obj.data[i]");
}
TEST(Preprocessor, ReadFromBufferLinear) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyBuffer(uint3(1, 2, 3), std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "obj.data[i]");
}
TEST(Preprocessor, ReadFromBufferByIndex) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyBuffer(uint3(1, 2, 3), std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[x,y + 5,z]", &result),
RewriteStatus::SUCCESS);
EXPECT_THAT(variable_accessor.GetUniformParameters(),
testing::UnorderedElementsAre(Variable{"obj_w", 1},
Variable{"obj_h", 2}));
ASSERT_EQ(result, "obj.data[x + $obj_w$ * (y + 5 + $obj_h$ * (z))]");
}
TEST(Preprocessor, ReadFromTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 2, 3), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i,j,k]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "imageLoad(obj, ivec3(i, j, k))");
}
TEST(Preprocessor, ReadFromTexture1D) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyTexture({1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "imageLoad(obj, ivec2(i, 0))");
}
TEST(Preprocessor, WriteToBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite(" obj[i] =value", &result),
RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "obj.data[i] = value");
}
TEST(Preprocessor, WriteToBufferByIndex) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyBuffer(uint3(1, 2, 3), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite(" obj[i,j,k] =value", &result),
RewriteStatus::SUCCESS);
EXPECT_THAT(variable_accessor.GetUniformParameters(),
testing::UnorderedElementsAre(Variable{"obj_w", 1},
Variable{"obj_h", 2}));
ASSERT_EQ(result, "obj.data[i + $obj_w$ * (j + $obj_h$ * (k))] = value");
}
TEST(Preprocessor, WriteToTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i,j,k]= value ", &result),
RewriteStatus::SUCCESS);
ASSERT_EQ(result, "imageStore(obj, ivec3(i, j, k), value)");
}
TEST(Preprocessor, WriteToTexture1D) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyTexture({1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]= value ", &result),
RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "imageStore(obj, ivec2(i, 0), value)");
}
TEST(Preprocessor, FailedWriteToBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite(" obj[i,j] =value", &result),
RewriteStatus::ERROR);
ASSERT_EQ(result, "WRONG_NUMBER_OF_INDICES");
}
TEST(Preprocessor, FailedWriteToTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]= value ", &result), RewriteStatus::ERROR);
ASSERT_EQ(result, "WRONG_NUMBER_OF_INDICES");
}
TEST(Preprocessor, DeclareTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0})));
ASSERT_EQ(accessor.GetObjectDeclarations(),
"layout(rgba32f, binding = 0) readonly uniform highp image2DArray "
"obj;\n");
}
TEST(Preprocessor, DeclareBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(true, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
ASSERT_EQ(accessor.GetObjectDeclarations(),
"layout(binding = 0) buffer B0 { vec4 data[]; } obj;\n");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/object_accessor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d02217b8-66ec-4437-93a2-5f7cdff94d3b | cpp | tensorflow/tensorflow | cl_device | tensorflow/lite/delegates/gpu/cl/cl_device.cc | tensorflow/lite/delegates/gpu/cl/cl_device_test.cc | #include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
namespace tflite {
namespace gpu {
namespace cl {
void ParseQualcommOpenClCompilerVersion(
const std::string& cl_driver_version,
AdrenoInfo::OpenClCompilerVersion* result) {
const std::string start = "Compiler E031.";
size_t position = cl_driver_version.find(start);
if (position == std::string::npos) {
return;
}
const size_t main_part_length = 8;
if (position + start.length() + main_part_length >
cl_driver_version.length()) {
return;
}
const std::string main_part =
cl_driver_version.substr(position + start.length(), main_part_length);
if (!absl::ascii_isdigit(main_part[0]) ||
!absl::ascii_isdigit(main_part[1]) || main_part[2] != '.' ||
!absl::ascii_isdigit(main_part[3]) ||
!absl::ascii_isdigit(main_part[4]) || main_part[5] != '.' ||
!absl::ascii_isdigit(main_part[6]) ||
!absl::ascii_isdigit(main_part[7])) {
return;
}
result->major = (main_part[0] - '0') * 10 + (main_part[1] - '0');
result->minor = (main_part[3] - '0') * 10 + (main_part[4] - '0');
result->patch = (main_part[6] - '0') * 10 + (main_part[7] - '0');
}
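// Example: a CL_DRIVER_VERSION containing "Compiler E031.37.12.01" parses to
// major = 37, minor = 12, patch = 1; anything not matching the
// "Compiler E031.NN.NN.NN" pattern leaves |result| untouched.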
static void ParsePowerVRDriverVersion(const std::string& cl_driver_version,
PowerVRInfo::DriverVersion& result) {
size_t position = cl_driver_version.find('@');
if (position == std::string::npos) {
return;
}
int main = 0;
size_t curpos = 0;
while (curpos < position && absl::ascii_isdigit(cl_driver_version[curpos])) {
main = main * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
++curpos;
int minor = 0;
while (curpos < position) {
minor = minor * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
curpos = position + 1;
int id = 0;
while (curpos < cl_driver_version.length()) {
id = id * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
result.branch_main = main;
result.branch_minor = minor;
result.id = id;
}
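// Example: a driver version string such as "1.13@5776728" parses to
// branch_main = 1, branch_minor = 13, id = 5776728; strings without '@'
// leave |result| untouched.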
template <>
std::string GetDeviceInfo<std::string>(cl_device_id id, cl_device_info info) {
size_t size;
cl_int error = clGetDeviceInfo(id, info, 0, nullptr, &size);
if (error != CL_SUCCESS) {
return "";
}
std::string result(size - 1, 0);
error = clGetDeviceInfo(id, info, size, &result[0], nullptr);
if (error != CL_SUCCESS) {
return "";
}
return result;
}
namespace {
template <typename T>
T GetPlatformInfo(cl_platform_id id, cl_platform_info info) {
T result;
cl_int error = clGetPlatformInfo(id, info, sizeof(T), &result, nullptr);
if (error != CL_SUCCESS) {
return -1;
}
return result;
}
std::string GetPlatformInfo(cl_platform_id id, cl_platform_info info) {
size_t size;
cl_int error = clGetPlatformInfo(id, info, 0, nullptr, &size);
if (error != CL_SUCCESS) {
return "";
}
std::string result(size - 1, 0);
error = clGetPlatformInfo(id, info, size, &result[0], nullptr);
if (error != CL_SUCCESS) {
return "";
}
return result;
}
void GetDeviceWorkDimsSizes(cl_device_id id, int3* result) {
int dims_count =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS);
if (dims_count < 3) {
return;
}
std::vector<size_t> limits(dims_count);
cl_int error =
clGetDeviceInfo(id, CL_DEVICE_MAX_WORK_ITEM_SIZES,
sizeof(size_t) * dims_count, limits.data(), nullptr);
if (error != CL_SUCCESS) {
return;
}
result->x = limits[0];
result->y = limits[1];
result->z = limits[2];
}
OpenClVersion ParseCLVersion(const std::string& version) {
const auto first_dot_pos = version.find_first_of('.');
if (first_dot_pos == std::string::npos) {
return OpenClVersion::kCl1_0;
}
const int major = version[first_dot_pos - 1] - '0';
const int minor = version[first_dot_pos + 1] - '0';
if (major == 1) {
if (minor == 2) {
return OpenClVersion::kCl1_2;
} else if (minor == 1) {
return OpenClVersion::kCl1_1;
} else {
return OpenClVersion::kCl1_0;
}
} else if (major == 2) {
if (minor == 2) {
return OpenClVersion::kCl2_2;
} else if (minor == 1) {
return OpenClVersion::kCl2_1;
} else {
return OpenClVersion::kCl2_0;
}
} else if (major == 3) {
return OpenClVersion::kCl3_0;
} else {
return OpenClVersion::kCl1_0;
}
}
bool IsGPUVersionInRange(int gpu_version, int min_version, int max_version) {
return gpu_version >= min_version && gpu_version < max_version;
}
GpuInfo GpuInfoFromDeviceID(cl_device_id id, cl_platform_id platform_id) {
GpuInfo info;
info.opencl_info.platform_version =
GetPlatformInfo(platform_id, CL_PLATFORM_VERSION);
info.opencl_info.device_name = GetDeviceInfo<std::string>(id, CL_DEVICE_NAME);
info.opencl_info.vendor_name =
GetDeviceInfo<std::string>(id, CL_DEVICE_VENDOR);
info.opencl_info.opencl_c_version =
GetDeviceInfo<std::string>(id, CL_DEVICE_OPENCL_C_VERSION);
info.opencl_info.driver_version =
GetDeviceInfo<std::string>(id, CL_DRIVER_VERSION);
const std::string gpu_description = absl::StrCat(
info.opencl_info.device_name, " ", info.opencl_info.vendor_name, " ",
info.opencl_info.opencl_c_version);
GetGpuInfoFromDeviceDescription(gpu_description, GpuApi::kOpenCl, &info);
info.opencl_info.cl_version =
ParseCLVersion(info.opencl_info.opencl_c_version);
info.opencl_info.extensions =
absl::StrSplit(GetDeviceInfo<std::string>(id, CL_DEVICE_EXTENSIONS), ' ');
const std::vector<std::string> unsupported_extensions =
GetUnsupportedExtensions();
for (const auto& unsupported_extension : unsupported_extensions) {
for (auto it = info.opencl_info.extensions.begin();
it != info.opencl_info.extensions.end();) {
if (*it == unsupported_extension) {
it = info.opencl_info.extensions.erase(it);
} else {
++it;
}
}
}
info.opencl_info.supports_fp16 = false;
info.opencl_info.supports_image3d_writes = false;
for (const auto& ext : info.opencl_info.extensions) {
if (ext == "cl_khr_fp16") {
info.opencl_info.supports_fp16 = true;
}
if (ext == "cl_khr_3d_image_writes") {
info.opencl_info.supports_image3d_writes = true;
}
}
info.opencl_info.supports_images =
GetDeviceInfo<cl_bool>(id, CL_DEVICE_IMAGE_SUPPORT);
cl_device_fp_config f32_config =
GetDeviceInfo<cl_device_fp_config>(id, CL_DEVICE_SINGLE_FP_CONFIG);
info.opencl_info.supports_fp32_rtn = f32_config & CL_FP_ROUND_TO_NEAREST;
if (info.opencl_info.supports_fp16) {
cl_device_fp_config f16_config;
auto status = GetDeviceInfo<cl_device_fp_config>(
id, CL_DEVICE_HALF_FP_CONFIG, &f16_config);
if (status.ok() && !info.IsAMD()) {
info.opencl_info.supports_fp16_rtn = f16_config & CL_FP_ROUND_TO_NEAREST;
} else {
f16_config = f32_config;
info.opencl_info.supports_fp16_rtn = info.opencl_info.supports_fp32_rtn;
}
} else {
info.opencl_info.supports_fp16_rtn = false;
}
if (info.IsPowerVR()) {
if (!info.powervr_info.IsBetterThan(PowerVRGpu::kRogueGm9xxx)) {
info.opencl_info.supports_fp16 = false;
} else if (!info.opencl_info.supports_fp16) {
info.opencl_info.supports_fp16 = true;
info.opencl_info.supports_fp16_rtn = info.opencl_info.supports_fp32_rtn;
}
}
if (!info.opencl_info.supports_image3d_writes &&
((info.IsAdreno() && info.adreno_info.IsAdreno4xx()) ||
info.IsNvidia())) {
info.opencl_info.supports_image3d_writes = true;
}
info.opencl_info.compute_units_count =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MAX_COMPUTE_UNITS);
info.opencl_info.image2d_max_width =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_WIDTH);
info.opencl_info.image2d_max_height =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_HEIGHT);
info.opencl_info.buffer_max_size =
GetDeviceInfo<cl_ulong>(id, CL_DEVICE_MAX_MEM_ALLOC_SIZE);
info.opencl_info.max_allocation_size =
GetDeviceInfo<cl_ulong>(id, CL_DEVICE_MAX_MEM_ALLOC_SIZE);
if (info.opencl_info.cl_version >= OpenClVersion::kCl1_2) {
info.opencl_info.image_buffer_max_size =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE_MAX_BUFFER_SIZE);
info.opencl_info.image_array_max_layers =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE_MAX_ARRAY_SIZE);
}
info.opencl_info.image3d_max_width =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_WIDTH);
info.opencl_info.image3d_max_height =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_HEIGHT);
info.opencl_info.image3d_max_depth =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_DEPTH);
int3 max_work_group_sizes;
GetDeviceWorkDimsSizes(id, &max_work_group_sizes);
info.opencl_info.max_work_group_size_x = max_work_group_sizes.x;
info.opencl_info.max_work_group_size_y = max_work_group_sizes.y;
info.opencl_info.max_work_group_size_z = max_work_group_sizes.z;
info.opencl_info.max_work_group_total_size =
GetDeviceInfo<size_t>(id, CL_DEVICE_MAX_WORK_GROUP_SIZE);
info.opencl_info.dedicated_local_memory =
(GetDeviceInfo<cl_device_local_mem_type>(id, CL_DEVICE_LOCAL_MEM_TYPE) ==
CL_LOCAL);
if (info.IsCL30OrHigher()) {
info.opencl_info.preferred_work_group_size_multiple =
GetDeviceInfo<size_t>(id, CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_MULTIPLE);
} else {
info.opencl_info.preferred_work_group_size_multiple = 0;
}
info.opencl_info.base_addr_align_in_bits =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MEM_BASE_ADDR_ALIGN);
info.opencl_info.image_pitch_alignment = 0;
if (info.opencl_info.cl_version == OpenClVersion::kCl2_0 ||
info.opencl_info.cl_version == OpenClVersion::kCl2_1 ||
info.opencl_info.cl_version == OpenClVersion::kCl2_2) {
info.opencl_info.image_pitch_alignment =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT);
info.opencl_info.image_base_address_alignment =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT);
} else if (info.SupportsExtension("cl_khr_image2d_from_buffer")) {
cl_uint result = 0;
auto status =
GetDeviceInfo(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT_KHR, &result);
if (status.ok()) {
info.opencl_info.image_pitch_alignment = result;
}
result = 0;
status =
GetDeviceInfo(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT_KHR, &result);
if (status.ok()) {
info.opencl_info.image_base_address_alignment = result;
}
}
if (info.SupportsExtension("cl_arm_scheduling_controls")) {
auto capabilities =
GetDeviceInfo<cl_device_scheduling_controls_capabilities_arm>(
id, CL_DEVICE_SCHEDULING_CONTROLS_CAPABILITIES_ARM);
info.opencl_info.supports_register_allocation_arm =
capabilities & CL_DEVICE_SCHEDULING_REGISTER_ALLOCATION_ARM;
}
if (info.SupportsExtension("cl_intel_required_subgroup_size")) {
size_t sub_groups_ret_size;
cl_int status =
        clGetDeviceInfo(id, 0x4108 /* CL_DEVICE_SUB_GROUP_SIZES_INTEL */, 0,
                        nullptr, &sub_groups_ret_size);
if (status == CL_SUCCESS) {
size_t sub_groups_count = sub_groups_ret_size / sizeof(size_t);
std::vector<size_t> sub_group_sizes(sub_groups_count);
status =
          clGetDeviceInfo(id, 0x4108 /* CL_DEVICE_SUB_GROUP_SIZES_INTEL */,
                          sub_groups_ret_size, sub_group_sizes.data(), nullptr);
if (status == CL_SUCCESS) {
for (int i = 0; i < sub_groups_count; ++i) {
info.supported_subgroup_sizes.push_back(sub_group_sizes[i]);
}
}
}
}
if (info.IsAdreno()) {
ParseQualcommOpenClCompilerVersion(info.opencl_info.driver_version,
&info.adreno_info.cl_compiler_version);
} else if (info.IsPowerVR()) {
ParsePowerVRDriverVersion(info.opencl_info.driver_version,
info.powervr_info.driver_version);
}
return info;
}
}
CLDevice::CLDevice(cl_device_id id, cl_platform_id platform_id)
: info_(GpuInfoFromDeviceID(id, platform_id)),
id_(id),
platform_id_(platform_id) {
if (info_.IsAdreno() &&
info_.adreno_info.adreno_gpu == AdrenoGpu::kAdreno630) {
acceleration::AndroidInfo android_info;
if (acceleration::RequestAndroidInfo(&android_info).ok()) {
info_.adreno_info.compiler_bugs_in_a6xx =
android_info.android_sdk_version == "26";
}
}
}
CLDevice::CLDevice(const CLDevice& device)
: info_(device.info_), id_(device.id_), platform_id_(device.platform_id_) {}
CLDevice& CLDevice::operator=(const CLDevice& device) {
if (this != &device) {
info_ = device.info_;
id_ = device.id_;
platform_id_ = device.platform_id_;
}
return *this;
}
CLDevice::CLDevice(CLDevice&& device)
: info_(std::move(device.info_)),
id_(device.id_),
platform_id_(device.platform_id_) {
device.id_ = nullptr;
device.platform_id_ = nullptr;
}
CLDevice& CLDevice::operator=(CLDevice&& device) {
if (this != &device) {
id_ = nullptr;
platform_id_ = nullptr;
info_ = std::move(device.info_);
std::swap(id_, device.id_);
std::swap(platform_id_, device.platform_id_);
}
return *this;
}
std::string CLDevice::GetPlatformVersion() const {
return GetPlatformInfo(platform_id_, CL_PLATFORM_VERSION);
}
void CLDevice::DisableOneLayerTextureArray() {
info_.adreno_info.support_one_layer_texture_array = false;
}
absl::Status CreateDefaultGPUDevice(CLDevice* result) {
cl_uint num_platforms;
cl_int status = clGetPlatformIDs(0, nullptr, &num_platforms);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetPlatformIDs returned %d", status));
}
if (num_platforms == 0) {
return absl::UnknownError("No supported OpenCL platform.");
}
std::vector<cl_platform_id> platforms(num_platforms);
status = clGetPlatformIDs(num_platforms, platforms.data(), nullptr);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetPlatformIDs returned %d", status));
}
cl_platform_id platform_id = platforms[0];
cl_uint num_devices;
status =
clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 0, nullptr, &num_devices);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetDeviceIDs returned %d", status));
}
if (num_devices == 0) {
return absl::UnknownError("No GPU on current platform.");
}
std::vector<cl_device_id> devices(num_devices);
status = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, num_devices,
devices.data(), nullptr);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetDeviceIDs returned %d", status));
}
*result = CLDevice(devices[0], platform_id);
LoadOpenCLFunctionExtensions(platform_id);
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace cl {
TEST(QualcommOpenClCompilerVersionParsing, Base) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.53.41",
&result);
EXPECT_EQ(result.major, 79);
EXPECT_EQ(result.minor, 53);
EXPECT_EQ(result.patch, 41);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat0) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Assembler A337.79.53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat1) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.53.4",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat2) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031:79:53:41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat3) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.x53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat4) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.a9.53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/cl_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/cl_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c0931f4d-cd5e-4947-b16f-94cba53908d8 | cpp | tensorflow/tensorflow | cl_arguments | tensorflow/lite/delegates/gpu/cl/cl_arguments.cc | tensorflow/lite/delegates/gpu/cl/cl_arguments_test.cc | #include "tensorflow/lite/delegates/gpu/cl/cl_arguments.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/cl/qcom_thin_filter.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
bool IsWordSymbol(char symbol) {
return absl::ascii_isalnum(symbol) || symbol == '_';
}
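// Replaces only whole-word occurrences of old_word: matches that are part of a
// larger identifier (adjacent alphanumeric or '_' characters) are skipped.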
void ReplaceAllWords(const std::string& old_word, const std::string& new_word,
std::string* str) {
size_t position = str->find(old_word);
while (position != std::string::npos) {
char prev = position == 0 ? '.' : (*str)[position - 1];
char next = position + old_word.size() < str->size()
? (*str)[position + old_word.size()]
: '.';
if (IsWordSymbol(prev) || IsWordSymbol(next)) {
position = str->find(old_word, position + 1);
continue;
}
str->replace(position, old_word.size(), new_word);
position = str->find(old_word, position + new_word.size());
}
}
void AppendArgument(const std::string& arg, std::string* args) {
if (!args->empty()) {
absl::StrAppend(args, ",\n ");
}
absl::StrAppend(args, arg);
}
std::string GetImageModifier(AccessType access) {
switch (access) {
case AccessType::READ:
return "__read_only";
case AccessType::WRITE:
return "__write_only";
case AccessType::READ_WRITE:
return "__read_write";
}
}
std::string GetDefaultSamplers(const GpuInfo& gpu_info) {
std::string result;
result +=
"__constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | "
"CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;\n";
if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
result +=
"__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | "
"CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;\n";
} else {
result +=
"__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | "
"CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;\n";
}
return result;
}
absl::Status CreateCLObject(GPUObjectDescriptor* desc, CLContext* context,
GPUObjectPtr* result) {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(desc);
if (buffer_desc) {
Buffer gpu_buffer;
RETURN_IF_ERROR(
gpu_buffer.CreateFromBufferDescriptor(*buffer_desc, context));
*result = std::make_unique<Buffer>(std::move(gpu_buffer));
return absl::OkStatus();
}
const auto* tensor_desc = dynamic_cast<const TensorDescriptor*>(desc);
if (tensor_desc) {
Tensor gpu_tensor;
RETURN_IF_ERROR(gpu_tensor.CreateFromDescriptor(*tensor_desc, context));
*result = std::make_unique<Tensor>(std::move(gpu_tensor));
return absl::OkStatus();
}
const auto* qcom_thin_filter_desc =
dynamic_cast<const QcomThinFilterDescriptor*>(desc);
if (qcom_thin_filter_desc) {
QcomThinFilter thin_filter;
RETURN_IF_ERROR(
thin_filter.CreateFromDescriptor(*qcom_thin_filter_desc, context));
*result = std::make_unique<QcomThinFilter>(std::move(thin_filter));
return absl::OkStatus();
}
return absl::InvalidArgumentError("Unknown GPU descriptor.");
}
}
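// Out-of-line definition of the static constexpr member (needed before C++17).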
constexpr char CLArguments::kArgsPrefix[];
absl::Status CLArguments::Init(const GpuInfo& gpu_info, CLContext* context,
Arguments* args, std::string* code) {
RETURN_IF_ERROR(AllocateObjects(*args, context));
RETURN_IF_ERROR(AddObjectArgs(gpu_info, *args));
args->MoveObjectRefs(&object_refs_);
const bool use_f32_for_halfs = gpu_info.IsPowerVR();
CopyArguments(*args, use_f32_for_halfs);
RETURN_IF_ERROR(SetObjectsResources(*args));
RenameArgumentsInCode(code);
args->ResolveArgsPass(code);
*code = absl::Substitute(*code, GetListOfArgs());
if (gpu_info.SupportsImages()) {
*code = GetDefaultSamplers(gpu_info) + *code;
}
return absl::OkStatus();
}
absl::Status CLArguments::Init(const GpuInfo& gpu_info, Arguments* args,
CLContext* context) {
RETURN_IF_ERROR(AllocateObjects(*args, context));
RETURN_IF_ERROR(AddObjectArgs(gpu_info, *args));
args->MoveObjectRefs(&object_refs_);
const bool use_f32_for_halfs = gpu_info.IsPowerVR();
CopyArguments(*args, use_f32_for_halfs);
RETURN_IF_ERROR(SetObjectsResources(*args));
return absl::OkStatus();
}
absl::Status CLArguments::AllocateObjects(const Arguments& args,
CLContext* context) {
objects_.resize(args.GetObjects().size());
int i = 0;
for (auto& t : args.GetObjects()) {
RETURN_IF_ERROR(CreateCLObject(t.second.get(), context, &objects_[i]));
i++;
}
return absl::OkStatus();
}
absl::Status CLArguments::AddObjectArgs(const GpuInfo& gpu_info,
const Arguments& args) {
for (const auto& t : args.GetObjects()) {
AddGPUResources(t.first, t.second->GetGPUResources(gpu_info));
}
for (const auto& t : args.GetObjectRefs()) {
AddGPUResources(t.first, t.second->GetGPUResources(gpu_info));
}
return absl::OkStatus();
}
absl::Status CLArguments::SetObjectsResources(const Arguments& args) {
int i = 0;
for (const auto& t : args.GetObjects()) {
GPUResourcesWithValue resources;
RETURN_IF_ERROR(objects_[i]->GetGPUResources(t.second.get(), &resources));
RETURN_IF_ERROR(SetGPUResources(t.first, resources));
i++;
}
return absl::OkStatus();
}
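// Copies scalar arguments and packs the active ones into the shared
// int4/float4/half4 staging arrays; when use_f32_for_halfs is set (PowerVR),
// half values are stored in the float4 array instead.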
void CLArguments::CopyArguments(const Arguments& args, bool use_f32_for_halfs) {
for (const auto& fvalue : args.GetFloatValues()) {
auto& new_val = float_values_[fvalue.first];
new_val.value = fvalue.second.value;
new_val.active = fvalue.second.active;
if (fvalue.second.active) {
new_val.offset = shared_float4s_data_.size();
shared_float4s_data_.push_back(new_val.value);
}
}
for (const auto& ivalue : args.GetIntValues()) {
auto& new_val = int_values_[ivalue.first];
new_val.value = ivalue.second.value;
new_val.active = ivalue.second.active;
if (ivalue.second.active) {
new_val.offset = shared_int4s_data_.size();
shared_int4s_data_.push_back(new_val.value);
}
}
for (const auto& hfvalue : args.GetHalfValues()) {
auto& new_val = half_values_[hfvalue.first];
new_val.value = hfvalue.second.value;
new_val.active = hfvalue.second.active;
if (hfvalue.second.active) {
if (use_f32_for_halfs) {
new_val.store_as_f32 = true;
new_val.offset = shared_float4s_data_.size();
shared_float4s_data_.push_back(new_val.value);
} else {
new_val.store_as_f32 = false;
new_val.offset = shared_half4s_data_.size();
shared_half4s_data_.push_back(new_val.value);
}
}
}
int shared_int4s_aligned_size = AlignByN(shared_int4s_data_.size(), 4);
shared_int4s_data_.resize(shared_int4s_aligned_size);
int shared_float4s_aligned_size = AlignByN(shared_float4s_data_.size(), 4);
shared_float4s_data_.resize(shared_float4s_aligned_size);
int shared_half4s_aligned_size = AlignByN(shared_half4s_data_.size(), 4);
shared_half4s_data_.resize(shared_half4s_aligned_size);
}
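// Rewrites kArgsPrefix-qualified scalar names in the kernel source so they
// read from the packed shared_int4_N / shared_float4_N / shared_half4_N
// fields.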
void CLArguments::RenameArgumentsInCode(std::string* code) {
const std::string postfixes[4] = {"x", "y", "z", "w"};
for (const auto& fvalue : float_values_) {
if (fvalue.second.active) {
std::string index = std::to_string(fvalue.second.offset / 4);
std::string new_name =
"shared_float4_" + index + "." + postfixes[fvalue.second.offset % 4];
ReplaceAllWords(kArgsPrefix + fvalue.first, new_name, code);
}
}
for (const auto& ivalue : int_values_) {
if (ivalue.second.active) {
std::string index = std::to_string(ivalue.second.offset / 4);
std::string new_name =
"shared_int4_" + index + "." + postfixes[ivalue.second.offset % 4];
ReplaceAllWords(kArgsPrefix + ivalue.first, new_name, code);
}
}
for (const auto& hfvalue : half_values_) {
if (hfvalue.second.active) {
std::string index = std::to_string(hfvalue.second.offset / 4);
std::string new_name;
if (hfvalue.second.store_as_f32) {
new_name = "(half)(shared_float4_" + index + "." +
postfixes[hfvalue.second.offset % 4] + ")";
} else {
new_name = "shared_half4_" + index + "." +
postfixes[hfvalue.second.offset % 4];
}
ReplaceAllWords(kArgsPrefix + hfvalue.first, new_name, code);
}
}
}
void CLArguments::AddBuffer(const std::string& name,
const GPUBufferDescriptor& desc) {
buffers_[name].desc = desc;
}
void CLArguments::AddImage2D(const std::string& name,
const GPUImage2DDescriptor& desc) {
images2d_[name].desc = desc;
}
void CLArguments::AddImage2DArray(const std::string& name,
const GPUImage2DArrayDescriptor& desc) {
image2d_arrays_[name].desc = desc;
}
void CLArguments::AddImage3D(const std::string& name,
const GPUImage3DDescriptor& desc) {
images3d_[name].desc = desc;
}
void CLArguments::AddImageBuffer(const std::string& name,
const GPUImageBufferDescriptor& desc) {
image_buffers_[name].desc = desc;
}
void CLArguments::AddCustomMemory(const std::string& name,
const GPUCustomMemoryDescriptor& desc) {
custom_memories_[name].desc = desc;
}
void CLArguments::AddGPUResources(const std::string& name,
const GPUResources& resources) {
for (const auto& r : resources.buffers) {
AddBuffer(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.images2d) {
AddImage2D(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.image2d_arrays) {
AddImage2DArray(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.images3d) {
AddImage3D(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.image_buffers) {
AddImageBuffer(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.custom_memories) {
AddCustomMemory(absl::StrCat(name, "_", r.first), r.second);
}
}
absl::Status CLArguments::SetInt(const std::string& name, int value) {
auto it = int_values_.find(name);
if (it == int_values_.end()) {
return absl::NotFoundError(
absl::StrCat("No int argument with name - ", name));
}
it->second.value = value;
if (it->second.active) {
shared_int4s_data_[it->second.offset] = value;
}
return absl::OkStatus();
}
absl::Status CLArguments::SetFloat(const std::string& name, float value) {
auto it = float_values_.find(name);
if (it == float_values_.end()) {
return absl::NotFoundError(
absl::StrCat("No float argument with name - ", name));
}
it->second.value = value;
if (it->second.active) {
shared_float4s_data_[it->second.offset] = value;
}
return absl::OkStatus();
}
absl::Status CLArguments::SetHalf(const std::string& name, half value) {
auto it = half_values_.find(name);
if (it == half_values_.end()) {
return absl::NotFoundError(
absl::StrCat("No half argument with name - ", name));
}
it->second.value = value;
if (it->second.active) {
if (it->second.store_as_f32) {
shared_float4s_data_[it->second.offset] = value;
} else {
shared_half4s_data_[it->second.offset] = value;
}
}
return absl::OkStatus();
}
absl::Status CLArguments::SetImage2D(const std::string& name, cl_mem memory) {
auto it = images2d_.find(name);
if (it == images2d_.end()) {
return absl::NotFoundError(
absl::StrCat("No image2D argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetBuffer(const std::string& name, cl_mem memory) {
auto it = buffers_.find(name);
if (it == buffers_.end()) {
return absl::NotFoundError(
absl::StrCat("No buffer argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetImage2DArray(const std::string& name,
cl_mem memory) {
auto it = image2d_arrays_.find(name);
if (it == image2d_arrays_.end()) {
return absl::NotFoundError(
absl::StrCat("No image2D array argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetImage3D(const std::string& name, cl_mem memory) {
auto it = images3d_.find(name);
if (it == images3d_.end()) {
return absl::NotFoundError(
absl::StrCat("No image3D argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetImageBuffer(const std::string& name,
cl_mem memory) {
auto it = image_buffers_.find(name);
if (it == image_buffers_.end()) {
return absl::NotFoundError(
absl::StrCat("No image buffer argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetCustomMemory(const std::string& name,
cl_mem memory) {
auto it = custom_memories_.find(name);
if (it == custom_memories_.end()) {
return absl::NotFoundError(
absl::StrCat("No custom memory argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetObjectRef(const std::string& name,
const GPUObject* object) {
auto it = object_refs_.find(name);
if (it == object_refs_.end()) {
return absl::NotFoundError(
absl::StrCat("No object ref with name - ", name));
}
GPUResourcesWithValue resources;
RETURN_IF_ERROR(object->GetGPUResources(it->second.get(), &resources));
return SetGPUResources(name, resources);
}
absl::Status CLArguments::SetGPUResources(
const std::string& name, const GPUResourcesWithValue& resources) {
for (const auto& r : resources.generic.ints) {
RETURN_IF_ERROR(SetInt(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.generic.floats) {
RETURN_IF_ERROR(SetFloat(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.buffers) {
RETURN_IF_ERROR(SetBuffer(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.images2d) {
RETURN_IF_ERROR(SetImage2D(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.image2d_arrays) {
RETURN_IF_ERROR(
SetImage2DArray(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.images3d) {
RETURN_IF_ERROR(SetImage3D(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.image_buffers) {
RETURN_IF_ERROR(SetImageBuffer(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.custom_memories) {
RETURN_IF_ERROR(
SetCustomMemory(absl::StrCat(name, "_", r.first), r.second));
}
return absl::OkStatus();
}
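// Builds the kernel parameter list that Init() substitutes into the "$0"
// placeholder of the kernel source.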
std::string CLArguments::GetListOfArgs() {
std::string result;
for (auto& t : buffers_) {
const std::string type_name =
t.second.desc.data_type == DataType::FLOAT32 ? "float" : "half";
std::string attributes;
for (const auto& attr : t.second.desc.attributes) {
attributes += absl::StrCat(" __attribute__((", attr, "))");
}
std::string cl_type;
if (t.second.desc.data_type == DataType::BOOL) {
cl_type = ToCLDataType(DataType::UINT8, t.second.desc.element_size);
} else {
cl_type =
ToCLDataType(t.second.desc.data_type, t.second.desc.element_size);
}
AppendArgument(absl::StrCat(MemoryTypeToCLType(t.second.desc.memory_type),
" ", cl_type, "* ", t.first, attributes),
&result);
}
for (auto& t : image_buffers_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image1d_buffer_t ", t.first),
&result);
}
for (auto& t : images2d_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image2d_t ", t.first),
&result);
}
for (auto& t : image2d_arrays_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image2d_array_t ", t.first),
&result);
}
for (auto& t : images3d_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image3d_t ", t.first),
&result);
}
for (auto& t : custom_memories_) {
AppendArgument(absl::StrCat(t.second.desc.type_name, " ", t.first),
&result);
}
for (int i = 0; i < shared_int4s_data_.size() / 4; ++i) {
AppendArgument(absl::StrCat("int4 shared_int4_", i), &result);
}
for (int i = 0; i < shared_float4s_data_.size() / 4; ++i) {
AppendArgument(absl::StrCat("float4 shared_float4_", i), &result);
}
for (int i = 0; i < shared_half4s_data_.size() / 4; ++i) {
AppendArgument(absl::StrCat("half4 shared_half4_", i), &result);
}
return result;
}
absl::Status CLArguments::Bind(cl_kernel kernel, int offset) {
for (auto& t : buffers_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : image_buffers_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : images2d_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : image2d_arrays_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : images3d_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : custom_memories_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (int i = 0; i < shared_int4s_data_.size() / 4; ++i) {
const int error_code = clSetKernelArg(kernel, offset, sizeof(int32_t) * 4,
&shared_int4s_data_[i * 4]);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (int i = 0; i < shared_float4s_data_.size() / 4; ++i) {
const int error_code = clSetKernelArg(kernel, offset, sizeof(int32_t) * 4,
&shared_float4s_data_[i * 4]);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (int i = 0; i < shared_half4s_data_.size() / 4; ++i) {
const int error_code = clSetKernelArg(kernel, offset, sizeof(int16_t) * 4,
&shared_half4s_data_[i * 4]);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
return absl::OkStatus();
}
bool CLArguments::HasEqualScalarArguments(const CLArguments& other) const {
return (other.int_values_ == int_values_ &&
other.float_values_ == float_values_ &&
other.half_values_ == half_values_);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/cl_arguments.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_test.h"
#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST(CLArgumentsTest, TestSelectorResolve) {
BufferDescriptor desc;
desc.element_type = DataType::FLOAT32;
desc.element_size = 4;
desc.memory_type = MemoryType::GLOBAL;
Arguments args;
args.AddObjectRef("weights", AccessType::READ,
std::make_unique<BufferDescriptor>(std::move(desc)));
std::string sample_code = R"(
__kernel void main_function($0) {
if (a < 3) {
value = args.weights.Read(id);
}
})";
CLArguments cl_args;
GpuInfo gpu_info;
ASSERT_OK(cl_args.Init(gpu_info, nullptr, &args, &sample_code));
EXPECT_TRUE(absl::StrContains(sample_code, "value = weights_buffer[id];"));
EXPECT_TRUE(
absl::StrContains(sample_code, "__global float4* weights_buffer"));
}
TEST(CLArgumentsTest, TestNoSelector) {
BufferDescriptor desc;
desc.element_type = DataType::FLOAT32;
desc.element_size = 4;
desc.memory_type = MemoryType::GLOBAL;
Arguments args;
args.AddObjectRef("weights", AccessType::READ,
std::make_unique<BufferDescriptor>(std::move(desc)));
std::string sample_code = R"(
if (a < 3) {
value = args.weights.UnknownSelector(id);
}
)";
CLArguments cl_args;
GpuInfo gpu_info;
EXPECT_FALSE(cl_args.Init(gpu_info, nullptr, &args, &sample_code).ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/cl_arguments.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/cl_arguments_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c7e80e4-5c2e-4c2d-b596-3d9fceb6252c | cpp | tensorflow/tensorflow | simple_opaque_delegate | tensorflow/lite/delegates/utils/simple_opaque_delegate.cc | tensorflow/lite/delegates/utils/simple_opaque_delegate_test.cc | #include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace {
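// Builds the TfLiteOperator that backs the delegate kernel: its
// init/prepare/invoke/free callbacks forward to the
// SimpleOpaqueDelegateKernelInterface instance stored as the node's user data.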
TfLiteOperator* CreateDelegateKernelRegistration(
SimpleOpaqueDelegateInterface* delegate) {
TfLiteOperator* kernel_registration =
TfLiteOperatorCreate(kTfLiteBuiltinDelegate, delegate->Name(),
                           /*version=*/1, /*user_data=*/nullptr);
TfLiteOperatorSetFreeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleOpaqueDelegateInterface*>(buffer);
});
TfLiteOperatorSetInitWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteOpaqueDelegateParams* params =
reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
if (params == nullptr) {
return nullptr;
}
auto* delegate_data = reinterpret_cast<SimpleOpaqueDelegateInterface*>(
params->delegate_data);
std::unique_ptr<SimpleOpaqueDelegateKernelInterface> delegate_kernel(
delegate_data->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
});
TfLiteOperatorSetPrepareWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
return delegate_kernel->Prepare(context, opaque_node);
});
TfLiteOperatorSetInvokeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, opaque_node);
});
return kernel_registration;
}
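// Walks the execution plan, collects every node the delegate reports as
// supported, and asks the runtime to replace those nodes with the delegate
// kernel created above.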
TfLiteStatus DelegatePrepare(TfLiteOpaqueContext* opaque_context,
TfLiteOpaqueDelegate* opaque_delegate,
void* data) {
auto* simple_opaque_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
TF_LITE_ENSURE_STATUS(simple_opaque_delegate->Initialize(opaque_context));
std::vector<int> supported_nodes;
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(
TfLiteOpaqueContextGetExecutionPlan(opaque_context, &execution_plan));
IntArrayUniquePtr plan(TfLiteIntArrayCopy(execution_plan));
for (int i = 0; i < plan->size; ++i) {
const int node_id = plan->data[i];
TfLiteOpaqueNode* opaque_node;
TfLiteOperator* registration_external;
TfLiteOpaqueContextGetNodeAndRegistration(
opaque_context, node_id, &opaque_node, ®istration_external);
if (simple_opaque_delegate->IsNodeSupportedByDelegate(
registration_external, opaque_node, opaque_context)) {
supported_nodes.push_back(node_id);
}
}
TfLiteOperator* delegate_kernel_registration =
CreateDelegateKernelRegistration(simple_opaque_delegate);
return TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
opaque_context, delegate_kernel_registration,
BuildTfLiteArray(supported_nodes).get(), opaque_delegate);
}
}
TfLiteOpaqueDelegate* TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleOpaqueDelegateInterface> simple_delegate,
int64_t flags) {
if (simple_delegate == nullptr) {
return {};
}
TfLiteOpaqueDelegateBuilder opaque_delegate_builder{};
opaque_delegate_builder.Prepare = &DelegatePrepare;
opaque_delegate_builder.flags = flags;
opaque_delegate_builder.data = simple_delegate.release();
opaque_delegate_builder.CopyFromBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
return simple_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
};
opaque_delegate_builder.CopyToBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
return simple_delegate->CopyToBufferHandle(context, buffer_handle,
tensor);
};
opaque_delegate_builder.FreeBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle* buffer_handle) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
simple_delegate->FreeBufferHandle(context, buffer_handle);
};
return TfLiteOpaqueDelegateCreate(&opaque_delegate_builder);
}
void TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(
TfLiteOpaqueDelegate* opaque_delegate) {
if (!opaque_delegate) return;
auto* simple_delegate = reinterpret_cast<SimpleOpaqueDelegateInterface*>(
TfLiteOpaqueDelegateGetData(opaque_delegate));
delete simple_delegate;
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
} | #include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <array>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/delegate_test_util.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
namespace tflite {
class TestDelegate : public ::testing::Test {};
TEST_F(TestDelegate, TestDataAddBin_SingleInputSingleOutput_FullyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");
TfLiteQuantizationParams input_params =
TfLiteTensorQuantizationParams(input_tensor);
EXPECT_EQ(input_params.scale, 0.f);
EXPECT_EQ(input_params.zero_point, 0);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(DelegateTest,
TestDataAddBin_SingleInputSingleOutput_FullyDelegated_ResizeInputTensors) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
std::array<int, 1> input_dims = {2};
ASSERT_EQ(TfLiteInterpreterResizeInputTensor(
interpreter, 0, input_dims.data(), input_dims.size()),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
EXPECT_EQ(TfLiteTensorNumDims(input_tensor), 1);
EXPECT_EQ(TfLiteTensorDim(input_tensor, 0), 2);
EXPECT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2);
EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");
TfLiteQuantizationParams input_params =
TfLiteTensorQuantizationParams(input_tensor);
EXPECT_EQ(input_params.scale, 0.f);
EXPECT_EQ(input_params.zero_point, 0);
std::array<float, 2> input = {1.f, 3.f};
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_EQ(TfLiteTensorNumDims(output_tensor), 1);
EXPECT_EQ(TfLiteTensorDim(output_tensor, 0), 2);
EXPECT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
std::array<float, 2> output;
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
EXPECT_EQ(output[0], 3.f);
EXPECT_EQ(output[1], 9.f);
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(DelegateTest, TestDataMultiAddBin_MultiInputMultiOutput_FullyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/multi_add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 4);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 2);
TfLiteTensor* input_tensor0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensor* input_tensor1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
TfLiteTensor* input_tensor2 =
TfLiteInterpreterGetInputTensor(interpreter, 2);
TfLiteTensor* input_tensor3 =
TfLiteInterpreterGetInputTensor(interpreter, 3);
std::vector<TfLiteTensor*> input_tensors{input_tensor0, input_tensor1,
input_tensor2, input_tensor3};
for (TfLiteTensor* input_tensor : input_tensors) {
const float kTensorCellValue = 1.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
}
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor0 =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
const TfLiteTensor* output_tensor1 =
TfLiteInterpreterGetOutputTensor(interpreter, 1);
std::vector<const TfLiteTensor*> output_tensors{output_tensor0,
output_tensor1};
for (const TfLiteTensor* output_tensor : output_tensors) {
int64_t n = tflite::NumElements(output_tensor);
std::vector<float> output_tensor_values(n, 0);
ASSERT_EQ(
TfLiteTensorCopyToBuffer(output_tensor, output_tensor_values.data(),
output_tensor_values.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < n; ++i) {
EXPECT_EQ(output_tensor_values[i], 3.f);
}
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TfLiteOperator* CreateDelegateKernelRegistrationImpl(
SimpleOpaqueDelegateInterface* delegate) {
TfLiteOperator* kernel_registration = TfLiteOperatorCreate(
      kTfLiteBuiltinDelegate, delegate->Name(), /*version=*/1,
      /*user_data=*/nullptr);
TfLiteOperatorSetFreeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleOpaqueDelegateInterface*>(buffer);
});
TfLiteOperatorSetInitWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, const char* buffer,
size_t length) -> void* {
auto* params =
reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
if (params == nullptr) {
return nullptr;
}
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(
params->delegate_data);
std::unique_ptr<SimpleOpaqueDelegateKernelInterface> delegate_kernel(
simple_delegate->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
});
TfLiteOperatorSetPrepareWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
return delegate_kernel->Prepare(context, opaque_node);
});
TfLiteOperatorSetInvokeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, opaque_node);
});
return kernel_registration;
}
using ::tflite::delegates::test_utils::TestFP16Delegation;
TEST_F(TestFP16Delegation, MultipleDelegateKernels) {
auto my_simple_delegate = std::make_unique<example::SampleStableDelegate>();
TfLiteOpaqueDelegate* opaque_delegate =
TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::move(my_simple_delegate));
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(
reinterpret_cast<TfLiteDelegate*>(opaque_delegate)),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 7);
VerifyInvoke();
TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(opaque_delegate);
}
class MySimpleOpaqueDelegateWithBufferHandleSupport
: public example::SampleStableDelegate {
public:
static constexpr int kDelegateOutputValue = 42;
TfLiteStatus CopyFromBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) override {
auto* output = reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
std::vector<float> test_output(
example::helpers::CalculateNumElements(tensor), kDelegateOutputValue);
memcpy(output, test_output.data(), test_output.size() * sizeof(float));
return kTfLiteOk;
}
void FreeBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle* handle) override {
recorded_buffer_handle_ = *handle;
free_buffer_handle_called_ = true;
}
int recorded_buffer_handle_ = -1;
bool free_buffer_handle_called_ = false;
};
TEST_F(TestDelegate, SetBufferHandle) {
MySimpleOpaqueDelegateWithBufferHandleSupport my_simple_delegate;
TfLiteOpaqueDelegateBuilder opaque_delegate_builder{};
opaque_delegate_builder.Prepare = [](TfLiteOpaqueContext* opaque_context,
TfLiteOpaqueDelegate* opaque_delegate,
void* data) {
auto* simple_opaque_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
TF_LITE_ENSURE_STATUS(simple_opaque_delegate->Initialize(opaque_context));
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(
TfLiteOpaqueContextGetExecutionPlan(opaque_context, &execution_plan));
TfLiteOperator* delegate_kernel_registration =
CreateDelegateKernelRegistrationImpl(simple_opaque_delegate);
return TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
opaque_context, delegate_kernel_registration, execution_plan,
opaque_delegate);
};
opaque_delegate_builder.flags = kTfLiteDelegateFlagsNone;
opaque_delegate_builder.data = &my_simple_delegate;
opaque_delegate_builder.CopyFromBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) -> TfLiteStatus {
auto* simple_opaque_delegate =
reinterpret_cast<MySimpleOpaqueDelegateWithBufferHandleSupport*>(data);
simple_opaque_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
return kTfLiteOk;
};
opaque_delegate_builder.FreeBufferHandle = [](TfLiteOpaqueContext* context,
TfLiteOpaqueDelegate* delegate,
void* data,
TfLiteBufferHandle* handle) {
auto* simple_opaque_delegate =
reinterpret_cast<MySimpleOpaqueDelegateWithBufferHandleSupport*>(data);
simple_opaque_delegate->FreeBufferHandle(context, handle);
};
TfLiteDelegate tflite_delegate{};
tflite_delegate.opaque_delegate_builder = &opaque_delegate_builder;
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder builder(*model, resolver);
builder.AddDelegate(&tflite_delegate);
std::unique_ptr<tflite::Interpreter> interpreter;
builder(&interpreter);
ASSERT_NE(interpreter, nullptr);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
constexpr int kTensorDimensions = 1 * 8 * 8 * 3;
std::vector<float> floats(kTensorDimensions, 1);
memcpy(interpreter->typed_input_tensor<float>(0), floats.data(),
floats.size() * sizeof(float));
EXPECT_FALSE(my_simple_delegate.free_buffer_handle_called_);
int first_buffer_handle = 1;
const int kOutputTensorIndex = 2;
interpreter->SetBufferHandle(
kOutputTensorIndex, first_buffer_handle,
reinterpret_cast<TfLiteDelegate*>(&tflite_delegate));
TfLiteTensor* output_t = interpreter->output_tensor(0);
output_t->data_is_stale = true;
EXPECT_FALSE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_NE(my_simple_delegate.recorded_buffer_handle_, first_buffer_handle);
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
std::vector<float> outputs(kTensorDimensions, 0);
memcpy(outputs.data(), interpreter->typed_output_tensor<float>(0),
outputs.size() * sizeof(float));
for (int i = 0; i < outputs.size(); ++i) {
EXPECT_EQ(
outputs[i],
MySimpleOpaqueDelegateWithBufferHandleSupport::kDelegateOutputValue);
}
int next_buffer_handle = first_buffer_handle + 1;
interpreter->SetBufferHandle(kOutputTensorIndex, next_buffer_handle,
&tflite_delegate);
EXPECT_TRUE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_EQ(my_simple_delegate.recorded_buffer_handle_, first_buffer_handle);
my_simple_delegate.free_buffer_handle_called_ = false;
my_simple_delegate.recorded_buffer_handle_ = first_buffer_handle = -1;
interpreter.reset();
EXPECT_TRUE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_EQ(my_simple_delegate.recorded_buffer_handle_, next_buffer_handle);
}
TEST(DelegateTest,
TestDataConvHugeIm2ColBin_MultiInputSingleOutput_PartiallyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/conv_huge_im2col.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 4);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
TfLiteTensor* input_tensor0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensor* input_tensor1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
TfLiteTensor* input_tensor2 =
TfLiteInterpreterGetInputTensor(interpreter, 2);
TfLiteTensor* input_tensor3 =
TfLiteInterpreterGetInputTensor(interpreter, 3);
std::vector<TfLiteTensor*> input_tensors{input_tensor0, input_tensor1,
input_tensor2, input_tensor3};
for (TfLiteTensor* input_tensor : input_tensors) {
const float kTensorCellValue = 4.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
}
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
int64_t n = tflite::NumElements(output_tensor);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < n; ++i) {
EXPECT_EQ(output[i], 4);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_opaque_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_opaque_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1461e3e-bfdf-4425-90cb-2c455821fae2 | cpp | tensorflow/tensorflow | simple_delegate | tensorflow/lite/delegates/utils/simple_delegate.cc | tensorflow/lite/delegates/utils/simple_delegate_test.cc | #include "tensorflow/lite/delegates/utils/simple_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
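// Builds the TfLiteRegistration for the delegate kernel that replaces the
// supported node subsets. The init/free/prepare/invoke callbacks forward to
// the SimpleDelegateKernelInterface instance created by the delegate.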
TfLiteRegistration GetDelegateKernelRegistration(
SimpleDelegateInterface* delegate) {
TfLiteRegistration kernel_registration{};
kernel_registration.profiling_string = nullptr;
kernel_registration.builtin_code = kTfLiteBuiltinDelegate;
kernel_registration.custom_name = delegate->Name();
kernel_registration.version = 1;
kernel_registration.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleDelegateKernelInterface*>(buffer);
};
kernel_registration.init = [](TfLiteContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteDelegateParams* params =
reinterpret_cast<const TfLiteDelegateParams*>(buffer);
if (params == nullptr) {
TF_LITE_KERNEL_LOG(context, "NULL TfLiteDelegateParams passed.");
return nullptr;
}
auto* delegate =
reinterpret_cast<SimpleDelegateInterface*>(params->delegate->data_);
std::unique_ptr<SimpleDelegateKernelInterface> delegate_kernel(
delegate->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
};
kernel_registration.prepare = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
if (node->user_data == nullptr) {
TF_LITE_KERNEL_LOG(context, "Delegate kernel was not initialized");
return kTfLiteError;
}
SimpleDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleDelegateKernelInterface*>(node->user_data);
return delegate_kernel->Prepare(context, node);
};
kernel_registration.invoke = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
SimpleDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleDelegateKernelInterface*>(node->user_data);
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, node);
};
return kernel_registration;
}
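// Called by the runtime when the delegate is applied to a graph: partitions
// the graph with GraphPartitionHelper, keeps the largest supported partitions
// allowed by the delegate options, and replaces those node subsets with the
// delegate kernel registration built above.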
TfLiteStatus DelegatePrepare(TfLiteContext* context,
TfLiteDelegate* base_delegate) {
auto* delegate =
reinterpret_cast<SimpleDelegateInterface*>(base_delegate->data_);
auto delegate_options = delegate->DelegateOptions();
if (delegate_options.max_delegated_partitions <= 0)
delegate_options.max_delegated_partitions = std::numeric_limits<int>::max();
TF_LITE_ENSURE_STATUS(delegate->Initialize(context));
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
return delegate->IsNodeSupportedByDelegate(registration, node, context);
};
delegates::GraphPartitionHelper helper(context, node_supported_fn);
TF_LITE_ENSURE_STATUS(helper.Partition(nullptr));
std::vector<int> supported_nodes = helper.GetNodesOfFirstNLargestPartitions(
delegate_options.max_delegated_partitions,
delegate_options.min_nodes_per_partition);
// supported_nodes.size() is a size_t; cast it to match the %d format
// specifier used below.
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
                     "%s delegate: %d nodes delegated out of %d nodes with "
                     "%d partitions.\n",
                     delegate->Name(),
                     static_cast<int>(supported_nodes.size()),
                     helper.num_total_nodes(), helper.num_partitions());
TfLiteRegistration delegate_kernel_registration =
GetDelegateKernelRegistration(delegate);
return context->ReplaceNodeSubsetsWithDelegateKernels(
context, delegate_kernel_registration,
BuildTfLiteArray(supported_nodes).get(), base_delegate);
}
}
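// Wraps a SimpleDelegateInterface in a TfLiteDelegate and installs
// buffer-handle callbacks that forward to it; ownership of `simple_delegate`
// is transferred to the returned TfLiteDelegate.
//
// Rough usage sketch (MyDelegate is a hypothetical SimpleDelegateInterface
// subclass, not defined in this file):
//
//   TfLiteDelegate* delegate = TfLiteDelegateFactory::CreateSimpleDelegate(
//       std::make_unique<MyDelegate>());
//   interpreter->ModifyGraphWithDelegate(delegate);
//   // ... run inference; the delegate must outlive the interpreter ...
//   TfLiteDelegateFactory::DeleteSimpleDelegate(delegate);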
TfLiteDelegate* TfLiteDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleDelegateInterface> simple_delegate, int64_t flag) {
if (simple_delegate == nullptr) {
return nullptr;
}
auto delegate = new TfLiteDelegate{};
delegate->Prepare = &DelegatePrepare;
delegate->flags = flag;
delegate->data_ = simple_delegate.release();
delegate->CopyFromBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) -> TfLiteStatus {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
return simple_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
};
delegate->CopyToBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) -> TfLiteStatus {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
return simple_delegate->CopyToBufferHandle(context, buffer_handle, tensor);
};
delegate->FreeBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* buffer_handle) {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
simple_delegate->FreeBufferHandle(context, buffer_handle);
};
return delegate;
}
void TfLiteDelegateFactory::DeleteSimpleDelegate(TfLiteDelegate* delegate) {
if (!delegate) return;
SimpleDelegateInterface* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
delete simple_delegate;
delete delegate;
}
} | #include <stdlib.h>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/delegates/utils/dummy_delegate/dummy_delegate.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace {
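// Test fixture that builds a small float graph by hand: five tensors, inputs
// {0, 1}, outputs {3, 4}, and three ADD nodes chained through the intermediate
// tensor 2, giving the delegate under test several adjacent nodes to claim.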
class TestDelegate : public ::testing::Test {
protected:
void SetUp() override {
interpreter_ = std::make_unique<Interpreter>();
interpreter_->AddTensors(5);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({3, 4});
TfLiteQuantizationParams quant;
interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
void* builtin_data_2 = malloc(sizeof(int));
void* builtin_data_3 = malloc(sizeof(int));
interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data_1,
reg);
interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data_2,
reg);
interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data_3,
reg);
}
void TearDown() override { interpreter_.reset(); }
protected:
std::unique_ptr<Interpreter> interpreter_;
};
TEST_F(TestDelegate, BasicDelegate) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
interpreter_->ModifyGraphWithDelegate(std::move(delegate));
ASSERT_EQ(interpreter_->execution_plan().size(), 1);
int node = interpreter_->execution_plan()[0];
const auto* node_and_reg = interpreter_->node_and_registration(node);
EXPECT_STREQ("DummyDelegate", node_and_reg->second.custom_name);
EXPECT_EQ(1, node_and_reg->second.version);
const TfLiteDelegateParams* params = static_cast<const TfLiteDelegateParams*>(
node_and_reg->first.builtin_data);
ASSERT_EQ(params->nodes_to_replace->size, 3);
EXPECT_EQ(params->nodes_to_replace->data[0], 0);
EXPECT_EQ(params->nodes_to_replace->data[1], 1);
EXPECT_EQ(params->nodes_to_replace->data[2], 2);
ASSERT_EQ(params->input_tensors->size, 2);
EXPECT_EQ(params->input_tensors->data[0], 0);
EXPECT_EQ(params->input_tensors->data[1], 1);
ASSERT_EQ(params->output_tensors->size, 2);
EXPECT_EQ(params->output_tensors->data[0], 3);
EXPECT_EQ(params->output_tensors->data[1], 4);
}
TEST_F(TestDelegate, NoNodesToDelegate) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinSub;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
interpreter_->ModifyGraphWithDelegate(std::move(delegate));
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
}
TEST_F(TestDelegate, DelegateFailedPrepare) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_prepare = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteDelegateError,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
}
TEST_F(TestDelegate, DelegateFailedInvoke) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_invoke = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteOk,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
ASSERT_EQ(kTfLiteError, interpreter_->Invoke());
}
TEST_F(TestDelegate, DelegateFailedInit) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_init = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteDelegateError,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2af69398-20b4-4a9a-9f51-bca2455e1db4 | cpp | tensorflow/tensorflow | sample_stable_delegate_external | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external_test.cc | #include <memory>
#include <utility>
#include "tensorflow/lite/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/stable_delegate_interface.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace {
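// Plugin glue exposing SampleStableDelegate through the stable delegate ABI:
// the create/destroy callbacks wrap and release the delegate via
// TfLiteOpaqueDelegateFactory, and the plugin is exported below through the
// TFL_TheStableDelegate symbol.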
TfLiteOpaqueDelegate* SampleStableDelegateCreateFunc(
const void* tflite_settings) {
auto delegate = std::make_unique<tflite::example::SampleStableDelegate>();
return tflite::TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::move(delegate));
}
void SampleStableDelegateDestroyFunc(
TfLiteOpaqueDelegate* sample_stable_delegate) {
tflite::TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(
sample_stable_delegate);
}
int SampleStableDelegateErrnoFunc(
TfLiteOpaqueDelegate* sample_stable_delegate) {
return 0;
}
const TfLiteOpaqueDelegatePlugin sample_stable_delegate_plugin = {
SampleStableDelegateCreateFunc, SampleStableDelegateDestroyFunc,
SampleStableDelegateErrnoFunc};
const TfLiteStableDelegate sample_stable_delegate = {
TFL_STABLE_DELEGATE_ABI_VERSION, tflite::example::kSampleStableDelegateName,
tflite::example::kSampleStableDelegateVersion,
&sample_stable_delegate_plugin};
}
extern "C" const TfLiteStableDelegate TFL_TheStableDelegate =
sample_stable_delegate; | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
using tflite::TFLiteSettings;
using tflite::TFLiteSettingsBuilder;
using tflite::delegates::utils::LoadDelegateFromSharedLibrary;
TEST(SampleStableDelegate, LoadFromSharedLibraryFile) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/libtensorflowlite_sample_stable_delegate.so");
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
ASSERT_NE(stable_delegate_handle->delegate_plugin, nullptr);
}
TEST(SampleStableDelegate, LoadFromSharedLibraryTestFile) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/"
"libtensorflowlite_sample_stable_delegate.so");
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
ASSERT_NE(stable_delegate_handle->delegate_plugin, nullptr);
flatbuffers::FlatBufferBuilder flatbuffer_builder;
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder.Finish(tflite_settings);
const TFLiteSettings* settings = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder.GetBufferPointer());
TfLiteOpaqueDelegate* opaque_delegate =
stable_delegate_handle->delegate_plugin->create(settings);
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
std::int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
stable_delegate_handle->delegate_plugin->destroy(opaque_delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e938fec1-2a3e-4a74-a363-d75017bc5abe | cpp | tensorflow/tensorflow | sample_stable_delegate_with_control_flow | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow_test.cc | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.h"
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
static const int kTopLevelSubgraphIndex = -1;
namespace {
class SampleStableDelegateKernel : public SimpleOpaqueDelegateKernelInterface {
bool IsExternalTensor(const TfLiteOpaqueTensor* opaque_tensor) const {
return external_tensors_.count(opaque_tensor) != 0;
}
void DeriveExternalTensors() {
for (const TfLiteOpaqueTensor* tensor : node_input_tensors_set_) {
if (node_output_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_set_) {
if (node_input_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
}
public:
TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) override {
if (params->delegate == nullptr) return kTfLiteDelegateError;
context_ = context;
std::vector<int> callee_subgraph_indices;
TfLiteStatus status =
InitSubgraphNodes(context, kTopLevelSubgraphIndex,
params->nodes_to_replace, callee_subgraph_indices);
if (status != kTfLiteOk) return status;
DeriveExternalTensors();
return kTfLiteOk;
}
TfLiteStatus InitSubgraphNodes(TfLiteOpaqueContext* context,
int subgraph_index,
const TfLiteIntArray* nodes_to_execute,
std::vector<int>& callee_subgraph_indices) {
node_input_tensors_[subgraph_index].resize(nodes_to_execute->size);
node_output_tensors_[subgraph_index].resize(nodes_to_execute->size);
builtin_codes_[subgraph_index].resize(nodes_to_execute->size);
for (int i = 0; i < nodes_to_execute->size; ++i) {
const int node_index = nodes_to_execute->data[i];
TfLiteOpaqueNode* delegated_node = nullptr;
TfLiteOperator* delegated_node_registration = nullptr;
TfLiteOpaqueContextGetNodeAndRegistration(
context, node_index, &delegated_node, &delegated_node_registration);
builtin_codes_[subgraph_index][i] =
TfLiteOperatorGetBuiltInCode(delegated_node_registration);
for (int n = 0; n < TfLiteOpaqueNodeNumberOfInputs(delegated_node); ++n) {
auto input_tensor =
TfLiteOpaqueNodeGetInput(context, delegated_node, n);
node_input_tensors_[subgraph_index][i].push_back(input_tensor);
if (subgraph_index == kTopLevelSubgraphIndex) {
node_input_tensors_set_.insert(input_tensor);
}
}
for (int n = 0; n < TfLiteOpaqueNodeNumberOfOutputs(delegated_node);
++n) {
auto output_tensor =
TfLiteOpaqueNodeGetOutput(context, delegated_node, n);
node_output_tensors_[subgraph_index][i].push_back(output_tensor);
if (subgraph_index == kTopLevelSubgraphIndex) {
node_output_tensors_set_.insert(output_tensor);
}
}
if (builtin_codes_[subgraph_index][i] == kTfLiteBuiltinWhile) {
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(delegated_node);
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(builtin_data);
control_flow_branch_indices_[subgraph_index][i] = {
params->cond_subgraph_index, params->body_subgraph_index};
for (int branch_index :
control_flow_branch_indices_[subgraph_index][i]) {
callee_subgraph_indices.push_back(branch_index);
TfLiteStatus status;
TfLiteIntArray* execution_plan;
TfLiteOpaqueContext* branch_context;
status = TfLiteOpaqueContextAcquireSubgraphContext(
context, branch_index, &branch_context);
if (status != kTfLiteOk) return status;
status = TfLiteOpaqueContextGetExecutionPlan(branch_context,
&execution_plan);
if (status != kTfLiteOk) return status;
status = InitSubgraphNodes(branch_context, branch_index,
execution_plan, callee_subgraph_indices);
if (status != kTfLiteOk) return status;
status =
TfLiteOpaqueContextReleaseSubgraphContext(context, branch_index);
if (status != kTfLiteOk) return status;
}
}
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
if (external_tensors_.empty()) return kTfLiteOk;
const int kTheInputTensorSize =
helpers::CalculateNumElements((*external_tensors_.begin()));
for (auto [_, node_input_tensors] : node_input_tensors_) {
for (std::vector<const TfLiteOpaqueTensor*>& vecs : node_input_tensors) {
for (const TfLiteOpaqueTensor* tensor : vecs) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory =
internal_float_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
}
for (auto [subgraph_index, node_output_tensors] : node_output_tensors_) {
for (int i = 0; i < node_output_tensors.size(); ++i) {
std::vector<const TfLiteOpaqueTensor*>& vecs = node_output_tensors[i];
for (int j = 0; j < vecs.size(); ++j) {
const TfLiteOpaqueTensor* tensor = vecs[j];
if (IsExternalTensor(tensor)) break;
if (builtin_codes_[subgraph_index][i] == kTfLiteBuiltinEqual) {
std::vector<int>& vec_memory = internal_int_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
} else {
std::vector<float>& vec_memory =
internal_float_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
}
}
return kTfLiteOk;
}
int* GetIntRawDataSource(const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<int*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_int_tensors_memory_[tensor].data();
}
}
float* GetFloatRawDataSource(const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_float_tensors_memory_[tensor].data();
}
}
void CopyRawDataSource(const TfLiteOpaqueTensor* from_tensor,
const TfLiteOpaqueTensor* to_tensor) {
float* from_data = GetFloatRawDataSource(from_tensor);
float* to_data = GetFloatRawDataSource(to_tensor);
int number_of_elements = helpers::CalculateNumElements(to_tensor);
memcpy(to_data, from_data, number_of_elements * sizeof(float));
}
TfLiteStatus EvalArithmeticOp(int subgraph_index, int node_index) {
auto node_input_tensors = node_input_tensors_[subgraph_index];
auto node_output_tensors = node_output_tensors_[subgraph_index];
auto builtin_codes = builtin_codes_[subgraph_index];
float* input1 = GetFloatRawDataSource(node_input_tensors[node_index][0]);
float* input2 = GetFloatRawDataSource(node_input_tensors[node_index][1]);
float* output = GetFloatRawDataSource(node_output_tensors[node_index][0]);
int number_of_elements =
helpers::CalculateNumElements(node_output_tensors[node_index][0]);
for (int i = 0; i < number_of_elements; ++i) {
switch (builtin_codes[node_index]) {
case kTfLiteBuiltinAdd:
output[i] = input1[i] + input2[i];
break;
case kTfLiteBuiltinSub:
output[i] = input1[i] - input2[i];
break;
case kTfLiteBuiltinMul:
output[i] = input1[i] * input2[i];
break;
default:
return kTfLiteDelegateError;
}
}
return kTfLiteOk;
}
TfLiteStatus EvalComparisonOp(int subgraph_index, int node_index) {
auto node_input_tensors = node_input_tensors_[subgraph_index];
auto node_output_tensors = node_output_tensors_[subgraph_index];
auto builtin_codes = builtin_codes_[subgraph_index];
float* input1 = GetFloatRawDataSource(node_input_tensors[node_index][0]);
float* input2 = GetFloatRawDataSource(node_input_tensors[node_index][1]);
int* output = GetIntRawDataSource(node_output_tensors[node_index][0]);
int number_of_elements =
helpers::CalculateNumElements(node_output_tensors[node_index][0]);
for (int i = 0; i < number_of_elements; ++i) {
switch (builtin_codes[node_index]) {
case kTfLiteBuiltinEqual:
output[i] = input1[i] == input2[i];
break;
default:
return kTfLiteDelegateError;
}
}
return kTfLiteOk;
}
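// Evaluates a WHILE node: the while input is copied into the condition
// subgraph's input, then the condition and body subgraphs are evaluated in a
// loop until the condition output becomes false, at which point the last body
// output is copied into the WHILE node's output tensor.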
TfLiteStatus EvalWhileOp(int while_subgraph_index, int while_node_index) {
auto branch_indices =
control_flow_branch_indices_[while_subgraph_index][while_node_index];
int cond_subgraph_index = branch_indices[0];
int body_subgraph_index = branch_indices[1];
int last_cond_node_index =
node_output_tensors_[cond_subgraph_index].size() - 1;
int last_body_node_index =
node_output_tensors_[body_subgraph_index].size() - 1;
CopyRawDataSource(
node_input_tensors_[while_subgraph_index][while_node_index][0],
node_input_tensors_[cond_subgraph_index][0][0]);
TfLiteStatus status;
while (true) {
status = EvalSubgraph(cond_subgraph_index);
if (status != kTfLiteOk) return status;
int* cond_output = GetIntRawDataSource(
node_output_tensors_[cond_subgraph_index][last_cond_node_index][0]);
int number_of_elements = helpers::CalculateNumElements(
node_output_tensors_[cond_subgraph_index][last_cond_node_index][0]);
bool condition = true;
for (int i = 0; i < number_of_elements; ++i) {
if (cond_output[i] == 0) {
condition = false;
break;
}
}
if (!condition) {
CopyRawDataSource(
node_output_tensors_[body_subgraph_index][last_body_node_index][0],
node_output_tensors_[while_subgraph_index][while_node_index][0]);
break;
}
CopyRawDataSource(node_input_tensors_[cond_subgraph_index][0][0],
node_input_tensors_[body_subgraph_index][0][0]);
status = EvalSubgraph(body_subgraph_index);
if (status != kTfLiteOk) return status;
CopyRawDataSource(
node_output_tensors_[body_subgraph_index][last_body_node_index][0],
node_input_tensors_[cond_subgraph_index][0][0]);
}
return kTfLiteOk;
}
TfLiteStatus EvalSubgraph(int subgraph_index) {
TfLiteStatus status;
for (int i = 0; i < node_input_tensors_[subgraph_index].size(); ++i) {
status = EvalNode(subgraph_index, i);
if (status != kTfLiteOk) return status;
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
return EvalSubgraph(kTopLevelSubgraphIndex);
}
TfLiteStatus EvalNode(int subgraph_index, int node_index) {
TfLiteStatus status;
switch (builtin_codes_[subgraph_index][node_index]) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinMul:
status = EvalArithmeticOp(subgraph_index, node_index);
break;
case kTfLiteBuiltinEqual:
status = EvalComparisonOp(subgraph_index, node_index);
break;
case kTfLiteBuiltinWhile:
status = EvalWhileOp(subgraph_index, node_index);
break;
default:
return kTfLiteDelegateError;
}
if (status != kTfLiteOk) {
return status;
}
return kTfLiteOk;
}
private:
absl::flat_hash_map<int, absl::flat_hash_map<int, std::vector<int>>>
control_flow_branch_indices_;
absl::flat_hash_map<int, std::vector<std::vector<const TfLiteOpaqueTensor*>>>
node_input_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_input_tensors_set_;
absl::flat_hash_map<int, std::vector<std::vector<const TfLiteOpaqueTensor*>>>
node_output_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_output_tensors_set_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> external_tensors_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<float>>
internal_float_tensors_memory_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<int>>
internal_int_tensors_memory_;
TfLiteOpaqueContext* context_;
absl::flat_hash_map<int, std::vector<int>> builtin_codes_;
};
}
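// Recursively walks the given subgraph (and the cond/body subgraphs of any
// WHILE nodes it contains), recording caller/callee relationships and marking
// a subgraph as compatible only if every node in it is supported by this
// delegate.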
TfLiteStatus SampleStableDelegate::ComputeCompatibleCalleeSubgraphs(
TfLiteOpaqueContext* opaque_context, int subgraph_index) {
TfLiteStatus status;
TfLiteOpaqueContext* current_context;
status = TfLiteOpaqueContextAcquireSubgraphContext(
opaque_context, subgraph_index, ¤t_context);
if (status != kTfLiteOk) {
return status;
}
TfLiteIntArray* execution_plan;
status =
TfLiteOpaqueContextGetExecutionPlan(current_context, &execution_plan);
if (status != kTfLiteOk) {
return status;
}
bool is_compatible_subgraph = true;
for (int i = 0; i < execution_plan->size; ++i) {
int node_index = execution_plan->data[i];
TfLiteOpaqueNode* node = nullptr;
TfLiteOperator* registration = nullptr;
status = TfLiteOpaqueContextGetNodeAndRegistration(
current_context, node_index, &node, ®istration);
if (status != kTfLiteOk) {
return status;
}
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration);
if (builtin_operator == kTfLiteBuiltinWhile) {
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
const auto* op_data =
reinterpret_cast<const TfLiteWhileParams*>(builtin_data);
AddCalleeSubgraphToCallerSubgraph(op_data->cond_subgraph_index,
subgraph_index);
ComputeCompatibleCalleeSubgraphs(opaque_context,
op_data->cond_subgraph_index);
AddCalleeSubgraphToCallerSubgraph(op_data->body_subgraph_index,
subgraph_index);
ComputeCompatibleCalleeSubgraphs(opaque_context,
op_data->body_subgraph_index);
}
if (!IsNodeSupportedByDelegate(registration, node, current_context)) {
is_compatible_subgraph = false;
}
}
if (is_compatible_subgraph) {
AddCompatibleCalleeSubgraph(subgraph_index);
}
status =
TfLiteOpaqueContextReleaseSubgraphContext(opaque_context, subgraph_index);
if (status != kTfLiteOk) {
return status;
}
return kTfLiteOk;
}
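// Computes compatibility starting from the primary subgraph, then marks each
// group of callee subgraphs as delegation-skippable when all callees of the
// same caller are delegatable, so the runtime does not try to delegate them
// separately.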
TfLiteStatus SampleStableDelegate::PrepareControlFlow(
TfLiteOpaqueContext* opaque_context) {
constexpr int kPrimarySubgraphIndex = 0;
ComputeCompatibleCalleeSubgraphs(opaque_context, kPrimarySubgraphIndex);
for (const auto& [caller_subgraph_index, callee_subgraph_indices] :
control_flow_subgraph_tree_) {
if (callee_subgraph_indices.empty()) {
continue;
}
bool callee_subgraphs_all_delegatable = true;
for (int callee_subgraph_index : callee_subgraph_indices) {
if (!IsCompatibleCalleeSubgraph(callee_subgraph_index)) {
callee_subgraphs_all_delegatable = false;
}
}
if (!callee_subgraphs_all_delegatable) {
continue;
}
for (int callee_subgraph_index : callee_subgraph_indices) {
TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
opaque_context, callee_subgraph_index);
}
}
return kTfLiteOk;
}
int helpers::CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor) {
int total_num_elements = 1;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(opaque_tensor); ++i) {
total_num_elements *= TfLiteOpaqueTensorDim(opaque_tensor, i);
}
return total_num_elements;
}
bool SampleStableDelegate::IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const {
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration_external);
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
switch (builtin_operator) {
case kTfLiteBuiltinAdd: {
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinSub: {
TfLiteSubParams* params =
reinterpret_cast<TfLiteSubParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinMul: {
TfLiteMulParams* params =
reinterpret_cast<TfLiteMulParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinEqual:
break;
case kTfLiteBuiltinWhile: {
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(builtin_data);
if (!params || !IsCompatibleCalleeSubgraph(params->cond_subgraph_index) ||
!IsCompatibleCalleeSubgraph(params->body_subgraph_index)) {
return false;
}
break;
}
default:
return false;
}
if (builtin_operator == kTfLiteBuiltinWhile) {
if (TfLiteOpaqueNodeNumberOfInputs(node) != 1) return false;
const TfLiteOpaqueTensor* tensor =
TfLiteOpaqueNodeGetInput(context, node, 0);
if (!tensor || TfLiteOpaqueTensorType(tensor) != kTfLiteFloat32)
return false;
} else {
if (TfLiteOpaqueNodeNumberOfInputs(node) != 2) return false;
const TfLiteOpaqueTensor* tensor_1 =
TfLiteOpaqueNodeGetInput(context, node, 0);
const TfLiteOpaqueTensor* tensor_2 =
TfLiteOpaqueNodeGetInput(context, node, 1);
if (!tensor_1 || TfLiteOpaqueTensorType(tensor_1) != kTfLiteFloat32)
return false;
if (!tensor_2 || TfLiteOpaqueTensorType(tensor_2) != kTfLiteFloat32)
return false;
if (TfLiteOpaqueTensorNumDims(tensor_1) !=
TfLiteOpaqueTensorNumDims(tensor_2))
return false;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(tensor_1); ++i) {
if (TfLiteOpaqueTensorDim(tensor_1, i) !=
TfLiteOpaqueTensorDim(tensor_2, i)) {
return false;
}
}
}
return true;
}
TfLiteStatus SampleStableDelegate::Initialize(TfLiteOpaqueContext* context) {
if (!has_been_initialized_) {
PrepareControlFlow(context);
has_been_initialized_ = true;
}
return kTfLiteOk;
}
const char* SampleStableDelegate::Name() const {
return kSampleStableDelegateName;
}
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
SampleStableDelegate::CreateDelegateKernelInterface() {
return std::make_unique<SampleStableDelegateKernel>();
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithAdd) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithSub) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/sub.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor_0, nullptr);
const float kTensor0CellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor_0);
std::vector<float> input_0(n, kTensor0CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_0, input_0.data(),
input_0.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
ASSERT_NE(input_tensor_1, nullptr);
n = tflite::NumElements(input_tensor_1);
const float kTensor1CellValue = 2.f;
std::vector<float> input_1(n, kTensor1CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_1, input_1.data(),
input_1.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensor0CellValue - kTensor1CellValue);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithNestedWhile) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/nested_while.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 1.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 2);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d825912f-d5a7-4c16-9580-be760349350d | cpp | tensorflow/tensorflow | sample_stable_delegate | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_test.cc | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
namespace {
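// Kernel that evaluates the delegated partition on the CPU. For every
// delegated node it records the two input tensors and the single output
// tensor, allocates plain float buffers for tensors internal to the partition,
// and computes element-wise ADD or SUB in Eval().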
class SampleStableDelegateKernel : public SimpleOpaqueDelegateKernelInterface {
bool IsExternalTensor(const TfLiteOpaqueTensor* opaque_tensor) const {
return external_tensors_.count(opaque_tensor) != 0;
}
void DeriveExternalTensors() {
for (const TfLiteOpaqueTensor* tensor : node_input_tensors_set_) {
if (node_output_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_set_) {
if (node_input_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
}
public:
TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) override {
if (params->delegate == nullptr) return kTfLiteDelegateError;
context_ = context;
builtin_code_.resize(params->nodes_to_replace->size);
node_input_tensors_.resize(params->nodes_to_replace->size);
node_output_tensors_.resize(params->nodes_to_replace->size);
for (int i = 0; i < params->nodes_to_replace->size; ++i) {
const int node_index = params->nodes_to_replace->data[i];
TfLiteOpaqueNode* delegated_node = nullptr;
TfLiteOperator* delegated_node_registration = nullptr;
TfLiteOpaqueContextGetNodeAndRegistration(
context, node_index, &delegated_node, &delegated_node_registration);
auto input_tensor1 = TfLiteOpaqueNodeGetInput(context, delegated_node, 0);
node_input_tensors_[i].push_back(input_tensor1);
node_input_tensors_set_.insert(input_tensor1);
auto input_tensor2 = TfLiteOpaqueNodeGetInput(context, delegated_node, 1);
node_input_tensors_[i].push_back(input_tensor2);
node_input_tensors_set_.insert(input_tensor2);
auto output_tensor =
TfLiteOpaqueNodeGetOutput(context, delegated_node, 0);
node_output_tensors_[i] = output_tensor;
node_output_tensors_set_.insert(output_tensor);
builtin_code_[i] =
TfLiteOperatorGetBuiltInCode(delegated_node_registration);
}
DeriveExternalTensors();
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
if (external_tensors_.empty()) return kTfLiteOk;
const int kTheInputTensorSize =
helpers::CalculateNumElements((*external_tensors_.begin()));
for (std::vector<const TfLiteOpaqueTensor*>& vecs : node_input_tensors_) {
for (const TfLiteOpaqueTensor* tensor : vecs) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory = internal_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory = internal_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
return kTfLiteOk;
}
void ComputeImpl(float* input_1, float* input_2, float* output,
int builtin_code, int number_of_elements) {
for (int i = 0; i < number_of_elements; ++i) {
if (builtin_code == kTfLiteBuiltinAdd) {
output[i] = input_1[i] + input_2[i];
} else {
output[i] = input_1[i] - input_2[i];
}
}
}
float* GetRawDataSource(TfLiteOpaqueContext* context,
const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_tensors_memory_[tensor].data();
}
}
TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
for (int i = 0; i < node_input_tensors_.size(); ++i) {
float* input1 = GetRawDataSource(context, node_input_tensors_[i][0]);
float* input2 = GetRawDataSource(context, node_input_tensors_[i][1]);
float* output = GetRawDataSource(context, node_output_tensors_[i]);
ComputeImpl(input1, input2, output, builtin_code_[i],
helpers::CalculateNumElements(node_output_tensors_[i]));
}
return kTfLiteOk;
}
private:
std::vector<std::vector<const TfLiteOpaqueTensor*>> node_input_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_input_tensors_set_;
std::vector<const TfLiteOpaqueTensor*> node_output_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_output_tensors_set_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> external_tensors_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<float>>
internal_tensors_memory_;
TfLiteOpaqueContext* context_;
std::vector<int> builtin_code_;
};
}
int helpers::CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor) {
int total_num_elements = 1;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(opaque_tensor); ++i) {
total_num_elements *= TfLiteOpaqueTensorDim(opaque_tensor, i);
}
return total_num_elements;
}
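// A node is claimed only if it is an ADD or SUB with no fused activation and
// exactly two float32 inputs of identical shape.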
bool SampleStableDelegate::IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const {
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration_external);
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
if (builtin_operator == kTfLiteBuiltinAdd) {
TfLiteAddParams* params = reinterpret_cast<TfLiteAddParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
} else if (builtin_operator == kTfLiteBuiltinSub) {
TfLiteSubParams* params = reinterpret_cast<TfLiteSubParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
} else {
return false;
}
if (TfLiteOpaqueNodeNumberOfInputs(node) != 2) return false;
const TfLiteOpaqueTensor* tensor_1 =
TfLiteOpaqueNodeGetInput(context, node, 0);
const TfLiteOpaqueTensor* tensor_2 =
TfLiteOpaqueNodeGetInput(context, node, 1);
if (!tensor_1 || TfLiteOpaqueTensorType(tensor_1) != kTfLiteFloat32)
return false;
if (!tensor_2 || TfLiteOpaqueTensorType(tensor_2) != kTfLiteFloat32)
return false;
if (TfLiteOpaqueTensorNumDims(tensor_1) !=
TfLiteOpaqueTensorNumDims(tensor_2))
return false;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(tensor_1); ++i) {
if (TfLiteOpaqueTensorDim(tensor_1, i) !=
TfLiteOpaqueTensorDim(tensor_2, i)) {
return false;
}
}
return true;
}
TfLiteStatus SampleStableDelegate::Initialize(TfLiteOpaqueContext* context) {
return kTfLiteOk;
}
const char* SampleStableDelegate::Name() const {
return kSampleStableDelegateName;
}
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
SampleStableDelegate::CreateDelegateKernelInterface() {
return std::make_unique<SampleStableDelegateKernel>();
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithAdd) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithSub) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/sub.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor_0, nullptr);
const float kTensor0CellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor_0);
std::vector<float> input_0(n, kTensor0CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_0, input_0.data(),
input_0.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
ASSERT_NE(input_tensor_1, nullptr);
n = tflite::NumElements(input_tensor_1);
const float kTensor1CellValue = 2.f;
std::vector<float> input_1(n, kTensor1CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_1, input_1.data(),
input_1.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensor0CellValue - kTensor1CellValue);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b204cf53-9ac5-4632-8b9c-991be7e258ce | cpp | tensorflow/tensorflow | tflite_settings_json_parser | tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.cc | tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser_test.cc | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include <string>
#include "flatbuffers/idl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_fbs_contents-inl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
namespace utils {
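// The constructor parses the embedded configuration schema and sets the root
// type to TFLiteSettings, so that Parse() can later convert a JSON settings
// file into a TFLiteSettings flatbuffer owned by the parser's builder.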
TfLiteSettingsJsonParser::TfLiteSettingsJsonParser() {
TFLITE_DCHECK(parser_.Parse(configuration_fbs_contents) &&
parser_.SetRootType("TFLiteSettings"));
}
const TFLiteSettings* TfLiteSettingsJsonParser::Parse(
const std::string& json_file_path) {
if (!LoadFromJsonFile(json_file_path) || buffer_pointer_ == nullptr) {
return nullptr;
}
return flatbuffers::GetRoot<TFLiteSettings>(buffer_pointer_);
}
const uint8_t* TfLiteSettingsJsonParser::GetBufferPointer() {
return buffer_pointer_;
}
flatbuffers::uoffset_t TfLiteSettingsJsonParser::GetBufferSize() {
return buffer_size_;
}
bool TfLiteSettingsJsonParser::LoadFromJsonFile(
const std::string& json_file_path) {
buffer_size_ = 0;
buffer_pointer_ = nullptr;
if (json_file_path.empty()) {
TFLITE_LOG(ERROR) << "Invalid JSON file path.";
return false;
}
std::string json_file;
if (!flatbuffers::LoadFile(json_file_path.c_str(), false, &json_file)) {
TFLITE_LOG(ERROR) << "Failed to load the delegate settings file ("
<< json_file_path << ").";
return false;
}
if (!parser_.Parse(json_file.c_str())) {
TFLITE_LOG(ERROR) << "Failed to parse the delegate settings file ("
<< json_file_path << ").";
return false;
}
buffer_size_ = parser_.builder_.GetSize();
buffer_pointer_ = parser_.builder_.GetBufferPointer();
return true;
}
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace {
using tflite::TFLiteSettings;
using tflite::delegates::utils::TfLiteSettingsJsonParser;
TEST(TfLiteSettingsJsonParserTest, SuccessWithValidXNNPackDelegateSettings) {
TfLiteSettingsJsonParser parser;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
EXPECT_NE(parser.GetBufferPointer(), nullptr);
EXPECT_NE(parser.GetBufferSize(), 0);
ASSERT_NE(tflite_settings, nullptr);
EXPECT_EQ(tflite_settings->delegate(), tflite::Delegate_XNNPACK);
ASSERT_NE(tflite_settings->xnnpack_settings(), nullptr);
EXPECT_EQ(tflite_settings->xnnpack_settings()->num_threads(), 5);
}
TEST(TfLiteSettingsJsonParserTest, GetBufferPointerReturnsValidBufferPointers) {
TfLiteSettingsJsonParser parser;
parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
const uint8_t* buffer_pointer = parser.GetBufferPointer();
ASSERT_NE(buffer_pointer, nullptr);
ASSERT_NE(parser.GetBufferSize(), 0);
const TFLiteSettings* tflite_settings =
flatbuffers::GetRoot<TFLiteSettings>(buffer_pointer);
ASSERT_NE(tflite_settings, nullptr);
EXPECT_EQ(tflite_settings->delegate(), tflite::Delegate_XNNPACK);
ASSERT_NE(tflite_settings->xnnpack_settings(), nullptr);
EXPECT_EQ(tflite_settings->xnnpack_settings()->num_threads(), 5);
}
TEST(TfLiteSettingsJsonParserTest, FailedToParseInvalidSettings) {
TfLiteSettingsJsonParser parser;
EXPECT_EQ(
parser.Parse("tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_invalid_settings.json"),
nullptr);
EXPECT_EQ(parser.GetBufferPointer(), nullptr);
EXPECT_EQ(parser.GetBufferSize(), 0);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5657ffe1-80dd-465c-9d29-73c706d6baa7 | cpp | tensorflow/tensorflow | delegate_loader | tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.cc | tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader_test.cc | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <cerrno>
#include <string>
#include "absl/strings/numbers.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
namespace utils {
namespace {
void setLibraryPathEnvironmentVariable(const std::string& delegate_path) {
std::string directory_path = "";
size_t last_slash_index = delegate_path.rfind('/');
if (last_slash_index != std::string::npos) {
directory_path = delegate_path.substr(0, last_slash_index);
}
if (setenv(kTfLiteLibraryPathEnvironmentVariable, directory_path.c_str(),
1) != 0) {
TFLITE_LOG(WARN) << "Error setting environment variable "
<< kTfLiteLibraryPathEnvironmentVariable
<< " with error: " << strerror(errno);
}
}
}
using ::tflite::acceleration::AndroidInfo;
using ::tflite::acceleration::RequestAndroidInfo;
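// Loads the TfLiteStableDelegate struct exported under
// kTfLiteStableDelegateSymbol from the shared library at `delegate_path`;
// returns nullptr if the library or the symbol cannot be found.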
const TfLiteStableDelegate* LoadDelegateFromSharedLibrary(
const std::string& delegate_path) {
void* symbol_pointer =
LoadSymbolFromSharedLibrary(delegate_path, kTfLiteStableDelegateSymbol);
if (!symbol_pointer) {
return nullptr;
}
return reinterpret_cast<const TfLiteStableDelegate*>(symbol_pointer);
}
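// dlopen()s the library and looks up `delegate_symbol`. The directory part of
// `delegate_path` is exported via kTfLiteLibraryPathEnvironmentVariable so the
// delegate can locate its resources, and RTLD_NODELETE is added on Android
// SDK 23+ so the library stays mapped even after dlclose().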
void* LoadSymbolFromSharedLibrary(const std::string& delegate_path,
const std::string& delegate_symbol) {
void* delegate_lib_handle = nullptr;
int dlopen_flags = RTLD_NOW | RTLD_LOCAL;
int sdk_version;
AndroidInfo android_info;
if (RequestAndroidInfo(&android_info).ok() &&
absl::SimpleAtoi(android_info.android_sdk_version, &sdk_version) &&
sdk_version >= 23) {
dlopen_flags |= RTLD_NODELETE;
TFLITE_LOG(INFO) << "Android SDK level is " << sdk_version
<< ", using dlopen with RTLD_NODELETE.";
}
setLibraryPathEnvironmentVariable(delegate_path);
delegate_lib_handle = dlopen(delegate_path.c_str(), dlopen_flags);
if (!delegate_lib_handle) {
TFLITE_LOG(ERROR) << "Failed to open library " << delegate_path << ": "
<< dlerror();
return nullptr;
}
void* symbol_pointer = dlsym(delegate_lib_handle, delegate_symbol.c_str());
if (!symbol_pointer) {
TFLITE_LOG(ERROR) << "Failed to find " << delegate_symbol
<< " symbol: " << dlerror();
dlclose(delegate_lib_handle);
return nullptr;
}
return symbol_pointer;
}
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include <cstdlib>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
namespace {
using tflite::TFLiteSettings;
using tflite::TFLiteSettingsBuilder;
using tflite::delegates::utils::LoadDelegateFromSharedLibrary;
using tflite::delegates::utils::LoadSymbolFromSharedLibrary;
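// Loads the sample stable delegate, verifies the exported ABI version, name
// and version strings as well as the library-path environment variable, and
// then exercises the plugin's create / get_delegate_errno / destroy entry
// points with default TFLiteSettings.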
TEST(TfLiteDelegateLoaderUtilsTest, Simple) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/"
"libtensorflowlite_sample_stable_delegate.so"
);
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
EXPECT_NE(stable_delegate_handle->delegate_plugin, nullptr);
EXPECT_STREQ(
getenv(tflite::delegates::utils::kTfLiteLibraryPathEnvironmentVariable),
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate");
flatbuffers::FlatBufferBuilder flatbuffer_builder;
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder.Finish(tflite_settings);
const TFLiteSettings* settings = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder.GetBufferPointer());
auto delegate = stable_delegate_handle->delegate_plugin->create(settings);
ASSERT_NE(delegate, nullptr);
EXPECT_EQ(
stable_delegate_handle->delegate_plugin->get_delegate_errno(delegate), 0);
stable_delegate_handle->delegate_plugin->destroy(delegate);
}
TEST(TfLiteDelegateLoaderUtilsTest, WrongSymbolReturnsNullptr) {
void* symbol_pointer = LoadSymbolFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/libtensorflowlite_sample_stable_delegate.so",
"NOT_REAL_SYMBOL");
EXPECT_EQ(symbol_pointer, nullptr);
}
TEST(TfLiteDelegateLoaderUtilsTest, MissingLibReturnsNullptr) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary("not_real_delegate.so");
EXPECT_EQ(stable_delegate_handle, nullptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40fe60b6-15b3-4cfd-9724-57a62982e349 | cpp | tensorflow/tensorflow | min_max_builder | tensorflow/lite/delegates/hexagon/builders/min_max_builder.cc | tensorflow/lite/delegates/hexagon/builders/tests/min_max_builder_test.cc | #include "tensorflow/lite/delegates/hexagon/builders/min_max_builder.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
namespace hexagon {
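// Adds both input operands together with their quantization (min/max) ranges,
// propagates the output range, and registers a uint8 output tensor followed
// by its min and max scalar outputs.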
TfLiteStatus MinMaxOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
int a_tensor_id = inputs->data[0];
int b_tensor_id = inputs->data[1];
const auto& a_tensor = context->tensors[a_tensor_id];
const auto& b_tensor = context->tensors[b_tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(a_tensor_id));
AddInput(graph_builder_->GetHexagonTensorId(b_tensor_id));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, a_tensor));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, b_tensor));
const int output_tensor_id = outputs->data[0];
const auto& output_tensor = context->tensors[output_tensor_id];
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, output_tensor));
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, kScalarShape);
AddOutput(sizeof(float), 4, kScalarShape);
return kTfLiteOk;
}
TfLiteStatus MinMaxOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
OpBuilder* CreateMinMaxBuilder(GraphBuilder* graph_builder, int op_type) {
return new MinMaxOpBuilder(graph_builder, op_type);
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
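// Single-op model wrapping a MAXIMUM or MINIMUM node; the second constructor
// additionally marks one of the two inputs as a constant (kTfLiteMmapRo)
// tensor, selected by `input1_const`.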
template <typename data_type>
class MinMaxOpModel : public SingleOpModelWithHexagon {
public:
MinMaxOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
MinMaxOpModel(tflite::BuiltinOperator op, const TensorData& input1,
std::initializer_list<data_type> input1_values,
const TensorData& input2,
std::initializer_list<data_type> input2_values,
const TensorData& output, bool input1_const) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
if (input1_const) {
auto* input1_tensor = interpreter_->tensor(input1_);
input1_tensor->allocation_type = kTfLiteMmapRo;
} else {
auto* input2_tensor = interpreter_->tensor(input2_);
input2_tensor->allocation_type = kTfLiteMmapRo;
}
}
void SetInput1(std::vector<data_type> data) { PopulateTensor(input1_, data); }
void SetInput2(std::vector<data_type> data) { PopulateTensor(input2_, data); }
std::vector<data_type> GetOutput() {
return ExtractVector<data_type>(output_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
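// Runs the op once on the default CPU path to obtain reference results, then
// applies the Hexagon delegate and checks that output shape and values match.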
template <typename data_type>
void TestModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values) {
std::unique_ptr<MinMaxOpModel<data_type>> m;
m = std::make_unique<MinMaxOpModel<data_type>>(op, input1, input2, output);
m->SetInput1(input1_values);
m->SetInput2(input2_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
const auto reference_output = m->GetOutput();
const auto reference_output_shape = m->GetOutputShape();
m->ApplyDelegateAndInvoke();
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(reference_output_shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(reference_output));
}
template <typename data_type>
void TestModelConstInput(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values,
bool input1_const) {
std::unique_ptr<MinMaxOpModel<data_type>> m;
m = std::make_unique<MinMaxOpModel<data_type>>(
op, input1, input1_values, input2, input2_values, output, input1_const);
m->SetInput1(input1_values);
m->SetInput2(input2_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
const auto reference_output = m->GetOutput();
const auto reference_output_shape = m->GetOutputShape();
m->ApplyDelegateAndInvoke();
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(reference_output_shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(reference_output));
}
TEST(MinMaxOpTest, Maximum_Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MAXIMUM,
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2);
}
TEST(MinMaxOpTest, Maximum_Uint8Test_Const) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModelConstInput<uint8_t>(
BuiltinOperator_MAXIMUM, {TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2, false);
}
TEST(MinMaxOpTest, Minimum_Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MINIMUM,
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2);
}
TEST(MinMaxOpTest, Minimum_Uint8Test_Const) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 20, 1};
TestModelConstInput<uint8_t>(
BuiltinOperator_MINIMUM, {TensorType_UINT8, {1, 3, 1, 2}, -1, 25},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 25},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 25}, data1, data2, false);
}
TEST(MinMaxOpTest, Maximum_Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM,
{TensorType_INT8, {1, 3, 1, 2}, -1, 125},
{TensorType_INT8, {1, 3, 1, 2}, -1, 125},
{TensorType_INT8, {1, 3, 1, 2}, -1, 125}, data1, data2);
}
TEST(MinMaxOpTest, Minimum_Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 12, 1};
TestModel<int8_t>(BuiltinOperator_MINIMUM,
{TensorType_INT8, {1, 3, 1, 2}, -1, 25},
{TensorType_INT8, {1, 3, 1, 2}, -1, 25},
{TensorType_INT8, {1, 3, 1, 2}, -1, 25}, data1, data2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/min_max_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/min_max_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
887c6150-e4d7-406b-8f4d-849ff0d7cb86 | cpp | tensorflow/tensorflow | nnapi_delegate | tensorflow/lite/delegates/nnapi/nnapi_delegate.cc | tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include <algorithm>
#include <cinttypes>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/compiler/mlir/lite/allocation.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_plugin.h"
#include "tensorflow/lite/delegates/serialization.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
#if defined __ANDROID__ || defined __unix__
#define TFLITE_NNAPI_ALLOW_MMAP_SHARING
#include <sys/mman.h>
#include <unistd.h>
#endif
#include "fp16.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include "tensorflow/lite/nnapi/nnapi_util.h"
#include "tensorflow/lite/util.h"
#ifdef NNAPI_VERBOSE_VALIDATION
#include "tensorflow/lite/schema/schema_generated.h"
#endif
namespace tflite {
namespace {
static const char kNnapiId[] = "nnapi_";
constexpr uint64_t kNoMemoryTimestamp = 0;
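// Builds an identifier of the form "nnapi_<accelerator name>" (just "nnapi_"
// when no accelerator is selected) for this delegate configuration.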
std::string NnApiBackendId(
const StatefulNnApiDelegate::Options& delegate_options) {
std::string delegate_id = kNnapiId;
if (delegate_options.accelerator_name) {
delegate_id += delegate_options.accelerator_name;
}
return delegate_id;
}
std::string NnApiErrorDescription(int error_code) {
switch (error_code) {
case ANEURALNETWORKS_NO_ERROR:
return "ANEURALNETWORKS_NO_ERROR";
case ANEURALNETWORKS_OUT_OF_MEMORY:
return "ANEURALNETWORKS_OUT_OF_MEMORY";
case ANEURALNETWORKS_INCOMPLETE:
return "ANEURALNETWORKS_INCOMPLETE";
case ANEURALNETWORKS_UNEXPECTED_NULL:
return "ANEURALNETWORKS_UNEXPECTED_NULL";
case ANEURALNETWORKS_BAD_DATA:
return "ANEURALNETWORKS_BAD_DATA";
case ANEURALNETWORKS_OP_FAILED:
return "ANEURALNETWORKS_OP_FAILED";
case ANEURALNETWORKS_BAD_STATE:
return "ANEURALNETWORKS_BAD_STATE";
case ANEURALNETWORKS_UNMAPPABLE:
return "ANEURALNETWORKS_UNMAPPABLE";
case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
return "ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE";
case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
return "ANEURALNETWORKS_UNAVAILABLE_DEVICE";
case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
return "ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT";
case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
return "ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT";
case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
return "ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT";
case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
return "ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT";
case ANEURALNETWORKS_DEAD_OBJECT:
return "ANEURALNETWORKS_DEAD_OBJECT";
default:
return "Unknown NNAPI error code: " + std::to_string(error_code);
}
}
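// These macros wrap NNAPI calls, e.g.
//   RETURN_TFLITE_ERROR_IF_NN_ERROR(
//       context, nnapi->ANeuralNetworksModel_finish(model),
//       "finishing the model", nnapi_errno);
// On any status other than ANEURALNETWORKS_NO_ERROR they log the decoded
// error name and the call description, store the raw code in *p_errno, and
// return kTfLiteError from the enclosing function; the _FOR_TENSOR variant
// also reports the name of the tensor involved.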
#define RETURN_TFLITE_ERROR_IF_NN_ERROR(context, code, call_desc, p_errno) \
do { \
const auto _code = (code); \
const auto _call_desc = (call_desc); \
if (_code != ANEURALNETWORKS_NO_ERROR) { \
const auto error_desc = NnApiErrorDescription(_code); \
TF_LITE_KERNEL_LOG(context, \
"NN API returned error %s at line %d while %s.\n", \
error_desc.c_str(), __LINE__, _call_desc); \
*p_errno = _code; \
return kTfLiteError; \
} \
} while (0)
#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(context, code, call_desc, \
p_tensor, p_errno) \
do { \
const auto _code = (code); \
const auto _call_desc = (call_desc); \
if (_code != ANEURALNETWORKS_NO_ERROR) { \
const auto error_desc = NnApiErrorDescription(_code); \
TF_LITE_KERNEL_LOG(context, \
"NN API returned error %s at line %d while %s " \
"for tensor '%s'.\n", \
error_desc.c_str(), __LINE__, _call_desc, \
(p_tensor)->name ? (p_tensor)->name : "no-name"); \
*p_errno = _code; \
return kTfLiteError; \
} \
} while (0)
bool IsFloat(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
return true;
default:
return false;
}
}
bool IsFloatOrUInt8(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
return true;
default:
return false;
}
}
bool IsQuantized(TfLiteType type) {
switch (type) {
case kTfLiteUInt8:
case kTfLiteInt8:
return true;
default:
return false;
}
}
bool IsInt32(TfLiteType type) {
switch (type) {
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsFloatOrQuantized(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
return true;
default:
return false;
}
}
bool IsFloatOrInt32(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsFloatQuantizedOrInt32(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsScalarInputSupported(int builtin_code) {
switch (builtin_code) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinMul:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinDiv:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinPow:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinPrelu:
case kTfLiteBuiltinLeakyRelu:
return true;
default:
return false;
}
}
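// Returns true when the op's signed int8 tensors have to be re-expressed in
// NNAPI's unsigned asymmetric quant8 representation. Conv-style ops require
// affine-quantized int8/uint8 weights in addition to an int8 input; the ops
// in the long allowlist below only check the input type.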
bool NeedInt8Conversion(const TfLiteContext* context, int builtin_code,
const TfLiteNode* node) {
const int input_id = node->inputs->data[0];
const TfLiteType input_type = context->tensors[input_id].type;
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinDepthwiseConv2d:
case kTfLiteBuiltinFullyConnected: {
if (input_type == kTfLiteInt8) {
const int weights_id = node->inputs->data[1];
const auto& weights_tensor = context->tensors[weights_id];
if ((weights_tensor.type == kTfLiteInt8 ||
weights_tensor.type == kTfLiteUInt8) &&
weights_tensor.quantization.type == kTfLiteAffineQuantization) {
return true;
}
}
return false;
}
case kTfLiteBuiltinTransposeConv: {
const int input_id = 2;
const TfLiteType input_type = context->tensors[input_id].type;
if (input_type == kTfLiteInt8) {
return true;
}
return false;
}
case kTfLiteBuiltinSelect: {
const auto value_type = context->tensors[node->inputs->data[1]].type;
return value_type == kTfLiteInt8;
}
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinArgMax:
case kTfLiteBuiltinArgMin:
case kTfLiteBuiltinAveragePool2d:
case kTfLiteBuiltinBatchToSpaceNd:
case kTfLiteBuiltinConcatenation:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinExpandDims:
case kTfLiteBuiltinGather:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinHardSwish:
case kTfLiteBuiltinL2Normalization:
case kTfLiteBuiltinLeakyRelu:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinLogistic:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMaxPool2d:
case kTfLiteBuiltinMean:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinMul:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2:
case kTfLiteBuiltinPrelu:
case kTfLiteBuiltinReduceMax:
case kTfLiteBuiltinReduceMin:
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinReluN1To1:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinResizeBilinear:
case kTfLiteBuiltinResizeNearestNeighbor:
case kTfLiteBuiltinReshape:
case kTfLiteBuiltinSlice:
case kTfLiteBuiltinSoftmax:
case kTfLiteBuiltinSpaceToBatchNd:
case kTfLiteBuiltinSpaceToDepth:
case kTfLiteBuiltinDepthToSpace:
case kTfLiteBuiltinStridedSlice:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinTanh:
case kTfLiteBuiltinTile:
case kTfLiteBuiltinTopkV2:
case kTfLiteBuiltinTranspose: {
return input_type == kTfLiteInt8;
}
default:
return false;
}
}
constexpr int kLstmFullKernelInputSize = 24;
constexpr int kLstmFullKernelNoOptionalParamsInputSize = 20;
constexpr int kLstmBasicKernelInputSize = 5;
inline bool isLstmBasicKernel(const TfLiteNode* node) {
return node->inputs->size == kLstmBasicKernelInputSize;
}
inline bool isLstmFullKernel(const TfLiteNode* node) {
return node->inputs->size == kLstmFullKernelInputSize ||
node->inputs->size == kLstmFullKernelNoOptionalParamsInputSize;
}
bool IsMeanWithDifferentInputOutputQuantization(const TfLiteContext* context,
const TfLiteNode* node) {
const auto& input = context->tensors[node->inputs->data[0]];
const auto& output = context->tensors[node->outputs->data[0]];
return input.params.scale != output.params.scale ||
input.params.zero_point != output.params.zero_point;
}
bool IsBroadcastBatchMatMul(const TfLiteContext* context,
const TfLiteNode* node) {
const auto& input0 = context->tensors[node->inputs->data[0]];
const auto& input1 = context->tensors[node->inputs->data[1]];
if (input0.dims->size != input1.dims->size) {
return true;
}
for (int i = 0; i < input0.dims->size - 2; i++) {
if (input0.dims->data[i] != input1.dims->data[i]) {
return true;
}
}
return false;
}
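// An operator is "hybrid" when it combines float activations with quantized
// weights (float input, quantized filter); the plain LSTM op only qualifies
// when it uses the full-kernel input signature.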
bool IsHybridOperator(const TfLiteContext* context, int builtin_code,
const TfLiteNode* node) {
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinFullyConnected: {
const int input_id = node->inputs->data[0];
const int filter_id = node->inputs->data[1];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType filter_type = context->tensors[filter_id].type;
return IsFloat(input_type) && IsQuantized(filter_type);
}
case kTfLiteBuiltinLstm: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[2];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return isLstmFullKernel(node) && IsFloat(input_type) &&
IsQuantized(weights_type);
}
case kTfLiteBuiltinUnidirectionalSequenceLstm: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[2];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return IsFloat(input_type) && IsQuantized(weights_type);
}
case kTfLiteBuiltinBidirectionalSequenceLstm: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[2];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return IsFloat(input_type) && IsQuantized(weights_type);
}
case kTfLiteBuiltinUnidirectionalSequenceRnn: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[1];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return IsFloat(input_type) && IsQuantized(weights_type);
}
default:
return false;
}
}
bool IsDequantizeConstFloat16(TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
return registration->builtin_code == kTfLiteBuiltinDequantize &&
context->tensors[node->inputs->data[0]].type ==
TfLiteType::kTfLiteFloat16 &&
IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
bool IsDequantizeNonConstFloat16(TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
return registration->builtin_code == kTfLiteBuiltinDequantize &&
context->tensors[node->inputs->data[0]].type ==
TfLiteType::kTfLiteFloat16 &&
!IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
bool IsDensifyConstTensor(TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
return registration->builtin_code == kTfLiteBuiltinDensify &&
IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
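// Maps a TFLite tensor onto the equivalent ANeuralNetworksOperandType,
// including scale/zero-point handling: int8 tensors become signed asymmetric
// operands, are shifted by +128 into the unsigned (or int32) representation,
// or fall back to symmetric quant8 depending on `use_int8_asymm_signed` and
// `ann_type_equivalent`; rank-0 tensors are promoted to rank 1.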
ANeuralNetworksOperandType ConvertTensorTypeToNNType(
const TfLiteTensor* tensor, TfLiteType ann_type_equivalent,
bool use_int8_asymm_signed) {
int32_t nn_type = 0;
float scale = 0.0f;
int32_t zero_point = 0;
switch (tensor->type) {
case kTfLiteFloat32:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteUInt8:
nn_type = ann_type_equivalent == kTfLiteInt32
? ANEURALNETWORKS_TENSOR_INT32
: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
if (scale == 0) {
scale = 1;
}
break;
case kTfLiteInt8:
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
if (use_int8_asymm_signed) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
} else if (ann_type_equivalent == kTfLiteUInt8) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
zero_point += 128;
} else if (ann_type_equivalent == kTfLiteInt32) {
nn_type = ANEURALNETWORKS_TENSOR_INT32;
zero_point += 128;
} else {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM;
}
if (scale == 0) {
scale = 1;
}
break;
case kTfLiteInt32:
nn_type = ANEURALNETWORKS_TENSOR_INT32;
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
break;
case kTfLiteBool:
nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
case kTfLiteInt16:
nn_type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM;
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
break;
default:
break;
}
uint32_t tensor_rank = static_cast<uint32_t>(tensor->dims->size);
uint32_t* tensor_dims = reinterpret_cast<uint32_t*>(tensor->dims->data);
static uint32_t scalar_rank = 1;
if (tensor_rank == 0) {
tensor_rank = scalar_rank;
tensor_dims = &scalar_rank;
}
ANeuralNetworksOperandType nn_operand_type{
.type = nn_type,
.dimensionCount = tensor_rank,
.dimensions = tensor_dims,
.scale = scale,
.zeroPoint = zero_point,
};
return nn_operand_type;
}
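// Tensors placed in the shared NNAPI memory pools are padded to 64-byte
// boundaries; these helpers compute the required padding and the padded size.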
constexpr size_t kDefaultByteAlignmentForNNAPI = 64;
static size_t GetNumPaddingBytes(size_t byte_size) {
size_t num_padding_bytes = 0;
if (byte_size % kDefaultByteAlignmentForNNAPI) {
num_padding_bytes = kDefaultByteAlignmentForNNAPI -
(byte_size % kDefaultByteAlignmentForNNAPI);
}
return num_padding_bytes;
}
static size_t GetNNTensorSize(size_t tensor_size, bool allow_padding) {
size_t padding_bytes = GetNumPaddingBytes(tensor_size);
size_t nn_tensor_size = tensor_size;
if (allow_padding) {
nn_tensor_size += padding_bytes;
}
return nn_tensor_size;
}
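// Resolves an accelerator name to its ANeuralNetworksDevice handle by
// scanning the devices reported by NNAPI, and logs the list of available
// device names when no match is found.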
TfLiteStatus GetDeviceHandle(const NnApi* nnapi, TfLiteContext* context,
const char* device_name_ptr,
ANeuralNetworksDevice** result, int* nnapi_errno) {
if (!device_name_ptr) return kTfLiteError;
*result = nullptr;
std::string device_name(device_name_ptr);
uint32_t num_devices = 0;
nnapi->ANeuralNetworks_getDeviceCount(&num_devices);
for (uint32_t i = 0; i < num_devices; i++) {
ANeuralNetworksDevice* device = nullptr;
const char* buffer = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDevice(i, &device),
"Searching for target device", nnapi_errno);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworksDevice_getName(device, &buffer),
"Searching for target device", nnapi_errno);
if (device_name == buffer) {
*result = device;
return kTfLiteOk;
}
}
TF_LITE_KERNEL_LOG(context,
"Could not find the specified NNAPI accelerator: %s. "
"Must be one of: {%s}.",
device_name_ptr,
nnapi::GetStringDeviceNamesList(nnapi).c_str());
return kTfLiteError;
}
uint64_t GetHash(const TfLiteIntArray* int_array, uint64_t combine_with = 0) {
constexpr auto kHashConst = 0x9e3779b97f4a7800ULL;
uint64_t result = combine_with;
for (auto i : TfLiteIntArrayView(int_array)) {
result = result ^ (i + kHashConst + (result << 10) + (result >> 4));
}
return result;
}
bool HasZeroes(TfLiteIntArrayView array) {
for (auto value : array) {
if (value == 0) {
return true;
}
}
return false;
}
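// For SPLIT_V, returns the size that a single "-1" (unknown) split must take
// along the split axis, or -1 when every split size is given explicitly.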
int ComputeSplitVUnknownSplitSize(const TfLiteContext* context,
const TfLiteNode* node) {
const auto& input = context->tensors[node->inputs->data[0]];
const auto& size_splits_tensor = context->tensors[node->inputs->data[1]];
const auto& axis_tensor = context->tensors[node->inputs->data[2]];
const auto* size_splits = size_splits_tensor.data.i32;
int num_splits = size_splits_tensor.dims->data[0];
bool has_unknown_split_size = false;
int sum_of_known_split_sizes = 0;
for (int i = 0; i < num_splits; i++) {
if (size_splits[i] == -1) {
has_unknown_split_size = true;
} else {
sum_of_known_split_sizes += size_splits[i];
}
}
int axis = axis_tensor.data.i32[0];
axis = axis < 0 ? axis + input.dims->size : axis;
int total_size = input.dims->data[axis];
return has_unknown_split_size ? total_size - sum_of_known_split_sizes : -1;
}
enum {
NN_TENSOR_FLAG_SCALAR_AS_TENSOR = 1U << 0,
NN_TENSOR_FLAG_INT8_CONVERSION = 1U << 1,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED = 1U << 2,
NN_TENSOR_FLAG_FORCE_PER_CHANNEL = 1U << 3,
NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION = 1U << 4,
};
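// Chooses the feature level to compile against: the NNAPI runtime's own
// feature level, lowered to the highest level supported by the explicitly
// targeted devices when that value is smaller.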
TfLiteStatus GetTargetFeatureLevel(
TfLiteContext* context, const NnApi* nnapi,
const std::vector<ANeuralNetworksDevice*>& device_handles,
int* target_feature_level, int* nnapi_errno) {
*target_feature_level = nnapi->nnapi_runtime_feature_level;
int64_t devices_feature_level = -1;
for (const auto* device_handle : device_handles) {
int64_t curr_device_feature_level;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi->ANeuralNetworksDevice_getFeatureLevel(
device_handle, &curr_device_feature_level),
"Searching for target device", nnapi_errno);
devices_feature_level =
std::max(curr_device_feature_level, devices_feature_level);
}
if ((devices_feature_level > 0) &&
(devices_feature_level < nnapi->nnapi_runtime_feature_level)) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Changing NNAPI Feature Level %lld to "
"supported by target devices: %lld",
nnapi->android_sdk_version, devices_feature_level);
*target_feature_level = devices_feature_level;
}
return kTfLiteOk;
}
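// Explicit target devices are used when the caller named an accelerator
// (optionally ignoring a request for "nnapi-reference") or when NNAPI CPU
// fallback is disallowed on SDK versions that support NNAPI 1.2.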
bool ShouldUseTargetDevices(StatefulNnApiDelegate::Options delegate_options,
const NnApi* nnapi,
bool exclude_nnapi_reference = false) {
const char* device_name_ptr = delegate_options.accelerator_name;
std::string nnapi_cpu("nnapi-reference");
bool has_selected_accelerator = device_name_ptr != nullptr;
if (exclude_nnapi_reference && has_selected_accelerator) {
if (nnapi_cpu == device_name_ptr) return false;
}
return (delegate_options.disallow_nnapi_cpu &&
nnapi->android_sdk_version >=
delegate::nnapi::kMinSdkVersionForNNAPI12) ||
has_selected_accelerator;
}
TfLiteStatus GetTargetDevices(TfLiteContext* context, TfLiteDelegate* delegate,
const NnApi* nnapi, int* nnapi_errno,
std::vector<ANeuralNetworksDevice*>* result) {
if (nnapi->android_sdk_version < delegate::nnapi::kMinSdkVersionForNNAPI12) {
return kTfLiteError;
}
const auto delegate_options = StatefulNnApiDelegate::GetOptions(delegate);
const char* device_name_ptr = delegate_options.accelerator_name;
if (device_name_ptr != nullptr) {
ANeuralNetworksDevice* nnapi_device = nullptr;
TF_LITE_ENSURE_STATUS(GetDeviceHandle(nnapi, context, device_name_ptr,
&nnapi_device, nnapi_errno));
result->push_back(nnapi_device);
} else if (delegate_options.disallow_nnapi_cpu) {
std::string nnapi_cpu("nnapi-reference");
uint32_t num_devices = 0;
nnapi->ANeuralNetworks_getDeviceCount(&num_devices);
for (uint32_t i = 0; i < num_devices; i++) {
ANeuralNetworksDevice* device = nullptr;
const char* buffer = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDevice(i, &device),
"Getting list of available devices", nnapi_errno);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworksDevice_getName(device, &buffer),
"Getting list of available devices", nnapi_errno);
if (nnapi_cpu != buffer) {
result->push_back(device);
}
}
}
return kTfLiteOk;
}
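// Book-keeping for the mapping between TFLite tensor indices and NNAPI
// operand indices, recorded type conversions, and the NNAPI-operation to
// TFLite-node mapping.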
class NnapiMappingContext {
public:
int next_ann_tensor_index_ = 0;
std::vector<int> lite_tensor_to_ann_tensor_;
std::vector<int> index_to_type_conversion_;
std::vector<int> nnapi_to_tflite_op_mapping_;
};
}
namespace delegate {
namespace nnapi {
#ifdef TFLITE_NNAPI_ALLOW_MMAP_SHARING
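// Backs an NNAPI memory pool with a shared-memory region (ASharedMemory on
// Android, a named shm region elsewhere) that is mmap'd into the process and
// registered with NNAPI through ANeuralNetworksMemory_createFromFd.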
NNMemory::NNMemory(const NnApi* nnapi, const char* name, size_t size) {
if (name && size > 0) {
nnapi_ = nnapi;
byte_size_ = size;
#ifdef __ANDROID__
fd_ = nnapi_->ASharedMemory_create(name, size);
#else
char shm_name_buffer[L_tmpnam];
if (tmpnam(shm_name_buffer) == nullptr) {
shm_name_buffer[0] = '\0';
}
shm_region_name_ = std::string(name) + std::string(shm_name_buffer);
std::replace(shm_region_name_.begin(), shm_region_name_.end(), '/', '-');
fd_ = nnapi_->ASharedMemory_create(shm_region_name_.c_str(), size);
#endif
data_ptr_ = reinterpret_cast<uint8_t*>(
mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0));
nnapi_->ANeuralNetworksMemory_createFromFd(size, PROT_READ | PROT_WRITE,
fd_, 0, &nn_memory_handle_);
}
}
#else
NNMemory::NNMemory(const NnApi* /*nnapi*/, const char* /*name*/,
                   size_t /*size*/)
    : nnapi_(nullptr) {}
#endif
NNMemory::~NNMemory() {
#ifdef TFLITE_NNAPI_ALLOW_MMAP_SHARING
if (data_ptr_) {
munmap(data_ptr_, byte_size_);
}
if (nn_memory_handle_) {
nnapi_->ANeuralNetworksMemory_free(nn_memory_handle_);
}
#ifdef __ANDROID__
if (fd_ >= 0) close(fd_);
#else
if (!shm_region_name_.empty()) shm_unlink(shm_region_name_.c_str());
#endif
#endif
}
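// Remembers which NNAPI operand already holds the dequantized copy of a given
// quantized operand, so that at most one DEQUANTIZE op is emitted per
// (operand, type) pair.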
class DequantizeMapping {
public:
int DequantizedAnnIndex(int ann_index, TfLiteType type) const {
for (const auto& element : mapping_) {
if (ann_index == std::get<0>(element) && type == std::get<1>(element)) {
return std::get<2>(element);
}
}
return -1;
}
void Add(int ann_index, TfLiteType type, int dequantized_ann_index) {
mapping_.emplace_back(ann_index, type, dequantized_ann_index);
}
private:
std::vector<std::tuple<int, TfLiteType, int>> mapping_;
};
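// Translates TFLite nodes into NNAPI operands and operations. Operand indices
// for the operation currently being built are accumulated in
// augmented_inputs_ / augmented_outputs_ and flushed into the model by
// FinalizeAddOperation().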
class NNAPIOpBuilder {
public:
NNAPIOpBuilder(const NnApi* nnapi, TfLiteContext* context,
NnapiMappingUtilCInterface* mapping_util,
DequantizeMapping* dequantize_mapping,
std::map<const MMAPAllocation*, ANeuralNetworksMemory*>*
allocation_mapping,
ANeuralNetworksModel* nn_model, int* nnapi_errno,
bool allow_dynamic_dimensions)
: nnapi_(nnapi),
context_(context),
mapping_util_(mapping_util),
dequantize_mapping_(dequantize_mapping),
allocation_memory_mapping_(allocation_mapping),
nn_model_(nn_model),
nnapi_errno_(nnapi_errno),
allow_dynamic_dimensions_(allow_dynamic_dimensions) {}
TfLiteStatus AddScalarBoolOperand(bool value) {
return AddScalarOperand<bool>(value, ANEURALNETWORKS_BOOL);
}
TfLiteStatus AddScalarInt32Operand(int32_t value) {
return AddScalarOperand<int32_t>(value, ANEURALNETWORKS_INT32);
}
TfLiteStatus AddScalarFloat32Operand(float value) {
return AddScalarOperand<float>(value, ANEURALNETWORKS_FLOAT32);
}
TfLiteStatus AddVectorInt32Operand(const int32_t* values,
uint32_t num_values) {
return AddVectorOperand<int32_t>(values, num_values,
ANEURALNETWORKS_TENSOR_INT32,
                                     /*scale=*/0.f, /*zero_point=*/0);
}
TfLiteStatus AddVectorInt32Operand(const int32_t* values, uint32_t num_values,
float scale, int32_t zero_point) {
return AddVectorOperand<int32_t>(
values, num_values, ANEURALNETWORKS_TENSOR_INT32, scale, zero_point);
}
TfLiteStatus AddVectorInt16Operand(const int16_t* values,
uint32_t num_values) {
return AddVectorOperand<int16_t>(values, num_values,
ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
                                     /*scale=*/1.f, /*zero_point=*/0);
}
TfLiteStatus AddVectorInt8Operand(const int8_t* values, uint32_t num_values) {
return AddVectorOperand<int8_t>(values, num_values,
ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
                                    /*scale=*/1.f, /*zero_point=*/0);
}
TfLiteStatus AddVectorFloat32Operand(const float* values,
uint32_t num_values) {
return AddVectorOperand<float>(values, num_values,
ANEURALNETWORKS_TENSOR_FLOAT32);
}
TfLiteStatus AddPoolingParams(void* data) {
auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
AddScalarInt32Operand(builtin->padding);
AddScalarInt32Operand(builtin->stride_width);
AddScalarInt32Operand(builtin->stride_height);
AddScalarInt32Operand(builtin->filter_width);
AddScalarInt32Operand(builtin->filter_height);
AddScalarInt32Operand(builtin->activation);
return kTfLiteOk;
}
TfLiteStatus AddTensorInput(int tensor_index, bool hybrid_op,
int tensor_flags = 0) {
return AddTensor(tensor_index, hybrid_op, &augmented_inputs_, tensor_flags);
}
TfLiteStatus AddTensorOutput(int tensor_index, int tensor_flags = 0) {
return AddTensor(tensor_index, false, &augmented_outputs_,
tensor_flags);
}
TfLiteStatus AddAdditionalFloat32OutputTensor(uint32_t dimension_count) {
std::vector<uint32_t> dims(dimension_count, 0);
return AddFloat32OutputTensor(dimension_count, dims.data(), nullptr);
}
TfLiteStatus AddStateFloat32Tensor(int tensor_index,
int* ann_tensor_index_out) {
TfLiteTensor* tensor = &context_->tensors[tensor_index];
return AddFloat32OutputTensor(
tensor->dims->size, reinterpret_cast<uint32_t*>(tensor->dims->data),
ann_tensor_index_out);
}
TfLiteStatus AddStateInt16Tensor(int tensor_index,
int* ann_tensor_index_out) {
TfLiteTensor* tensor = &context_->tensors[tensor_index];
return AddAdditionalOutputTensor(
tensor->dims->size, reinterpret_cast<uint32_t*>(tensor->dims->data),
ANEURALNETWORKS_TENSOR_QUANT16_SYMM, tensor->params.scale,
tensor->params.zero_point, ann_tensor_index_out);
}
TfLiteStatus AddStateInt8AsymTensor(int tensor_index,
int* ann_tensor_index_out) {
TfLiteTensor* tensor = &context_->tensors[tensor_index];
return AddAdditionalOutputTensor(
tensor->dims->size, reinterpret_cast<uint32_t*>(tensor->dims->data),
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, tensor->params.scale,
tensor->params.zero_point, ann_tensor_index_out);
}
TfLiteStatus AddSingleValueConstantTensor(float value, bool is_quantized) {
if (!is_quantized) {
return AddVectorFloat32Operand(&value, 1);
} else {
const uint8_t quant8_value = 64;
return AddVectorOperand<uint8_t>(&quant8_value, 1,
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
value / quant8_value, 0);
}
}
TfLiteStatus CalculateQuantizationParams(float min, float max, float* scale,
int* zero_point) {
if (max < min) return kTfLiteError;
*scale = (max - min) / 255.f;
if (min > 0.f) {
*zero_point = 0;
} else if (max < 0.f) {
*zero_point = 255;
} else {
*zero_point = (0.f - min) / (*scale);
}
return kTfLiteOk;
}
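  // Emits hard_swish(x) = x/2 + (x/2) * relu1(x/3) as a sequence of NNAPI
  // ops: MUL(x, 1/3) with fused RELU1, MUL(x, 1/2), a MUL of the two
  // intermediates, and a final ADD; quantization parameters for the
  // intermediate tensors are derived from the input range.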
TfLiteStatus TransformHardSwishIntoSupportedOps(int lite_input_index,
int lite_output_index,
bool need_int8_conversion,
int lite_node_index) {
const TfLiteTensor& tensor = context_->tensors[lite_input_index];
float input_scale = tensor.params.scale;
int input_zero_point = tensor.params.zero_point;
float input_min = 0.f;
float input_max = 0.f;
int tensor_flags = 0;
if (need_int8_conversion) {
tensor_flags = tensor_flags | NN_TENSOR_FLAG_INT8_CONVERSION;
input_zero_point += 128;
}
bool is_quantized = false;
int nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
if (tensor.type == kTfLiteInt8 || tensor.type == kTfLiteUInt8) {
is_quantized = true;
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
input_min = (0 - input_zero_point) * input_scale;
input_max = (255 - input_zero_point) * input_scale;
}
float s1_output_min = 0.f;
float s1_output_max = 0.f;
int s1_out_ann_index = 0;
{
float s1_output_scale = 0.f;
int s1_output_zero_point = 0;
if (is_quantized) {
s1_output_min = input_min / 3.f < -1.f ? -1.f : input_min / 3.f;
s1_output_max = input_max / 3.f > 1.f ? 1.f : input_max / 3.f;
CalculateQuantizationParams(s1_output_min, s1_output_max,
&s1_output_scale, &s1_output_zero_point);
}
TF_LITE_ENSURE_OK(context_,
AddTensorInput(lite_input_index, false, tensor_flags));
const float value3f = 1.f / 3.f;
TF_LITE_ENSURE_OK(context_,
AddSingleValueConstantTensor(value3f, is_quantized));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_RELU1));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
tensor.dims->size, reinterpret_cast<uint32_t*>(tensor.dims->data),
nn_type, s1_output_scale, s1_output_zero_point,
&s1_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
float s2_output_min = input_min / 2.f;
float s2_output_max = input_max / 2.f;
int s2_out_ann_index = 0;
{
float s2_output_scale = input_scale / 2.0f;
int s2_output_zero_point = input_zero_point;
TF_LITE_ENSURE_OK(context_,
AddTensorInput(lite_input_index, false, tensor_flags));
const float value2f = 0.5f;
TF_LITE_ENSURE_OK(context_,
AddSingleValueConstantTensor(value2f, is_quantized));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
tensor.dims->size, reinterpret_cast<uint32_t*>(tensor.dims->data),
nn_type, s2_output_scale, s2_output_zero_point,
&s2_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
int s3_out_ann_index = 0;
{
augmented_inputs_.push_back(s1_out_ann_index);
augmented_inputs_.push_back(s2_out_ann_index);
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
float s3_output_scale = 0.f;
int s3_output_zero_point = 0;
if (is_quantized) {
float s3_output_min = 0.f;
float s3_output_max =
s1_output_max * s2_output_max > s1_output_min * s2_output_min
? s1_output_max * s2_output_max
: s1_output_min * s2_output_min;
CalculateQuantizationParams(s3_output_min, s3_output_max,
&s3_output_scale, &s3_output_zero_point);
}
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
tensor.dims->size, reinterpret_cast<uint32_t*>(tensor.dims->data),
nn_type, s3_output_scale, s3_output_zero_point,
&s3_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
{
augmented_inputs_.push_back(s2_out_ann_index);
augmented_inputs_.push_back(s3_out_ann_index);
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(context_,
AddTensorOutput(lite_output_index, tensor_flags));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_ADD, lite_node_index));
}
return kTfLiteOk;
}
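  // Adds a single operation to the ANeuralNetworksModel and records the index
  // of the TFLite node it was generated from.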
TfLiteStatus AddOperationToModel(ANeuralNetworksOperationType type,
uint32_t input_count, const uint32_t* inputs,
uint32_t output_count,
const uint32_t* outputs,
int lite_node_index) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperation(
nn_model_, type, input_count, inputs, output_count, outputs),
"adding operation", nnapi_errno_);
mapping_util_->AddNnapiToTfliteOpMapping(mapping_util_, lite_node_index);
return kTfLiteOk;
}
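  // Ensures a DEQUANTIZE op producing a float32 copy of the given quantized
  // tensor exists (adding it only the first time) and rewires the current
  // operation's input at `nn_input_index` to the dequantized operand.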
TfLiteStatus AddDequantize(int nn_input_index, int lite_tensor_index,
TfLiteType dequantized_type, int lite_node_index) {
const int ann_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_, lite_tensor_index);
int dequantized_ann_index =
dequantize_mapping_->DequantizedAnnIndex(ann_index, dequantized_type);
if (dequantized_ann_index == -1) {
const TfLiteTensor& tensor = context_->tensors[lite_tensor_index];
ANeuralNetworksOperandType operand_type{
ANEURALNETWORKS_TENSOR_FLOAT32,
static_cast<uint32_t>(tensor.dims->size),
reinterpret_cast<uint32_t*>(tensor.dims->data), 0.f, 0};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
dequantized_ann_index =
mapping_util_->AddNewNonTensorOperand(mapping_util_);
const uint32_t dequantize_input[1] = {static_cast<uint32_t>(ann_index)};
const uint32_t dequantize_output[1] = {
static_cast<uint32_t>(dequantized_ann_index)};
TF_LITE_ENSURE_OK(
context_, AddOperationToModel(ANEURALNETWORKS_DEQUANTIZE,
1, dequantize_input,
1, dequantize_output,
lite_node_index));
dequantize_mapping_->Add(ann_index, dequantized_type,
dequantized_ann_index);
}
augmented_inputs_[nn_input_index] = dequantized_ann_index;
return kTfLiteOk;
}
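  // Appends a RESHAPE from the given NNAPI operand to the shape of the TFLite
  // output tensor; used by the decompositions below to restore the expected
  // output shape.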
TfLiteStatus AppendReshape(int nn_input_index, int lite_out_tensor_index,
int lite_node_index) {
augmented_inputs_.push_back(nn_input_index);
auto& output_tensor = context_->tensors[lite_out_tensor_index];
TF_LITE_ENSURE_STATUS(
AddVectorInt32Operand(output_tensor.dims->data,
static_cast<uint32_t>(output_tensor.dims->size)));
TF_LITE_ENSURE_OK(context_,
AddTensorOutput(lite_out_tensor_index,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_RESHAPE, lite_node_index));
return kTfLiteOk;
}
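  // Requantizes by adding a constant zero tensor: the ADD's output operand is
  // registered with the target tensor's scale and zero point, so the addition
  // only changes the quantization parameters.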
TfLiteStatus AppendRequantize(int nn_input_index, int lite_out_tensor_index,
int lite_node_index, int tensor_flags = 0) {
augmented_inputs_.push_back(nn_input_index);
auto& output_tensor = context_->tensors[lite_out_tensor_index];
TF_LITE_ENSURE(context_, IsQuantized(output_tensor.type));
bool need_int8_conversion = tensor_flags & NN_TENSOR_FLAG_INT8_CONVERSION;
int nn_type = (output_tensor.type == kTfLiteUInt8 || need_int8_conversion)
? ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
int8_t zero = 0;
TF_LITE_ENSURE_STATUS(AddVectorOperand(&zero, 1, nn_type,
1.0f, 0));
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_STATUS(AddTensorOutput(lite_out_tensor_index, tensor_flags));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_ADD, lite_node_index));
return kTfLiteOk;
}
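  // PACK is lowered to a CONCATENATION along the packed axis followed by a
  // RESHAPE to the real output shape.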
TfLiteStatus TransformPackIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
int concat_output_ann_index = -1;
TfLitePackParams* builtin =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
auto& input_tensor = context_->tensors[node->inputs->data[0]];
int axis = builtin->axis < 0 ? input_tensor.dims->size + builtin->axis + 1
: builtin->axis;
TF_LITE_ENSURE(context_, axis < input_tensor.dims->size);
uint32_t concat_dim_size = 0;
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
const auto input_index = node->inputs->data[input_pos];
concat_dim_size +=
context_->tensors[node->inputs->data[input_pos]].dims->data[axis];
TF_LITE_ENSURE_STATUS(
AddTensorInput(input_index, false,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
}
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(axis));
std::vector<uint32_t> concat_output_shape(input_tensor.dims->size, 0);
for (int i = 0; i < concat_output_shape.size(); i++) {
if (i == axis) {
concat_output_shape[i] = concat_dim_size;
} else {
concat_output_shape[i] = input_tensor.dims->data[i];
}
}
TF_LITE_ENSURE_STATUS(AddIntermediateOutputTensor(
input_tensor.type, concat_output_shape.size(),
concat_output_shape.data(), input_tensor.params.scale,
input_tensor.params.zero_point, &concat_output_ann_index));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_CONCATENATION, lite_node_index));
TF_LITE_ENSURE_STATUS(AppendReshape(
concat_output_ann_index, node->outputs->data[0], lite_node_index));
return kTfLiteOk;
}
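  // UNPACK is lowered to a RESHAPE that folds the unpacked axis into the one
  // after it, followed by a SPLIT that produces one output per slice.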
TfLiteStatus TransformUnpackIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
auto& input_tensor = context_->tensors[node->inputs->data[0]];
auto* builtin = reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
int axis = builtin->axis < 0 ? builtin->axis + input_tensor.dims->size
: builtin->axis;
TF_LITE_ENSURE(context_, axis >= 0);
TF_LITE_ENSURE(context_, axis < (input_tensor.dims->size - 1));
int num_splits = builtin->num;
TF_LITE_ENSURE(context_, num_splits == input_tensor.dims->data[axis]);
TF_LITE_ENSURE(context_, num_splits == node->outputs->size);
std::vector<int32_t> intermediate_shape(input_tensor.dims->size - 1);
std::copy(input_tensor.dims->data, input_tensor.dims->data + axis,
intermediate_shape.begin());
intermediate_shape[axis] =
input_tensor.dims->data[axis] * input_tensor.dims->data[axis + 1];
std::copy(input_tensor.dims->data + axis + 2,
input_tensor.dims->data + input_tensor.dims->size,
intermediate_shape.begin() + axis + 1);
TF_LITE_ENSURE_STATUS(AddTensorInput(node->inputs->data[0],
false,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(AddVectorInt32Operand(intermediate_shape.data(),
intermediate_shape.size()));
int reshape_output_ann_index = -1;
float scale = input_tensor.params.scale;
if (IsQuantized(input_tensor.type) && scale == 0.0f) {
scale = 1.0f;
}
TF_LITE_ENSURE_STATUS(AddIntermediateOutputTensor(
input_tensor.type, intermediate_shape.size(),
reinterpret_cast<uint32_t*>(intermediate_shape.data()), scale,
input_tensor.params.zero_point, &reshape_output_ann_index));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_RESHAPE, lite_node_index));
augmented_inputs_.push_back(reshape_output_ann_index);
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(axis));
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(num_splits));
for (int i = 0; i < num_splits; i++) {
int lite_output_index = node->outputs->data[i];
TF_LITE_ENSURE_STATUS(AddTensorOutput(
lite_output_index, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
}
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_SPLIT, lite_node_index));
return kTfLiteOk;
}
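  // SPLIT_V is lowered to one SLICE per requested split; a "-1" split size is
  // resolved with ComputeSplitVUnknownSplitSize().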
TfLiteStatus TransformSplitVIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
auto& input = context_->tensors[node->inputs->data[0]];
int input_rank = input.dims->size;
const auto& size_splits_tensor = context_->tensors[node->inputs->data[1]];
const auto* size_splits = size_splits_tensor.data.i32;
int num_splits = size_splits_tensor.dims->data[0];
int axis = context_->tensors[node->inputs->data[2]].data.i32[0];
axis = axis < 0 ? axis + input_rank : axis;
TF_LITE_ENSURE(context_, axis >= 0);
TF_LITE_ENSURE(context_, axis < input_rank);
int unknown_split_size = ComputeSplitVUnknownSplitSize(context_, node);
int slice_begin_index = 0;
for (int split_index = 0; split_index < num_splits; split_index++) {
int split_size = size_splits[split_index] == -1
? unknown_split_size
: size_splits[split_index];
TF_LITE_ENSURE(context_, split_size > 0);
std::vector<int> begin_indices(input_rank);
std::vector<int> slice_sizes(input_rank);
for (int i = 0; i < input_rank; i++) {
if (i == axis) {
begin_indices[i] = slice_begin_index;
slice_sizes[i] = split_size;
} else {
begin_indices[i] = 0;
slice_sizes[i] = input.dims->data[i];
}
}
slice_begin_index += split_size;
TF_LITE_ENSURE_STATUS(AddTensorInput(
node->inputs->data[0],
false, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(
AddVectorInt32Operand(begin_indices.data(), begin_indices.size()));
TF_LITE_ENSURE_STATUS(
AddVectorInt32Operand(slice_sizes.data(), slice_sizes.size()));
int lite_output_index = node->outputs->data[split_index];
TF_LITE_ENSURE_STATUS(AddTensorOutput(
lite_output_index, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_SLICE, lite_node_index));
}
return kTfLiteOk;
}
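  // SQUARED_DIFFERENCE is lowered to a SUB followed by a MUL of the
  // difference with itself; for quantized types an intermediate scale and
  // zero point are derived from the output tensor's range.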
TfLiteStatus TransformSquaredDifferenceIntoSupportedOps(
int lite_node_index, TfLiteNode* node, TfLiteRegistration* reg) {
const TfLiteTensor& lhs = context_->tensors[node->inputs->data[0]];
const TfLiteTensor& output = context_->tensors[node->outputs->data[0]];
int diff_out_ann_index = 0;
{
float max_output = 0.f;
int diff_output_zero_point = 0;
int diff_output_nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
switch (lhs.type) {
case kTfLiteFloat32:
diff_output_nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteInt32:
diff_output_nn_type = ANEURALNETWORKS_TENSOR_INT32;
break;
case kTfLiteUInt8:
max_output = (255 - output.params.zero_point) * output.params.scale;
diff_output_zero_point = 128;
diff_output_nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
break;
case kTfLiteInt8:
max_output = (127 - output.params.zero_point) * output.params.scale;
diff_output_zero_point = 0;
diff_output_nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
break;
default:
return kTfLiteError;
}
float diff_output_scale = 2.0f * std::sqrt(max_output) / 254.0f;
TF_LITE_ENSURE_OK(
context_, AddTensorInput(node->inputs->data[0], false,
NN_TENSOR_FLAG_SCALAR_AS_TENSOR |
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_OK(
context_, AddTensorInput(node->inputs->data[1], false,
NN_TENSOR_FLAG_SCALAR_AS_TENSOR |
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
output.dims->size, reinterpret_cast<uint32_t*>(output.dims->data),
diff_output_nn_type, diff_output_scale, diff_output_zero_point,
&diff_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_SUB, lite_node_index));
}
{
augmented_inputs_.push_back(diff_out_ann_index);
augmented_inputs_.push_back(diff_out_ann_index);
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(context_,
AddTensorOutput(node->outputs->data[0],
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
return kTfLiteOk;
}
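  // COS is lowered using cos(x) = sin(pi/2 - x): a SUB against a constant
  // tensor filled with pi/2, followed by SIN.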
TfLiteStatus TransformCosIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
const TfLiteTensor& input = context_->tensors[node->inputs->data[0]];
const TfLiteTensor& output = context_->tensors[node->outputs->data[0]];
int diff_out_ann_index;
{
auto tensor_size = input.bytes / sizeof(float);
int tensor_index;
TF_LITE_ENSURE_OK(context_,
AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32,
input.dims, std::vector<float>(tensor_size, M_PI_2),
input.params, &tensor_index));
TF_LITE_ENSURE_OK(
context_, AddTensorInput(node->inputs->data[0], false));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
output.dims->size, reinterpret_cast<uint32_t*>(output.dims->data),
ANEURALNETWORKS_TENSOR_FLOAT32, 0, 0, &diff_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_SUB, lite_node_index));
}
{
augmented_inputs_.push_back(diff_out_ann_index);
TF_LITE_ENSURE_OK(context_, AddTensorOutput(node->outputs->data[0]));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_SIN, lite_node_index));
}
return kTfLiteOk;
}
TfLiteStatus FinalizeAddOperation(ANeuralNetworksOperationType type,
int lite_node_index) {
TF_LITE_ENSURE_OK(context_,
AddOperationToModel(
type, static_cast<uint32_t>(augmented_inputs_.size()),
augmented_inputs_.data(),
static_cast<uint32_t>(augmented_outputs_.size()),
augmented_outputs_.data(), lite_node_index));
augmented_inputs_.clear();
augmented_outputs_.clear();
return kTfLiteOk;
}
TfLiteStatus AddSingleValueTensorAsScalarOperand(int tensor_index,
int nn_type) {
const TfLiteTensor* tensor = &context_->tensors[tensor_index];
TF_LITE_ENSURE_EQ(context_, NumElements(tensor), 1);
ANeuralNetworksOperandType operand_type{.type = nn_type};
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", tensor, nnapi_errno_);
int ann_tensor_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_, tensor_index);
if (ann_tensor_index != -1) {
augmented_inputs_.push_back(ann_tensor_index);
return kTfLiteOk;
}
ann_tensor_index =
mapping_util_->AddNewNnTensorIndex(mapping_util_, tensor_index);
augmented_inputs_.push_back(ann_tensor_index);
const TfLiteType tensor_type = tensor->type;
TfLiteType nn_type_equivalent;
TF_LITE_ENSURE_OK(context_, GetEquivalentToANNType(context_, nn_type,
&nn_type_equivalent));
if (tensor_type != nn_type_equivalent) {
mapping_util_->AddTypeConversion(mapping_util_, tensor_index,
nn_type_equivalent);
}
return kTfLiteOk;
}
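  // Creates a new dynamic TFLite tensor filled with `tensor_value`, registers
  // a matching constant NNAPI operand, and appends it to the inputs of the
  // operation being built.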
template <typename T>
TfLiteStatus AddNewInputConstantTensor(
int32_t nn_type, TfLiteType type, const TfLiteIntArray* dims,
const std::vector<T>& tensor_value,
const TfLiteQuantizationParams& quant_params, int* tensor_index) {
TF_LITE_ENSURE_OK(context_,
context_->AddTensors(context_, 1, tensor_index));
TfLiteTensor* new_tensor = &context_->tensors[*tensor_index];
new_tensor->type = type;
new_tensor->allocation_type = kTfLiteDynamic;
new_tensor->params = quant_params;
TF_LITE_ENSURE_OK(
context_,
context_->ResizeTensor(
context_, new_tensor,
TfLiteIntArrayCopy(dims)));
memcpy(new_tensor->data.raw,
reinterpret_cast<const char*>(tensor_value.data()),
tensor_value.size() * sizeof(T));
const uint32_t tensor_rank = static_cast<uint32_t>(dims->size);
const uint32_t* tensor_dims = reinterpret_cast<const uint32_t*>(dims->data);
ANeuralNetworksOperandType operand_type{nn_type, tensor_rank, tensor_dims,
quant_params.scale,
quant_params.zero_point};
const int ann_tensor_index =
mapping_util_->AddDelegateGeneratedInputAnnTensorOperand(mapping_util_);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
augmented_inputs_.push_back(ann_tensor_index);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, new_tensor->data.raw,
new_tensor->bytes),
"setting new operand value", nnapi_errno_);
return kTfLiteOk;
}
template <typename T>
TfLiteStatus AddNewInputConstantTensor(
int32_t nn_type, TfLiteType type, std::initializer_list<int> dims,
const std::vector<T>& tensor_value,
const TfLiteQuantizationParams& quant_params, int* tensor_index) {
TfLiteIntArray* dim_array = TfLiteIntArrayCreate(dims.size());
dim_array->size = dims.size();
std::copy(dims.begin(), dims.end(), dim_array->data);
const auto result = AddNewInputConstantTensor(
nn_type, type, dim_array, tensor_value, quant_params, tensor_index);
TfLiteIntArrayFree(dim_array);
return result;
}
TfLiteStatus AddIntermediateOutputTensor(TfLiteType tfl_type,
uint32_t dimension_count,
const uint32_t* dimension_data,
float scale, int32_t zero_point,
int* ann_index_out,
bool need_int8_conversion = false) {
int32_t nn_type;
switch (tfl_type) {
case kTfLiteFloat32:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteInt8:
nn_type = need_int8_conversion
? ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
break;
case kTfLiteUInt8:
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
break;
default:
return kTfLiteError;
}
if (need_int8_conversion) {
zero_point += 128;
}
TF_LITE_ENSURE_STATUS(
AddAdditionalOutputTensor(dimension_count, dimension_data, nn_type,
scale, zero_point, ann_index_out));
return kTfLiteOk;
}
void ClearInputOuputLists() {
augmented_inputs_.clear();
augmented_outputs_.clear();
}
private:
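  // Maps a scalar NNAPI operand type back to the corresponding TFLite type.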
TfLiteStatus GetEquivalentToANNType(TfLiteContext* context, int nn_type,
TfLiteType* type) {
switch (nn_type) {
case ANEURALNETWORKS_INT32:
*type = kTfLiteInt32;
return kTfLiteOk;
case ANEURALNETWORKS_FLOAT32:
*type = kTfLiteFloat32;
return kTfLiteOk;
default:
TF_LITE_KERNEL_LOG(context,
"NN API Delegate: Can't get an equivalent TF Lite "
"type for provided NN API type: %d.\n",
nn_type);
return kTfLiteError;
}
}
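  // Adds a constant scalar operand of the given NNAPI type holding `value`
  // and appends it to the inputs of the current operation.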
template <typename T>
TfLiteStatus AddScalarOperand(T value, int32_t nn_type) {
ANeuralNetworksOperandType operand_type{.type = nn_type};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
const int ann_index = mapping_util_->AddNewNonTensorOperand(mapping_util_);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(nn_model_, ann_index,
&value, sizeof(T)),
"setting new operand value", nnapi_errno_);
augmented_inputs_.push_back(ann_index);
return kTfLiteOk;
}
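  // Adds a constant 1-D operand with `num_values` elements and the given
  // quantization parameters, and appends it to the current operation's
  // inputs.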
template <typename T>
TfLiteStatus AddVectorOperand(const T* values, uint32_t num_values,
int32_t nn_type, float scale,
int32_t zero_point) {
ANeuralNetworksOperandType operand_type{.type = nn_type,
.dimensionCount = 1,
.dimensions = &num_values,
.scale = scale,
.zeroPoint = zero_point};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
const int ann_index = mapping_util_->AddNewNonTensorOperand(mapping_util_);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_index, values, sizeof(T) * num_values),
"settings new operand value", nnapi_errno_);
augmented_inputs_.push_back(ann_index);
return kTfLiteOk;
}
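  // Overload for non-quantized vectors (scale 0, zero point 0).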
template <typename T>
TfLiteStatus AddVectorOperand(const T* values, uint32_t num_values,
int32_t nn_type) {
    return AddVectorOperand(values, num_values, nn_type,
                            /*scale=*/0.f, /*zero_point=*/0);
}
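  // Adds a float32 output operand that is not backed by a TFLite tensor.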
TfLiteStatus AddFloat32OutputTensor(uint32_t dimension_count,
const uint32_t* dimension_data,
int* ann_index_out) {
return AddAdditionalOutputTensor(
dimension_count, dimension_data, ANEURALNETWORKS_TENSOR_FLOAT32,
        /*scale=*/0.f, /*zero_point=*/0, ann_index_out);
}
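  // Adds an operand of the given NNAPI type and registers it as an extra
  // output of the current operation; the new operand index is returned
  // through `ann_index_out` when it is non-null.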
TfLiteStatus AddAdditionalOutputTensor(uint32_t dimension_count,
const uint32_t* dimension_data,
int32_t nn_type, float scale,
int32_t zero_point,
int* ann_index_out) {
ANeuralNetworksOperandType operand_type{
.type = nn_type,
.dimensionCount = dimension_count,
.dimensions = dimension_data,
.scale = scale,
.zeroPoint = zero_point,
};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
const int ann_index = mapping_util_->AddNewNonTensorOperand(mapping_util_);
augmented_outputs_.push_back(ann_index);
if (ann_index_out) *ann_index_out = ann_index;
return kTfLiteOk;
}
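  // Adds a TFLite tensor as an NNAPI operand, reusing the mapping if the
  // tensor was already added. Handles scalar-as-tensor promotion, int8 to
  // uint8 and fp16 to fp32 conversions, per-channel quantization params, and
  // constant tensors, whose data is either copied into the model or shared
  // via a memory-mapped buffer when TFLITE_NNAPI_ALLOW_MMAP_SHARING is set.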
TfLiteStatus AddTensor(int tensor_index, bool hybrid_op,
std::vector<uint32_t>* indices, int tensor_flags = 0) {
const bool scalar_as_tensor =
tensor_flags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR;
const bool need_int8_conversion =
tensor_flags & NN_TENSOR_FLAG_INT8_CONVERSION;
const bool use_int8_asymm_signed =
tensor_flags & NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED;
const bool force_per_channel =
tensor_flags & NN_TENSOR_FLAG_FORCE_PER_CHANNEL;
const bool need_half2float_conversion =
tensor_flags & NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION;
int ann_tensor_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_, tensor_index);
if (ann_tensor_index != -1) {
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
ann_tensor_index =
mapping_util_->AddNewNnTensorIndex(mapping_util_, tensor_index);
int32_t nn_type = 0;
float scale = 0.0f;
int32_t zeroPoint = 0;
ANeuralNetworksSymmPerChannelQuantParams ann_perchannel_params;
TfLiteTensor* tensor = &context_->tensors[tensor_index];
TfLiteType tensor_type = tensor->type;
if (hybrid_op && (tensor_type == kTfLiteUInt8)) {
tensor_type = kTfLiteInt8;
}
switch (tensor_type) {
case kTfLiteNoType:
indices->push_back(-1);
return kTfLiteOk;
case kTfLiteFloat32:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteFloat16:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT16;
if (need_half2float_conversion) {
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
mapping_util_->AddTypeConversion(mapping_util_, tensor_index,
kTfLiteFloat32);
}
break;
case kTfLiteUInt8:
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
if (scale == 0) {
scale = 1;
}
break;
case kTfLiteInt8:
if (use_int8_asymm_signed) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
} else if (need_int8_conversion) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
} else {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM;
}
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
if (tensor->quantization.type == kTfLiteAffineQuantization) {
TfLiteAffineQuantization* quantization_params =
static_cast<TfLiteAffineQuantization*>(
tensor->quantization.params);
if (quantization_params->scale->size > 1 || force_per_channel) {
ann_perchannel_params = {
.channelDim = static_cast<uint32_t>(
quantization_params->quantized_dimension),
.scaleCount =
static_cast<uint32_t>(quantization_params->scale->size),
.scales = quantization_params->scale->data,
};
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL;
scale = 0.0f;
zeroPoint = 0;
} else if (quantization_params->scale->size == 1) {
scale = quantization_params->scale->data[0];
zeroPoint = quantization_params->zero_point->data[0];
}
}
if (nn_type != ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
if (need_int8_conversion) {
zeroPoint += 128;
mapping_util_->AddTypeConversion(mapping_util_, tensor_index,
kTfLiteUInt8);
}
if (scale == 0) {
scale = 1;
}
}
break;
case kTfLiteInt32:
nn_type = ANEURALNETWORKS_TENSOR_INT32;
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
break;
case kTfLiteBool:
nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
case kTfLiteInt16:
nn_type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM;
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
break;
default:
context_->ReportError(
context_, "Failed to add NN API tensor: type %s is not supported.",
TfLiteTypeGetName(tensor_type));
return kTfLiteError;
}
bool has_unspecified_dimensions = ::tflite::HasUnspecifiedDimension(tensor);
uint32_t tensor_rank = static_cast<uint32_t>(tensor->dims->size);
std::vector<uint32_t> dims_unspecified(tensor_rank, 0);
if (has_unspecified_dimensions) {
for (int i = 0; i < tensor->dims_signature->size; i++) {
dims_unspecified[i] = tensor->dims_signature->data[i] == -1
? 0
: tensor->dims_signature->data[i];
}
}
uint32_t* tensor_dims =
has_unspecified_dimensions && allow_dynamic_dimensions_
? dims_unspecified.data()
: reinterpret_cast<uint32_t*>(tensor->dims->data);
if (scalar_as_tensor && tensor_rank == 0) {
tensor_rank = 1;
tensor_dims = &tensor_rank;
}
if (tensor_rank == 0) {
tensor_dims = nullptr;
}
ANeuralNetworksOperandType operand_type{nn_type, tensor_rank, tensor_dims,
scale, zeroPoint};
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", tensor, nnapi_errno_);
if (nn_type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
nn_model_, ann_tensor_index, &ann_perchannel_params),
"setting new operand per channel quantization params", tensor,
nnapi_errno_);
}
if (tensor->allocation_type == kTfLiteMmapRo) {
if (IsQuantized(tensor_type) && need_int8_conversion &&
nn_type != ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
int new_tensor_index = -1;
TF_LITE_ENSURE_OK(context_,
context_->AddTensors(context_, 1, &new_tensor_index));
TfLiteTensor* new_tensor = &context_->tensors[new_tensor_index];
new_tensor->type = kTfLiteUInt8;
new_tensor->allocation_type = kTfLiteDynamic;
new_tensor->params.scale = scale;
new_tensor->params.zero_point = zeroPoint;
TF_LITE_ENSURE_OK(
context_, context_->ResizeTensor(context_, new_tensor,
TfLiteIntArrayCopy(tensor->dims)));
const auto num_elements = NumElements(tensor);
for (int i = 0; i < num_elements; ++i) {
new_tensor->data.uint8[i] = static_cast<const uint8_t>(
static_cast<int32_t>(tensor->data.int8[i]) + 128);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, new_tensor->data.raw,
new_tensor->bytes),
"setting new operand value", tensor, nnapi_errno_);
} else if (tensor_type == kTfLiteFloat16 && need_half2float_conversion) {
int new_tensor_index = -1;
TF_LITE_ENSURE_OK(context_,
context_->AddTensors(context_, 1, &new_tensor_index));
TfLiteTensor* new_tensor = &context_->tensors[new_tensor_index];
new_tensor->type = kTfLiteFloat32;
new_tensor->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(
context_, context_->ResizeTensor(context_, new_tensor,
TfLiteIntArrayCopy(tensor->dims)));
const auto num_elements = NumElements(tensor);
for (int i = 0; i < num_elements; ++i) {
new_tensor->data.f[i] = fp16_ieee_to_fp32_value(
reinterpret_cast<uint16_t*>(tensor->data.data)[i]);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, new_tensor->data.data,
new_tensor->bytes),
"setting new operand value", tensor, nnapi_errno_);
#ifdef TFLITE_NNAPI_ALLOW_MMAP_SHARING
} else if (tensor->allocation &&
static_cast<const Allocation*>(tensor->allocation)->type() ==
Allocation::Type::kMMap) {
const MMAPAllocation* mmap_alloc =
static_cast<const MMAPAllocation*>(tensor->allocation);
if (allocation_memory_mapping_->count(mmap_alloc) == 0) {
ANeuralNetworksMemory* ann_memory_handle = nullptr;
nnapi_->ANeuralNetworksMemory_createFromFd(
mmap_alloc->mmapped_buffer_size(), PROT_READ, mmap_alloc->fd(),
mmap_alloc->mmapped_buffer_offset_in_file(), &ann_memory_handle);
allocation_memory_mapping_->insert(
std::make_pair(mmap_alloc, ann_memory_handle));
}
ANeuralNetworksMemory* ann_memory_handle =
allocation_memory_mapping_->at(mmap_alloc);
auto offset =
reinterpret_cast<const uint8_t*>(tensor->data.raw) -
reinterpret_cast<const uint8_t*>(mmap_alloc->mmapped_buffer());
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValueFromMemory(
nn_model_, ann_tensor_index, ann_memory_handle, offset,
tensor->bytes),
"setting new operand value from memory", tensor, nnapi_errno_);
#endif
} else {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, tensor->data.data, tensor->bytes),
"setting new operand value", tensor, nnapi_errno_);
}
}
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
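  // Access to the NNAPI bindings, the TFLite context, tensor index mappings,
  // and the NNAPI model under construction, plus the operand index lists for
  // the operation currently being built.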
const NnApi* const nnapi_;
TfLiteContext* const context_;
NnapiMappingUtilCInterface* const mapping_util_;
DequantizeMapping* const dequantize_mapping_;
std::map<const MMAPAllocation*, ANeuralNetworksMemory*>* const
allocation_memory_mapping_;
ANeuralNetworksModel* const nn_model_;
std::vector<uint32_t> augmented_inputs_;
std::vector<uint32_t> augmented_outputs_;
int* nnapi_errno_;
bool allow_dynamic_dimensions_;
};
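// Free helpers shared by the validation and execution-caching code below.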
namespace {
struct OpValidationContext {
bool is_valid;
std::vector<NNAPIValidationFailure>* validation_failures;
};
#define EXPECT_INPUT_TYPE_IN(actual_type, ...) \
ExpectTypeIn(actual_type, {__VA_ARGS__}, \
NNAPIValidationFailureType::kUnsupportedInputType, \
"Input type not in expected list " #__VA_ARGS__, &val_ctx)
inline void AddValidationFailure(NNAPIValidationFailureType failure_type,
const char* message,
OpValidationContext* val_ctx) {
val_ctx->is_valid = false;
#ifdef NNAPI_VERBOSE_VALIDATION
if (val_ctx->validation_failures) {
val_ctx->validation_failures->push_back({failure_type, message});
}
#endif
}
template <typename... Args>
inline void AddValidationFailureFmt(OpValidationContext* val_ctx,
NNAPIValidationFailureType failure_type,
const char* message_fmt, Args... args) {
val_ctx->is_valid = false;
#ifdef NNAPI_VERBOSE_VALIDATION
if (val_ctx->validation_failures) {
size_t req_buf_size = snprintf(nullptr, 0, message_fmt, args...) + 1;
std::unique_ptr<char[]> tmp_buf(new char[req_buf_size]);
snprintf(tmp_buf.get(), req_buf_size, message_fmt, args...);
val_ctx->validation_failures->push_back({failure_type, tmp_buf.get()});
}
#endif
}
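// Returns `condition`; when it is false, also records a validation failure
// with the given type and message.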
inline bool Expect(bool condition, NNAPIValidationFailureType failure_type,
const char* message, OpValidationContext* val_ctx) {
if (!condition) {
AddValidationFailure(failure_type, message, val_ctx);
return false;
}
return true;
}
template <typename... Args>
inline bool ExpectFmt(bool condition, OpValidationContext* val_ctx,
NNAPIValidationFailureType failure_type,
const char* message_fmt, Args... args) {
if (!condition) {
AddValidationFailureFmt(val_ctx, failure_type, message_fmt, args...);
return false;
}
return true;
}
inline bool ExpectTypeIn(TfLiteType actual_type,
std::initializer_list<TfLiteType> allowed_types,
NNAPIValidationFailureType failure_type,
const char* msg, OpValidationContext* val_ctx) {
return Expect(std::find(allowed_types.begin(), allowed_types.end(),
actual_type) != allowed_types.end(),
failure_type, msg, val_ctx);
}
inline bool ExpectMinAndroidSdkVersion(int curr_version, int min_version,
OpValidationContext* val_ctx) {
return ExpectFmt(curr_version >= min_version, val_ctx,
NNAPIValidationFailureType::kUnsupportedAndroidVersion,
"Android sdk version less than %d", min_version);
}
inline bool ExpectMaxOpVersion(int curr_version, int max_version,
OpValidationContext* val_ctx) {
return ExpectFmt(curr_version <= max_version, val_ctx,
NNAPIValidationFailureType::kUnsupportedOperatorVersion,
"OP Version higher than %d", max_version);
}
inline bool ExpectOpVersion(int curr_version, int max_version,
OpValidationContext* val_ctx) {
return ExpectFmt(curr_version <= max_version, val_ctx,
NNAPIValidationFailureType::kUnsupportedOperatorVersion,
"OP Version different from %d", max_version);
}
inline bool ExpectIsFloatOperator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloat(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float", val_ctx);
}
bool ExpectIsFloatOrUint8Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatOrUInt8(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float or UINT8", val_ctx);
}
bool ExpectIsFloatOrQuant8Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatOrQuantized(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float or Quant8", val_ctx);
}
bool ExpectIsFloatOrInt32Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatOrInt32(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float or Int32", val_ctx);
}
bool ExpectIsFloatQuant8OrInt32Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatQuantizedOrInt32(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float, Quant8, or Int32", val_ctx);
}
bool ExpectIsRestrictedScalesCompliant(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const int input_id = node->inputs->data[0];
const int filter_id = node->inputs->data[1];
const int output_id = node->outputs->data[0];
const float input_scale = context->tensors[input_id].params.scale;
const float filter_scale = context->tensors[filter_id].params.scale;
const float output_scale = context->tensors[output_id].params.scale;
return Expect(input_scale * filter_scale < output_scale,
NNAPIValidationFailureType::kNotRestrictedScaleCompliant,
"When using NN API version 1.0 or 1.1, input_scale * "
"filter_scale < output_scale.",
val_ctx);
}
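// Appends the current size of every dynamic dimension (marked as -1 in
// dims_signature) of the listed tensors to `dynamic_dimensions`.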
void AppendDynamicDimensions(const TfLiteContext* context,
const TfLiteIntArray* tensor_indices,
std::vector<int>& dynamic_dimensions) {
for (int i : TfLiteIntArrayView(tensor_indices)) {
if (i == kTfLiteOptionalTensor) continue;
const auto& tensor = context->tensors[i];
if (tensor.dims_signature) {
for (int i = 0; i < tensor.dims_signature->size; i++) {
if (tensor.dims_signature->data[i] == -1) {
dynamic_dimensions.push_back(tensor.dims->data[i]);
}
}
}
}
}
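// Builds an execution cache signature from the buffer-handle timestamps of
// all tensors and, when dynamic dimensions are allowed, from the current
// sizes of the node's dynamic input (and output) dimensions.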
NNAPIExecutionCache::Signature CreateExecutionCacheSignature(
const TfLiteContext* context, const TfLiteNode* node,
const StatefulNnApiDelegate::Options& delegate_options,
const std::vector<StatefulNnApiDelegate::MemoryRegistration>&
tensor_memory_map) {
std::vector<uint64_t> tensor_handle_timestamps(context->tensors_size);
for (int i = 0; i < tensor_handle_timestamps.size(); i++) {
auto handle = context->tensors[i].buffer_handle;
if (handle < 0 || handle >= tensor_memory_map.size()) {
tensor_handle_timestamps[i] = kNoMemoryTimestamp;
} else {
tensor_handle_timestamps[i] = tensor_memory_map[handle].timestamp;
}
}
std::vector<int> dynamic_dimensions;
if (delegate_options.allow_dynamic_dimensions) {
AppendDynamicDimensions(context, node->inputs, dynamic_dimensions);
if (delegate_options.vendor_plugin == nullptr) {
AppendDynamicDimensions(context, node->outputs, dynamic_dimensions);
}
}
return NNAPIExecutionCache::Signature{std::move(tensor_handle_timestamps),
std::move(dynamic_dimensions)};
}
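// Combines the hashes of all elements of `vec` into a single hash value.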
template <typename T>
std::size_t HashVector(const std::vector<T>& vec) {
std::size_t seed = vec.size();
auto hasher = std::hash<T>{};
for (const auto& i : vec) {
seed = CombineHashes({seed, hasher(i)});
}
return seed;
}
}  // namespace
bool NNAPIExecutionCache::Signature::operator==(const Signature& other) const {
return tensor_handle_timestamps == other.tensor_handle_timestamps &&
dynamic_dimensions == other.dynamic_dimensions;
}
std::size_t NNAPIExecutionCache::Signature::Hasher::operator()(
const Signature& signature) const {
return CombineHashes({HashVector(signature.tensor_handle_timestamps),
HashVector(signature.dynamic_dimensions)});
}
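// Looks up a cached execution for the signature and moves it to the front of
// the LRU list; returns nullptr on a cache miss.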
ANeuralNetworksExecution* NNAPIExecutionCache::Get(const Signature& signature) {
auto it = lookup_.find(signature);
if (it == lookup_.end()) {
return nullptr;
}
auto& list_it = it->second.first;
order_.erase(list_it);
order_.push_front(signature);
list_it = order_.begin();
auto& execution = it->second.second;
return execution.get();
}
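// Inserts a new execution for the signature, evicting the least recently
// used entry when the cache is full.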
void NNAPIExecutionCache::Put(const Signature& signature,
UniqueExecution execution) {
if (order_.size() >= max_cache_size_) {
ReleaseLRU();
}
order_.push_front(signature);
lookup_.emplace(signature,
std::make_pair(order_.begin(), std::move(execution)));
}
void NNAPIExecutionCache::Clear() {
order_.clear();
lookup_.clear();
}
void NNAPIExecutionCache::SetMaxCacheSize(uint32_t max_cache_size) {
max_cache_size_ = max_cache_size;
while (order_.size() > max_cache_size_) {
ReleaseLRU();
}
}
void NNAPIExecutionCache::ReleaseLRU() {
lookup_.erase(order_.back());
order_.pop_back();
}
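// Returns true if the node can be delegated to NNAPI at the given Android
// SDK level. Validation failures are appended to `map_failures`; detailed
// messages are only recorded when NNAPI_VERBOSE_VALIDATION is defined.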
bool NNAPIDelegateKernel::Validate(
const TfLiteContext* context, const TfLiteRegistration* registration,
int android_sdk_version, const TfLiteNode* node,
bool is_accelerator_specified, NnapiDelegateVendorPlugin* vendor_plugin,
std::vector<NNAPIValidationFailure>* map_failures) {
OpValidationContext val_ctx{true, map_failures};
if (vendor_plugin) {
if (vendor_plugin->ValidateNode(context, registration, node)) {
return true;
}
}
auto builtin_code = registration->builtin_code;
auto version = registration->version;
switch (builtin_code) {
case kTfLiteBuiltinAdd: {
ExpectMaxOpVersion(version, 2, &val_ctx);
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
if (IsInt32(context->tensors[node->inputs->data[0]].type)) {
Expect(reinterpret_cast<TfLiteAddParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
}
} else {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinArgMax:
case kTfLiteBuiltinArgMin: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const TfLiteType input_type =
          context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat16, kTfLiteFloat32,
kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8);
const auto& axis_tensor = context->tensors[node->inputs->data[1]];
if (axis_tensor.type == kTfLiteInt64) {
Expect(
axis_tensor.allocation_type == kTfLiteMmapRo &&
*axis_tensor.data.i64 <= std::numeric_limits<int32_t>::max() &&
*axis_tensor.data.i64 >= std::numeric_limits<int32_t>::min(),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports axis as int32. If the axis type is int64 and "
"constant we can convert it to int32 if the value isn't too "
"large.",
&val_ctx);
} else {
Expect(axis_tensor.type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Axis should be Int32", &val_ctx);
}
if (builtin_code == kTfLiteBuiltinArgMax) {
auto builtin =
reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
Expect(builtin->output_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI only supports int32 output.", &val_ctx);
} else {
auto builtin =
reinterpret_cast<TfLiteArgMinParams*>(node->builtin_data);
Expect(builtin->output_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI only supports int32 output.", &val_ctx);
}
} break;
case kTfLiteBuiltinMul: {
if (is_accelerator_specified) {
ExpectMaxOpVersion(version, 3, &val_ctx);
} else {
ExpectMaxOpVersion(version, 2, &val_ctx);
}
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
if (IsInt32(context->tensors[node->inputs->data[0]].type)) {
Expect(reinterpret_cast<TfLiteMulParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
}
} else {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinAveragePool2d: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
auto builtin = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
if (IsQuantized(context->tensors[node->inputs->data[0]].type)) {
Expect(is_accelerator_specified ||
(builtin->filter_width * builtin->filter_height <= 256),
NNAPIValidationFailureType::kUnsupportedOperandSize,
"Large filter window would overflow on the reference CPU path",
&val_ctx);
}
} break;
case kTfLiteBuiltinMaxPool2d: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinL2Pool2d: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
auto builtin = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
Expect(builtin->activation == kTfLiteActNone,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Before NNAPI 1.2 fused activation for l2_pool may not be "
"supported.",
&val_ctx);
}
} break;
case kTfLiteBuiltinConv2d: {
ExpectMaxOpVersion(version, 5, &val_ctx);
const auto& input_tensor = context->tensors[node->inputs->data[0]];
const auto& filter_tensor = context->tensors[node->inputs->data[1]];
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid operators not supported before NNAPI 1.2", &val_ctx);
ExpectIsFloatOrUint8Operator(context, node, &val_ctx);
if (filter_tensor.quantization.type == kTfLiteAffineQuantization) {
TfLiteAffineQuantization* quantization_params =
static_cast<TfLiteAffineQuantization*>(
filter_tensor.quantization.params);
Expect(quantization_params->scale->size <= 1,
NNAPIValidationFailureType::kUnsupportedQuantizationType,
"Per-channel quantized convolution not supported before NNAPI "
"1.2.",
&val_ctx);
}
}
const auto input_type = input_tensor.type;
if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
input_type == kTfLiteUInt8) {
ExpectIsRestrictedScalesCompliant(context, node, &val_ctx);
}
auto builtin = reinterpret_cast<TfLiteConvParams*>(node->builtin_data);
Expect(node->inputs->size == 3,
NNAPIValidationFailureType::kMissingRequiredOperand,
"Conv2D with omitted bias not supported", &val_ctx);
if (builtin->dilation_width_factor != 1 ||
builtin->dilation_height_factor != 1) {
Expect(android_sdk_version >= kMinSdkVersionForNNAPI12,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI supports dilated Conv2D since NNAPI 1.2.", &val_ctx);
}
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(input_tensor.dims->data[3] == filter_tensor.dims->data[3],
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Grouped convolution not supported before NNAPI < 1.2",
&val_ctx);
}
} break;
case kTfLiteBuiltinDepthwiseConv2d: {
ExpectMaxOpVersion(version, 3, &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
ExpectIsFloatOrUint8Operator(context, node, &val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
if (input_type == kTfLiteUInt8) {
ExpectIsRestrictedScalesCompliant(context, node, &val_ctx);
}
auto builtin =
reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
Expect(builtin->dilation_width_factor == 1 &&
builtin->dilation_height_factor == 1,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"dilation_width_factor and dilation_height_factor expected to "
"be equal to 1",
&val_ctx);
}
} break;
case kTfLiteBuiltinFullyConnected: {
ExpectMaxOpVersion(version, 5, &val_ctx);
const auto output_type = context->tensors[node->outputs->data[0]].type;
Expect(output_type != kTfLiteInt16,
NNAPIValidationFailureType::kUnsupportedOutputType,
"Unsupported output of type kTfLiteInt16", &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid operators not supported before NNAPI 1.2", &val_ctx);
ExpectIsFloatOrUint8Operator(context, node, &val_ctx);
}
const auto input_type = context->tensors[node->inputs->data[0]].type;
if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
input_type == kTfLiteUInt8) {
ExpectIsRestrictedScalesCompliant(context, node, &val_ctx);
}
auto builtin =
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
if (builtin->keep_num_dims) {
ExpectMinAndroidSdkVersion(android_sdk_version,
kMinSdkVersionForNNAPI13, &val_ctx);
}
} break;
case kTfLiteBuiltinHardSwish: {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinSoftmax: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
const auto& output = context->tensors[node->outputs->data[0]];
ExpectTypeIn(output.type, {kTfLiteFloat32, kTfLiteUInt8, kTfLiteInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output type should be one of kTfLiteFloat32, kTfLiteUInt8, "
"kTfLiteInt8.",
&val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
const int input_rank = input.dims->size;
Expect(input_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank should be <= 4", &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(
input_rank == 2 || input_rank == 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Before API level 29 only 2D and 4D input tensors were supported.",
&val_ctx);
}
} break;
case kTfLiteBuiltinReshape: {
ExpectOpVersion(version, 1, &val_ctx);
if (android_sdk_version < kNNAPIRuntimeFeatureLevel6) {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} else {
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
}
const auto& input = context->tensors[node->inputs->data[0]];
Expect(input.dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank should be <= 4", &val_ctx);
const auto& output = context->tensors[node->outputs->data[0]];
Expect(output.dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Output rank should be <= 4", &val_ctx);
if (node->inputs->size >= 2) {
Expect(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo,
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"The shape input tensor must be constant.", &val_ctx);
}
if (node->inputs->size == 1) {
auto* params =
reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data);
int num_dimensions = params->num_dimensions;
if (num_dimensions == 1 && params->shape[0] == 0) {
num_dimensions = 0;
}
Expect(num_dimensions > 0,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"New shape rank should be > 0", &val_ctx);
}
} break;
case kTfLiteBuiltinResizeBilinear: {
ExpectMaxOpVersion(version, 3, &val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
const auto output_dims = context->tensors[node->outputs->data[0]].dims;
Expect(input.dims->size == 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input should have rank 4", &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
Expect(node->inputs->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected at least 2 inputs", &val_ctx);
if (node->inputs->size >= 2) {
Expect(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo,
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"The size input tensor must be constant.", &val_ctx);
}
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(output_dims->data[1] == output_dims->data[2],
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Require width == height due to driver differences in NNAPI "
"< 1.2",
&val_ctx);
}
auto builtin =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (android_sdk_version <= kMinSdkVersionForNNAPI12) {
Expect(!builtin->align_corners,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support align_corners == true.", &val_ctx);
Expect(!builtin->half_pixel_centers,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support half_pixel_centers == true.", &val_ctx);
}
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(input.type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI 1.0 & 1.1 only supports float input.", &val_ctx);
}
} break;
case kTfLiteBuiltinResizeNearestNeighbor: {
ExpectMaxOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
Expect(node->inputs->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected at least 2 inputs", &val_ctx);
if (node->inputs->size >= 2) {
Expect(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo,
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"The size input tensor must be constant.", &val_ctx);
}
auto builtin = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
node->builtin_data);
if (android_sdk_version <= kMinSdkVersionForNNAPI12) {
Expect(!builtin->align_corners,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support align_corners == true.", &val_ctx);
Expect(!builtin->half_pixel_centers,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support half_pixel_centers == true.", &val_ctx);
}
} break;
case kTfLiteBuiltinSqueeze: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
auto builtin = reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data);
if (android_sdk_version == kMinSdkVersionForNNAPI11) {
Expect(builtin->num_squeeze_dims != 0,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI 1.1 does not support null squeeze_dims properly.",
&val_ctx);
}
} break;
case kTfLiteBuiltinUnidirectionalSequenceLstm: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid version of this op is not supported by NN API.", &val_ctx);
Expect(node->inputs->size == 20 || node->inputs->size == 24,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Supporting only operation with 20 or 24 inputs", &val_ctx);
} break;
case kTfLiteBuiltinL2Normalization: {
ExpectMaxOpVersion(version, 2, &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
ExpectIsFloatOperator(context, node, &val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
Expect(input.dims->size == 4,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected 4 inputs", &val_ctx);
}
auto builtin = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
Expect(builtin->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"Expected no activation", &val_ctx);
} break;
case kTfLiteBuiltinLocalResponseNormalization: {
ExpectOpVersion(version, 1, &val_ctx);
} break;
case kTfLiteBuiltinLshProjection: {
ExpectOpVersion(version, 1, &val_ctx);
if (reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data)
->type == kTfLiteLshProjectionSparse) {
Expect(android_sdk_version >= kMinSdkVersionForNNAPI12,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI does not support sparse projection correctly pre-Q",
&val_ctx);
Expect(node->inputs->size == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
" NNAPI does not support weights for sparse projects.",
&val_ctx);
}
} break;
case kTfLiteBuiltinConcatenation: {
ExpectMaxOpVersion(version, 2, &val_ctx);
Expect(reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
Expect(context->tensors[node->inputs->data[0]].dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank should be less than 4", &val_ctx);
const auto& input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat16, kTfLiteFloat32,
kTfLiteUInt8, kTfLiteInt8);
if (input_type == kTfLiteUInt8 &&
android_sdk_version < kMinSdkVersionForNNAPI12) {
auto first_param = context->tensors[node->inputs->data[0]].params;
for (int i = 1; i < node->inputs->size; i++) {
auto curr_param = context->tensors[node->inputs->data[i]].params;
if (!Expect(curr_param.scale == first_param.scale &&
curr_param.zero_point == first_param.zero_point,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI 1.0-1 only supported concatenating quantized "
"tensor of the same scale and offset.",
&val_ctx)) {
break;
}
}
}
} break;
case kTfLiteBuiltinDequantize: {
if (android_sdk_version >= kMinSdkVersionForNNAPI13 &&
context->tensors[node->inputs->data[0]].type == kTfLiteFloat16 &&
context->tensors[node->inputs->data[0]].allocation_type !=
kTfLiteMmapRo) {
return true;
}
Expect(version == 1 || version == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVersion,
"Supported op versions are 1 and 2 only", &val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteUInt8);
} else {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteUInt8, kTfLiteInt8);
if (android_sdk_version == kMinSdkVersionForNNAPI12 &&
input.type == kTfLiteInt8) {
const auto zero_point = input.params.zero_point;
Expect(zero_point == 0,
NNAPIValidationFailureType::kUnsupportedInputType,
"NN API supports int8 type since version 1.2 but only for "
"symmetric quantization.",
&val_ctx);
}
}
} break;
case kTfLiteBuiltinDensify: {
if (android_sdk_version >= kMinSdkVersionForNNAPI13 &&
context->tensors[node->inputs->data[0]].allocation_type ==
kTfLiteMmapRo) {
return true;
}
return false;
} break;
case kTfLiteBuiltinFloor: {
ExpectOpVersion(version, 1, &val_ctx);
} break;
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinReluN1To1:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinLogistic: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinTanh: {
ExpectMaxOpVersion(version, 2, &val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
Expect(IsFloat(input_type) ||
(IsQuantized(input_type) &&
android_sdk_version >= kMinSdkVersionForNNAPI12),
NNAPIValidationFailureType::kUnsupportedInputType,
" NNAPI only support float tanh.", &val_ctx);
} break;
case kTfLiteBuiltinSub: {
ExpectMaxOpVersion(version, 3, &val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
Expect((android_sdk_version >= kMinSdkVersionForNNAPI11 &&
IsFloat(input_type)) ||
(android_sdk_version >= kMinSdkVersionForNNAPI12 &&
IsQuantized(input_type)) ||
(android_sdk_version >= kMinSdkVersionForNNAPI13 &&
IsInt32(input_type)),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float sub.", &val_ctx);
if (IsInt32(input_type)) {
Expect(reinterpret_cast<TfLiteSubParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinDiv: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float div.", &val_ctx);
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
const TfLiteIntArrayView input_shape(
context->tensors[node->inputs->data[0]].dims);
Expect(!HasZeroes(input_shape),
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NN API pad ops do not support input tensors with no elements",
&val_ctx);
Expect(node->inputs->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expecting at least 2 inputs", &val_ctx);
if (node->inputs->size == 3) {
Expect(
android_sdk_version >= kMinSdkVersionForNNAPI12,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Specification of the padding value is supported from NNAPI 1.2.",
&val_ctx);
} else {
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Only Float32 inputs are supported before NNAPI 1.2",
&val_ctx);
}
}
} break;
case kTfLiteBuiltinUnidirectionalSequenceRnn: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid version of this op is not supported by NN API.", &val_ctx);
} break;
case kTfLiteBuiltinSpaceToBatchNd: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
} break;
case kTfLiteBuiltinBatchToSpaceNd: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
auto crops = context->tensors[node->inputs->data[2]];
auto crops_data = crops.data.i32;
Expect(crops_data && crops.bytes == 16 && crops_data[0] == 0 &&
crops_data[1] == 0 && crops_data[2] == 0 && crops_data[3] == 0,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"All crops should be 0.", &val_ctx);
} break;
case kTfLiteBuiltinStridedSlice: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
} break;
case kTfLiteBuiltinTranspose: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
Expect((node->inputs->size > 1) &&
(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Dynamically-sized tensors not supported.", &val_ctx);
} break;
case kTfLiteBuiltinAbs:
case kTfLiteBuiltinExp:
case kTfLiteBuiltinLog:
case kTfLiteBuiltinPow: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinRsqrt: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
if (android_sdk_version < kNNAPIRuntimeFeatureLevel7) {
ExpectIsFloatOperator(context, node, &val_ctx);
} else {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
}
} break;
case kTfLiteBuiltinSlice: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
const auto begin_type = context->tensors[node->inputs->data[1]].type;
const auto size_type = context->tensors[node->inputs->data[2]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
Expect(begin_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Begin type should be Int32", &val_ctx);
Expect(size_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Size type should be Int32", &val_ctx);
} break;
case kTfLiteBuiltinCos:
case kTfLiteBuiltinSin: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinTransposeConv: {
ExpectMaxOpVersion(version, 4, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect((node->inputs->size > 1) &&
(context->tensors[node->inputs->data[0]].allocation_type ==
kTfLiteMmapRo) &&
(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Dynamically-sized tensors not supported.", &val_ctx);
} break;
case kTfLiteBuiltinSqrt: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinRnn: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(node->inputs->size == 5,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected 5 input", &val_ctx);
if (node->inputs->size >= 2) {
Expect(
            context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float32 weights.", &val_ctx);
}
} break;
case kTfLiteBuiltinSpaceToDepth: {
ExpectMaxOpVersion(version, 2, &val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
} break;
case kTfLiteBuiltinSvdf: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(node->inputs->size == 5,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Expected input of rank 5", &val_ctx);
if (node->inputs->size >= 2) {
Expect(
            context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float32 weights.", &val_ctx);
}
Expect(android_sdk_version >= kMinSdkVersionForNNAPI11,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"SVDF does not support rank > 1 on NNAPI 1.0.", &val_ctx);
      Expect(context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Weights should be Float32", &val_ctx);
} break;
case kTfLiteBuiltinLstm: {
ExpectMaxOpVersion(version, 3, &val_ctx);
Expect(
android_sdk_version >= kMinSdkVersionForNNAPI11,
NNAPIValidationFailureType::kUnsupportedAndroidVersion,
"NNAPI 1.0 has a bug for optional tensors which would affect LSTM.",
&val_ctx);
Expect(android_sdk_version >= kMinSdkVersionForNNAPI12 ||
!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid operators not supported before NNAPI 1.2.", &val_ctx);
      const auto weight_input_index = isLstmBasicKernel(node) ? 2 : 4;
const TfLiteType weight_type =
context->tensors[node->inputs->data[weight_input_index]].type;
if (isLstmBasicKernel(node)) {
Expect(weight_type == kTfLiteUInt8,
NNAPIValidationFailureType::kUnsupportedInputType,
"Basic LSTM Kernels support only UINT8 weights", &val_ctx);
const auto input_quantization_params =
context->tensors[node->inputs->data[0]].params;
Expect(input_quantization_params.scale == 1. / 128. &&
input_quantization_params.zero_point == 128,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Invalid input quantization", &val_ctx);
const auto output_quantization_params =
context->tensors[node->outputs->data[0]].params;
Expect(output_quantization_params.scale == 1. / 128. &&
output_quantization_params.zero_point == 128,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Invalid output quantization", &val_ctx);
const auto cell_state_quantization_params =
context->tensors[node->outputs->data[1]].params;
Expect(cell_state_quantization_params.scale == 16. / 32768. ||
cell_state_quantization_params.zero_point == 0,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Invalid cell state quantization", &val_ctx);
auto is_const_tensor = [&node, &context](int tensor_idx) {
return context->tensors[node->inputs->data[tensor_idx]]
.allocation_type == kTfLiteMmapRo;
};
        Expect(is_const_tensor(2),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Weights tensor should be constant", &val_ctx);
        Expect(is_const_tensor(3),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Biases tensor should be constant", &val_ctx);
return val_ctx.is_valid;
} else {
if (node->inputs->size == 24) {
ExpectMinAndroidSdkVersion(android_sdk_version,
kMinSdkVersionForNNAPI12, &val_ctx);
}
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
Expect(weight_type == kTfLiteFloat32 || weight_type == kTfLiteUInt8 ||
weight_type == kTfLiteInt8,
NNAPIValidationFailureType::kUnsupportedInputType,
"Weight has to be Float32 or UINT8 or INT8", &val_ctx);
} else {
Expect(weight_type == kTfLiteFloat32 || weight_type == kTfLiteUInt8,
NNAPIValidationFailureType::kUnsupportedInputType,
"Weight has to be Float32 or UINT8", &val_ctx);
}
}
} break;
case kTfLiteBuiltinMean: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
if (android_sdk_version >= kMinSdkVersionForNNAPI12) {
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32 ||
IsQuantized(context->tensors[node->inputs->data[0]].type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Expected Float32 or Quantized input", &val_ctx);
} else {
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Expected Float32 input", &val_ctx);
}
Expect(context->tensors[node->outputs->data[0]].dims->size > 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output for MEAN.",
&val_ctx);
Expect(context->tensors[node->inputs->data[0]].dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support mean of a tensor with rank > 4", &val_ctx);
} break;
case kTfLiteBuiltinEmbeddingLookup: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float32 values.", &val_ctx);
} break;
case kTfLiteBuiltinHashtableLookup: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(context->tensors[node->outputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI only support float32 output.", &val_ctx);
} break;
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum: {
ExpectMaxOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
const TfLiteTensor& operand0 = context->tensors[node->inputs->data[0]];
if (operand0.dims->size == 0) {
Expect(operand0.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"Scalar operand should be constant", &val_ctx);
}
const TfLiteTensor& operand1 = context->tensors[node->inputs->data[1]];
if (operand1.dims->size == 0) {
Expect(operand1.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"Scalar operand should be constant", &val_ctx);
}
} break;
case kTfLiteBuiltinCast: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
const TfLiteType output_type =
context->tensors[node->outputs->data[0]].type;
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
ExpectTypeIn(
output_type,
{kTfLiteFloat32, kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output type should be one of kTfLiteFloat32, kTfLiteInt32, "
"kTfLiteUInt8, kTfLiteInt8.",
&val_ctx);
} else {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8);
ExpectTypeIn(
output_type, {kTfLiteFloat32, kTfLiteInt32, kTfLiteUInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output type should be one of kTfLiteFloat32, kTfLiteInt32, "
"kTfLiteUInt8.",
&val_ctx);
}
} break;
case kTfLiteBuiltinLeakyRelu:
case kTfLiteBuiltinPrelu: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
} break;
case kTfLiteBuiltinTile: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt8,
kTfLiteUInt8, kTfLiteInt32);
const auto multipliers_type =
context->tensors[node->inputs->data[1]].type;
Expect(multipliers_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Multipliers should be Int32", &val_ctx);
} break;
case kTfLiteBuiltinLogicalOr:
case kTfLiteBuiltinLogicalAnd:
case kTfLiteBuiltinLogicalNot: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteBool,
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be bool", &val_ctx);
} break;
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinNotEqual: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteBool, kTfLiteInt32);
} break;
case kTfLiteBuiltinNeg: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32);
} break;
case kTfLiteBuiltinTopkV2: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto& input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
const auto& k_param = context->tensors[node->inputs->data[1]];
Expect(k_param.type == kTfLiteInt32 &&
k_param.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"K param should be a constant of type Int32", &val_ctx);
} break;
case kTfLiteBuiltinSelect: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto value_type = context->tensors[node->inputs->data[1]].type;
EXPECT_INPUT_TYPE_IN(value_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
TfLiteIntArray* condition_shape =
context->tensors[node->inputs->data[0]].dims;
TfLiteIntArray* input_shape =
context->tensors[node->inputs->data[1]].dims;
Expect(TfLiteIntArrayEqual(condition_shape, input_shape),
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Condition and inputs tensors should have the same shape",
&val_ctx);
} break;
case kTfLiteBuiltinGather: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
const auto& positions = context->tensors[node->inputs->data[1]];
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteFloat16,
kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8);
Expect(positions.type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Positions type should be one of kTfLiteInt32", &val_ctx);
Expect(positions.dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"0-dimension args are not supported by NNAPI.", &val_ctx);
} break;
case kTfLiteBuiltinBidirectionalSequenceLstm: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid version of this op is not supported by NN API.", &val_ctx);
} break;
case kTfLiteBuiltinExpandDims: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteFloat16,
kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8);
const auto axis = context->tensors[node->inputs->data[1]];
Expect(axis.type == kTfLiteInt32 && axis.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 axis tensor.", &val_ctx);
} break;
case kTfLiteBuiltinSplit: {
ExpectOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const TfLiteTensor& input = context->tensors[node->inputs->data[1]];
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
} else {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt32);
}
const TfLiteTensor& axis = context->tensors[node->inputs->data[0]];
Expect(axis.type == kTfLiteInt32 && axis.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 axis tensor.", &val_ctx);
} break;
case kTfLiteBuiltinSplitV: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const TfLiteTensor& input = context->tensors[node->inputs->data[0]];
const TfLiteTensor& size_splits = context->tensors[node->inputs->data[1]];
const TfLiteTensor& axis = context->tensors[node->inputs->data[2]];
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
bool size_splits_is_int32_const_vector =
size_splits.type == kTfLiteInt32 && size_splits.dims->size == 1 &&
size_splits.allocation_type == kTfLiteMmapRo;
bool axis_is_int32_const =
axis.type == kTfLiteInt32 && axis.allocation_type == kTfLiteMmapRo;
Expect(size_splits_is_int32_const_vector,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 size_splits vector.",
&val_ctx);
Expect(axis_is_int32_const,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 axis tensor.", &val_ctx);
if (size_splits_is_int32_const_vector && axis_is_int32_const) {
Expect(std::all_of(size_splits.data.i32,
size_splits.data.i32 + size_splits.dims->data[0],
[](auto size) { return size != 0; }),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports non-zero split sizes.", &val_ctx);
Expect(ComputeSplitVUnknownSplitSize(context, node) != 0,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports non-zero split sizes.", &val_ctx);
}
} break;
case kTfLiteBuiltinLogSoftmax: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float32.", &val_ctx);
} break;
case kTfLiteBuiltinQuantize: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto value_type = context->tensors[node->inputs->data[0]].type;
Expect(value_type == kTfLiteFloat32 || IsQuantized(value_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Value should be quantized or Float32.", &val_ctx);
if (IsQuantized(value_type)) {
const auto quantization_params =
context->tensors[node->inputs->data[0]].params;
Expect(quantization_params.scale > 0.f,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Quantization scale should be > 0.", &val_ctx);
}
const auto output_type = context->tensors[node->outputs->data[0]].type;
if (android_sdk_version < kMinSdkVersionForNNAPI13) {
Expect(output_type == kTfLiteUInt8,
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output should be kTfLiteUInt8.", &val_ctx);
} else {
ExpectTypeIn(output_type, {kTfLiteUInt8, kTfLiteInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output should be kTfLiteUInt8.", &val_ctx);
}
const auto quantization_params =
context->tensors[node->outputs->data[0]].params;
Expect(quantization_params.scale > 0.f,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Quantization scale should be > 0.", &val_ctx);
} break;
case kTfLiteBuiltinReduceAny: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(context->tensors[node->outputs->data[0]].dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output.", &val_ctx);
} break;
case kTfLiteBuiltinReduceMin:
case kTfLiteBuiltinReduceMax: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_tensor = context->tensors[node->inputs->data[0]];
const auto input_type = input_tensor.type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
Expect(input_tensor.dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output.", &val_ctx);
} break;
case kTfLiteBuiltinDepthToSpace: {
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
} break;
case kTfLiteBuiltinReduceProd:
case kTfLiteBuiltinSum: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(context->tensors[node->outputs->data[0]].dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output", &val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports floating point input.", &val_ctx);
} break;
case kTfLiteBuiltinElu: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports floating point input.", &val_ctx);
} break;
case kTfLiteBuiltinFill: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto& dims_tensor = context->tensors[node->inputs->data[0]];
Expect(IsConstantTensor(&dims_tensor),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI doesn't support dynamic dimensions tensor.", &val_ctx);
EXPECT_INPUT_TYPE_IN(dims_tensor.type, kTfLiteInt32, kTfLiteInt64);
if (IsConstantTensor(&dims_tensor)) {
Expect(dims_tensor.dims->data[0] != 0,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI doesn't support generating scalars from FILL", &val_ctx);
if (dims_tensor.type == kTfLiteInt64) {
bool fit_in_int32 =
std::all_of(dims_tensor.data.i64,
dims_tensor.data.i64 + dims_tensor.dims->data[0],
[](int64_t dim) {
return std::numeric_limits<int32_t>::min() <= dim &&
dim <= std::numeric_limits<int32_t>::max();
});
Expect(fit_in_int32,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI only supports int32 dimensions tensor. If the "
"dimensions type is int64 and they are constant we can "
"convert them to int32 if the value isn't too large.",
&val_ctx);
}
}
const auto& value_tensor = context->tensors[node->inputs->data[1]];
EXPECT_INPUT_TYPE_IN(value_tensor.type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteInt64);
if (value_tensor.type == kTfLiteInt64 &&
IsConstantTensor(&value_tensor)) {
Expect(
*value_tensor.data.i64 <= std::numeric_limits<int32_t>::max() &&
*value_tensor.data.i64 >= std::numeric_limits<int32_t>::min(),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports int32 input. If the input type is int64 and "
"constant we can convert it to int32 if the value isn't too "
"large.",
&val_ctx);
}
} break;
case kTfLiteBuiltinPack: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
if (android_sdk_version >= kNNAPIRuntimeFeatureLevel6) {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteInt32, kTfLiteFloat32,
kTfLiteInt8, kTfLiteUInt8);
} else {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt8);
auto builtin = reinterpret_cast<TfLitePackParams*>(node->builtin_data);
Expect(builtin->axis != -1 &&
builtin->axis !=
context->tensors[node->inputs->data[0]].dims->size,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support axis being the last dimension",
&val_ctx);
}
} break;
case kTfLiteBuiltinUnpack: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
Expect(context->tensors[node->inputs->data[0]].dims->size > 1,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support unpacking a rank-1 tensor", &val_ctx);
Expect(context->tensors[node->inputs->data[0]].dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support unpacking a tensor with rank > 4",
&val_ctx);
const auto* builtin =
reinterpret_cast<const TfLiteUnpackParams*>(node->builtin_data);
Expect(builtin->axis != -1 &&
builtin->axis !=
context->tensors[node->inputs->data[0]].dims->size - 1,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support axis being the last dimension", &val_ctx);
} break;
case kTfLiteBuiltinSquaredDifference: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
const auto input0_type = context->tensors[node->inputs->data[0]].type;
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
EXPECT_INPUT_TYPE_IN(input0_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
} else if (android_sdk_version >= kMinSdkVersionForNNAPI12) {
EXPECT_INPUT_TYPE_IN(input0_type, kTfLiteFloat32, kTfLiteUInt8);
} else {
EXPECT_INPUT_TYPE_IN(input0_type, kTfLiteFloat32);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"NNAPI does not support input rank greater than 4", &val_ctx);
} break;
case kTfLiteBuiltinBatchMatmul: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version,
kNNAPIRuntimeFeatureLevel6, &val_ctx);
const auto& input0 = context->tensors[node->inputs->data[0]];
const auto& input1 = context->tensors[node->inputs->data[1]];
EXPECT_INPUT_TYPE_IN(input0.type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteInt8);
Expect(input0.type == input1.type,
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"NNAPI does not support hybrid batch matmul", &val_ctx);
Expect(input0.dims->size <= 4 && input0.dims->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"NNAPI does not support input rank greater than 4 or less than 2",
&val_ctx);
Expect(!IsBroadcastBatchMatMul(context, node),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI does not support broadcast batch matmul", &val_ctx);
} break;
case kTfLiteBuiltinMirrorPad: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version,
kNNAPIRuntimeFeatureLevel7, &val_ctx);
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
Expect(reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data)
->mode != kTfLiteMirrorPaddingUnknown,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Unknown padding mode", &val_ctx);
const TfLiteIntArrayView input_shape(
context->tensors[node->inputs->data[0]].dims);
Expect(!HasZeroes(input_shape),
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NN API pad ops do not support input tensors with no elements",
&val_ctx);
Expect(node->inputs->size == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expecting 2 inputs", &val_ctx);
} break;
case kTfLiteBuiltinReverseV2: {
ExpectMaxOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version,
kNNAPIRuntimeFeatureLevel7, &val_ctx);
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
Expect(node->inputs->size == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expecting 2 inputs", &val_ctx);
} break;
default:
AddValidationFailure(NNAPIValidationFailureType::kUnsupportedOperator,
"Unsupported operation type.", &val_ctx);
}
return val_ctx.is_valid;
}
TfLiteStatus NNAPIDelegateKernel::Map(
TfLiteContext* context, int builtin_code, int version,
int android_sdk_version, const NNAPIOpMappingArgs& mapping_args,
ANeuralNetworksOperationType* nn_op_type,
NnapiDelegateVendorPlugin* vendor_plugin) {
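  // Helper that synthesizes an all-zero bias operand when the TFLite node
  // does not provide one: a float32 bias for float inputs, or an int32 bias
  // (with scale = input_scale * filter_scale) for quantized inputs.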
auto add_zero_bias = [mapping_args](int input_id, int filter_id,
int num_elements) -> void {
int bias_index = -1;
mapping_args.context->AddTensors(mapping_args.context, 1, &bias_index);
TfLiteTensor* bias_tensor = &mapping_args.context->tensors[bias_index];
const auto input_type = mapping_args.context->tensors[input_id].type;
if (input_type == kTfLiteFloat32) {
bias_tensor->type = kTfLiteFloat32;
} else {
bias_tensor->type = kTfLiteInt32;
}
TfLiteIntArray* bias_shape = TfLiteIntArrayCreate(1);
bias_shape->data[0] = num_elements;
bias_tensor->allocation_type = kTfLiteDynamic;
mapping_args.context->ResizeTensor(mapping_args.context, bias_tensor,
bias_shape);
if (input_type == kTfLiteFloat32) {
memset(bias_tensor->data.f, 0, num_elements * sizeof(float));
mapping_args.builder->AddVectorFloat32Operand(bias_tensor->data.f,
num_elements);
} else {
memset(bias_tensor->data.i32, 0, num_elements * sizeof(int));
const TfLiteTensor& input_tensor =
mapping_args.context->tensors[input_id];
const TfLiteTensor& filter_tensor =
mapping_args.context->tensors[filter_id];
bias_tensor->params.scale =
input_tensor.params.scale * filter_tensor.params.scale;
mapping_args.builder->AddVectorInt32Operand(
bias_tensor->data.i32, num_elements, bias_tensor->params.scale,
0);
}
};
switch (builtin_code) {
case kTfLiteBuiltinAdd: {
auto builtin =
reinterpret_cast<TfLiteAddParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_ADD;
} break;
case kTfLiteBuiltinArgMax: {
*nn_op_type = ANEURALNETWORKS_ARGMAX;
} break;
case kTfLiteBuiltinArgMin: {
*nn_op_type = ANEURALNETWORKS_ARGMIN;
} break;
case kTfLiteBuiltinMul: {
auto builtin =
reinterpret_cast<TfLiteMulParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_MUL;
} break;
case kTfLiteBuiltinAveragePool2d: {
mapping_args.builder->AddPoolingParams(mapping_args.node->builtin_data);
*nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D;
} break;
case kTfLiteBuiltinMaxPool2d: {
mapping_args.builder->AddPoolingParams(mapping_args.node->builtin_data);
*nn_op_type = ANEURALNETWORKS_MAX_POOL_2D;
} break;
case kTfLiteBuiltinL2Pool2d: {
mapping_args.builder->AddPoolingParams(mapping_args.node->builtin_data);
*nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
} break;
case kTfLiteBuiltinConv2d: {
auto builtin =
reinterpret_cast<TfLiteConvParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->padding);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
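      // A convolution is treated as grouped when the input channel count does
      // not match the filter depth; NNAPI then needs the group count operand.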
      const int input_id = mapping_args.node->inputs->data[0];
      const int filter_id = mapping_args.node->inputs->data[1];
const auto& input_tensor = context->tensors[input_id];
const auto& filter_tensor = context->tensors[filter_id];
auto is_grouped_conv = false;
if (input_tensor.dims->size != 0 && filter_tensor.dims->size != 0) {
is_grouped_conv =
input_tensor.dims->data[3] != filter_tensor.dims->data[3];
}
if (is_grouped_conv) {
mapping_args.builder->AddScalarInt32Operand(
input_tensor.dims->data[3] / filter_tensor.dims->data[3]);
}
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
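      // Non-unit dilation uses the extended NNAPI signature: a layout flag
      // (false here) followed by the width and height dilation factors.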
if (builtin->dilation_width_factor != 1 ||
builtin->dilation_height_factor != 1) {
mapping_args.builder->AddScalarBoolOperand(false);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_width_factor);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_height_factor);
}
if (is_grouped_conv) {
*nn_op_type = ANEURALNETWORKS_GROUPED_CONV_2D;
} else {
*nn_op_type = ANEURALNETWORKS_CONV_2D;
}
} break;
case kTfLiteBuiltinDepthwiseConv2d: {
auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->padding);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
mapping_args.builder->AddScalarInt32Operand(builtin->depth_multiplier);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
if (builtin->dilation_width_factor != 1 ||
builtin->dilation_height_factor != 1) {
mapping_args.builder->AddScalarBoolOperand(false);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_width_factor);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_height_factor);
}
*nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D;
} break;
case kTfLiteBuiltinFullyConnected: {
const bool is_bias_present =
mapping_args.node->inputs->size == 3 &&
mapping_args.node->inputs->data[2] != kTfLiteOptionalTensor;
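      // NNAPI requires a bias operand for FULLY_CONNECTED, so a zero bias is
      // synthesized when the TFLite node omits it.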
if (!is_bias_present) {
        const int input_tensor_id = mapping_args.node->inputs->data[0];
        const int filter_tensor_id = mapping_args.node->inputs->data[1];
const int num_units =
mapping_args.context->tensors[filter_tensor_id].dims->data[0];
add_zero_bias(input_tensor_id, filter_tensor_id, num_units);
}
auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED;
} break;
case kTfLiteBuiltinHardSwish: {
*nn_op_type = ANEURALNETWORKS_HARD_SWISH;
} break;
case kTfLiteBuiltinSoftmax: {
auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
*nn_op_type = ANEURALNETWORKS_SOFTMAX;
} break;
case kTfLiteBuiltinReshape: {
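      // For the single-input variant the target shape only lives in the
      // builtin params, so it is emitted here as a constant int32 vector.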
if (mapping_args.node->inputs->size == 1) {
auto* params = reinterpret_cast<TfLiteReshapeParams*>(
mapping_args.node->builtin_data);
int num_dimensions = params->num_dimensions;
std::vector<int32_t> output_shape(num_dimensions);
for (int i = 0; i < num_dimensions; ++i) {
output_shape[i] = params->shape[i];
}
mapping_args.builder->AddVectorInt32Operand(
output_shape.data(), static_cast<uint32_t>(num_dimensions));
}
*nn_op_type = ANEURALNETWORKS_RESHAPE;
} break;
case kTfLiteBuiltinResizeBilinear: {
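      // NNAPI takes the output width and height as scalar operands; they are
      // read from the already-resolved output tensor shape.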
const int output_id = mapping_args.node->outputs->data[0];
auto& output = mapping_args.context->tensors[output_id];
const int output_height = output.dims->data[1];
const int output_width = output.dims->data[2];
mapping_args.builder->AddScalarInt32Operand(output_width);
mapping_args.builder->AddScalarInt32Operand(output_height);
auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(
mapping_args.node->builtin_data);
if (builtin->align_corners == true ||
builtin->half_pixel_centers == true) {
mapping_args.builder->AddScalarBoolOperand(false);
mapping_args.builder->AddScalarBoolOperand(builtin->align_corners);
mapping_args.builder->AddScalarBoolOperand(builtin->half_pixel_centers);
}
*nn_op_type = ANEURALNETWORKS_RESIZE_BILINEAR;
} break;
case kTfLiteBuiltinResizeNearestNeighbor: {
const TfLiteTensor& new_shape =
mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
mapping_args.builder->AddScalarInt32Operand(new_shape.data.i32[1]);
mapping_args.builder->AddScalarInt32Operand(new_shape.data.i32[0]);
mapping_args.builder->AddScalarBoolOperand(false);
auto builtin = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
mapping_args.node->builtin_data);
if (builtin->align_corners == true ||
builtin->half_pixel_centers == true) {
mapping_args.builder->AddScalarBoolOperand(builtin->align_corners);
mapping_args.builder->AddScalarBoolOperand(builtin->half_pixel_centers);
}
*nn_op_type = ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR;
} break;
case kTfLiteBuiltinSqueeze: {
auto builtin = reinterpret_cast<TfLiteSqueezeParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddVectorInt32Operand(
builtin->num_squeeze_dims ? builtin->squeeze_dims : nullptr,
static_cast<uint32_t>(builtin->num_squeeze_dims));
*nn_op_type = ANEURALNETWORKS_SQUEEZE;
} break;
case kTfLiteBuiltinUnidirectionalSequenceLstm: {
auto builtin = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
mapping_args.builder->AddScalarBoolOperand(builtin->time_major);
const bool hybrid_op = IsHybridOperator(
mapping_args.context, kTfLiteBuiltinUnidirectionalSequenceLstm,
mapping_args.node);
if (mapping_args.node->inputs->size == 24) {
for (int i = 20; i < 24; ++i) {
const int input_index = mapping_args.node->inputs->data[i];
if (input_index != kTfLiteOptionalTensor) {
mapping_args.builder->AddTensorInput(input_index, hybrid_op);
} else {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
}
} else {
for (int i = 0; i < 4; ++i) {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
}
*nn_op_type = ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM;
} break;
case kTfLiteBuiltinL2Normalization: {
*nn_op_type = ANEURALNETWORKS_L2_NORMALIZATION;
} break;
case kTfLiteBuiltinLocalResponseNormalization: {
auto builtin = reinterpret_cast<TfLiteLocalResponseNormParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->radius);
mapping_args.builder->AddScalarFloat32Operand(builtin->bias);
mapping_args.builder->AddScalarFloat32Operand(builtin->alpha);
mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
*nn_op_type = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION;
} break;
case kTfLiteBuiltinLshProjection: {
auto builtin = reinterpret_cast<TfLiteLSHProjectionParams*>(
mapping_args.node->builtin_data);
int type = builtin->type;
const int kNNAPILshProjectionSparse = 3;
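      // TFLite's sparse LSH projection maps to a different enum value in
      // NNAPI and takes an extra placeholder weight operand.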
if (builtin->type == kTfLiteLshProjectionSparse) {
type = kNNAPILshProjectionSparse;
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
mapping_args.builder->AddScalarInt32Operand(type);
*nn_op_type = ANEURALNETWORKS_LSH_PROJECTION;
} break;
case kTfLiteBuiltinConcatenation: {
auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(
mapping_args.node->builtin_data);
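      // Negative axis values are normalized against the rank of the first
      // input before being passed to NNAPI.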
int axis = builtin->axis < 0
? mapping_args.context
->tensors[mapping_args.node->inputs->data[0]]
.dims->size +
builtin->axis
: builtin->axis;
mapping_args.builder->AddScalarInt32Operand(axis);
*nn_op_type = ANEURALNETWORKS_CONCATENATION;
} break;
case kTfLiteBuiltinDequantize: {
*nn_op_type = ANEURALNETWORKS_DEQUANTIZE;
} break;
case kTfLiteBuiltinFloor: {
*nn_op_type = ANEURALNETWORKS_FLOOR;
} break;
case kTfLiteBuiltinRelu: {
*nn_op_type = ANEURALNETWORKS_RELU;
} break;
case kTfLiteBuiltinReluN1To1: {
*nn_op_type = ANEURALNETWORKS_RELU1;
} break;
case kTfLiteBuiltinRelu6: {
*nn_op_type = ANEURALNETWORKS_RELU6;
} break;
case kTfLiteBuiltinLogistic: {
*nn_op_type = ANEURALNETWORKS_LOGISTIC;
} break;
case kTfLiteBuiltinTanh: {
*nn_op_type = ANEURALNETWORKS_TANH;
} break;
case kTfLiteBuiltinSub: {
auto builtin =
reinterpret_cast<TfLiteSubParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_SUB;
} break;
case kTfLiteBuiltinDiv: {
auto builtin =
reinterpret_cast<TfLiteDivParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_DIV;
} break;
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2: {
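      // PAD_V2 is only needed when an explicit constant pad value is given;
      // the two-input form and an optional third input both map to plain PAD.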
if (mapping_args.node->inputs->size == 2) {
*nn_op_type = ANEURALNETWORKS_PAD;
} else {
const int constant_value_id = mapping_args.node->inputs->data[2];
if (constant_value_id == kTfLiteOptionalTensor) {
*nn_op_type = ANEURALNETWORKS_PAD;
} else {
*nn_op_type = ANEURALNETWORKS_PAD_V2;
}
}
} break;
case kTfLiteBuiltinUnidirectionalSequenceRnn: {
auto builtin = reinterpret_cast<TfLiteSequenceRNNParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarInt32Operand(builtin->time_major);
*nn_op_type = ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN;
} break;
case kTfLiteBuiltinSpaceToBatchNd: {
*nn_op_type = ANEURALNETWORKS_SPACE_TO_BATCH_ND;
} break;
case kTfLiteBuiltinBatchToSpaceNd: {
*nn_op_type = ANEURALNETWORKS_BATCH_TO_SPACE_ND;
} break;
case kTfLiteBuiltinStridedSlice: {
auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->begin_mask);
mapping_args.builder->AddScalarInt32Operand(builtin->end_mask);
mapping_args.builder->AddScalarInt32Operand(builtin->shrink_axis_mask);
*nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
} break;
case kTfLiteBuiltinTranspose: {
*nn_op_type = ANEURALNETWORKS_TRANSPOSE;
} break;
case kTfLiteBuiltinAbs: {
*nn_op_type = ANEURALNETWORKS_ABS;
} break;
case kTfLiteBuiltinExp: {
*nn_op_type = ANEURALNETWORKS_EXP;
} break;
case kTfLiteBuiltinLog: {
*nn_op_type = ANEURALNETWORKS_LOG;
} break;
case kTfLiteBuiltinRsqrt: {
*nn_op_type = ANEURALNETWORKS_RSQRT;
} break;
case kTfLiteBuiltinPow: {
*nn_op_type = ANEURALNETWORKS_POW;
} break;
case kTfLiteBuiltinSlice: {
*nn_op_type = ANEURALNETWORKS_SLICE;
} break;
case kTfLiteBuiltinSin: {
*nn_op_type = ANEURALNETWORKS_SIN;
} break;
case kTfLiteBuiltinTransposeConv: {
int input_tensor_flags = 0;
      const int input_tensor_id = mapping_args.node->inputs->data[2];
      const int weight_tensor_id = mapping_args.node->inputs->data[1];
const bool hybrid_op = false;
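      // Quantized int8 inputs are passed as asymmetric signed on NNAPI 1.3+
      // and go through the int8 conversion path on older versions.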
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
mapping_args.builder->AddTensorInput(
input_tensor_id, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED);
} else {
mapping_args.builder->AddTensorInput(
input_tensor_id, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_INT8_CONVERSION);
}
mapping_args.builder->AddTensorInput(
weight_tensor_id, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_FORCE_PER_CHANNEL);
      const bool is_bias_present =
          mapping_args.node->inputs->size == 4 &&
          mapping_args.node->inputs->data[3] != kTfLiteOptionalTensor;
      if (is_bias_present) {
        mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[3], hybrid_op);
} else {
        const TfLiteTensor& output_shape =
            mapping_args.context->tensors[mapping_args.node->inputs->data[0]];
const int output_depth = output_shape.data.i32[3];
add_zero_bias(input_tensor_id, weight_tensor_id, output_depth);
}
      mapping_args.builder->AddTensorInput(
          mapping_args.node->inputs->data[0], hybrid_op);
auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->padding);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarBoolOperand(false);
*nn_op_type = ANEURALNETWORKS_TRANSPOSE_CONV;
} break;
case kTfLiteBuiltinSqrt: {
*nn_op_type = ANEURALNETWORKS_SQRT;
} break;
case kTfLiteBuiltinRnn: {
int ann_index;
      mapping_args.builder->AddStateFloat32Tensor(
          mapping_args.node->inputs->data[4], &ann_index);
      mapping_args.model_state_outputs->push_back(ann_index);
      mapping_args.model_state_tfl_inputs->push_back(
          mapping_args.node->inputs->data[4]);
auto builtin =
reinterpret_cast<TfLiteRNNParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_RNN;
} break;
case kTfLiteBuiltinSpaceToDepth: {
auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->block_size);
*nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH;
} break;
case kTfLiteBuiltinSvdf: {
int ann_index;
      mapping_args.builder->AddStateFloat32Tensor(
          mapping_args.node->inputs->data[4], &ann_index);
      mapping_args.model_state_outputs->push_back(ann_index);
      mapping_args.model_state_tfl_inputs->push_back(
          mapping_args.node->inputs->data[4]);
auto builtin =
reinterpret_cast<TfLiteSVDFParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->rank);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_SVDF;
} break;
case kTfLiteBuiltinLstm: {
if (isLstmBasicKernel(mapping_args.node)) {
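        // The quantized 16-bit LSTM path splits the combined weight and bias
        // tensors into the per-gate constant operands NNAPI expects.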
const auto output_dims =
mapping_args.context->tensors[mapping_args.node->outputs->data[1]]
.dims;
        mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[0], false, false);
        const auto weight_tensor =
            mapping_args.context->tensors[mapping_args.node->inputs->data[2]];
std::vector<uint8_t> recurrent_to_input;
std::vector<uint8_t> input_to_input;
std::vector<uint8_t> recurrent_to_cell;
std::vector<uint8_t> input_to_cell;
std::vector<uint8_t> recurrent_to_forget;
std::vector<uint8_t> input_to_forget;
std::vector<uint8_t> recurrent_to_output;
std::vector<uint8_t> input_to_output;
tflite::delegate::nnapi::DecomposeQuantLstmWeightsTensor(
weight_tensor.data.uint8, weight_tensor.dims, &recurrent_to_input,
&input_to_input, &recurrent_to_cell, &input_to_cell,
&recurrent_to_forget, &input_to_forget, &recurrent_to_output,
&input_to_output);
TfLiteIntArray* recurrent_weight_dims = TfLiteIntArrayCreate(2);
TfLiteIntArray* input_weight_dims = TfLiteIntArrayCreate(2);
tflite::delegate::nnapi::SetWeightSubmatrixDims(
weight_tensor.dims, recurrent_weight_dims, input_weight_dims);
int new_tensor_index = -1;
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_input, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_forget, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_cell, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_output, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_input, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_forget, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_cell, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_output, weight_tensor.params,
&new_tensor_index);
TfLiteIntArrayFree(input_weight_dims);
TfLiteIntArrayFree(recurrent_weight_dims);
const auto bias_size = output_dims->data[1];
        const TfLiteTensor& biases_tensor =
            mapping_args.context->tensors[mapping_args.node->inputs->data[3]];
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
delegate::nnapi::DecomposeBiasTensor(biases_tensor.data.i32, bias_size,
&input_bias, &cell_bias,
&forget_bias, &output_bias);
int input_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor<int32_t>(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size}, input_bias,
biases_tensor.params, &input_bias_tensor);
int forget_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
forget_bias, biases_tensor.params, &forget_bias_tensor);
int cell_gate_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size}, cell_bias,
biases_tensor.params, &cell_gate_bias_tensor);
int output_gate_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
output_bias, biases_tensor.params, &output_gate_bias_tensor);
        mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[4], false, false);
        mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[1], false, false);
        mapping_args.feedback_loops->push_back(std::make_tuple(
            mapping_args.node->outputs->data[0],
            mapping_args.node->inputs->data[1]));
        mapping_args.feedback_loops->push_back(std::make_tuple(
            mapping_args.node->outputs->data[1],
            mapping_args.node->inputs->data[4]));
        mapping_args.builder->AddTensorOutput(
            mapping_args.node->outputs->data[1], 0);
        mapping_args.builder->AddTensorOutput(
            mapping_args.node->outputs->data[0], 0);
*nn_op_type = ANEURALNETWORKS_QUANTIZED_16BIT_LSTM;
} else {
auto builtin = reinterpret_cast<TfLiteLSTMParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
mapping_args.builder->AddAdditionalFloat32OutputTensor(2);
int ann_index;
        mapping_args.builder->AddStateFloat32Tensor(
            mapping_args.node->inputs->data[18], &ann_index);
        mapping_args.model_state_outputs->push_back(ann_index);
        mapping_args.model_state_tfl_inputs->push_back(
            mapping_args.node->inputs->data[18]);
        mapping_args.builder->AddStateFloat32Tensor(
            mapping_args.node->inputs->data[19], &ann_index);
        mapping_args.model_state_outputs->push_back(ann_index);
        mapping_args.model_state_tfl_inputs->push_back(
            mapping_args.node->inputs->data[19]);
const bool hybrid_op = IsHybridOperator(
mapping_args.context, kTfLiteBuiltinLstm, mapping_args.node);
if (mapping_args.node->inputs->size == 24) {
for (int i = 20; i < 24; ++i) {
const auto input_index = mapping_args.node->inputs->data[i];
if (input_index != kTfLiteOptionalTensor) {
mapping_args.builder->AddTensorInput(input_index, hybrid_op);
} else {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
}
}
*nn_op_type = ANEURALNETWORKS_LSTM;
}
} break;
case kTfLiteBuiltinMean: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
int32_t keep_dims = 0;
if (builtin->keep_dims) keep_dims = 1;
mapping_args.builder->AddScalarInt32Operand(keep_dims);
*nn_op_type = ANEURALNETWORKS_MEAN;
} break;
case kTfLiteBuiltinEmbeddingLookup: {
*nn_op_type = ANEURALNETWORKS_EMBEDDING_LOOKUP;
} break;
case kTfLiteBuiltinHashtableLookup: {
*nn_op_type = ANEURALNETWORKS_HASHTABLE_LOOKUP;
} break;
case kTfLiteBuiltinMaximum: {
*nn_op_type = ANEURALNETWORKS_MAXIMUM;
} break;
case kTfLiteBuiltinMinimum: {
*nn_op_type = ANEURALNETWORKS_MINIMUM;
} break;
case kTfLiteBuiltinCast: {
*nn_op_type = ANEURALNETWORKS_CAST;
} break;
case kTfLiteBuiltinLeakyRelu: {
const auto input_type =
mapping_args.context->tensors[mapping_args.node->inputs->data[0]]
.type;
auto builtin = reinterpret_cast<TfLiteLeakyReluParams*>(
mapping_args.node->builtin_data);
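      // LeakyRelu is lowered to PRELU with a constant one-element alpha
      // tensor; for quantized types the alpha value is encoded in the scale.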
TfLiteTensor alpha_tensor;
alpha_tensor.type = input_type;
alpha_tensor.allocation_type = kTfLiteDynamic;
alpha_tensor.dims = TfLiteIntArrayCreate(1);
alpha_tensor.dims->data[0] = 1;
alpha_tensor.params.zero_point = 0;
int new_tensor_index = -1;
if (input_type == kTfLiteFloat32) {
alpha_tensor.params.scale = 0;
std::vector<float> alpha_value = {builtin->alpha};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, alpha_tensor.dims,
alpha_value, alpha_tensor.params, &new_tensor_index);
} else if (input_type == kTfLiteInt8 &&
android_sdk_version >= kMinSdkVersionForNNAPI13) {
alpha_tensor.params.scale = builtin->alpha;
std::vector<int8_t> alpha_value = {1};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, kTfLiteInt8,
alpha_tensor.dims, alpha_value, alpha_tensor.params,
&new_tensor_index);
} else {
alpha_tensor.params.scale = builtin->alpha;
std::vector<uint8_t> alpha_value = {1};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
alpha_tensor.dims, alpha_value, alpha_tensor.params,
&new_tensor_index);
}
*nn_op_type = ANEURALNETWORKS_PRELU;
} break;
case kTfLiteBuiltinPrelu: {
*nn_op_type = ANEURALNETWORKS_PRELU;
} break;
case kTfLiteBuiltinTile: {
*nn_op_type = ANEURALNETWORKS_TILE;
} break;
case kTfLiteBuiltinLogicalOr: {
*nn_op_type = ANEURALNETWORKS_LOGICAL_OR;
} break;
case kTfLiteBuiltinLogicalAnd: {
*nn_op_type = ANEURALNETWORKS_LOGICAL_AND;
} break;
case kTfLiteBuiltinLogicalNot: {
*nn_op_type = ANEURALNETWORKS_LOGICAL_NOT;
} break;
case kTfLiteBuiltinLess: {
*nn_op_type = ANEURALNETWORKS_LESS;
} break;
case kTfLiteBuiltinLessEqual: {
*nn_op_type = ANEURALNETWORKS_LESS_EQUAL;
} break;
case kTfLiteBuiltinGreater: {
*nn_op_type = ANEURALNETWORKS_GREATER;
} break;
case kTfLiteBuiltinGreaterEqual: {
*nn_op_type = ANEURALNETWORKS_GREATER_EQUAL;
} break;
case kTfLiteBuiltinEqual: {
*nn_op_type = ANEURALNETWORKS_EQUAL;
} break;
case kTfLiteBuiltinNotEqual: {
*nn_op_type = ANEURALNETWORKS_NOT_EQUAL;
} break;
case kTfLiteBuiltinNeg: {
*nn_op_type = ANEURALNETWORKS_NEG;
} break;
case kTfLiteBuiltinTopkV2: {
const TfLiteTensor& k_param =
mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
mapping_args.builder->AddScalarInt32Operand(*k_param.data.i32);
*nn_op_type = ANEURALNETWORKS_TOPK_V2;
} break;
case kTfLiteBuiltinSelect: {
*nn_op_type = ANEURALNETWORKS_SELECT;
} break;
case kTfLiteBuiltinGather: {
auto builtin = reinterpret_cast<TfLiteGatherParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->axis);
      mapping_args.builder->AddTensorInput(mapping_args.node->inputs->data[1],
                                           false, 0);
*nn_op_type = ANEURALNETWORKS_GATHER;
} break;
case kTfLiteBuiltinBidirectionalSequenceLstm: {
auto builtin = reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
mapping_args.builder->AddScalarBoolOperand(builtin->merge_outputs);
mapping_args.builder->AddScalarBoolOperand(builtin->time_major);
for (int i = 0; i < 8; ++i) {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
*nn_op_type = ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM;
} break;
case kTfLiteBuiltinExpandDims: {
const TfLiteTensor& axis_param =
mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
mapping_args.builder->AddScalarInt32Operand(*axis_param.data.i32);
*nn_op_type = ANEURALNETWORKS_EXPAND_DIMS;
} break;
case kTfLiteBuiltinSplit: {
const TfLiteTensor& axis =
mapping_args.context->tensors[mapping_args.node->inputs->data[0]];
auto builtin =
reinterpret_cast<TfLiteSplitParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(*axis.data.i32);
mapping_args.builder->AddScalarInt32Operand(builtin->num_splits);
*nn_op_type = ANEURALNETWORKS_SPLIT;
} break;
case kTfLiteBuiltinLogSoftmax: {
mapping_args.builder->AddScalarFloat32Operand(1);
mapping_args.builder->AddScalarInt32Operand(-1);
*nn_op_type = ANEURALNETWORKS_LOG_SOFTMAX;
} break;
case kTfLiteBuiltinQuantize: {
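      // Requantization of an already-quantized input is expressed as a
      // Dequantize feeding the NNAPI QUANTIZE operation.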
auto input_index = mapping_args.node->inputs->data[0];
if (IsQuantized(mapping_args.context->tensors[input_index].type)) {
mapping_args.builder->AddDequantize(0, input_index, kTfLiteFloat32,
mapping_args.node_index);
}
*nn_op_type = ANEURALNETWORKS_QUANTIZE;
} break;
case kTfLiteBuiltinReduceAny: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_ANY;
} break;
case kTfLiteBuiltinReduceMin: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_MIN;
} break;
case kTfLiteBuiltinReduceMax: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
} break;
case kTfLiteBuiltinDepthToSpace: {
auto builtin = reinterpret_cast<TfLiteDepthToSpaceParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->block_size);
*nn_op_type = ANEURALNETWORKS_DEPTH_TO_SPACE;
} break;
case kTfLiteBuiltinReduceProd: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_PROD;
} break;
case kTfLiteBuiltinSum: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
} break;
case kTfLiteBuiltinElu: {
mapping_args.builder->AddScalarFloat32Operand(1.0);
*nn_op_type = ANEURALNETWORKS_ELU;
} break;
case kTfLiteBuiltinFill: {
*nn_op_type = ANEURALNETWORKS_FILL;
} break;
case kTfLiteBuiltinBatchMatmul: {
auto builtin = reinterpret_cast<TfLiteBatchMatMulParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->adj_x);
mapping_args.builder->AddScalarBoolOperand(builtin->adj_y);
*nn_op_type = ANEURALNETWORKS_BATCH_MATMUL;
} break;
case kTfLiteBuiltinPack: {
*nn_op_type = ANEURALNETWORKS_PACK;
} break;
case kTfLiteBuiltinMirrorPad: {
constexpr int kNnapiModeReflect = 0;
constexpr int kNnapiModeSymmetric = 1;
auto builtin = reinterpret_cast<TfLiteMirrorPaddingParams*>(
mapping_args.node->builtin_data);
int32_t nn_mirror_mode = -1;
if (builtin->mode == kTfLiteMirrorPaddingReflect) {
nn_mirror_mode = kNnapiModeReflect;
} else if (builtin->mode == kTfLiteMirrorPaddingSymmetric) {
nn_mirror_mode = kNnapiModeSymmetric;
}
mapping_args.builder->AddScalarInt32Operand(nn_mirror_mode);
*nn_op_type = ANEURALNETWORKS_MIRROR_PAD;
} break;
case kTfLiteBuiltinReverseV2: {
*nn_op_type = ANEURALNETWORKS_REVERSE;
} break;
default:
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::Init(TfLiteContext* context,
const TfLiteDelegateParams* params,
int* nnapi_errno) {
for (auto node_index : TfLiteIntArrayView(params->nodes_to_replace)) {
nodes_.push_back(node_index);
}
densify_output_to_node_mapping_ = std::vector<int>(context->tensors_size, -1);
non_const_dequantize_output_to_node_mapping_ =
std::vector<int>(context->tensors_size, -1);
const auto delegate_options =
StatefulNnApiDelegate::GetOptions(params->delegate);
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI12 &&
ShouldUseTargetDevices(delegate_options, nnapi_)) {
TF_LITE_ENSURE_STATUS(GetTargetDevices(context, params->delegate, nnapi_,
nnapi_errno, &nnapi_devices_));
if (nnapi_devices_.empty()) {
TF_LITE_KERNEL_LOG(
context, "NNAPI delegate requested but no accelerators available.");
return kTfLiteError;
}
if (!delegate_options.disable_debugging_diagnostics_callbacks) {
if (nnapi_->SL_ANeuralNetworksDiagnostic_registerCallbacks != nullptr) {
nnapi_->SL_ANeuralNetworksDiagnostic_registerCallbacks(
[](const void* nnapi,
const ANeuralNetworksDiagnosticCompilationInfo* info) {
return LogCompilationInfoOnce(static_cast<const NnApi*>(nnapi),
info);
},
[](const void* nnapi,
const ANeuralNetworksDiagnosticExecutionInfo* info) {
return LogExecutionInfoOnce(static_cast<const NnApi*>(nnapi),
info);
},
const_cast<NnApi*>(nnapi_));
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Registered diagnostics callbacks in NNAPI SL driver"
"SL_ANeuralNetworksDiagnostic_registerCallbacks.");
} else {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"NNAPI SL driver did not implement "
"SL_ANeuralNetworksDiagnostic_registerCallbacks!");
}
}
}
if (nnapi_->android_sdk_version < kMinSdkVersionForNNAPI12 &&
delegate_options.allow_dynamic_dimensions &&
delegate_options.vendor_plugin != nullptr) {
TF_LITE_KERNEL_LOG(context,
"Models with dynamic dimensions and vendor plugin is "
"not supported before NNAPI 1.2 (API level 29).");
return kTfLiteError;
}
tensor_memory_map_ =
&StatefulNnApiDelegate::GetTensorMemoryMap(params->delegate);
tensor_max_size_hints_.resize(context->tensors_size, 0);
for (const auto it : delegate_options.tensor_max_size_hints) {
auto tensor_index = it.first;
if (tensor_index >= context->tensors_size || tensor_index < 0) continue;
if (!HasUnspecifiedDimension(&context->tensors[tensor_index])) continue;
auto max_size_hint = it.second;
tensor_max_size_hints_[tensor_index] = max_size_hint;
}
if (!nn_model_) {
ANeuralNetworksModel* model = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(context,
nnapi_->ANeuralNetworksModel_create(&model),
"creating NNAPI model", nnapi_errno);
nn_model_.reset(model);
TF_LITE_ENSURE_STATUS(BuildGraph(context, delegate_options,
params->input_tensors,
params->output_tensors, nnapi_errno));
}
auto* cache = StatefulNnApiDelegate::GetCache(params->delegate);
if (cache) {
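    // Build the compilation cache token from the partition fingerprint,
    // copied into a zero-padded fixed-size byte vector.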
uint64_t token_parts[4];
auto partition_entry = cache->GetEntryForKernel(kNnapiId, context, params);
token_parts[0] = partition_entry.GetFingerprint();
token_parts[1] = partition_entry.GetFingerprint();
token_parts[2] = partition_entry.GetFingerprint();
token_parts[3] = partition_entry.GetFingerprint();
std::vector<uint8_t> nnapi_cache_token(33, 0);
uint8_t* p = reinterpret_cast<uint8_t*>(token_parts);
for (int i = 0; i < 4 * sizeof(uint64_t); i++) {
nnapi_cache_token[i] = p[i];
}
nn_compilation_cache_token_ = nnapi_cache_token;
}
nn_execution_cache_.SetMaxCacheSize(
delegate_options.max_execution_cache_size);
initialised_ = true;
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::Prepare(TfLiteContext* context,
TfLiteNode* node, int* nnapi_errno) {
if (!initialised_) {
return kTfLiteError;
}
const auto delegate_options =
StatefulNnApiDelegate::GetOptions(node->delegate);
if (nn_compilation_) {
return kTfLiteOk;
}
ANeuralNetworksCompilation* compilation = nullptr;
if (!nnapi_devices_.empty()) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksCompilation_createForDevices(
nn_model_.get(), nnapi_devices_.data(), nnapi_devices_.size(),
&compilation),
"creating NNAPI model for given devices", nnapi_errno);
} else {
if (nnapi_->ANeuralNetworksCompilation_create != nullptr) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(context,
nnapi_->ANeuralNetworksCompilation_create(
nn_model_.get(), &compilation),
"creating NNAPI compilation",
nnapi_errno);
} else {
TF_LITE_KERNEL_LOG(
context,
"Attempted to call ANeuralNetworksCompilation_create from NNAPI "
"delegate that is constructed from a support library");
return kTfLiteError;
}
}
auto preference = delegate_options.execution_preference;
if (preference !=
StatefulNnApiDelegate::Options::ExecutionPreference::kUndefined) {
const int preference_result =
nnapi_->ANeuralNetworksCompilation_setPreference(compilation,
preference);
if (preference_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksCompilation_free(compilation);
compilation = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, preference_result,
"setting compilation preferences",
nnapi_errno);
}
if (!nn_compilation_cache_token_.empty()) {
const char* cache_dir = delegate_options.cache_dir;
const int set_caching_result =
nnapi_->ANeuralNetworksCompilation_setCaching(
compilation, cache_dir, nn_compilation_cache_token_.data());
if (set_caching_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksCompilation_free(compilation);
compilation = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, set_caching_result,
"configuring NNAPI caching", nnapi_errno);
}
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI13) {
if (delegate_options.max_compilation_timeout_duration_ns > 0) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksCompilation_setTimeout(
compilation,
delegate_options.max_compilation_timeout_duration_ns),
"setting compilation timeout", nnapi_errno);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksCompilation_setPriority(
compilation, delegate_options.execution_priority),
"setting compilation priority", nnapi_errno);
}
if (delegate_options.vendor_compilation_hints && vendor_plugin_) {
TF_LITE_ENSURE_STATUS(vendor_plugin_->ConfigureCompilationHints(
delegate_options.vendor_compilation_hints, compilation));
}
const int finish_result =
nnapi_->ANeuralNetworksCompilation_finish(compilation);
if (finish_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksCompilation_free(compilation);
compilation = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, finish_result,
"completing NNAPI compilation", nnapi_errno);
nn_compilation_.reset(compilation);
bool should_use_burst_mode = delegate_options.use_burst_computation;
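  // Burst computation is always used when specific accelerators are targeted
  // at feature levels 5 through 7.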
if (!nnapi_devices_.empty() &&
target_feature_level_ >= kNNAPIRuntimeFeatureLevel5 &&
target_feature_level_ <= kNNAPIRuntimeFeatureLevel7) {
should_use_burst_mode = true;
}
if (should_use_burst_mode &&
nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI12 &&
nnapi_->ANeuralNetworksBurst_create) {
ANeuralNetworksBurst* burst = nullptr;
const int create_burst_result =
nnapi_->ANeuralNetworksBurst_create(nn_compilation_.get(), &burst);
if (create_burst_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksBurst_free(burst);
burst = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, create_burst_result,
"creating NNAPI burst", nnapi_errno);
nn_burst_.reset(burst);
}
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::GetOperationsSupportedByTargetNnApiDevices(
TfLiteContext* context, std::vector<int>* supported_nodes,
int* nnapi_errno) {
if (!nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices) {
return kTfLiteError;
}
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping_util_->context);
const int nnapi_model_size =
mapping_context->nnapi_to_tflite_op_mapping_.size();
std::unique_ptr<bool[]> nnapi_ops_support_flags(new bool[nnapi_model_size]);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices(
nn_model_.get(), nnapi_devices_.data(), nnapi_devices_.size(),
nnapi_ops_support_flags.get()),
"Checking supported operations for devices", nnapi_errno);
auto tflite_ops_support_status = std::map<int, bool>();
std::for_each(nodes_.begin(), nodes_.end(),
[&tflite_ops_support_status](int tflite_node_index) {
tflite_ops_support_status[tflite_node_index] = true;
});
for (int nnapi_op_index = 0; nnapi_op_index < nnapi_model_size;
nnapi_op_index++) {
const auto tflite_op_index =
mapping_context->nnapi_to_tflite_op_mapping_[nnapi_op_index];
tflite_ops_support_status[tflite_op_index] &=
nnapi_ops_support_flags[nnapi_op_index];
if (!tflite_ops_support_status[tflite_op_index]) {
if (std::count(non_const_dequantize_output_to_node_mapping_.begin(),
non_const_dequantize_output_to_node_mapping_.end(), -1) <
non_const_dequantize_output_to_node_mapping_.size() ||
std::count(densify_output_to_node_mapping_.begin(),
densify_output_to_node_mapping_.end(),
-1) < densify_output_to_node_mapping_.size()) {
return kTfLiteOk;
}
}
}
supported_nodes->clear();
std::for_each(nodes_.begin(), nodes_.end(),
[&supported_nodes, &tflite_ops_support_status](int node_index) {
if (tflite_ops_support_status[node_index]) {
supported_nodes->push_back(node_index);
}
});
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::Invoke(TfLiteContext* context,
TfLiteNode* node, int* nnapi_errno) {
const bool allow_padding =
nnapi_->nnapi_runtime_feature_level > kMinSdkVersionForNNAPI13 &&
nnapi_->ANeuralNetworksExecution_enableInputAndOutputPadding != nullptr;
const auto delegate_options =
StatefulNnApiDelegate::GetOptions(node->delegate);
bool execution_is_reusable =
nnapi_->nnapi_runtime_feature_level > kMinSdkVersionForNNAPI13 &&
delegate_options.max_execution_cache_size > 0;
bool can_infer_output_shape = !delegate_options.allow_dynamic_dimensions ||
delegate_options.vendor_plugin == nullptr;
ANeuralNetworksExecution* execution = nullptr;
NNAPIExecutionCache::Signature signature;
if (execution_is_reusable) {
signature = CreateExecutionCacheSignature(context, node, delegate_options,
*tensor_memory_map_);
execution = nn_execution_cache_.Get(signature);
}
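  // A cache miss means a fresh execution has to be created and fully
  // configured below before (optionally) being stored back in the cache.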
bool should_create_new_execution = execution == nullptr;
UniqueExecution unique_execution(nullptr, NNFreeExecution(nnapi_));
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(context,
nnapi_->ANeuralNetworksExecution_create(
nn_compilation_.get(), &execution),
"creating NNAPI execution", nnapi_errno);
unique_execution.reset(execution);
if (nnapi_->nnapi_runtime_feature_level > kMinSdkVersionForNNAPI13) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
          nnapi_->ANeuralNetworksExecution_setReusable(execution, true),
"making execution reusable", nnapi_errno);
}
if (delegate_options.vendor_execution_hints && vendor_plugin_) {
TF_LITE_ENSURE_STATUS(vendor_plugin_->ConfigureExecutionHints(
delegate_options.vendor_execution_hints, execution));
}
if (allow_padding) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_enableInputAndOutputPadding(
execution, true),
"setting allow padding for execution intputs and outputs",
nnapi_errno);
}
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI13) {
if (delegate_options.max_execution_timeout_duration_ns > 0) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_setTimeout(
execution, delegate_options.max_execution_timeout_duration_ns),
"setting execution timeout", nnapi_errno);
}
if (delegate_options.max_execution_loop_timeout_duration_ns > 0) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_setLoopTimeout(
execution,
delegate_options.max_execution_loop_timeout_duration_ns),
"setting execution loop timeout", nnapi_errno);
}
}
if (delegate_options.allow_dynamic_dimensions) {
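      // With dynamic dimensions the shared input/output memory pools may need
      // to grow, so recompute the required byte sizes including padding.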
size_t total_input_byte_size = 0;
for (int i : TfLiteIntArrayView(node->inputs)) {
if (i != kTfLiteOptionalTensor &&
context->tensors[i].allocation_type != kTfLiteMmapRo &&
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i) != -1) {
if (context->tensors[i].buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
const TfLiteType nn_type_conversion =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
i);
int tensor_size = 0;
if (nn_type_conversion == kTfLiteNoType) {
tensor_size = context->tensors[i].bytes;
} else {
size_t type_size;
TF_LITE_ENSURE_OK(
context,
GetSizeOfType(context, nn_type_conversion, &type_size));
tensor_size = NumElements(&context->tensors[i]) * type_size;
}
total_input_byte_size += tensor_size;
total_input_byte_size += GetNumPaddingBytes(tensor_size);
}
}
if (total_input_byte_size > nn_input_memory_->get_byte_size()) {
nn_input_memory_ = std::make_unique<NNMemory>(nnapi_, "input_pool",
total_input_byte_size);
nn_execution_cache_.Clear();
}
size_t total_output_byte_size = 0;
for (int i : TfLiteIntArrayView(node->outputs)) {
const auto& tensor = context->tensors[i];
if (tensor.buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
size_t tensor_size = tensor.bytes;
if (!can_infer_output_shape && HasUnspecifiedDimension(&tensor)) {
if (tensor_max_size_hints_[i] == 0) {
TF_LITE_KERNEL_LOG(context,
"Missing max tensor size for tensor#%d. When a "
"vendor plugin is supplied, max tensor size is "
"required for all dynamic output tensors.",
i);
return kTfLiteError;
}
tensor_size = std::max(tensor_size, tensor_max_size_hints_[i]);
}
total_output_byte_size += tensor_size;
total_output_byte_size += GetNumPaddingBytes(tensor_size);
}
if (total_output_byte_size > nn_output_memory_->get_byte_size()) {
nn_output_memory_ = std::make_unique<NNMemory>(nnapi_, "output_pool",
total_output_byte_size);
nn_execution_cache_.Clear();
}
}
if (execution_is_reusable) {
nn_execution_cache_.Put(signature, std::move(unique_execution));
unique_execution = nullptr;
}
}
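  // Bind each TFLite input either to a registered NNAPI memory object (when it
  // carries a buffer handle) or to a slice of the shared input pool, converting
  // the data into the pool when the NNAPI operand type differs from the TFLite
  // type.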
int relative_input_index = 0;
const bool use_int8_asymm_signed =
target_feature_level_ >= kMinSdkVersionForNNAPI13;
size_t input_offset = 0;
for (auto absolute_input_index : TfLiteIntArrayView(node->inputs)) {
if (absolute_input_index == kTfLiteOptionalTensor) {
continue;
}
ANeuralNetworksOperandType input_nn_operand_type;
ANeuralNetworksOperandType* input_nn_operand_type_ptr = nullptr;
TfLiteTensor* tensor = &context->tensors[absolute_input_index];
TfLiteType ann_type_equivalent =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
absolute_input_index);
if (delegate_options.allow_dynamic_dimensions &&
::tflite::HasUnspecifiedDimension(tensor)) {
input_nn_operand_type = ConvertTensorTypeToNNType(
tensor, ann_type_equivalent, use_int8_asymm_signed);
input_nn_operand_type_ptr = &input_nn_operand_type;
}
if (tensor->allocation_type != kTfLiteMmapRo) {
if (tensor->buffer_handle != kTfLiteNullBufferHandle &&
tensor->buffer_handle < tensor_memory_map_->size()) {
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setInputFromMemory(
execution, relative_input_index, input_nn_operand_type_ptr,
tensor_memory_map_->at(tensor->buffer_handle).memory, 0,
tensor->bytes),
"associating NNAPI execution input with a memory object", tensor,
nnapi_errno);
}
relative_input_index++;
continue;
}
int tensor_size = 0;
int padding_bytes = 0;
if (ann_type_equivalent != kTfLiteNoType) {
const auto num_elements = NumElements(tensor);
uint8_t* input_ptr = nn_input_memory_->get_data_ptr() + input_offset;
if (tensor->type == kTfLiteUInt8 &&
ann_type_equivalent == kTfLiteInt32) {
for (int i = 0; i < num_elements; ++i) {
reinterpret_cast<int32_t*>(input_ptr)[i] =
static_cast<const int32_t>(tensor->data.uint8[i]);
}
} else if (tensor->type == kTfLiteInt8 &&
ann_type_equivalent == kTfLiteUInt8) {
for (int i = 0; i < num_elements; ++i) {
input_ptr[i] = static_cast<const uint8_t>(
static_cast<int32_t>(tensor->data.int8[i]) + 128);
}
} else if (tensor->type == kTfLiteInt8 &&
ann_type_equivalent == kTfLiteInt32) {
if (use_int8_asymm_signed) {
for (int i = 0; i < num_elements; ++i) {
reinterpret_cast<int32_t*>(input_ptr)[i] =
static_cast<const int32_t>(tensor->data.int8[i]);
}
} else {
for (int i = 0; i < num_elements; ++i) {
reinterpret_cast<int32_t*>(input_ptr)[i] =
static_cast<const int32_t>(tensor->data.int8[i]) + 128;
}
}
} else if (tensor->type == kTfLiteInt64 &&
ann_type_equivalent == kTfLiteInt32) {
            // Check that each int64 value fits into int32 before narrowing.
            int32_t* input_ptr_i32 = reinterpret_cast<int32_t*>(input_ptr);
            for (int i = 0; i < num_elements; ++i) {
              if (tensor->data.i64[i] < std::numeric_limits<int32_t>::min() ||
                  tensor->data.i64[i] > std::numeric_limits<int32_t>::max()) {
                TF_LITE_KERNEL_LOG(context,
                                   "NN API Delegate: int64 value out of bounds "
                                   "for int32 target NNAPI tensor\n");
                return kTfLiteError;
              }
              input_ptr_i32[i] = static_cast<int32_t>(tensor->data.i64[i]);
            }
} else {
TF_LITE_KERNEL_LOG(
context,
"NN API Delegate: unsupported tensor types conversion: "
"from type code %d to type code %d.\n",
tensor->type, ann_type_equivalent);
return kTfLiteError;
}
size_t type_size;
TF_LITE_ENSURE_OK(
context, GetSizeOfType(context, ann_type_equivalent, &type_size));
tensor_size = NumElements(tensor) * type_size;
padding_bytes = GetNumPaddingBytes(tensor_size);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setInputFromMemory(
execution, relative_input_index, input_nn_operand_type_ptr,
nn_input_memory_->get_handle(), input_offset,
GetNNTensorSize(tensor_size, allow_padding)),
"associating NNAPI execution input with a memory object", tensor,
nnapi_errno);
}
} else if (mapping_util_->TfLiteIndexToNnIndex(
mapping_util_.get(), absolute_input_index) != -1) {
memcpy(nn_input_memory_->get_data_ptr() + input_offset,
tensor->data.raw, tensor->bytes);
tensor_size = tensor->bytes;
padding_bytes = GetNumPaddingBytes(tensor_size);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setInputFromMemory(
execution, relative_input_index, input_nn_operand_type_ptr,
nn_input_memory_->get_handle(), input_offset,
GetNNTensorSize(tensor_size, allow_padding)),
"associating NNAPI execution input with a memory object", tensor,
nnapi_errno);
}
}
input_offset += tensor_size + padding_bytes;
relative_input_index++;
}
}
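  // Bind each output either to a registered NNAPI memory object or to a slice
  // of the shared output pool; outputs with unspecified dimensions fall back to
  // the caller-provided max tensor size hints.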
int relative_output_index = 0;
size_t output_offset = 0;
for (auto output_index : TfLiteIntArrayView(node->outputs)) {
if (mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(),
output_index) == -1) {
continue;
}
ANeuralNetworksOperandType output_nn_operand_type;
ANeuralNetworksOperandType* output_nn_operand_type_ptr = nullptr;
TfLiteTensor* tensor = &context->tensors[output_index];
if (delegate_options.allow_dynamic_dimensions && can_infer_output_shape &&
::tflite::HasUnspecifiedDimension(tensor)) {
TfLiteType ann_type_equivalent =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
output_index);
output_nn_operand_type = ConvertTensorTypeToNNType(
tensor, ann_type_equivalent, use_int8_asymm_signed);
output_nn_operand_type_ptr = &output_nn_operand_type;
}
if (tensor->buffer_handle != kTfLiteNullBufferHandle &&
tensor->buffer_handle < tensor_memory_map_->size() &&
should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setOutputFromMemory(
execution, relative_output_index, output_nn_operand_type_ptr,
tensor_memory_map_->at(tensor->buffer_handle).memory, 0,
tensor->bytes),
"associating NNAPI execution output to a memory object", tensor,
nnapi_errno);
} else {
size_t tensor_size = tensor->bytes;
if (!can_infer_output_shape && HasUnspecifiedDimension(tensor)) {
tensor_size =
std::max(tensor->bytes, tensor_max_size_hints_[output_index]);
}
int padding_bytes = GetNumPaddingBytes(tensor_size);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setOutputFromMemory(
execution, relative_output_index, output_nn_operand_type_ptr,
nn_output_memory_->get_handle(), output_offset,
GetNNTensorSize(tensor_size, allow_padding)),
"associating NNAPI execution output to a memory object", tensor,
nnapi_errno);
}
output_offset += tensor_size + padding_bytes;
}
relative_output_index++;
}
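  // State tensors (e.g. LSTM/RNN states) are exposed as extra NNAPI outputs
  // appended after the regular outputs; their results are copied back into the
  // corresponding TFLite input tensors after the computation.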
for (size_t i = 0; i < model_state_tfl_inputs_.size(); i++) {
int state_tensor_idx = model_state_tfl_inputs_[i];
TfLiteTensor* tensor = &context->tensors[state_tensor_idx];
int padding_bytes = GetNumPaddingBytes(tensor->bytes);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_setOutputFromMemory(
execution, relative_output_index, nullptr,
nn_output_memory_->get_handle(), output_offset,
GetNNTensorSize(tensor->bytes, allow_padding)),
"associating NNAPI execution state output to a memory object",
nnapi_errno);
}
output_offset += tensor->bytes + padding_bytes;
relative_output_index++;
}
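  // Dispatch the computation: on pre-NNAPI-1.2 devices only asynchronous
  // execution is available; newer runtimes use burst or plain synchronous
  // compute.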
if (nnapi_->android_sdk_version < kMinSdkVersionForNNAPI12) {
ANeuralNetworksEvent* event = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_startCompute(execution, &event),
"starting async computation", nnapi_errno);
const int wait_result = nnapi_->ANeuralNetworksEvent_wait(event);
nnapi_->ANeuralNetworksEvent_free(event);
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, wait_result,
"waiting for async computation completion",
nnapi_errno);
} else {
if (nn_burst_) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_burstCompute(execution,
nn_burst_.get()),
"running burst computation", nnapi_errno);
} else {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi_->ANeuralNetworksExecution_compute(execution),
"running computation", nnapi_errno);
}
}
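  // When output shapes could not be inferred ahead of time, query the actual
  // operand dimensions from NNAPI and resize the TFLite output tensors to
  // match.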
if (!can_infer_output_shape) {
relative_output_index = 0;
for (auto output_index : TfLiteIntArrayView(node->outputs)) {
TfLiteTensor* tensor = &context->tensors[output_index];
if (HasUnspecifiedDimension(tensor)) {
auto* new_dims = TfLiteIntArrayCreate(tensor->dims->size);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_getOutputOperandDimensions(
execution, relative_output_index,
reinterpret_cast<uint32_t*>(new_dims->data)),
"get output operand dimensions", nnapi_errno);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, tensor, new_dims));
}
relative_output_index++;
}
}
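  // Copy the results from the shared output pool back into the TFLite tensors,
  // shifting outputs that were evaluated as UINT8 back to INT8 by undoing the
  // 128 zero-point offset.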
output_offset = 0;
for (auto output_index : TfLiteIntArrayView(node->outputs)) {
TfLiteTensor* tensor = &context->tensors[output_index];
if (tensor->buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
TfLiteType ann_type_equivalent =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
output_index);
if (tensor->type == kTfLiteInt8 && ann_type_equivalent == kTfLiteUInt8) {
uint8_t* output_ptr = reinterpret_cast<uint8_t*>(
nn_output_memory_->get_data_ptr() + output_offset);
const auto num_elements = NumElements(tensor);
for (int i = 0; i < num_elements; ++i) {
output_ptr[i] =
static_cast<uint8_t>(static_cast<int32_t>(output_ptr[i]) - 128);
}
}
memcpy(tensor->data.raw, nn_output_memory_->get_data_ptr() + output_offset,
tensor->bytes);
size_t tensor_size = tensor->bytes;
if (!can_infer_output_shape && HasUnspecifiedDimension(tensor)) {
tensor_size =
std::max(tensor->bytes, tensor_max_size_hints_[output_index]);
}
output_offset += tensor_size;
output_offset += GetNumPaddingBytes(tensor_size);
}
for (size_t i = 0; i < model_state_tfl_inputs_.size(); i++) {
int state_tensor_idx = model_state_tfl_inputs_[i];
TfLiteTensor* tensor = &context->tensors[state_tensor_idx];
memcpy(tensor->data.raw, nn_output_memory_->get_data_ptr() + output_offset,
tensor->bytes);
output_offset += tensor->bytes;
output_offset += GetNumPaddingBytes(tensor->bytes);
}
for (auto feedback_loop : feedback_loops_) {
int output_tensor_idx;
int input_tensor_idx;
std::tie(output_tensor_idx, input_tensor_idx) = feedback_loop;
TfLiteTensor& src = context->tensors[output_tensor_idx];
TfLiteTensor& dest = context->tensors[input_tensor_idx];
memcpy(dest.data.raw, src.data.raw, src.bytes);
}
return kTfLiteOk;
}
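// For float ops whose weight inputs are quantized (hybrid CONV_2D,
// FULLY_CONNECTED and LSTM), append explicit NNAPI DEQUANTIZE operations so
// the accelerator receives float weights.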
void NNAPIDelegateKernel::AddDequantizeOperatorsWhereNeeded(
const TfLiteContext* context, int builtin_code, const TfLiteNode* node,
int tflite_node_index, NNAPIOpBuilder* builder, int* nnapi_errno) {
int input_tensor_index = -1;
std::vector<int> inputs_to_potentially_dequantize;
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinFullyConnected: {
input_tensor_index = 0;
inputs_to_potentially_dequantize = {1, 2};
break;
}
case kTfLiteBuiltinLstm: {
input_tensor_index = 0;
inputs_to_potentially_dequantize = {1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 20, 21, 22, 23};
break;
}
default:
return;
}
int tensor_id = node->inputs->data[input_tensor_index];
if (tensor_id < 0) return;
if (!IsFloat(context->tensors[tensor_id].type)) return;
for (int i : inputs_to_potentially_dequantize) {
if (i < 0 || i >= node->inputs->size) continue;
tensor_id = node->inputs->data[i];
if (tensor_id < 0) continue;
const TfLiteType type = context->tensors[tensor_id].type;
if (!IsQuantized(type)) continue;
builder->AddDequantize(i, node->inputs->data[i], type, tflite_node_index);
}
}
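// Densifies the sparse constant tensor consumed by a DENSIFY node, optionally
// converting float16 data to float32, and registers the result as a new
// constant NNAPI input.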
TfLiteStatus NNAPIDelegateKernel::DensifyAndDequantizeConstTensor(
TfLiteContext* context, int densify_node_id, bool should_dequantize,
NNAPIOpBuilder& builder) {
TfLiteNode* densify_node;
TfLiteRegistration* reg;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, densify_node_id, &densify_node, ®));
int sparse_weight_tid = densify_node->inputs->data[0];
auto input_tensor = context->tensors[sparse_weight_tid];
auto output_tensor = context->tensors[densify_node->outputs->data[0]];
if (input_tensor.sparsity == nullptr) {
return kTfLiteError;
}
const int dims_count = output_tensor.dims->size;
std::vector<int> vector_shape(dims_count);
for (int i = 0; i < dims_count; i++) {
vector_shape[i] = output_tensor.dims->data[i];
}
size_t dense_size;
int new_tensor_index = -1;
switch (input_tensor.type) {
case kTfLiteFloat32: {
dense_size = output_tensor.bytes / sizeof(float);
std::vector<float> output_data(dense_size);
tflite::internal::sparsity::FormatConverter<float> converter(
vector_shape, *input_tensor.sparsity);
converter.SparseToDense(static_cast<const float*>(input_tensor.data.data),
dense_size, output_data.data(), context);
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<float>(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, output_tensor.dims,
output_data, output_tensor.params, &new_tensor_index));
break;
}
case kTfLiteFloat16: {
dense_size = output_tensor.bytes / sizeof(Eigen::half);
std::vector<uint16_t> output_data(dense_size);
Eigen::half* unpacked_fp16_data =
reinterpret_cast<Eigen::half*>(output_data.data());
tflite::internal::sparsity::FormatConverter<Eigen::half> converter(
vector_shape, *input_tensor.sparsity);
converter.SparseToDense(
static_cast<const Eigen::half*>(input_tensor.data.data), dense_size,
unpacked_fp16_data, context);
if (should_dequantize) {
std::vector<float> float_dense_data(dense_size);
for (int i = 0; i < dense_size; ++i) {
float_dense_data[i] = fp16_ieee_to_fp32_value(
reinterpret_cast<uint16_t*>(output_data.data())[i]);
}
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<float>(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, output_tensor.dims,
float_dense_data, output_tensor.params, &new_tensor_index));
} else {
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<uint16_t>(
ANEURALNETWORKS_TENSOR_FLOAT16, kTfLiteFloat16, output_tensor.dims,
output_data, output_tensor.params, &new_tensor_index));
}
break;
}
case kTfLiteInt8: {
dense_size = output_tensor.bytes / sizeof(int8_t);
std::vector<int8_t> output_data(dense_size);
tflite::internal::sparsity::FormatConverter<int8_t> converter(
vector_shape, *input_tensor.sparsity);
converter.SparseToDense(
static_cast<const int8_t*>(input_tensor.data.data), dense_size,
output_data.data(), context);
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<int8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, kTfLiteInt8,
output_tensor.dims, output_data, output_tensor.params,
&new_tensor_index));
break;
}
default: {
return kTfLiteError;
}
}
return kTfLiteOk;
}
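// Returns a copy of |old_array| resized to |new_size|; preserved entries are
// copied over and any new entries are filled with |init_value|. The old array
// is freed.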
TfLiteIntArray* ResizeTfLiteIntArray(TfLiteIntArray* old_array, int new_size,
int init_value) {
TfLiteIntArray* ret = TfLiteIntArrayCreate(new_size);
if (ret) {
int size_to_copy = 0;
if (old_array) {
size_to_copy = new_size > old_array->size ? old_array->size : new_size;
memcpy(ret->data, old_array->data, size_to_copy * sizeof(int));
}
for (int i = size_to_copy; i < ret->size; i++) {
ret->data[i] = init_value;
}
}
TfLiteIntArrayFree(old_array);
return ret;
}
void NNFreeMappingUtil::operator()(NnapiMappingUtilCInterface* mapping_util) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping_util->context);
delete (mapping_context);
mapping_util->context = nullptr;
free(mapping_util);
}
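// Static implementations backing the C-style NnapiMappingUtilCInterface; they
// maintain the TFLite-tensor-to-NNAPI-operand and type-conversion tables
// stored in NnapiMappingContext.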
class NnapiMappingUtilCInterfaceImpl {
public:
static int TfLiteIndexToNnIndex(NnapiMappingUtilCInterface* mapping,
int index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t max_size = mapping_context->lite_tensor_to_ann_tensor_.size();
if (index >= 0 && index < max_size)
return mapping_context->lite_tensor_to_ann_tensor_[index];
else
return -1;
}
static int AddNewNonTensorOperand(NnapiMappingUtilCInterface* mapping) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
return mapping_context->next_ann_tensor_index_++;
}
static int AddDelegateGeneratedInputAnnTensorOperand(
NnapiMappingUtilCInterface* mapping) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
return mapping_context->next_ann_tensor_index_++;
}
static int AddNewNnTensorIndex(NnapiMappingUtilCInterface* mapping,
int tflite_index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t current_size =
mapping_context->lite_tensor_to_ann_tensor_.size();
if (tflite_index >= current_size) {
mapping_context->lite_tensor_to_ann_tensor_.resize(tflite_index + 1, -1);
}
const int new_tensor_index = mapping_context->next_ann_tensor_index_++;
mapping_context->lite_tensor_to_ann_tensor_[tflite_index] =
new_tensor_index;
return new_tensor_index;
}
static TfLiteType TfLiteIndexToNnTypeConversion(
NnapiMappingUtilCInterface* mapping, int index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t max_size = mapping_context->index_to_type_conversion_.size();
if (index >= 0 && index < max_size)
return static_cast<TfLiteType>(
mapping_context->index_to_type_conversion_[index]);
else
return kTfLiteNoType;
}
static void AddTypeConversion(NnapiMappingUtilCInterface* mapping,
int tflite_index, TfLiteType tflite_type) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t current_size =
mapping_context->index_to_type_conversion_.size();
if (tflite_index >= current_size) {
mapping_context->index_to_type_conversion_.resize(tflite_index + 1,
kTfLiteNoType);
}
mapping_context->index_to_type_conversion_[tflite_index] = tflite_type;
}
static void AddNnapiToTfliteOpMapping(NnapiMappingUtilCInterface* mapping,
int tflite_node_index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
mapping_context->nnapi_to_tflite_op_mapping_.push_back(tflite_node_index);
}
};
NnapiMappingUtilCInterface*
NNAPIDelegateKernel::NnapiMappingUtilCInterfaceCreate() {
NnapiMappingUtilCInterface* mapping =
static_cast<NnapiMappingUtilCInterface*>(
malloc(sizeof(NnapiMappingUtilCInterface)));
mapping->context = new NnapiMappingContext();
mapping->TfLiteIndexToNnIndex =
NnapiMappingUtilCInterfaceImpl::TfLiteIndexToNnIndex;
mapping->AddNewNonTensorOperand =
NnapiMappingUtilCInterfaceImpl::AddNewNonTensorOperand;
mapping->AddDelegateGeneratedInputAnnTensorOperand =
NnapiMappingUtilCInterfaceImpl::AddDelegateGeneratedInputAnnTensorOperand;
mapping->AddNewNnTensorIndex =
NnapiMappingUtilCInterfaceImpl::AddNewNnTensorIndex;
mapping->TfLiteIndexToNnTypeConversion =
NnapiMappingUtilCInterfaceImpl::TfLiteIndexToNnTypeConversion;
mapping->AddTypeConversion =
NnapiMappingUtilCInterfaceImpl::AddTypeConversion;
mapping->AddNnapiToTfliteOpMapping =
NnapiMappingUtilCInterfaceImpl::AddNnapiToTfliteOpMapping;
return mapping;
}
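// Translates every TFLite node assigned to this kernel into NNAPI operands and
// operations, applying per-op rewrites along the way (PACK/UNPACK/SPLIT_V
// lowering, hybrid dequantization, int8 conversion, quantized LSTM handling,
// vendor plugin mappings, etc.).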
TfLiteStatus NNAPIDelegateKernel::AddOpsAndTensors(
TfLiteContext* context, int* nnapi_errno, bool allow_dynamic_dimensions) {
DequantizeMapping dequantize_mapping;
NNAPIOpBuilder builder(nnapi_, context, mapping_util_.get(),
&dequantize_mapping, &allocation_memory_mapping_,
nn_model_.get(), nnapi_errno,
allow_dynamic_dimensions);
target_feature_level_ = nnapi_->nnapi_runtime_feature_level;
if (!nnapi_devices_.empty()) {
TF_LITE_ENSURE_STATUS(GetTargetFeatureLevel(
context, nnapi_, nnapi_devices_, &target_feature_level_, nnapi_errno));
}
for (auto node_index : nodes_) {
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_index, &node, ®istration));
if (IsDequantizeConstFloat16(context, node, registration)) {
builder.AddTensorInput(node->inputs->data[0], false,
NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION |
NN_TENSOR_FLAG_SCALAR_AS_TENSOR);
}
if (IsDensifyConstTensor(context, node, registration)) {
densify_output_to_node_mapping_[node->outputs->data[0]] = node_index;
}
if (IsDequantizeNonConstFloat16(context, node, registration)) {
non_const_dequantize_output_to_node_mapping_[node->outputs->data[0]] =
node_index;
}
}
builder.ClearInputOuputLists();
for (auto node_index : nodes_) {
TfLiteNode* node;
TfLiteRegistration* reg;
TF_LITE_ENSURE_STATUS(
context->GetNodeAndRegistration(context, node_index, &node, ®));
if (IsDensifyConstTensor(context, node, reg) ||
IsDequantizeNonConstFloat16(context, node, reg)) {
continue;
}
if (vendor_plugin_ && vendor_plugin_->ValidateNode(context, reg, node)) {
TF_LITE_ENSURE_STATUS(vendor_plugin_->MapNode(
context, node, node_index, mapping_util_.get(), nn_model_.get()));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinPack &&
target_feature_level_ < kNNAPIRuntimeFeatureLevel6) {
TF_LITE_ENSURE_STATUS(
builder.TransformPackIntoSupportedOps(node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinUnpack) {
TF_LITE_ENSURE_STATUS(
builder.TransformUnpackIntoSupportedOps(node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinSplitV) {
TF_LITE_ENSURE_STATUS(
builder.TransformSplitVIntoSupportedOps(node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinSquaredDifference) {
TF_LITE_ENSURE_STATUS(builder.TransformSquaredDifferenceIntoSupportedOps(
node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinCos) {
TF_LITE_ENSURE_STATUS(
builder.TransformCosIntoSupportedOps(node_index, node, reg));
continue;
}
if (target_feature_level_ >= kMinSdkVersionForNNAPI13 &&
reg->builtin_code == kTfLiteBuiltinLstm && isLstmFullKernel(node) &&
context->tensors[node->inputs->data[0]].type == kTfLiteInt8) {
const auto quant8_full_lstm_op_code = ANEURALNETWORKS_QUANTIZED_LSTM;
constexpr int kInputTensor = 0;
constexpr int kInputToInputWeightsTensor = 1;
constexpr int kRecurrentToInputWeightsTensor = 5;
constexpr int kInputGateBiasTensor = 12;
constexpr int kForgetGateBiasTensor = 13;
constexpr int kCellGateBiasTensor = 14;
constexpr int kOutputGateBiasTensor = 15;
constexpr int kProjectionWeightsTensor = 16;
constexpr int kProjectionBiasTensor = 17;
constexpr int kPrevOutputTensor = 18;
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
const auto input_index = node->inputs->data[input_pos];
if (input_index == kTfLiteOptionalTensor) {
if (input_pos == kInputToInputWeightsTensor ||
input_pos == kRecurrentToInputWeightsTensor ||
input_pos == kProjectionWeightsTensor) {
TF_LITE_ENSURE_STATUS(builder.AddVectorInt8Operand(nullptr, 0));
} else if (input_pos == kInputGateBiasTensor ||
input_pos == kForgetGateBiasTensor ||
input_pos == kCellGateBiasTensor ||
input_pos == kOutputGateBiasTensor ||
input_pos == kProjectionBiasTensor) {
TF_LITE_ENSURE_STATUS(builder.AddVectorInt32Operand(nullptr, 0));
} else {
TF_LITE_ENSURE_STATUS(builder.AddVectorInt16Operand(nullptr, 0));
}
} else {
int flags =
(input_pos == kInputTensor || input_pos == kPrevOutputTensor)
? NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED
: 0;
TF_LITE_ENSURE_STATUS(
builder.AddTensorInput(input_index, false, flags));
}
}
auto builtin = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
TF_LITE_ENSURE_STATUS(
builder.AddScalarFloat32Operand(builtin->cell_clip));
TF_LITE_ENSURE_STATUS(
builder.AddScalarFloat32Operand(builtin->proj_clip));
TF_LITE_ENSURE_EQ(context, node->intermediates->size, 5);
for (int intermediate_pos = 0;
intermediate_pos < node->intermediates->size; ++intermediate_pos) {
const auto intermediate_index =
node->intermediates->data[intermediate_pos];
const TfLiteTensor& tensor = context->tensors[intermediate_index];
TfLiteAffineQuantization* quantization_params =
static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
if (intermediate_pos == 4) {
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
quantization_params->zero_point->data[0]));
}
TF_LITE_ENSURE_STATUS(builder.AddScalarFloat32Operand(
quantization_params->scale->data[0]));
}
int ann_index;
      builder.AddStateInt8AsymTensor(node->inputs->data[18], &ann_index);
      model_state_outputs_.push_back(ann_index);
      model_state_tfl_inputs_.push_back(node->inputs->data[18]);
      builder.AddStateInt16Tensor(node->inputs->data[19], &ann_index);
      model_state_outputs_.push_back(ann_index);
      model_state_tfl_inputs_.push_back(node->inputs->data[19]);
for (int output_pos = 0; output_pos < node->outputs->size; ++output_pos) {
const auto output_index = node->outputs->data[output_pos];
TF_LITE_ENSURE_STATUS(builder.AddTensorOutput(
output_index, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
}
builder.FinalizeAddOperation(quant8_full_lstm_op_code, node_index);
continue;
}
const bool hybrid_op = IsHybridOperator(context, reg->builtin_code, node);
const bool scalar_as_tensor = IsScalarInputSupported(reg->builtin_code);
const bool need_int8_conversion =
target_feature_level_ < kMinSdkVersionForNNAPI13 &&
NeedInt8Conversion(context, reg->builtin_code, node);
const bool use_int8_asymm_signed =
target_feature_level_ >= kMinSdkVersionForNNAPI13 && !hybrid_op;
if (IsDequantizeConstFloat16(context, node, reg)) {
continue;
}
int input_tensor_flags = 0;
if (scalar_as_tensor) {
input_tensor_flags |= NN_TENSOR_FLAG_SCALAR_AS_TENSOR;
}
if (use_int8_asymm_signed) {
input_tensor_flags |= NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED;
}
if (reg->builtin_code == kTfLiteBuiltinHardSwish &&
nnapi_->android_sdk_version < kMinSdkVersionForNNAPI13) {
builder.TransformHardSwishIntoSupportedOps(
node->inputs->data[0], node->outputs->data[0], need_int8_conversion,
node_index);
continue;
}
if (reg->builtin_code == kTfLiteBuiltinPack) {
const auto* builtin =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
auto& input_tensor = context->tensors[node->inputs->data[0]];
int axis = builtin->axis < 0 ? input_tensor.dims->size + builtin->axis + 1
: builtin->axis;
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(axis));
}
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
if (node->inputs->data[input_pos] != kTfLiteOptionalTensor &&
context->tensors[node->inputs->data[input_pos]].type ==
kTfLiteFloat16 &&
IsConstantTensor(&context->tensors[node->inputs->data[input_pos]])) {
input_tensor_flags |= NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION;
}
if (reg->builtin_code == kTfLiteBuiltinTransposeConv) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinFullyConnected &&
node->inputs->data[input_pos] == kTfLiteOptionalTensor) {
continue;
}
const auto input_index = node->inputs->data[input_pos];
if (reg->builtin_code == kTfLiteBuiltinConv2d && input_pos == 1) {
int densify_node_id = -1;
bool should_dequantize = false;
int dequantize_node_id =
non_const_dequantize_output_to_node_mapping_[input_index];
if (dequantize_node_id != -1) {
should_dequantize = true;
TfLiteNode* dequant_node;
TfLiteRegistration* reg;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, dequantize_node_id, &dequant_node, ®));
densify_node_id =
densify_output_to_node_mapping_[dequant_node->inputs->data[0]];
} else {
densify_node_id = densify_output_to_node_mapping_[input_index];
}
if (densify_node_id != -1) {
TF_LITE_ENSURE_STATUS(DensifyAndDequantizeConstTensor(
context, densify_node_id, should_dequantize, builder));
continue;
}
}
if (need_int8_conversion &&
(input_pos == 0 ||
reg->builtin_code == kTfLiteBuiltinFullyConnected ||
reg->builtin_code == kTfLiteBuiltinConv2d ||
reg->builtin_code == kTfLiteBuiltinDepthwiseConv2d ||
reg->builtin_code == kTfLiteBuiltinAdd ||
reg->builtin_code == kTfLiteBuiltinMul ||
reg->builtin_code == kTfLiteBuiltinSub ||
reg->builtin_code == kTfLiteBuiltinConcatenation ||
reg->builtin_code == kTfLiteBuiltinMaximum ||
reg->builtin_code == kTfLiteBuiltinMinimum ||
reg->builtin_code == kTfLiteBuiltinLeakyRelu ||
reg->builtin_code == kTfLiteBuiltinLess ||
reg->builtin_code == kTfLiteBuiltinLessEqual ||
reg->builtin_code == kTfLiteBuiltinPrelu ||
reg->builtin_code == kTfLiteBuiltinGreater ||
reg->builtin_code == kTfLiteBuiltinGreaterEqual ||
reg->builtin_code == kTfLiteBuiltinEqual ||
reg->builtin_code == kTfLiteBuiltinNotEqual ||
reg->builtin_code == kTfLiteBuiltinSelect)) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(
input_index, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_INT8_CONVERSION));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinLstm && isLstmFullKernel(node) &&
input_pos >= 20) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinLstm && isLstmBasicKernel(node)) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinUnidirectionalSequenceLstm) {
if (input_pos >= 20) {
continue;
}
if (input_index == kTfLiteOptionalTensor) {
TF_LITE_ENSURE_STATUS(builder.AddVectorFloat32Operand(nullptr, 0));
continue;
}
}
if ((reg->builtin_code == kTfLiteBuiltinSplit) &&
(input_index == node->inputs->data[0])) {
continue;
}
if ((reg->builtin_code == kTfLiteBuiltinPadv2 ||
reg->builtin_code == kTfLiteBuiltinPad) &&
node->inputs->size == 3 && input_pos == 2) {
const int constant_value_id = node->inputs->data[2];
if (constant_value_id == kTfLiteOptionalTensor) {
continue;
}
const TfLiteTensor constant_value = context->tensors[constant_value_id];
switch (constant_value.type) {
case kTfLiteFloat16:
if (constant_value.allocation_type == kTfLiteMmapRo) {
builder.AddScalarFloat32Operand(constant_value.data.f16->data);
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_TENSOR_FLOAT16);
}
break;
case kTfLiteFloat32:
if (constant_value.allocation_type == kTfLiteMmapRo) {
builder.AddScalarFloat32Operand(*constant_value.data.f);
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_FLOAT32);
}
break;
case kTfLiteUInt8:
if (constant_value.allocation_type == kTfLiteMmapRo) {
builder.AddScalarInt32Operand(
static_cast<int32_t>(*constant_value.data.uint8));
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_INT32);
}
break;
case kTfLiteInt8:
if (constant_value.allocation_type == kTfLiteMmapRo) {
if (need_int8_conversion) {
builder.AddScalarInt32Operand(
static_cast<int32_t>(*constant_value.data.int8) + 128);
} else {
builder.AddScalarInt32Operand(*constant_value.data.int8);
}
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_INT32);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported type of pad value for pad_v2\n");
return kTfLiteError;
}
continue;
}
if (input_index == kTfLiteOptionalTensor &&
(reg->builtin_code == kTfLiteBuiltinLstm ||
reg->builtin_code == kTfLiteBuiltinSvdf ||
reg->builtin_code == kTfLiteBuiltinBidirectionalSequenceLstm)) {
TF_LITE_ENSURE_STATUS(builder.AddVectorFloat32Operand(nullptr, 0));
} else if (reg->builtin_code == kTfLiteBuiltinResizeBilinear ||
reg->builtin_code == kTfLiteBuiltinResizeNearestNeighbor) {
if (input_pos == 0) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
} else if (reg->builtin_code == kTfLiteBuiltinTopkV2 && input_pos > 0) {
continue;
} else if (reg->builtin_code == kTfLiteBuiltinGather) {
if (input_pos == 0) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
continue;
} else if (reg->builtin_code == kTfLiteBuiltinExpandDims &&
input_pos == 1) {
continue;
} else if (reg->builtin_code == kTfLiteBuiltinBatchToSpaceNd &&
input_pos == 2) {
continue;
} else if (reg->builtin_code == kTfLiteBuiltinArgMin ||
reg->builtin_code == kTfLiteBuiltinArgMax) {
if (input_pos == 0) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
} else {
const int axis_id = node->inputs->data[1];
const TfLiteTensor& axis_tensor = context->tensors[axis_id];
switch (axis_tensor.type) {
case kTfLiteInt32:
if (axis_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
static_cast<int32_t>(*axis_tensor.data.i32)));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
axis_id, ANEURALNETWORKS_INT32));
}
break;
case kTfLiteInt64:
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
static_cast<int32_t>(*axis_tensor.data.i64)));
break;
default:
return kTfLiteError;
}
}
} else if (reg->builtin_code == kTfLiteBuiltinMaximum ||
reg->builtin_code == kTfLiteBuiltinMinimum) {
const TfLiteTensor& operand_tensor =
context->tensors[node->inputs->data[input_pos]];
if (operand_tensor.dims->size == 0) {
int tensor_index;
TF_LITE_ENSURE_EQ(context, operand_tensor.allocation_type,
kTfLiteMmapRo);
switch (operand_tensor.type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, operand_tensor.type, {1},
std::vector<float>(1, operand_tensor.data.f[0]),
operand_tensor.params, &tensor_index));
break;
case kTfLiteUInt8:
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, operand_tensor.type, {1},
std::vector<uint8_t>(1, operand_tensor.data.uint8[0]),
operand_tensor.params, &tensor_index));
break;
case kTfLiteInt8: {
auto params = operand_tensor.params;
if (params.scale == 0.0) {
params.scale = 1.0;
}
if (use_int8_asymm_signed) {
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
operand_tensor.type, {1},
std::vector<int8_t>(1, operand_tensor.data.int8[0]), params,
&tensor_index));
} else {
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, operand_tensor.type,
{1},
std::vector<int8_t>(1, operand_tensor.data.int8[0] + 128),
params, &tensor_index));
}
} break;
case kTfLiteInt32:
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, operand_tensor.type, {1},
std::vector<int32_t>(1, operand_tensor.data.i32[0]),
operand_tensor.params, &tensor_index));
break;
default:
return kTfLiteError;
}
} else {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
} else if ((reg->builtin_code == kTfLiteBuiltinReduceAny ||
reg->builtin_code == kTfLiteBuiltinReduceMax ||
reg->builtin_code == kTfLiteBuiltinReduceMin ||
reg->builtin_code == kTfLiteBuiltinReduceProd ||
reg->builtin_code == kTfLiteBuiltinSum ||
reg->builtin_code == kTfLiteBuiltinMean) &&
(input_pos == 1)) {
const TfLiteTensor& axis_tensor =
context->tensors[node->inputs->data[input_pos]];
if (axis_tensor.dims->size == 0) {
TF_LITE_ENSURE_STATUS(
builder.AddVectorInt32Operand(axis_tensor.data.i32, 1));
} else {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
} else if (reg->builtin_code == kTfLiteBuiltinFill) {
if (input_pos == 0) {
const int dims_id = node->inputs->data[0];
const TfLiteTensor& dims_tensor = context->tensors[dims_id];
switch (dims_tensor.type) {
case kTfLiteInt32:
TF_LITE_ENSURE_STATUS(
builder.AddTensorInput(input_index, hybrid_op));
break;
case kTfLiteInt64: {
const int dims_size = dims_tensor.dims->data[0];
std::vector<int32_t> dims_int32(dims_size);
std::copy(dims_tensor.data.i64, dims_tensor.data.i64 + dims_size,
dims_int32.begin());
int new_tensor_index = -1;
builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, dims_tensor.dims,
dims_int32, dims_tensor.params, &new_tensor_index);
} break;
default:
return kTfLiteError;
}
} else {
const int value_id = node->inputs->data[1];
const TfLiteTensor& value_tensor = context->tensors[value_id];
switch (value_tensor.type) {
case kTfLiteFloat32:
if (value_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(
builder.AddScalarFloat32Operand(*value_tensor.data.f));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
value_id, ANEURALNETWORKS_FLOAT32));
}
break;
case kTfLiteInt32:
if (value_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(
builder.AddScalarInt32Operand(*value_tensor.data.i32));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
value_id, ANEURALNETWORKS_INT32));
}
break;
case kTfLiteInt64:
if (value_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
static_cast<int32_t>(*value_tensor.data.i64)));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
value_id, ANEURALNETWORKS_INT32));
}
break;
default:
return kTfLiteError;
}
}
} else {
TF_LITE_ENSURE_STATUS(
builder.AddTensorInput(input_index, hybrid_op, input_tensor_flags));
}
}
int nn_op_type;
TF_LITE_ENSURE_STATUS(
Map(context, reg->builtin_code, reg->version, target_feature_level_,
{context, &builder, node, node_index, &model_state_outputs_,
&model_state_tfl_inputs_, &feedback_loops_, nnapi_errno},
&nn_op_type));
int output_tensor_flags = 0;
if (need_int8_conversion) {
output_tensor_flags |= NN_TENSOR_FLAG_INT8_CONVERSION;
}
if (use_int8_asymm_signed) {
output_tensor_flags |= NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED;
}
int fc_nn_intermediate_output_index = -1;
int mean_nn_intermediate_output_index = -1;
for (int output_pos = 0; output_pos < node->outputs->size; ++output_pos) {
auto output_index = node->outputs->data[output_pos];
if (reg->builtin_code == kTfLiteBuiltinLstm && isLstmBasicKernel(node)) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinFullyConnected &&
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data)
->keep_num_dims) {
auto& output_tensor = context->tensors[output_index];
int num_units = output_tensor.dims->data[output_tensor.dims->size - 1];
std::vector<uint32_t> output_dims(2);
output_dims[0] = NumElements(output_tensor.dims) / num_units;
output_dims[1] = num_units;
TF_LITE_ENSURE_STATUS(builder.AddIntermediateOutputTensor(
output_tensor.type, output_dims.size(), output_dims.data(),
output_tensor.params.scale, output_tensor.params.zero_point,
&fc_nn_intermediate_output_index));
} else if (reg->builtin_code == kTfLiteBuiltinMean &&
IsMeanWithDifferentInputOutputQuantization(context, node)) {
auto& input_tensor = context->tensors[node->inputs->data[0]];
auto& output_tensor = context->tensors[output_index];
TF_LITE_ENSURE_STATUS(builder.AddIntermediateOutputTensor(
output_tensor.type, output_tensor.dims->size,
reinterpret_cast<const uint32_t*>(output_tensor.dims->data),
input_tensor.params.scale, input_tensor.params.zero_point,
&mean_nn_intermediate_output_index, need_int8_conversion));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddTensorOutput(output_index, output_tensor_flags));
}
}
AddDequantizeOperatorsWhereNeeded(context, reg->builtin_code, node,
node_index, &builder, nnapi_errno);
TF_LITE_ENSURE_OK(context_,
builder.FinalizeAddOperation(nn_op_type, node_index));
if (fc_nn_intermediate_output_index > -1) {
TF_LITE_ENSURE_STATUS(builder.AppendReshape(
fc_nn_intermediate_output_index, node->outputs->data[0], node_index));
}
if (mean_nn_intermediate_output_index > -1) {
TF_LITE_ENSURE_STATUS(builder.AppendRequantize(
mean_nn_intermediate_output_index, node->outputs->data[0], node_index,
output_tensor_flags));
}
}
return kTfLiteOk;
}
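// Adds all ops and tensors, declares the model inputs and outputs (including
// state outputs), finalizes the NNAPI model, and allocates the shared
// input/output memory pools sized for the declared tensors.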
TfLiteStatus NNAPIDelegateKernel::BuildGraph(
TfLiteContext* context,
const StatefulNnApiDelegate::Options& delegate_options,
const TfLiteIntArray* input_tensors, const TfLiteIntArray* output_tensors,
int* nnapi_errno) {
TF_LITE_ENSURE_STATUS(AddOpsAndTensors(
context, nnapi_errno, delegate_options.allow_dynamic_dimensions));
std::vector<uint32_t> inputs;
inputs.reserve(input_tensors->size);
std::vector<uint32_t> outputs;
outputs.reserve(output_tensors->size);
size_t total_input_byte_size = 0;
for (int i : TfLiteIntArrayView(input_tensors)) {
if (i != kTfLiteOptionalTensor &&
context->tensors[i].allocation_type != kTfLiteMmapRo &&
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i) != -1) {
inputs.push_back(
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i));
if (context->tensors[i].buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
const TfLiteType nn_type_conversion =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(), i);
int tensor_size = 0;
if (nn_type_conversion == kTfLiteNoType) {
tensor_size =
std::max(context->tensors[i].bytes, tensor_max_size_hints_[i]);
} else {
size_t type_size;
TF_LITE_ENSURE_OK(
context, GetSizeOfType(context, nn_type_conversion, &type_size));
tensor_size = NumElements(&context->tensors[i]) * type_size;
}
total_input_byte_size += tensor_size;
total_input_byte_size += GetNumPaddingBytes(tensor_size);
}
}
size_t total_output_byte_size = 0;
for (int i : TfLiteIntArrayView(output_tensors)) {
const int output_tensor_ann_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i);
if (output_tensor_ann_index != -1) {
outputs.push_back(output_tensor_ann_index);
}
if (context->tensors[i].buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
size_t tensor_size =
std::max(context->tensors[i].bytes, tensor_max_size_hints_[i]);
total_output_byte_size += tensor_size;
total_output_byte_size += GetNumPaddingBytes(tensor_size);
}
for (int i = 0; i < model_state_outputs_.size(); i++) {
outputs.push_back(model_state_outputs_[i]);
auto tfl_state_idx = model_state_tfl_inputs_[i];
total_output_byte_size += context->tensors[tfl_state_idx].bytes;
total_output_byte_size +=
GetNumPaddingBytes(context->tensors[tfl_state_idx].bytes);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs(
nn_model_.get(), inputs.size(), inputs.data(), outputs.size(),
outputs.data()),
"identifying model inputs and outputs", nnapi_errno);
auto allow_fp16 =
context->allow_fp32_relax_to_fp16 | delegate_options.allow_fp16;
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI11) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16(
nn_model_.get(), allow_fp16),
"set relaxed computation mode for fp32 if possible", nnapi_errno);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi_->ANeuralNetworksModel_finish(nn_model_.get()),
"finalizing the model", nnapi_errno);
nn_input_memory_ =
std::make_unique<NNMemory>(nnapi_, "input_pool", total_input_byte_size);
nn_output_memory_ =
std::make_unique<NNMemory>(nnapi_, "output_pool", total_output_byte_size);
return kTfLiteOk;
}
void NNAPIDelegateKernel::LogCompilationInfoOnce(
const NnApi* nnapi, const ANeuralNetworksDiagnosticCompilationInfo* info) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"NNAPI SL compilation callback called.");
const int32_t session_id =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId(info);
const int32_t error_code =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode(info);
const uint64_t compilation_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos(
info);
const int64_t nnapi_version =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion(info);
const uint8_t model_arch_hash_first_byte =
*nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash(
info);
const std::string device_ids_string = std::string(
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds(info));
const ANeuralNetworksDiagnosticDataClass input_data_class =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass(
info);
const ANeuralNetworksDiagnosticDataClass output_data_class =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass(
info);
const bool is_caching_enabled =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled(info);
const bool is_control_flow_used =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed(
info);
TFLITE_LOG_PROD_ONCE(
TFLITE_LOG_INFO,
"Compilation info: getSessionId=%d getErrorCode=%d "
"getCompilationTimeNanos=%" PRIu64 " getNnApiVersion=%" PRId64
" getDeviceIds=%s getModelArchHash=%x getInputDataClass=%d "
"getOutputDataClass=%d isCachingEnabled=%s isControlFlowUser=%s",
session_id, error_code, compilation_time_ns, nnapi_version,
device_ids_string.c_str(), unsigned{model_arch_hash_first_byte},
input_data_class, output_data_class, is_caching_enabled ? "Y" : "N",
is_control_flow_used ? "Y" : "N");
}
void NNAPIDelegateKernel::LogExecutionInfoOnce(
const NnApi* nnapi, const ANeuralNetworksDiagnosticExecutionInfo* info) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO, "NNAPI SL execution callback called.");
const int32_t session_id =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId(info);
const int32_t error_code =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode(info);
const int64_t nnapi_version =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion(info);
const uint8_t model_arch_hash_first_byte =
*nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash(info);
const std::string device_ids_string = std::string(
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds(info));
const ANeuralNetworksDiagnosticDataClass input_data_class =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass(info);
const ANeuralNetworksDiagnosticDataClass output_data_class =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass(info);
const bool is_caching_enabled =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled(info);
const bool is_control_flow_used =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed(info);
const ANeuralNetworksDiagnosticExecutionMode execution_mode =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode(info);
const uint64_t runtime_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos(
info);
const uint64_t driver_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos(
info);
const uint64_t hardware_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos(
info);
TFLITE_LOG_PROD_ONCE(
TFLITE_LOG_INFO,
"Execution info: getSessionId=%d getErrorCode=%d "
"getNnApiVersion=%" PRId64
" getModelArchHash=%x getDeviceIds=%s getInputDataClass=%d "
"getOutputDataClass=%d isCachingEnabled=%s isControlFlowUsed=%s "
"getExecutionMode=%d getRuntimeExecutionTimeNanos=%" PRIu64
" getDriverExecutionTimeNanos=%" PRIu64
" getHardwareExecutionTimeNanos=%" PRIu64,
session_id, error_code, nnapi_version,
unsigned{model_arch_hash_first_byte}, device_ids_string.c_str(),
input_data_class, output_data_class, is_caching_enabled ? "Y" : "N",
is_control_flow_used ? "Y" : "N", execution_mode, runtime_time_ns,
driver_time_ns, hardware_time_ns);
}
}  // namespace nnapi
}  // namespace delegate
using ::tflite::delegate::nnapi::kMinSdkVersionForNNAPI;
using ::tflite::delegate::nnapi::kMinSdkVersionForNNAPI11;
using ::tflite::delegate::nnapi::kMinSdkVersionForNNAPI12;
using ::tflite::delegate::nnapi::NNAPIDelegateKernel;
StatefulNnApiDelegate::Data::Data(const NnApi* nnapi) : nnapi(nnapi) {}
StatefulNnApiDelegate::Data::Data(std::unique_ptr<const NnApi> nnapi)
: nnapi(nnapi.get()), owned_nnapi(std::move(nnapi)) {}
StatefulNnApiDelegate::Data::~Data() {
std::for_each(std::begin(delegate_state_cache),
std::end(delegate_state_cache),
[](const std::pair<int, NNAPIDelegateKernel*>& entry) {
delete entry.second;
});
}
void StatefulNnApiDelegate::Data::CacheDelegateKernel(
const TfLiteDelegateParams* delegate_params,
NNAPIDelegateKernel* delegate_state) {
const int cache_key = delegate_params->nodes_to_replace->data[0];
delegate_state_cache.emplace(cache_key, delegate_state);
}
NNAPIDelegateKernel* StatefulNnApiDelegate::Data::MaybeGetCachedDelegateKernel(
const TfLiteDelegateParams* delegate_params) {
const int cache_key = delegate_params->nodes_to_replace->data[0];
const auto cached_state = delegate_state_cache.find(cache_key);
if (cached_state != std::end(delegate_state_cache)) {
auto result = cached_state->second;
delegate_state_cache.erase(cached_state);
return result;
} else {
return nullptr;
}
}
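// Copies user-supplied options into the delegate data and wires up the
// TfLiteDelegate callbacks; allowing dynamic dimensions additionally sets the
// dynamic-tensor delegate flag (and, when no vendor plugin is supplied, the
// propagated-shapes flag).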
void StatefulNnApiDelegate::StatefulNnApiDelegateConstructorImpl(
const Options& options) {
if (options.accelerator_name) {
delegate_data_.accelerator_name = options.accelerator_name;
}
if (options.cache_dir) {
delegate_data_.cache_dir = options.cache_dir;
}
if (options.model_token) {
delegate_data_.model_token = options.model_token;
}
delegate_data_.execution_preference = options.execution_preference;
delegate_data_.disallow_nnapi_cpu = options.disallow_nnapi_cpu;
delegate_data_.max_number_delegated_partitions =
options.max_number_delegated_partitions;
delegate_data_.allow_fp16 = options.allow_fp16;
delegate_data_.execution_priority = options.execution_priority;
delegate_data_.max_compilation_timeout_duration_ns =
options.max_compilation_timeout_duration_ns;
delegate_data_.max_execution_timeout_duration_ns =
options.max_execution_timeout_duration_ns;
delegate_data_.max_execution_loop_timeout_duration_ns =
options.max_execution_loop_timeout_duration_ns;
if (delegate_data_.nnapi->android_sdk_version >= kMinSdkVersionForNNAPI11) {
delegate_data_.allow_dynamic_dimensions = options.allow_dynamic_dimensions;
}
delegate_data_.use_burst_computation = options.use_burst_computation;
delegate_data_.vendor_compilation_hints = options.vendor_compilation_hints;
delegate_data_.vendor_execution_hints = options.vendor_execution_hints;
delegate_data_.vendor_plugin = options.vendor_plugin;
delegate_data_.max_execution_cache_size = options.max_execution_cache_size;
delegate_data_.tensor_max_size_hints = options.tensor_max_size_hints;
delegate_data_.disable_debugging_diagnostics_callbacks =
options.disable_debugging_diagnostics_callbacks;
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Created TensorFlow Lite delegate for NNAPI.");
Prepare = DoPrepare;
CopyFromBufferHandle = DoCopyFromBufferHandle;
CopyToBufferHandle = DoCopyToBufferHandle;
FreeBufferHandle = DoFreeBufferHandle;
data_ = &delegate_data_;
if (delegate_data_.allow_dynamic_dimensions) {
flags |= kTfLiteDelegateFlagsAllowDynamicTensors;
if (!delegate_data_.vendor_plugin) {
flags |= kTfLiteDelegateFlagsRequirePropagatedShapes;
}
}
}
StatefulNnApiDelegate::StatefulNnApiDelegate(const NnApi* nnapi)
: StatefulNnApiDelegate(nnapi, Options()) {}
StatefulNnApiDelegate::StatefulNnApiDelegate(Options options)
: StatefulNnApiDelegate(NnApiImplementation(), options) {}
StatefulNnApiDelegate::StatefulNnApiDelegate(
const NnApiSLDriverImplFL5* nnapi_support_library_driver, Options options)
: TfLiteDelegate(TfLiteDelegateCreate()),
delegate_data_(
CreateNnApiFromSupportLibrary(nnapi_support_library_driver)) {
StatefulNnApiDelegateConstructorImpl(options);
}
StatefulNnApiDelegate::StatefulNnApiDelegate(const NnApi* nnapi,
Options options)
: TfLiteDelegate(TfLiteDelegateCreate()), delegate_data_(nnapi) {
StatefulNnApiDelegateConstructorImpl(options);
}
StatefulNnApiDelegate::StatefulNnApiDelegate()
: StatefulNnApiDelegate(Options()) {}
const StatefulNnApiDelegate::Options StatefulNnApiDelegate::GetOptions(
TfLiteDelegate* delegate) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
StatefulNnApiDelegate::Options options;
options.execution_preference = delegate_data->execution_preference;
options.accelerator_name = delegate_data->accelerator_name.empty()
? nullptr
: delegate_data->accelerator_name.c_str();
options.cache_dir = delegate_data->cache_dir.empty()
? nullptr
: delegate_data->cache_dir.c_str();
options.model_token = delegate_data->model_token.empty()
? nullptr
: delegate_data->model_token.c_str();
options.disallow_nnapi_cpu = delegate_data->disallow_nnapi_cpu;
options.max_number_delegated_partitions =
delegate_data->max_number_delegated_partitions;
options.allow_fp16 = delegate_data->allow_fp16;
options.execution_priority = delegate_data->execution_priority;
options.max_compilation_timeout_duration_ns =
delegate_data->max_compilation_timeout_duration_ns;
options.max_execution_timeout_duration_ns =
delegate_data->max_execution_timeout_duration_ns;
options.max_execution_loop_timeout_duration_ns =
delegate_data->max_execution_loop_timeout_duration_ns;
options.allow_dynamic_dimensions = delegate_data->allow_dynamic_dimensions;
options.use_burst_computation = delegate_data->use_burst_computation;
options.vendor_compilation_hints = delegate_data->vendor_compilation_hints;
options.vendor_execution_hints = delegate_data->vendor_execution_hints;
options.vendor_plugin = delegate_data->vendor_plugin;
options.max_execution_cache_size = delegate_data->max_execution_cache_size;
options.tensor_max_size_hints = delegate_data->tensor_max_size_hints;
options.disable_debugging_diagnostics_callbacks =
delegate_data->disable_debugging_diagnostics_callbacks;
return options;
}
const std::vector<StatefulNnApiDelegate::MemoryRegistration>&
StatefulNnApiDelegate::GetTensorMemoryMap(TfLiteDelegate* delegate) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
return delegate_data->tensor_memory_map;
}
delegates::Serialization* StatefulNnApiDelegate::GetCache(
TfLiteDelegate* delegate) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
return delegate_data->cache.get();
}
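// Registers an ANeuralNetworksMemory object and returns its buffer handle,
// reusing the first free slot in the tensor memory map when one is available.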
TfLiteBufferHandle StatefulNnApiDelegate::RegisterNnapiMemory(
ANeuralNetworksMemory* memory, CopyToHostTensorFnPtr callback,
void* callback_context) {
uint64_t timestamp = delegate_data_.next_buffer_handle_timestamp++;
int map_size = delegate_data_.tensor_memory_map.size();
for (int i = 0; i < map_size; i++) {
if (delegate_data_.tensor_memory_map[i].memory == nullptr) {
delegate_data_.tensor_memory_map[i] = {memory, callback, callback_context,
timestamp};
return i;
}
}
delegate_data_.tensor_memory_map.push_back(
{memory, callback, callback_context, timestamp});
return map_size;
}
TfLiteStatus StatefulNnApiDelegate::DoCopyFromBufferHandle(
TfLiteContext* context, TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle, TfLiteTensor* tensor) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
if (buffer_handle < 0 ||
buffer_handle >= delegate_data->tensor_memory_map.size()) {
return kTfLiteError;
}
auto memory = delegate_data->tensor_memory_map[buffer_handle].memory;
auto callback = delegate_data->tensor_memory_map[buffer_handle].callback;
auto callback_context =
delegate_data->tensor_memory_map[buffer_handle].callback_context;
if (!memory || !callback) {
return kTfLiteError;
}
return callback(tensor, memory, 0, tensor->bytes, callback_context);
}
TfLiteStatus StatefulNnApiDelegate::DoCopyToBufferHandle(
TfLiteContext* context, TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle, TfLiteTensor* tensor) {
return kTfLiteError;
}
void StatefulNnApiDelegate::DoFreeBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* handle) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
if (*handle >= 0 && *handle < delegate_data->tensor_memory_map.size()) {
delegate_data->tensor_memory_map[*handle] = {nullptr, nullptr, nullptr};
*handle = kTfLiteNullBufferHandle;
}
}
int StatefulNnApiDelegate::GetNnApiErrno() const {
return delegate_data_.nnapi_errno;
}
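// Partitions the supported nodes, initializes one delegate kernel per
// partition to ask the target devices which ops they can actually run, and
// caches kernels for fully supported partitions so their Init() work is not
// repeated. Re-partitions if some nodes turn out to be unsupported.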
TfLiteStatus StatefulNnApiDelegate::GetNodesSupportedByAccelerator(
TfLiteContext* context, TfLiteDelegate* delegate, const NnApi* nnapi,
const std::vector<int>& supported_nodes,
std::vector<int>* device_supported_nodes, int* num_partitions,
TfLiteDelegateParams** params_array, int* nnapi_errno) {
auto* delegate_data = static_cast<Data*>(delegate->data_);
auto supported_nodes_int_array = BuildTfLiteArray(supported_nodes);
TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
context, supported_nodes_int_array.get(), params_array, num_partitions));
delegate_data->delegate_state_cache.clear();
for (int idx = 0; idx < *num_partitions; idx++) {
const auto& partition_params = (*params_array)[idx];
std::unique_ptr<NNAPIDelegateKernel> kernel_state(
new NNAPIDelegateKernel(nnapi, delegate_data->vendor_plugin));
TfLiteDelegateParams params_with_delegate = partition_params;
params_with_delegate.delegate = delegate;
TF_LITE_ENSURE_STATUS(
kernel_state->Init(context, ¶ms_with_delegate, nnapi_errno));
std::vector<int> supported_partition_nodes;
TF_LITE_ENSURE_STATUS(
kernel_state->GetOperationsSupportedByTargetNnApiDevices(
context, &supported_partition_nodes, nnapi_errno));
device_supported_nodes->insert(device_supported_nodes->end(),
supported_partition_nodes.begin(),
supported_partition_nodes.end());
bool model_fully_supported = (supported_partition_nodes.size() ==
partition_params.nodes_to_replace->size);
if (model_fully_supported) {
delegate_data->CacheDelegateKernel(&partition_params,
kernel_state.release());
}
}
if (device_supported_nodes->size() != supported_nodes.size()) {
auto device_sup_nodes_int_array = BuildTfLiteArray(*device_supported_nodes);
TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
context, device_sup_nodes_int_array.get(), params_array,
num_partitions));
}
return kTfLiteOk;
}
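// If more partitions than |max_partitions| would be delegated, keep only the
// largest ones (by node count) and rebuild |nodes_to_delegate| from them.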
TfLiteStatus StatefulNnApiDelegate::LimitDelegatedPartitions(
int max_partitions,
std::vector<TfLiteDelegateParams> partition_params_array,
std::vector<int>* nodes_to_delegate) {
int num_partitions = partition_params_array.size();
if (max_partitions <= 0 || num_partitions <= max_partitions) {
return kTfLiteOk;
}
int number_delegated_partitions = std::count_if(
partition_params_array.begin(), partition_params_array.end(),
[nodes_to_delegate](const TfLiteDelegateParams& partition_params) {
return std::find(nodes_to_delegate->begin(), nodes_to_delegate->end(),
partition_params.nodes_to_replace->data[0]) !=
nodes_to_delegate->end();
});
if (number_delegated_partitions > max_partitions) {
std::sort(partition_params_array.begin(), partition_params_array.end(),
[](const TfLiteDelegateParams& left,
const TfLiteDelegateParams& right) -> bool {
return left.nodes_to_replace->size >
right.nodes_to_replace->size;
});
nodes_to_delegate->clear();
for (int i = 0; i < max_partitions; i++) {
const TfLiteDelegateParams& partition_params = partition_params_array[i];
nodes_to_delegate->insert(nodes_to_delegate->end(),
partition_params.nodes_to_replace->data,
partition_params.nodes_to_replace->data +
partition_params.nodes_to_replace->size);
}
}
return kTfLiteOk;
}
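// Runs NNAPI validation through FP16GraphPartitionHelper so that constant
// fp16 weights feeding DEQUANTIZE nodes can be consumed directly, and returns
// the supported nodes reported by the partition helper.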
static std::vector<int> GetSupportedOpsWithFp16WeightRemapping(
TfLiteContext* context, int target_feature_level,
bool is_accelerator_specified, int max_number_delegated_partitions) {
std::vector<int> supported_nodes;
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
const auto is_supported = NNAPIDelegateKernel::Validate(
context, registration, target_feature_level, node,
is_accelerator_specified, nullptr, &map_failures);
if (!is_supported) {
if (unsupported_details) {
for (auto& failure : map_failures) {
unsupported_details->append(failure.message.c_str());
}
}
return false;
}
return true;
};
delegates::FP16GraphPartitionHelper partition_helper(context,
node_supported_fn);
std::set<std::string> unsupported_nodes_info;
if (partition_helper.Partition(&unsupported_nodes_info) == kTfLiteOk) {
supported_nodes = partition_helper.GetNodesOfFirstNLargestPartitions();
}
return supported_nodes;
}
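// Entry point invoked by the TFLite runtime when the delegate is applied.
// Checks NNAPI availability and the target feature level, validates every
// node in the execution plan, optionally consults the serialization cache,
// and finally replaces the supported node subsets with the NNAPI delegate
// kernel.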
TfLiteStatus StatefulNnApiDelegate::DoPrepare(TfLiteContext* context,
TfLiteDelegate* delegate) {
auto* delegate_data = static_cast<Data*>(delegate->data_);
int* nnapi_errno = &(delegate_data->nnapi_errno);
const NnApi* nnapi = delegate_data->nnapi;
*nnapi_errno = 0;
if (nnapi->android_sdk_version < kMinSdkVersionForNNAPI ||
!nnapi->nnapi_exists) {
return kTfLiteOk;
}
int target_feature_level = nnapi->android_sdk_version;
const StatefulNnApiDelegate::Options delegate_options =
StatefulNnApiDelegate::GetOptions(delegate);
if (nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
if (ShouldUseTargetDevices(delegate_options, nnapi)) {
std::vector<ANeuralNetworksDevice*> devices;
TF_LITE_ENSURE_STATUS(
GetTargetDevices(context, delegate, nnapi, nnapi_errno, &devices));
if (devices.empty()) {
if (delegate_options.accelerator_name) {
return kTfLiteError;
} else {
return kTfLiteOk;
}
}
TF_LITE_ENSURE_STATUS(GetTargetFeatureLevel(
context, nnapi, devices, &target_feature_level, nnapi_errno));
} else {
uint32_t device_count = 0;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDeviceCount(&device_count),
"getting number of NNAPI devices", nnapi_errno);
if (device_count <= 1) {
return kTfLiteOk;
}
}
}
std::vector<int> supported_nodes;
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
IntArrayUniquePtr plan(TfLiteIntArrayCopy(execution_plan));
  const bool is_accelerator_specified = ShouldUseTargetDevices(
      delegate_options, nnapi, /*exclude_nnapi_reference=*/true);
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
std::vector<int> fp16_to_fp32(context->tensors_size, -1);
bool should_prune_fp16_dequantize = false;
for (int i = 0; i < plan->size; ++i) {
const int node_id = plan->data[i];
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_id, &node, ®istration));
if (IsDequantizeConstFloat16(context, node, registration)) {
should_prune_fp16_dequantize = true;
fp16_to_fp32[node->inputs->data[0]] = node->outputs->data[0];
}
}
if (should_prune_fp16_dequantize) {
supported_nodes = GetSupportedOpsWithFp16WeightRemapping(
context, target_feature_level, is_accelerator_specified,
delegate_options.max_number_delegated_partitions);
} else {
for (int node_index : TfLiteIntArrayView(plan.get())) {
TfLiteNode* node;
TfLiteRegistration* registration;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_index, &node, ®istration));
if (NNAPIDelegateKernel::Validate(
context, registration, target_feature_level, node,
is_accelerator_specified, delegate_options.vendor_plugin,
&map_failures)) {
supported_nodes.push_back(node_index);
}
#ifdef NNAPI_VERBOSE_VALIDATION
for (auto& failure : map_failures) {
TFLITE_LOG_PROD(
TFLITE_LOG_WARNING,
"Operator %s (v%d) refused by NNAPI delegate: %s",
tflite::EnumNameBuiltinOperator(
static_cast<BuiltinOperator>(registration->builtin_code)),
registration->version, failure.message.c_str());
}
map_failures.clear();
#endif
}
}
if (supported_nodes.empty()) {
return kTfLiteOk;
}
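  // Kernel registered for the delegated subsets: init reuses a kernel cached
  // by GetNodesSupportedByAccelerator when available, otherwise creates and
  // initializes a new NNAPIDelegateKernel; prepare and invoke forward to that
  // kernel, passing a pointer to the delegate-wide nnapi_errno.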
static const TfLiteRegistration nnapi_delegate_kernel = {
.init = [](TfLiteContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteDelegateParams* params =
reinterpret_cast<const TfLiteDelegateParams*>(buffer);
auto* delegate_data = static_cast<Data*>(params->delegate->data_);
int* nnapi_errno = &(delegate_data->nnapi_errno);
NNAPIDelegateKernel* kernel_state =
delegate_data->MaybeGetCachedDelegateKernel(params);
if (!kernel_state) {
kernel_state = new NNAPIDelegateKernel(delegate_data->nnapi,
delegate_data->vendor_plugin);
kernel_state->Init(context, params, nnapi_errno);
}
return kernel_state;
},
.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<NNAPIDelegateKernel*>(buffer);
},
.prepare = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus {
NNAPIDelegateKernel* state =
reinterpret_cast<NNAPIDelegateKernel*>(node->user_data);
int* nnapi_errno =
&(static_cast<Data*>(node->delegate->data_)->nnapi_errno);
return state->Prepare(context, node, nnapi_errno);
},
.invoke = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus {
NNAPIDelegateKernel* state =
reinterpret_cast<NNAPIDelegateKernel*>(node->user_data);
int* nnapi_errno =
&(static_cast<Data*>(node->delegate->data_)->nnapi_errno);
return state->Invoke(context, node, nnapi_errno);
},
.profiling_string = nullptr,
.builtin_code = kTfLiteBuiltinDelegate,
.custom_name = "TfLiteNnapiDelegate",
.version = 1,
};
const char* cache_dir = delegate_options.cache_dir;
const char* model_token = delegate_options.model_token;
delegates::SerializationParams params = {model_token, cache_dir};
if (nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12 && cache_dir &&
model_token) {
delegate_data->cache = std::make_unique<delegates::Serialization>(params);
}
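  // If a compilation cache is available and a delegated-node list was saved by
  // a previous run for this accelerator, reuse it directly and skip
  // partitioning.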
delegates::Serialization* cache_ptr = delegate_data->cache.get();
if (cache_ptr) {
std::string accelerator_id = NnApiBackendId(delegate_options);
TfLiteIntArray* cached_nodes_to_delegate = nullptr;
if (delegates::GetDelegatedNodes(context, cache_ptr, accelerator_id,
&cached_nodes_to_delegate) == kTfLiteOk) {
if (cached_nodes_to_delegate->size == 0) return kTfLiteOk;
auto status = context->ReplaceNodeSubsetsWithDelegateKernels(
context, nnapi_delegate_kernel, cached_nodes_to_delegate, delegate);
TfLiteIntArrayFree(cached_nodes_to_delegate);
return status;
}
}
std::vector<int> nodes_to_delegate;
int num_partitions;
TfLiteDelegateParams* params_array;
if (is_accelerator_specified &&
nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
TF_LITE_ENSURE_STATUS(GetNodesSupportedByAccelerator(
context, delegate, nnapi, supported_nodes, &nodes_to_delegate,
&num_partitions, ¶ms_array, nnapi_errno));
} else {
nodes_to_delegate = supported_nodes;
auto supported_nodes_int_array = BuildTfLiteArray(supported_nodes);
TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
context, supported_nodes_int_array.get(), ¶ms_array,
&num_partitions));
}
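  // The fp16->fp32 weight remapping applied above is only kept if every
  // remapped node ends up delegated. Otherwise, point the affected inputs back
  // at the fp32 outputs of their DEQUANTIZE nodes and return without
  // delegating anything.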
if (should_prune_fp16_dequantize &&
supported_nodes.size() != nodes_to_delegate.size()) {
for (int execution_plan_index = 0; execution_plan_index < plan->size;
++execution_plan_index) {
int node_index = plan->data[execution_plan_index];
TfLiteNode* node = nullptr;
TfLiteRegistration* reg = nullptr;
TF_LITE_ENSURE_STATUS(
context->GetNodeAndRegistration(context, node_index, &node, ®));
if (reg->builtin_code == kTfLiteBuiltinDequantize) continue;
for (int i = 0; i < node->inputs->size; ++i) {
const int original_input_idx = node->inputs->data[i];
if (original_input_idx == kTfLiteOptionalTensor) continue;
if (context->tensors[original_input_idx].type == kTfLiteFloat16 &&
fp16_to_fp32[original_input_idx] != -1) {
node->inputs->data[i] = fp16_to_fp32[original_input_idx];
}
}
}
return kTfLiteOk;
}
TF_LITE_ENSURE_STATUS(
LimitDelegatedPartitions(delegate_options.max_number_delegated_partitions,
std::vector<TfLiteDelegateParams>(
params_array, params_array + num_partitions),
&nodes_to_delegate));
auto nodes_to_delegate_int_array = BuildTfLiteArray(nodes_to_delegate);
if (cache_ptr) {
std::string accelerator_id = NnApiBackendId(delegate_options);
if (delegates::SaveDelegatedNodes(context, cache_ptr, accelerator_id,
nodes_to_delegate_int_array.get()) !=
kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "Could not save delegated nodes");
}
}
if (nodes_to_delegate_int_array->size == 0) {
return kTfLiteOk;
} else {
return context->ReplaceNodeSubsetsWithDelegateKernels(
context, nnapi_delegate_kernel, nodes_to_delegate_int_array.get(),
delegate);
}
}
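// Returns a process-wide singleton delegate constructed with default options.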
TfLiteDelegate* NnApiDelegate() {
static StatefulNnApiDelegate* delegate = new StatefulNnApiDelegate();
return delegate;
}
}  // namespace tflite

#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include <sys/mman.h>
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_plugin.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
MATCHER(QuantizedNear, "") {
const int diff = abs(std::get<0>(arg) - std::get<1>(arg));
if (diff > 1) {
*result_listener << "Quantized values can be at most off by one: " << diff;
return false;
}
return true;
}
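// SingleOpModel variant that builds the interpreter and then applies a
// StatefulNnApiDelegate owned by the model, so each test exercises the NNAPI
// delegation path.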
class SingleOpModelWithNNAPI : public SingleOpModel {
public:
SingleOpModelWithNNAPI() { options_.disallow_nnapi_cpu = false; }
~SingleOpModelWithNNAPI() { stateful_delegate_.reset(); }
explicit SingleOpModelWithNNAPI(
const StatefulNnApiDelegate::Options& options) {
options_ = options;
options_.disallow_nnapi_cpu = false;
}
TfLiteStatus ResizeInputTensor(int tensor_index,
const std::vector<int>& dims) {
return interpreter_->ResizeInputTensor(tensor_index, dims);
}
StatefulNnApiDelegate* GetDelegate() { return stateful_delegate_.get(); }
void SetBufferHandle(int index, TfLiteBufferHandle handle) {
interpreter_->SetBufferHandle(index, handle, stateful_delegate_.get());
}
void MarkInputTensorDataStale(int index) {
interpreter_->tensor(index)->data_is_stale = true;
}
TfLiteStatus AllocateTensors() { return interpreter_->AllocateTensors(); }
void SetTensorMaxSize(uint32_t tensor_index, size_t max_size) {
options_.tensor_max_size_hints.emplace(tensor_index, max_size);
}
void ApplyNNAPIDelegate() {
stateful_delegate_ = std::make_unique<StatefulNnApiDelegate>(options_);
SetDelegate(stateful_delegate_.get());
ApplyDelegate();
}
protected:
void SetData(int index, TensorType type, const std::vector<float>& data) {
switch (type) {
case TensorType_FLOAT32:
PopulateTensor(index, data);
break;
case TensorType_INT32:
QuantizeAndPopulate<int32_t>(index, data);
break;
case TensorType_UINT8:
QuantizeAndPopulate<uint8_t>(index, data);
break;
case TensorType_INT8:
QuantizeAndPopulate<int8_t>(index, data);
break;
default:
FAIL() << "Type not supported: " << type;
break;
}
}
void GetData(int index, TensorType type, std::vector<float>* output) {
switch (type) {
case TensorType_FLOAT32:
*output = ExtractVector<float>(index);
break;
case TensorType_UINT8:
*output = Dequantize<uint8_t>(ExtractVector<uint8_t>(index),
GetScale(index), GetZeroPoint(index));
break;
default:
FAIL() << "Type not supported: " << type;
break;
}
}
void BuildInterpreterWithNNAPI(std::vector<std::vector<int>> input_shapes,
bool allow_fp32_relax_to_fp16 = false,
bool apply_delegate = true) {
    BuildInterpreter(input_shapes, /*num_threads=*/-1,
                     allow_fp32_relax_to_fp16, /*apply_delegate=*/false,
                     /*allocate_and_delegate=*/true);
if (apply_delegate) {
ApplyNNAPIDelegate();
}
}
private:
StatefulNnApiDelegate::Options options_;
std::unique_ptr<StatefulNnApiDelegate> stateful_delegate_;
};
class FloatAddOpModel : public SingleOpModelWithNNAPI {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false) {
Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
}
FloatAddOpModel(const StatefulNnApiDelegate::Options& options,
const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false)
: SingleOpModelWithNNAPI(options) {
Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
private:
void Init(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)},
allow_fp32_relax_to_fp16);
}
};
TEST(NNAPIDelegate, AddWithNoActivation) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, AddScalarWithNoActivation) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.3, 0.8, 0.8}));
}
TEST(NNAPIDelegate, AddWithNoActivationRelaxed) {
FloatAddOpModel m(
{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE, true);
m.PopulateTensor<float>(m.input1(), {-2.0, -1.0, 1.0, 2.0});
m.PopulateTensor<float>(m.input2(), {1.0, 2.0, 3.0, 4.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.0, 1.0, 4.0, 6.0}));
}
TEST(NNAPIDelegate, AddWithRelu) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_RELU);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, ResizeInputTensorsWorks) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
}
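// Exercises dynamic batch support: the tensors are declared with a -1 batch
// dimension in their shape signature, allow_dynamic_dimensions is enabled,
// and max_execution_cache_size is set to 1 so alternating input shapes also
// exercises the execution cache.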
TEST(NNAPIDelegate, ResizeDynamicBatchInputTensorsWorks) {
StatefulNnApiDelegate::Options options;
options.allow_dynamic_dimensions = true;
options.max_execution_cache_size = 1;
  FloatAddOpModel m(
      options,
      {TensorType_FLOAT32, /*shape=*/{1, 3, 2, 1}, /*min=*/0.0f, /*max=*/0.0f,
       /*scale=*/0.0f, /*zero_point=*/0, /*per_channel_quantization=*/false,
       /*per_channel_quantization_scales=*/{},
       /*per_channel_quantization_offsets=*/{}, /*channel_index=*/0,
       /*traversal_order=*/{}, /*format=*/{}, /*block_size=*/{},
       /*block_map=*/{}, /*shape_signature=*/{1, -1, 2, 1}},
      {TensorType_FLOAT32, /*shape=*/{1, 3, 2, 1}, /*min=*/0.0f, /*max=*/0.0f,
       /*scale=*/0.0f, /*zero_point=*/0, /*per_channel_quantization=*/false,
       /*per_channel_quantization_scales=*/{},
       /*per_channel_quantization_offsets=*/{}, /*channel_index=*/0,
       /*traversal_order=*/{}, /*format=*/{}, /*block_size=*/{},
       /*block_map=*/{}, /*shape_signature=*/{1, -1, 2, 1}},
      {TensorType_FLOAT32, /*shape=*/{}, /*min=*/0.0f, /*max=*/0.0f,
       /*scale=*/0.0f, /*zero_point=*/0, /*per_channel_quantization=*/false,
       /*per_channel_quantization_scales=*/{},
       /*per_channel_quantization_offsets=*/{}, /*channel_index=*/0,
       /*traversal_order=*/{}, /*format=*/{}, /*block_size=*/{},
       /*block_map=*/{}, /*shape_signature=*/{1, -1, 2, 1}},
      ActivationFunctionType_NONE);
auto RunTestCase1 = [&m]() {
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
};
auto RunTestCase2 = [&m]() {
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
};
RunTestCase1();
RunTestCase1();
RunTestCase2();
RunTestCase1();
}
TEST(NNAPIDelegate, StatefulDelegate) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithAcceleratorName) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.accelerator_name = "nnapi-reference";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithInvalidAcceleratorName) {
if (!NnApiImplementation()->ANeuralNetworksDevice_getName) {
GTEST_SKIP();
}
testing::internal::CaptureStderr();
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.accelerator_name = "foo";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
EXPECT_THAT(testing::internal::GetCapturedStderr(),
testing::HasSubstr(
"Could not find the specified NNAPI accelerator: foo"));
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithCompilationCaching) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.cache_dir = "/data/local/tmp";
options.model_token = "NNAPIDelegate.StatefulDelegateWithCompilationCaching";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithQoS) {
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.execution_priority = ANEURALNETWORKS_PRIORITY_HIGH;
options.max_compilation_timeout_duration_ns = UINT64_MAX;
options.max_execution_timeout_duration_ns = UINT64_MAX;
options.max_execution_loop_timeout_duration_ns = UINT64_MAX;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
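// Exercises the RegisterNnapiMemory / buffer-handle path: input1 lives in an
// ASharedMemory region registered with the delegate together with a callback
// that copies the data back into the TfLiteTensor when CopyFromBufferHandle
// runs. The test is disabled by default (DISABLED_ prefix).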
TEST(NNAPIDelegate, DISABLED_StatefulDelegateWithBufferHandles) {
if (!NnApiImplementation()->ASharedMemory_create ||
!NnApiImplementation()->ANeuralNetworksMemory_createFromFd) {
GTEST_SKIP();
}
StatefulNnApiDelegate::Options options;
options.disallow_nnapi_cpu = false;
options.max_execution_cache_size = 2;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
auto* delegate = m.GetDelegate();
constexpr auto kInput1ByteSize = 4 * sizeof(float);
ANeuralNetworksMemory* input1_memory = nullptr;
int fd =
NnApiImplementation()->ASharedMemory_create("input1", kInput1ByteSize);
EXPECT_GE(fd, 0);
void* input1_memory_data =
mmap(nullptr, kInput1ByteSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
EXPECT_TRUE(input1_memory_data != nullptr);
float input1_data[] = {-2.0, 0.2, 0.7, 0.8};
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
int result = NnApiImplementation()->ANeuralNetworksMemory_createFromFd(
kInput1ByteSize, PROT_READ, fd, 0, &input1_memory);
EXPECT_EQ(result, ANEURALNETWORKS_NO_ERROR);
ASSERT_NE(input1_memory, nullptr);
struct DummyMemoryContext {
ANeuralNetworksMemory* memory_handle;
void* memory_data;
size_t byte_size;
};
DummyMemoryContext memory_context = {input1_memory, input1_memory_data,
kInput1ByteSize};
static StatefulNnApiDelegate::CopyToHostTensorFnPtr memory_callback =
[](TfLiteTensor* tensor, ANeuralNetworksMemory* memory,
size_t memory_offset, size_t byte_size,
void* callback_context) -> TfLiteStatus {
auto memory_context =
reinterpret_cast<DummyMemoryContext*>(callback_context);
if (memory != memory_context->memory_handle ||
memory_offset + byte_size > memory_context->byte_size) {
return kTfLiteError;
}
memcpy(
tensor->data.raw,
reinterpret_cast<uint8_t*>(memory_context->memory_data) + memory_offset,
byte_size);
return kTfLiteOk;
};
auto input1_handle = delegate->RegisterNnapiMemory(
input1_memory, memory_callback, &memory_context);
m.SetBufferHandle(m.input1(), input1_handle);
m.MarkInputTensorDataStale(m.input1());
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
for (int i = 0; i < 10; i++) {
input1_data[0] = -2.0 + i;
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
m.MarkInputTensorDataStale(m.input1());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
}
for (int i = 0; i < 10; i++) {
input1_data[0] = -2.0 + i;
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
auto input1_handle = delegate->RegisterNnapiMemory(
input1_memory, memory_callback, &memory_context);
m.SetBufferHandle(m.input1(), input1_handle);
m.MarkInputTensorDataStale(m.input1());
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
}
}
class FloatMulOpModel : public SingleOpModelWithNNAPI {
public:
FloatMulOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MUL, BuiltinOptions_MulOptions,
CreateMulOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, MulWithNoActivation) {
FloatMulOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
}
class FloatPoolingOpModel : public SingleOpModelWithNNAPI {
public:
FloatPoolingOpModel(BuiltinOperator type, const TensorData& input,
int filter_width, int filter_height,
const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
type, BuiltinOptions_Pool2DOptions,
CreatePool2DOptions(builder_, Padding_VALID, 2, 2, filter_width,
filter_height, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
TEST(NNAPIDelegate, AveragePoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_AVERAGE_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2.75, 5.75}));
}
TEST(NNAPIDelegate, MaxPoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_MAX_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10}));
}
TEST(NNAPIDelegate, L2PoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_L2_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3.5, 6.5}));
}
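// Conv2D model whose bias tensor is added automatically: a float bias for
// float inputs, otherwise an int32 bias whose scale is
// input_scale * filter_scale as required for quantized convolution.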
class ConvolutionOpModel : public SingleOpModelWithNNAPI {
public:
ConvolutionOpModel(
const TensorData& input, const TensorData& filter,
const TensorData& output, int stride_width = 2, int stride_height = 2,
enum Padding padding = Padding_VALID,
enum ActivationFunctionType activation = ActivationFunctionType_NONE,
int dilation_width_factor = 1, int dilation_height_factor = 1)
: input_type_(input.type), filter_type_(filter.type) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[0];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(
builder_, padding, stride_width, stride_height, activation,
dilation_width_factor, dilation_height_factor)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetFilter(std::initializer_list<float> data) {
SetData(filter_, filter_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
std::vector<uint8_t> GetQuantizedOutput() {
if (input_type_ == TensorType_FLOAT32) {
return {};
} else {
return ExtractVector<uint8_t>(output_);
}
}
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType filter_type_;
};
TEST(ConvolutionOpTest, SimpleTestQuantized) {
ConvolutionOpModel m({TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
},
1e-5)));
EXPECT_THAT(m.GetQuantizedOutput(), ElementsAreArray({
145, 129, 132,
145, 129, 132,
144, 131, 130,
164, 131, 130,
}));
}
TEST(ConvolutionOpTest, SimpleTestQuantizedGrouped) {
ConvolutionOpModel m({TensorType_UINT8, {2, 2, 2, 2}, -63.5, 64},
{TensorType_UINT8, {2, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 2,
23, 6
},
1e-5)));
EXPECT_THAT(m.GetQuantizedOutput(), ElementsAreArray({
145, 129,
150, 133,
}));
}
TEST(ConvolutionOpTest, FloatInputQuantizedWeights) {
ConvolutionOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_UINT8, {3, 2, 2, 1}, 0, 64},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 1, 1, 2,
2, 2, 2, 1,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
0, 1, 0, 1,
0, 0, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 5, 7,
16, 5, 6,
17, 6, 6,
37, 10, 10,
},
0.2)));
}
TEST(ConvolutionOpTest, NoActivation) {
ConvolutionOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
}));
}
TEST(ConvolutionOpTest, SimpleTestQuantizedOutputMultiplierGreaterThan1) {
ConvolutionOpModel quant_op({TensorType_UINT8, {2, 2, 4, 1}, -128.5, 128},
{TensorType_UINT8, {3, 2, 2, 1}, -128.5, 128},
{TensorType_UINT8, {}, -127, 128});
ConvolutionOpModel float_op({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}});
std::initializer_list<float> input = {
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
};
std::initializer_list<float> filter = {
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
};
std::initializer_list<float> bias = {1, 2, 3};
quant_op.SetInput(input);
quant_op.SetFilter(filter);
quant_op.SetBias(bias);
ASSERT_EQ(quant_op.Invoke(), kTfLiteOk);
float_op.SetInput(input);
float_op.SetFilter(filter);
float_op.SetBias(bias);
ASSERT_EQ(float_op.Invoke(), kTfLiteOk);
EXPECT_THAT(quant_op.GetOutput(),
ElementsAreArray(ArrayFloatNear(float_op.GetOutput(), 1)));
}
TEST(ConvolutionOpTest, SimpleTestFloatWithDilation) {
const int depth = 1;
const int image_width = 9;
const int image_height = 9;
const int image_batch_count = 1;
const int filter_size = 3;
const int filter_count = 1;
const int stride_width = 1;
const int stride_height = 1;
const int dilation_width_factor = 3;
const int dilation_height_factor = 3;
const Padding padding = Padding_VALID;
ConvolutionOpModel m(
{TensorType_FLOAT32,
{image_batch_count, image_height, image_width, depth}},
{TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
{TensorType_FLOAT32, {}}, stride_width, stride_height, padding,
ActivationFunctionType_NONE, dilation_width_factor,
dilation_height_factor);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
m.SetBias({0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
class QuantizedConvolutionOpModel : public ConvolutionOpModel {
public:
using ConvolutionOpModel::ConvolutionOpModel;
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
void SetFilter(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(filter_, data);
}
void SetBias(std::initializer_list<float> data) {
QuantizeAndPopulate<int32_t>(bias_, data);
}
std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
TEST(ConvolutionOpTest, SimpleTestQuantizedWithDilation) {
const int depth = 1;
const int image_width = 9;
const int image_height = 9;
const int image_batch_count = 1;
const int filter_size = 3;
const int filter_count = 1;
const int stride_width = 1;
const int stride_height = 1;
const int dilation_width_factor = 3;
const int dilation_height_factor = 3;
const Padding padding = Padding_VALID;
ConvolutionOpModel m({TensorType_UINT8,
{image_batch_count, image_height, image_width, depth},
0,
127.5},
{TensorType_UINT8,
{depth, filter_size, filter_size, filter_count},
0,
127.5},
{TensorType_UINT8, {}, 0, 255}, stride_width,
stride_height, padding, ActivationFunctionType_NONE,
dilation_width_factor, dilation_height_factor);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
m.SetBias({0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetQuantizedOutput(),
ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
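// Conv2D model with a constant per-channel quantized int8 filter; the int32
// bias is also constant and uses one scale per output channel
// (input_scale * per_channel_filter_scale).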
class PerChannelQuantizedConvolutionWithConstantFilterOpModel
: public SingleOpModelWithNNAPI {
public:
PerChannelQuantizedConvolutionWithConstantFilterOpModel(
const TensorData& input, const TensorData& filter,
std::initializer_list<int8_t> filter_data,
std::initializer_list<int32_t> bias_data, const TensorData& output,
int stride_width = 2, int stride_height = 2,
enum Padding padding = Padding_VALID,
enum ActivationFunctionType activation = ActivationFunctionType_NONE,
int dilation_width_factor = 1, int dilation_height_factor = 1)
: input_type_(input.type), filter_type_(filter.type) {
CHECK(filter.per_channel_quantization);
input_ = AddInput(input);
filter_ = AddConstInput(filter, filter_data);
const int bias_size = GetShape(filter_)[0];
const int num_channels = filter.per_channel_quantization_scales.size();
const std::vector<int64_t> bias_offsets(num_channels, 0);
std::vector<float> bias_scales(num_channels);
for (int i = 0; i < num_channels; i++) {
bias_scales[i] = input.scale * filter.per_channel_quantization_scales[i];
}
const TensorData bias{TensorType_INT32,
{bias_size},
0,
0,
0,
0,
true,
bias_scales,
bias_offsets,
0};
bias_ = AddConstInput(bias, bias_data);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(
builder_, padding, stride_width, stride_height, activation,
dilation_width_factor, dilation_height_factor)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<int8_t>(input_, data);
}
std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); }
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType filter_type_;
};
TEST(ConvolutionOpTest, SimplePerChannelTest) {
PerChannelQuantizedConvolutionWithConstantFilterOpModel m(
{TensorType_INT8, {1, 2, 3, 2}, -63.5, 64, 0.5, -1},
      {TensorType_INT8,
       /*shape=*/{2, 2, 2, 2},
       /*min=*/0,
       /*max=*/0,
       /*scale=*/0,
       /*zero_point=*/0,
       /*per_channel_quantization=*/true,
       /*per_channel_quantization_scales=*/{1, 2},
       /*per_channel_quantization_offsets=*/{0, 0},
       /*channel_index=*/0},
{
1, 2,
3, 4,
3, 4,
5, 6,
4, 4,
3, 3,
2, 2,
1, 1,
},
      /*bias_data=*/{6, -2},
      /*output=*/{TensorType_INT8, {}, -63.5, 64, 0.5, -1},
      /*stride_width=*/1, /*stride_height=*/1);
m.SetInput({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::Pointwise(QuantizedNear(), {61, 127, -115, -93}));
}
class DepthwiseConvolutionOpModel : public SingleOpModelWithNNAPI {
public:
DepthwiseConvolutionOpModel(const TensorData& input, const TensorData& filter,
const TensorData& output)
: input_type_(input.type) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[3];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
int input_depth = GetShape(input_)[3];
int output_depth = GetShape(filter_)[3];
int depth_mul = output_depth / input_depth;
SetBuiltinOp(
BuiltinOperator_DEPTHWISE_CONV_2D,
BuiltinOptions_DepthwiseConv2DOptions,
CreateDepthwiseConv2DOptions(builder_, Padding_VALID, 1, 1, depth_mul,
ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetFilter(std::initializer_list<float> data) {
SetData(filter_, input_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, DepthwiseConv2DWithNoActivation) {
DepthwiseConvolutionOpModel m({TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4}},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 2, 7, 8,
3, 4, 9, 10,
5, 6, 11, 12,
});
m.SetFilter({
1, 2, 3, 4,
-9, 10, -11, 12,
5, 6, 7, 8,
13, -14, 15, -16,
});
m.SetBias({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
71, -34, 99, -20,
91, -26, 127, -4,
}));
}
TEST(QuantizedDepthwiseConv2DTest, FilterMultiplierGreaterThan1) {
DepthwiseConvolutionOpModel quant_op(
{TensorType_UINT8, {1, 3, 2, 2}, -128.5, 128},
{TensorType_UINT8, {1, 2, 2, 4}, -128.5, 128},
{TensorType_UINT8, {}, -127, 128});
DepthwiseConvolutionOpModel float_op({TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4}},
{TensorType_FLOAT32, {}});
std::initializer_list<float> input = {
1, 2, 7, 8,
3, 4, 9, 10,
5, 6, 11, 12,
};
std::initializer_list<float> filter = {
1, 2, 3, 4,
-9, 10, -11, 12,
5, 6, 7, 8,
13, -14, 15, -16,
};
std::initializer_list<float> bias = {1, 2, 3, 4};
quant_op.SetInput(input);
quant_op.SetFilter(filter);
quant_op.SetBias(bias);
ASSERT_EQ(quant_op.Invoke(), kTfLiteOk);
float_op.SetInput(input);
float_op.SetFilter(filter);
float_op.SetBias(bias);
ASSERT_EQ(float_op.Invoke(), kTfLiteOk);
EXPECT_THAT(quant_op.GetOutput(),
ElementsAreArray(ArrayFloatNear(float_op.GetOutput(), 1)));
}
class FullyConnectedOpModel : public SingleOpModelWithNNAPI {
public:
FullyConnectedOpModel(
const TensorData& input, const TensorData& weights,
const TensorData& output,
enum ActivationFunctionType activation = ActivationFunctionType_NONE)
: input_type_(input.type), weights_type_(weights.type) {
input_ = AddInput(input);
weights_ = AddInput(weights);
const int units = weights.shape[0];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {units}});
} else {
auto bias_scale = GetScale(input_) * GetScale(weights_);
TensorData bias{TensorType_INT32, {units}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_FULLY_CONNECTED,
BuiltinOptions_FullyConnectedOptions,
CreateFullyConnectedOptions(builder_, activation).Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(weights_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetWeights(std::initializer_list<float> data) {
SetData(weights_, weights_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
protected:
int input_;
int weights_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType weights_type_;
};
TEST(FullyConnectedOpTest, SimpleTest) {
FullyConnectedOpModel m({TensorType_FLOAT32, {2, 10}},
{TensorType_FLOAT32, {3, 10}},
{TensorType_FLOAT32});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
});
m.SetBias({1, 2, 3});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre(24, 25, 26, 58, 59, 60));
}
TEST(FullyConnectedOpTest, FloatInputQuantizedWeights) {
FullyConnectedOpModel m({TensorType_FLOAT32, {2, 10}},
{TensorType_UINT8, {3, 10}, 0, 64},
{TensorType_FLOAT32});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
});
m.SetBias({1, 2, 3});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({24, 25, 26, 58, 59, 60}, 1.3)));
}
TEST(FullyConnectedOpTest, QuantizedOutputMultiplierGreaterThan1) {
FullyConnectedOpModel m(
{TensorType_UINT8, {2, 10}, -127, 128},
{TensorType_UINT8, {3, 10}, -127, 128},
{TensorType_UINT8, {}, -63.5, 64});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
});
m.SetBias({1, 2, 3});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
24, 25, 26,
58, 59, 60,
})));
}
class SoftmaxOpModel : public SingleOpModelWithNNAPI {
public:
SoftmaxOpModel(const TensorData& input, float beta) {
input_ = AddInput(input);
output_ = AddOutput(input);
SetBuiltinOp(BuiltinOperator_SOFTMAX, BuiltinOptions_SoftmaxOptions,
CreateSoftmaxOptions(builder_, beta).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(SoftmaxOpTest, SimpleTest) {
SoftmaxOpModel m({TensorType_FLOAT32, {2, 5}}, 1.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231},
1e-6)));
}
TEST(SoftmaxOpTest, Beta2) {
SoftmaxOpModel m({TensorType_FLOAT32, {1, 5}}, 2.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.000290076, 0.002143387, 0.015837606, 0.117024957, 0.864703974},
1e-6)));
}
TEST(SoftmaxOpTest, 3dInput) {
SoftmaxOpModel m({TensorType_FLOAT32, {2, 2, 5}}, 1.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
5.0, 1.0, 2.0, 3.0, 4.0,
-5.0, -1.0, -2.0, -3.0, -4.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231,
0.636408647, 0.011656231, 0.031684921, 0.086128544, 0.234121657,
0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921},
1e-6)));
}
TEST(SoftmaxOpTest, 4dInput) {
SoftmaxOpModel m({TensorType_FLOAT32, {2, 2, 1, 5}}, 1.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
5.0, 1.0, 2.0, 3.0, 4.0,
-5.0, -1.0, -2.0, -3.0, -4.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231,
0.636408647, 0.011656231, 0.031684921, 0.086128544, 0.234121657,
0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921},
1e-6)));
}
class ReshapeOpModel : public SingleOpModelWithNNAPI {
public:
ReshapeOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> new_shape) {
input_ = AddInput(TensorType_FLOAT32);
new_shape_ = AddConstInput<int>(TensorType_INT32, new_shape,
{static_cast<int>(new_shape.size())});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_RESHAPE, BuiltinOptions_ReshapeOptions,
CreateReshapeOptions(builder_, builder_.CreateVector<int>(new_shape))
.Union());
BuildInterpreterWithNNAPI(
{input_shape, {static_cast<int>(new_shape.size())}});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int new_shape_;
int output_;
};
TEST(NNAPIDelegate, ReshapeSimpleTest) {
ReshapeOpModel m({1, 2, 4, 1}, {2, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
class SqueezeOpModel : public SingleOpModelWithNNAPI {
public:
SqueezeOpModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_SQUEEZE, BuiltinOptions_SqueezeOptions,
CreateSqueezeOptions(builder_, builder_.CreateVector<int>(axis))
.Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int new_shape_;
int output_;
};
TEST(NNAPIDelegate, DISABLED_SqueezeSimpleTest) {
std::initializer_list<float> data = {
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SqueezeOpModel m({TensorType_FLOAT32, {1, 24, 1}}, {TensorType_FLOAT32, {24}},
{});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
}
TEST(NNAPIDelegate, SqueezeWithAxisTest) {
std::initializer_list<float> data = {
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SqueezeOpModel m({TensorType_FLOAT32, {1, 24, 1}}, {TensorType_FLOAT32, {24}},
{2});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
}
class L2NormOpModel : public SingleOpModelWithNNAPI {
public:
L2NormOpModel(const TensorData& input, const TensorData& output,
ActivationFunctionType activation_type) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_L2_NORMALIZATION, BuiltinOptions_L2NormOptions,
CreateL2NormOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int new_shape_;
int output_;
};
TEST(NNAPIDelegate, L2NormSimpleTest) {
std::initializer_list<float> data = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
L2NormOpModel m({TensorType_FLOAT32, {1, 1, 1, 6}},
{TensorType_FLOAT32, {1, 1, 1, 6}},
ActivationFunctionType_NONE);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 6}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
}
class TransposeSimpleModel : public SingleOpModelWithNNAPI {
public:
TransposeSimpleModel(std::initializer_list<int> input_shape,
std::initializer_list<int> perm_shape,
std::initializer_list<int> perm) {
input_ = AddInput(TensorType_FLOAT32);
perm_ = AddConstInput(TensorType_INT32, perm, perm_shape);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_TRANSPOSE, BuiltinOptions_TransposeOptions,
CreateTransposeOptions(builder_).Union());
BuildInterpreterWithNNAPI({input_shape, perm_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int perm_;
int output_;
};
TEST(NNAPIDelegate, TransposeSimpleTest) {
TransposeSimpleModel m({2, 3, 4}, {3}, {2, 0, 1});
m.SetInput({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 3}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
}
class ElementwiseOpBaseModel : public SingleOpModelWithNNAPI {
public:
int input() const { return input_; }
int output() const { return output_; }
protected:
int input_;
int output_;
};
class ElementwiseOpFloatModel : public ElementwiseOpBaseModel {
public:
ElementwiseOpFloatModel(BuiltinOperator op,
std::initializer_list<int> input_shape) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(op, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({input_shape});
}
};
TEST(Elementwise, Abs) {
ElementwiseOpFloatModel m(BuiltinOperator_ABS, {1, 2, 4, 1});
m.PopulateTensor<float>(m.input(), {
0.f, -6.2f, 2.f, 4.f,
3.f, -2.f, 10.f, 1.f,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()), ElementsAreArray({
0.f, 6.2f, 2.f, 4.f,
3.f, 2.f, 10.f, 1.f,
}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 2, 4, 1}));
}
TEST(Elementwise, Exp) {
ElementwiseOpFloatModel m(BuiltinOperator_EXP, {3, 1, 2});
m.PopulateTensor<float>(m.input(), {1.0, 0.0, -1.0, 1.0, 1.0, -1.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear(
{2.71828, 1, 0.367879, 2.71828, 2.71828, 0.367879})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({3, 1, 2}));
}
TEST(Elementwise, Log) {
ElementwiseOpFloatModel m(BuiltinOperator_LOG, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {1, 3.1415926, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({0, 1.14473, 0, 0})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Rsqrt) {
ElementwiseOpFloatModel m(BuiltinOperator_RSQRT, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {1, 2, 4, 9});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({1, 0.7071, 0.5, 0.33333})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Sin) {
ElementwiseOpFloatModel m(BuiltinOperator_SIN, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {0, 3.1415926, -3.1415926, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0.84147})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Cos) {
ElementwiseOpFloatModel m(BuiltinOperator_COS, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {0, 3.1415926, -3.1415926, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({1.0, -1, -1, 0.54030})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Sqrt) {
ElementwiseOpFloatModel m(BuiltinOperator_SQRT, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {0, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({0, 1, 1.41421, 2})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
class FloatSubOpModel : public SingleOpModelWithNNAPI {
public:
FloatSubOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_SUB, BuiltinOptions_SubOptions,
                 CreateSubOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, SubWithNoActivation) {
FloatSubOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-2.1, 0.0, 0.4, 0.3})));
}
class FloatDivOpModel : public SingleOpModelWithNNAPI {
public:
FloatDivOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_DIV, BuiltinOptions_DivOptions,
                 CreateDivOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, DivWithNoActivation) {
FloatDivOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.8, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.4, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({-20, 1, 2, 4})));
}
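// Base model for CONCATENATION tests: every input uses the same tensor
// template and all inputs are concatenated along the given axis.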
class BaseConcatenationOpModel : public SingleOpModelWithNNAPI {
public:
BaseConcatenationOpModel() {}
BaseConcatenationOpModel(const TensorData& input_template, int axis,
int num_inputs) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < num_inputs; ++i) {
all_input_shapes.push_back(input_template.shape);
AddInput(input_template);
}
output_ = AddOutput({input_template.type, {}, input_template.min,
input_template.max});
SetBuiltinOp(
BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions,
CreateConcatenationOptions(builder_, axis, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI(all_input_shapes);
}
protected:
int output_;
};
class ConcatenationOpModel : public BaseConcatenationOpModel {
public:
using BaseConcatenationOpModel::BaseConcatenationOpModel;
void SetInput(int index, std::initializer_list<float> data) {
PopulateTensor(index, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
TEST(NNAPIDelegate, ConcatenationThreeDimensionalOneInput) {
  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2}}, /*axis=*/1,
                          /*num_inputs=*/1);
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetOutput(), ElementsAreArray({1, 3, 4, 7}));
}
TEST(NNAPIDelegate, ConcatenationFourInputs) {
  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2}}, /*axis=*/2,
                          /*num_inputs=*/4);
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetOutput(),
ElementsAreArray({
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
}));
}
class QuantizedConcatenationOpModel : public BaseConcatenationOpModel {
public:
using BaseConcatenationOpModel::BaseConcatenationOpModel;
QuantizedConcatenationOpModel(const std::vector<TensorData>& input_template,
int axis, int num_inputs,
const TensorData& output_template) {
std::vector<std::vector<int>> all_input_shapes;
CHECK_EQ(input_template.size(), num_inputs);
for (int i = 0; i < num_inputs; ++i) {
all_input_shapes.push_back(input_template[i].shape);
AddInput(input_template[i]);
}
output_ = AddOutput({output_template.type, {},
output_template.min, output_template.max});
SetBuiltinOp(
BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions,
CreateConcatenationOptions(builder_, axis, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI(all_input_shapes);
}
void SetInput(int index, std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(index, data);
}
std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
TEST(NNAPIDelegate, ConcatenationFourInputsQuantized) {
  QuantizedConcatenationOpModel m0({TensorType_UINT8, {2, 1, 2}, -12.7, 12.8},
                                   /*axis=*/2,
                                   /*num_inputs=*/4);
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
})));
EXPECT_THAT(m0.GetOutput(), ElementsAreArray({
137, 157, 138, 158, 139, 159, 140, 160,
167, 197, 168, 198, 169, 199, 170, 200,
}));
}
TEST(NNAPIDelegate, ConcatenationFourInputsQuantizedMixedRange) {
QuantizedConcatenationOpModel m0({{TensorType_UINT8, {2, 1, 2}, -10.7, 10.8},
{TensorType_UINT8, {2, 1, 2}, 0, 12.8},
{TensorType_UINT8, {2, 1, 2}, -11, 11.8},
{TensorType_UINT8, {2, 1, 2}, 0, 7.4}},
2, 4,
{TensorType_UINT8, {2, 1, 2}, -12.7, 12.8});
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
})));
EXPECT_THAT(m0.GetOutput(), ElementsAreArray({
137, 157, 138, 158, 139, 159, 140, 160,
167, 197, 168, 198, 169, 199, 170, 200,
}));
}
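// Dequantizes a quantized input tensor (uint8 or int8) to float32.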
class DequantizeOpModel : public SingleOpModelWithNNAPI {
public:
  DequantizeOpModel(TensorType input_type, std::initializer_list<int> shape,
                    float min, float max) {
    input_ = AddInput({input_type, shape, min, max});
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, DequantizeFourDimensionalUint8) {
DequantizeOpModel m(TensorType_UINT8, {2, 5}, -63.5, 64);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(NNAPIDelegate, DequantizeFourDimensionalInt8Symm) {
DequantizeOpModel m(TensorType_INT8, {2, 5}, -64, 63.5);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-64, -63.5, -63, -62.5, -62, 61.5, 62, 62.5, 63, 63.5})));
}
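// Single FLOOR op model: element-wise floor of a float tensor.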
class FloorOpModel : public SingleOpModelWithNNAPI {
public:
FloorOpModel(std::initializer_list<int> input_shape, TensorType input_type) {
    input_ = AddInput(input_type);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_FLOOR, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({
input_shape,
});
}
int input() { return input_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, FloorSingleDim) {
FloorOpModel model({2}, TensorType_FLOAT32);
model.PopulateTensor<float>(model.input(), {8.5, 0.0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({8, 0}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2}));
}
TEST(NNAPIDelegate, FloorMultiDims) {
FloorOpModel model({2, 1, 1, 5}, TensorType_FLOAT32);
model.PopulateTensor<float>(model.input(), {
0.0001,
8.0001,
0.9999,
9.9999,
0.5,
-0.0001,
-8.0001,
-0.9999,
-9.9999,
-0.5,
});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({0, 8, 0, 9, 0, -1, -9, -1, -10, -1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 1, 1, 5}));
}
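// Single LOCAL_RESPONSE_NORMALIZATION op parameterized by radius, bias, alpha
// and beta.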
class LocalResponseNormOpModel : public SingleOpModelWithNNAPI {
public:
LocalResponseNormOpModel(std::initializer_list<int> input_shape, int radius,
float bias, float alpha, float beta) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
BuiltinOptions_LocalResponseNormalizationOptions,
CreateLocalResponseNormalizationOptions(builder_, radius, bias,
alpha, beta)
.Union());
BuildInterpreterWithNNAPI({input_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, LocalResponseNormSameAsL2Norm) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/0.0,
                             /*alpha=*/1.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05})));
}
TEST(NNAPIDelegate, LocalResponseNormWithAlpha) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/0.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-0.275, 0.15, 0.175, 0.3, -0.175, 0.025})));
}
TEST(NNAPIDelegate, LocalResponseNormWithBias) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/9.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02})));
}
TEST(NNAPIDelegate, LocalResponseNormSmallRadius) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/2, /*bias=*/9.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-0.264926, 0.125109, 0.140112, 0.267261, -0.161788, 0.0244266})));
}
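// LSH_PROJECTION model; the weight input is optional and the output size
// depends on whether the projection type is SPARSE or DENSE.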
class LSHProjectionOpModel : public SingleOpModelWithNNAPI {
public:
LSHProjectionOpModel(LSHProjectionType type,
std::initializer_list<int> hash_shape,
std::initializer_list<int> input_shape,
std::initializer_list<int> weight_shape) {
hash_ = AddInput(TensorType_FLOAT32);
input_ = AddInput(TensorType_INT32);
if (weight_shape.size() > 0) {
weight_ = AddInput(TensorType_FLOAT32);
}
output_ = AddOutput(TensorType_INT32);
SetBuiltinOp(BuiltinOperator_LSH_PROJECTION,
BuiltinOptions_LSHProjectionOptions,
CreateLSHProjectionOptions(builder_, type).Union());
if (weight_shape.size() > 0) {
BuildInterpreterWithNNAPI({hash_shape, input_shape, weight_shape});
} else {
BuildInterpreterWithNNAPI({hash_shape, input_shape});
}
output_size_ = 1;
for (int i : hash_shape) {
output_size_ *= i;
if (type == LSHProjectionType_SPARSE) {
break;
}
}
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
void SetHash(std::initializer_list<float> data) {
PopulateTensor(hash_, data);
}
void SetWeight(std::initializer_list<float> f) { PopulateTensor(weight_, f); }
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
private:
int input_;
int hash_;
int weight_;
int output_;
int output_size_;
};
TEST(NNAPIDelegate, LSHProjectionDense1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_DENSE, {3, 2}, {5}, {5});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({1.0, 1.0, 1.0, 1.0, 1.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 1, 1, 1, 0));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 0, 1, 0, 0));
#endif
}
TEST(NNAPIDelegate, LSHProjectionSparse1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5}, {});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 1, 8 + 0));
#endif
}
TEST(NNAPIDelegate, LSHProjectionSparse3DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5, 2, 2}, {5});
m.SetInput({1234, 2345, 3456, 1234, 4567, 5678, 6789, 4567, 7891, 8912,
9123, 7890, -987, -876, -765, -987, -543, -432, -321, -543});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({0.12, 0.34, 0.56, 0.67, 0.78});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 2, 4 + 1, 8 + 1));
#endif
}
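// Base model for single activation ops (RELU, RELU6, LOGISTIC, ...). For
// uint8 inputs the output is created with a fixed 1/256 scale.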
class BaseActivationsOpModel : public SingleOpModelWithNNAPI {
public:
BaseActivationsOpModel(BuiltinOperator type, const TensorData& input) {
input_ = AddInput(input);
if (input.type == TensorType_UINT8) {
output_ = AddOutput({input.type, {}, 0, 0, 1. / 256});
} else {
output_ = AddOutput({input.type, {}});
}
SetBuiltinOp(type, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({GetShape(input_)});
}
BaseActivationsOpModel(BuiltinOperator type, const TensorData& input,
const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(type, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({GetShape(input_)});
}
protected:
int input_;
int output_;
};
class FloatActivationsOpModel : public BaseActivationsOpModel {
public:
using BaseActivationsOpModel::BaseActivationsOpModel;
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
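// Tolerance for the quantized activation tests: two quantization steps at the
// 1/256 output scale.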
const float kQuantizedTolerance = 2 * (1. / 256);
class QuantizedActivationsOpModel : public BaseActivationsOpModel {
public:
using BaseActivationsOpModel::BaseActivationsOpModel;
template <typename T>
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
};
TEST(NNAPIDelegate, Relu) {
FloatActivationsOpModel m(BuiltinOperator_RELU,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 2, 4,
3, 0, 10, 1,
}));
}
TEST(NNAPIDelegate, Relu1) {
FloatActivationsOpModel m(BuiltinOperator_RELU_N1_TO_1,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0.0, -0.6, 0.2, -0.4,
0.3, -2.0, 1.1, -0.1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0, -0.6, 0.2, -0.4,
0.3, -1.0, 1.0, -0.1,
}));
}
TEST(NNAPIDelegate, Relu6) {
FloatActivationsOpModel m(BuiltinOperator_RELU6,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 2, 4,
3, 0, 6, 1,
}));
}
TEST(NNAPIDelegate, LogisticFloat) {
FloatActivationsOpModel m(BuiltinOperator_LOGISTIC,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
0.5, 0.002473, 0.880797, 0.982014,
0.952574, 0.119203, 0.999955, 0.731059,
})));
}
TEST(NNAPIDelegate, LogisticQuantized) {
QuantizedActivationsOpModel m(
BuiltinOperator_LOGISTIC,
{TensorType_UINT8, {1, 2, 4, 1}, -10, 10});
m.SetInput<uint8_t>({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
0.5, 0.002473, 0.880797, 0.982014,
0.952574, 0.119203, 0.999955, 0.731059,
},
kQuantizedTolerance)));
EXPECT_THAT(m.GetOutput<uint8_t>(),
testing::Pointwise(QuantizedNear(),
{128, 1, 227, 251, 244, 32, 255, 188}));
}
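// RESIZE_BILINEAR model; the target size is either baked in as a constant
// tensor or supplied as a regular input at runtime.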
class ResizeBilinearOpModel : public SingleOpModelWithNNAPI {
public:
ResizeBilinearOpModel(const TensorData& input,
std::initializer_list<int> size_data) {
bool const_size = size_data.size() != 0;
input_ = AddInput(input);
if (const_size) {
size_ = AddConstInput(TensorType_INT32, size_data, {2});
} else {
size_ = AddInput({TensorType_INT32, {2}});
}
output_ = AddOutput(input.type);
SetBuiltinOp(BuiltinOperator_RESIZE_BILINEAR,
BuiltinOptions_ResizeBilinearOptions,
CreateResizeBilinearOptions(builder_).Union());
if (const_size) {
BuildInterpreterWithNNAPI({GetShape(input_)});
} else {
BuildInterpreterWithNNAPI({GetShape(input_), GetShape(size_)});
}
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
void SetSize(std::initializer_list<int> data) { PopulateTensor(size_, data); }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
private:
int input_;
int size_;
int output_;
};
TEST(ResizeBilinear, Horizontal) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 1, 2, 1}}, {});
m.SetInput<float>({3, 6});
m.SetSize({1, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6})));
}
TEST(ResizeBilinear, HorizontalConstant) {
ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 1, 2, 1}}, {1, 3});
const_m.SetInput<float>({3, 6});
ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
EXPECT_THAT(const_m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6})));
}
TEST(ResizeBilinear, Vertical) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 1, 1}}, {});
m.SetInput<float>({3, 9});
m.SetSize({3, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 7, 9})));
}
TEST(ResizeBilinear, VerticalConstant) {
ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 2, 1, 1}}, {3, 1});
const_m.SetInput<float>({3, 9});
ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
EXPECT_THAT(const_m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 7, 9})));
}
TEST(ResizeBilinear, TwoDimensional) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}}, {});
m.SetInput<float>({
3, 6,
9, 12
});
m.SetSize({3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({
3, 5, 6,
7, 9, 10,
9, 11, 12,
})));
}
TEST(ResizeBilinear, TwoDimensionalConstant) {
ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 2, 2, 1}}, {3, 3});
const_m.SetInput<float>({
3, 6,
9, 12
});
ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
EXPECT_THAT(const_m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({
3, 5, 6,
7, 9, 10,
9, 11, 12,
})));
}
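// Generic PAD model; paddings are populated per test, and the quantized
// variants can also set a constant pad value.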
template <typename T>
class PadOpModel : public SingleOpModelWithNNAPI {
public:
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename QuantizedInputOutput>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<QuantizedInputOutput>(input_, data);
}
template <typename QuantizedInputOutput>
void SetQuantizedPadValue(float data) {
QuantizeAndPopulate<QuantizedInputOutput>(constant_values_, {data});
}
void SetPaddings(std::initializer_list<int> paddings) {
PopulateTensor<int>(paddings_, paddings);
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename QuantizedInputOutput>
std::vector<float> GetDequantizedOutput() {
return Dequantize<QuantizedInputOutput>(
ExtractVector<QuantizedInputOutput>(output_), GetScale(output_),
GetZeroPoint(output_));
}
protected:
int input_;
int output_;
int paddings_;
int constant_values_;
};
class PadOpConstModel : public PadOpModel<float> {
public:
PadOpConstModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings,
const TensorData& output) {
input_ = AddInput(input);
paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_shape);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
CreatePadOptions(builder_).Union());
BuildInterpreterWithNNAPI({input.shape});
}
};
TEST(NNAPIDelegate, PadAdvancedConstTest) {
PadOpConstModel m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
{0, 0, 0, 2, 1, 3, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
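// SPACE_TO_BATCH_ND model; block shape and paddings are set per test, and the
// const variant below bakes them in as constant tensors.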
class SpaceToBatchNDOpModel : public SingleOpModelWithNNAPI {
public:
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
void SetBlockShape(std::initializer_list<int> data) {
PopulateTensor<int>(block_shape_, data);
}
void SetPaddings(std::initializer_list<int> data) {
PopulateTensor<int>(paddings_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int block_shape_;
int paddings_;
int output_;
};
class SpaceToBatchNDOpConstModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpConstModel(std::initializer_list<int> input_shape,
std::initializer_list<int> block_shape,
std::initializer_list<int> paddings) {
input_ = AddInput(TensorType_FLOAT32);
block_shape_ = AddConstInput(TensorType_INT32, block_shape, {2});
paddings_ = AddConstInput(TensorType_INT32, paddings, {2, 2});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreterWithNNAPI({input_shape});
}
};
TEST(NNAPIDelegate, SpaceToBatchNDSimpleConstTest) {
SpaceToBatchNDOpConstModel m({1, 4, 4, 1}, {2, 2}, {0, 0, 0, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(NNAPIDelegate, SpaceToBatchNDMultipleInputBatchesConstTest) {
SpaceToBatchNDOpConstModel m({2, 2, 4, 1}, {2, 2}, {0, 0, 0, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(NNAPIDelegate, SpaceToBatchNDSimplePaddingConstTest) {
SpaceToBatchNDOpConstModel m({1, 5, 2, 1}, {3, 2}, {1, 0, 2, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(NNAPIDelegate, SpaceToBatchNDComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({1, 4, 2, 1}, {3, 2}, {1, 1, 2, 4});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
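// STRIDED_SLICE model; begin/end/strides are constant tensors and the masks
// are passed through the builtin options.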
template <typename input_type = float,
TensorType tensor_input_type = TensorType_FLOAT32>
class StridedSliceOpModel : public SingleOpModelWithNNAPI {
public:
StridedSliceOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> begin_shape,
std::initializer_list<int> begin_data,
std::initializer_list<int> end_shape,
std::initializer_list<int> end_data,
std::initializer_list<int> strides_shape,
std::initializer_list<int> strides_data, int begin_mask,
int end_mask, int ellipsis_mask, int new_axis_mask,
int shrink_axis_mask) {
input_ = AddInput(tensor_input_type);
begin_ = AddConstInput(TensorType_INT32, begin_data, begin_shape);
end_ = AddConstInput(TensorType_INT32, end_data, end_shape);
strides_ = AddConstInput(TensorType_INT32, strides_data, strides_shape);
output_ = AddOutput(tensor_input_type);
SetBuiltinOp(
BuiltinOperator_STRIDED_SLICE, BuiltinOptions_StridedSliceOptions,
CreateStridedSliceOptions(builder_, begin_mask, end_mask, ellipsis_mask,
new_axis_mask, shrink_axis_mask)
.Union());
BuildInterpreterWithNNAPI(
{input_shape, begin_shape, end_shape, strides_shape});
}
void SetInput(std::initializer_list<input_type> data) {
PopulateTensor<input_type>(input_, data);
}
std::vector<input_type> GetOutput() {
return ExtractVector<input_type>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int begin_;
int end_;
int strides_;
int output_;
};
TEST(StridedSliceOpTest, In1D) {
StridedSliceOpModel<> m({4}, {1}, {1}, {1}, {3}, {1}, {1}, 0, 0, 0, 0, 0);
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 3}));
}
TEST(StridedSliceOpTest, In1D_BeginMask) {
StridedSliceOpModel<> m({4}, {1}, {1}, {1}, {3}, {1}, {1}, 1, 0, 0, 0, 0);
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3}));
}
TEST(StridedSliceOpTest, In2D_Stride2) {
StridedSliceOpModel<> m({2, 3}, {2}, {0, 0}, {2}, {2, 3}, {2}, {2, 2}, 0, 0,
0, 0, 0);
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3}));
}
TEST(StridedSliceOpTest, In2D_EndMask) {
StridedSliceOpModel<> m({2, 3}, {2}, {1, 0}, {2}, {2, 2}, {2}, {1, 1}, 0, 2,
0, 0, 0);
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 5, 6}));
}
TEST(StridedSliceOpTest, In3D_IdentityShrinkAxis4) {
StridedSliceOpModel<> m({2, 3, 2}, {3}, {0, 0, 0}, {3}, {2, 3, 1}, {3},
{1, 1, 1}, 0, 0, 0, 0, 4);
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 5, 7, 9, 11}));
}
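// Input sequence, golden outputs, weights and biases for the RNN black-box
// test below.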
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
static float rnn_golden_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
static std::initializer_list<float> rnn_weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
static std::initializer_list<float> rnn_recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
static std::initializer_list<float> rnn_bias = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
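// Basic (fully connected) RNN with a RELU activation and a variable hidden
// state tensor.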
class RNNOpModel : public SingleOpModelWithNNAPI {
public:
RNNOpModel(int batches, int units, int size,
const TensorType weights = TensorType_FLOAT32,
const TensorType recurrent_weights = TensorType_FLOAT32)
: batches_(batches), units_(units), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
CreateRNNOptions(builder_, ActivationFunctionType_RELU).Union());
    BuildInterpreterWithNNAPI({
        {batches_, input_size_},  // input tensor
        {units_, input_size_},    // weights tensor
        {units_, units_},         // recurrent weights tensor
        {units_},                 // bias tensor
        {batches_, units_}        // hidden state tensor
    });
}
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetWeights(std::initializer_list<float> f) {
PopulateTensor(weights_, f);
}
void SetRecurrentWeights(std::initializer_list<float> f) {
PopulateTensor(recurrent_weights_, f);
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_;
int recurrent_weights_;
int bias_;
int hidden_state_;
int output_;
int batches_;
int units_;
int input_size_;
};
TEST(NNAPIDelegate, RnnBlackBoxTest) {
RNNOpModel rnn(2, 16, 8);
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
}
}
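// Input sequence and golden outputs (rank 1 and rank 2) for the SVDF
// black-box tests.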
static float svdf_input[] = {
0.12609188, -0.46347019, -0.89598465,
0.35867718, 0.36897406, 0.73463392,
0.14278367, -1.64410412, -0.75222826,
-0.57290924, 0.12729003, 0.7567004,
0.49837467, 0.19278903, 0.26584083,
0.17660543, 0.52949083, -0.77931279,
-0.11186574, 0.13164264, -0.05349274,
-0.72674477, -0.5683046, 0.55900657,
-0.68892461, 0.37783599, 0.18263303,
-0.63690937, 0.44483393, -0.71817774,
-0.81299269, -0.86831826, 1.43940818,
-0.95760226, 1.82078898, 0.71135032,
-1.45006323, -0.82251364, -1.69082689,
-1.65087092, -1.89238167, 1.54172635,
0.03966608, -0.24936394, -0.77526885,
2.06740379, -1.51439476, 1.43768692,
0.11771342, -0.23761693, -0.65898693,
0.31088525, -1.55601168, -0.87661445,
-0.89477462, 1.67204106, -0.53235275,
-0.6230064, 0.29819036, 1.06939757,
};
static float svdf_golden_output_rank_1[] = {
0.014899, -0.0517661, -0.143725, -0.00271883,
-0.03004015, 0.09565311, 0.1587342, 0.00784263,
0.068281, -0.162217, -0.152268, 0.00323521,
0.01582633, 0.03858774, -0.03001583, -0.02671271,
-0.0317821, -0.0333089, 0.0609602, 0.0333759,
-0.01432795, 0.05524484, 0.1101355, -0.02382665,
-0.00623099, -0.077701, -0.391193, -0.0136691,
-0.02333033, 0.02293761, 0.12338032, 0.04326871,
0.201551, -0.164607, -0.179462, -0.0592739,
0.01064911, -0.17503069, 0.07821996, -0.00224009,
0.0886511, -0.0875401, -0.269283, 0.0281379,
-0.02282338, 0.09741908, 0.32973239, 0.12281385,
-0.201174, -0.586145, -0.628624, -0.0330412,
0.24780814, -0.39304617, -0.22473189, 0.02589256,
-0.0839096, -0.299329, 0.108746, 0.109808,
0.10084175, -0.06416984, 0.28936723, 0.0026358,
0.419114, -0.237824, -0.422627, 0.175115,
-0.2314795, -0.18584411, -0.4228974, -0.12928449,
0.36726, -0.522303, -0.456502, -0.175475,
0.17012937, -0.34447709, 0.38505614, -0.28158101,
};
static float svdf_golden_output_rank_2[] = {
-0.09623547, -0.10193135, 0.11083051, -0.0347917,
0.1141196, 0.12965347, -0.12652366, 0.01007236,
-0.16396809, -0.21247184, 0.11259045, -0.04156673,
0.10132131, -0.06143532, -0.00924693, 0.10084561,
0.01257364, 0.0506071, -0.19287863, -0.07162561,
-0.02033747, 0.22673416, 0.15487903, 0.02525555,
-0.1411963, -0.37054959, 0.01774767, 0.05867489,
0.09607603, -0.0141301, -0.08995658, 0.12867066,
-0.27142537, -0.16955489, 0.18521598, -0.12528358,
0.00331409, 0.11167502, 0.02218599, -0.07309391,
0.09593632, -0.28361851, -0.0773851, 0.17199151,
-0.00075242, 0.33691186, -0.1536046, 0.16572715,
-0.27916506, -0.27626723, 0.42615682, 0.3225764,
-0.37472126, -0.55655634, -0.05013514, 0.289112,
-0.24418658, 0.07540751, -0.1940318, -0.08911639,
0.00732617, 0.46737891, 0.26449674, 0.24888524,
-0.17225097, -0.54660404, -0.38795233, 0.08389944,
0.07736043, -0.28260678, 0.15666828, 1.14949894,
-0.57454878, -0.64704704, 0.73235172, -0.34616736,
0.21120001, -0.22927976, 0.02455296, -0.35906726,
};
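// SVDF model with a variable activation-state tensor; the bias tensor is
// filled with zeros in the constructor.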
class BaseSVDFOpModel : public SingleOpModelWithNNAPI {
public:
BaseSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank,
TensorType weights_feature_type = TensorType_FLOAT32,
TensorType weights_time_type = TensorType_FLOAT32)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
input_ = AddInput(TensorType_FLOAT32);
weights_feature_ = AddInput(weights_feature_type);
weights_time_ = AddInput(weights_time_type);
bias_ = AddInput(TensorType_FLOAT32);
const int num_filters = units * rank;
activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_NONE).Union());
    BuildInterpreterWithNNAPI({
        {batches_, input_size_},              // input tensor
        {units_ * rank, input_size_},         // weights_feature tensor
        {units_ * rank, memory_size_},        // weights_time tensor
        {units_},                             // bias tensor
        {batches, memory_size * num_filters}  // activation_state tensor
    });
PopulateTensor(bias_, std::vector<float>(units_));
}
void SetWeightsFeature(std::initializer_list<float> f) {
PopulateTensor(weights_feature_, f);
}
void SetWeightsTime(std::initializer_list<float> f) {
PopulateTensor(weights_time_, f);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_feature_;
int weights_time_;
int bias_;
int activation_state_;
int output_;
int batches_;
int units_;
int input_size_;
int memory_size_;
int rank_;
};
class SVDFOpModel : public BaseSVDFOpModel {
public:
using BaseSVDFOpModel::BaseSVDFOpModel;
};
class SVDFOpTest : public ::testing::Test {
protected:
void VerifyGoldens(float golden_input[], float golden_output[],
int golden_size, BaseSVDFOpModel* svdf,
float tolerance = 1e-5) {
const int svdf_num_batches = svdf->num_batches();
const int svdf_input_size = svdf->input_size();
const int svdf_num_units = svdf->num_units();
const int input_sequence_size =
golden_size / sizeof(float) / (svdf_input_size * svdf_num_batches);
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start =
golden_input + i * svdf_input_size * svdf_num_batches;
float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
svdf->SetInput(0, batch_start, batch_end);
ASSERT_EQ(svdf->Invoke(), kTfLiteOk);
const float* golden_start =
golden_output + i * svdf_num_units * svdf_num_batches;
const float* golden_end =
golden_start + svdf_num_units * svdf_num_batches;
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(svdf->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
}
};
TEST_F(SVDFOpTest, BlackBoxTestRank1) {
  SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
                   /*memory_size=*/10, /*rank=*/1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf);
}
TEST_F(SVDFOpTest, BlackBoxTestRank2) {
  SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
                   /*memory_size=*/10, /*rank=*/2);
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf);
}
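// Full LSTM op model. Optional CIFG, peephole, projection and layer-norm
// tensors are added as null inputs when the corresponding feature is off.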
class LSTMOpModel : public SingleOpModelWithNNAPI {
public:
LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg,
bool use_peephole, bool use_projection_weights,
bool use_projection_bias, float cell_clip, float proj_clip,
const std::vector<std::vector<int>>& input_shapes,
const TensorType weight_type)
: n_batch_(n_batch),
n_input_(n_input),
n_cell_(n_cell),
n_output_(n_output),
weight_type_(weight_type) {
input_ = AddInput(TensorType_FLOAT32);
if (use_cifg) {
input_to_input_weights_ = AddNullInput();
} else {
input_to_input_weights_ = AddInput(weight_type);
}
input_to_forget_weights_ = AddInput(weight_type);
input_to_cell_weights_ = AddInput(weight_type);
input_to_output_weights_ = AddInput(weight_type);
if (use_cifg) {
recurrent_to_input_weights_ = AddNullInput();
} else {
recurrent_to_input_weights_ = AddInput(weight_type);
}
recurrent_to_forget_weights_ = AddInput(weight_type);
recurrent_to_cell_weights_ = AddInput(weight_type);
recurrent_to_output_weights_ = AddInput(weight_type);
if (use_peephole) {
if (use_cifg) {
cell_to_input_weights_ = AddNullInput();
} else {
cell_to_input_weights_ = AddInput(weight_type);
}
cell_to_forget_weights_ = AddInput(weight_type);
cell_to_output_weights_ = AddInput(weight_type);
} else {
cell_to_input_weights_ = AddNullInput();
cell_to_forget_weights_ = AddNullInput();
cell_to_output_weights_ = AddNullInput();
}
if (use_cifg) {
input_gate_bias_ = AddNullInput();
} else {
input_gate_bias_ = AddInput(TensorType_FLOAT32);
}
forget_gate_bias_ = AddInput(TensorType_FLOAT32);
cell_bias_ = AddInput(TensorType_FLOAT32);
output_gate_bias_ = AddInput(TensorType_FLOAT32);
if (use_projection_weights) {
projection_weights_ = AddInput(weight_type);
if (use_projection_bias) {
projection_bias_ = AddInput(TensorType_FLOAT32);
} else {
projection_bias_ = AddNullInput();
}
} else {
projection_weights_ = AddNullInput();
projection_bias_ = AddNullInput();
}
input_activation_state_ = AddVariableInput(TensorType_FLOAT32);
input_cell_state_ = AddVariableInput(TensorType_FLOAT32);
const bool use_layer_norm = input_shapes.size() > 20;
if (use_layer_norm) {
const int kInputLayerNormCoeffsIndex = 20;
const int kForgetLayerNormCoeffsIndex = 21;
const int kCellLayerNormCoeffsIndex = 22;
const int kOutputLayerNormCoeffsIndex = 23;
if (use_cifg) {
input_layer_norm_coefficients_ = AddNullInput();
} else {
input_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kInputLayerNormCoeffsIndex, input_shapes);
}
forget_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kForgetLayerNormCoeffsIndex, input_shapes);
cell_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kCellLayerNormCoeffsIndex, input_shapes);
output_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kOutputLayerNormCoeffsIndex, input_shapes);
}
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LSTM, BuiltinOptions_LSTMOptions,
CreateLSTMOptions(builder_, ActivationFunctionType_TANH,
cell_clip, proj_clip)
.Union());
BuildInterpreterWithNNAPI(input_shapes);
}
void SetInputToInputWeights(const std::vector<float>& f) {
SetData(input_to_input_weights_, weight_type_, f);
}
void SetInputToForgetWeights(const std::vector<float>& f) {
SetData(input_to_forget_weights_, weight_type_, f);
}
void SetInputToCellWeights(const std::vector<float>& f) {
SetData(input_to_cell_weights_, weight_type_, f);
}
void SetInputToOutputWeights(const std::vector<float>& f) {
SetData(input_to_output_weights_, weight_type_, f);
}
void SetRecurrentToInputWeights(const std::vector<float>& f) {
SetData(recurrent_to_input_weights_, weight_type_, f);
}
void SetRecurrentToForgetWeights(const std::vector<float>& f) {
SetData(recurrent_to_forget_weights_, weight_type_, f);
}
void SetRecurrentToCellWeights(const std::vector<float>& f) {
SetData(recurrent_to_cell_weights_, weight_type_, f);
}
void SetRecurrentToOutputWeights(const std::vector<float>& f) {
SetData(recurrent_to_output_weights_, weight_type_, f);
}
void SetCellToInputWeights(const std::vector<float>& f) {
SetData(cell_to_input_weights_, weight_type_, f);
}
void SetCellToForgetWeights(const std::vector<float>& f) {
SetData(cell_to_forget_weights_, weight_type_, f);
}
void SetCellToOutputWeights(const std::vector<float>& f) {
SetData(cell_to_output_weights_, weight_type_, f);
}
void SetInputGateBias(const std::vector<float>& f) {
PopulateTensor(input_gate_bias_, f);
}
void SetForgetGateBias(const std::vector<float>& f) {
PopulateTensor(forget_gate_bias_, f);
}
void SetCellBias(const std::vector<float>& f) {
PopulateTensor(cell_bias_, f);
}
void SetOutputGateBias(const std::vector<float>& f) {
PopulateTensor(output_gate_bias_, f);
}
void SetProjectionWeights(const std::vector<float>& f) {
SetData(projection_weights_, weight_type_, f);
}
void SetProjectionBias(const std::vector<float>& f) {
PopulateTensor(projection_bias_, f);
}
void SetInputLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(input_layer_norm_coefficients_, f);
}
void SetForgetLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(forget_layer_norm_coefficients_, f);
}
void SetCellLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(cell_layer_norm_coefficients_, f);
}
void SetOutputLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(output_layer_norm_coefficients_, f);
}
void SetInput(int offset, const float* begin, const float* end) {
PopulateTensor(input_, offset, const_cast<float*>(begin),
const_cast<float*>(end));
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int num_inputs() { return n_input_; }
int num_outputs() { return n_output_; }
int num_cells() { return n_cell_; }
int num_batches() { return n_batch_; }
protected:
int input_;
int input_to_input_weights_;
int input_to_forget_weights_;
int input_to_cell_weights_;
int input_to_output_weights_;
int recurrent_to_input_weights_;
int recurrent_to_forget_weights_;
int recurrent_to_cell_weights_;
int recurrent_to_output_weights_;
int cell_to_input_weights_;
int cell_to_forget_weights_;
int cell_to_output_weights_;
int input_gate_bias_;
int forget_gate_bias_;
int cell_bias_;
int output_gate_bias_;
int projection_weights_;
int projection_bias_;
int input_activation_state_;
int input_cell_state_;
int input_layer_norm_coefficients_;
int forget_layer_norm_coefficients_;
int cell_layer_norm_coefficients_;
int output_layer_norm_coefficients_;
int output_;
int output_state_;
int cell_state_;
int n_batch_;
int n_input_;
int n_cell_;
int n_output_;
private:
const TensorType weight_type_;
int AddLayerNormCoeffsTensor(
int tensor_index, const std::vector<std::vector<int>>& input_shapes) {
if (input_shapes[tensor_index][0] != 0) {
return AddInput(TensorType_FLOAT32);
} else {
return AddNullInput();
}
}
};
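// Shared fixture holding LSTM weights, inputs and golden outputs;
// VerifyGoldens() feeds the input sequence step by step and compares each
// step's output against the golden values.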
class BaseLstmTest : public ::testing::Test {
protected:
std::vector<float> input_to_input_weights_;
std::vector<float> input_to_cell_weights_;
std::vector<float> input_to_forget_weights_;
std::vector<float> input_to_output_weights_;
std::vector<float> input_gate_bias_;
std::vector<float> cell_gate_bias_;
std::vector<float> forget_gate_bias_;
std::vector<float> output_gate_bias_;
std::vector<float> recurrent_to_input_weights_;
std::vector<float> recurrent_to_cell_weights_;
std::vector<float> recurrent_to_forget_weights_;
std::vector<float> recurrent_to_output_weights_;
std::vector<float> cell_to_input_weights_;
std::vector<float> cell_to_forget_weights_;
std::vector<float> cell_to_output_weights_;
std::vector<float> projection_weights_;
std::vector<float> input_layer_norm_coefficients_;
std::vector<float> forget_layer_norm_coefficients_;
std::vector<float> cell_layer_norm_coefficients_;
std::vector<float> output_layer_norm_coefficients_;
std::vector<std::vector<float>> lstm_input_;
std::vector<std::vector<float>> lstm_golden_output_;
void VerifyGoldens(const std::vector<std::vector<float>>& input,
const std::vector<std::vector<float>>& output,
LSTMOpModel* lstm, float tolerance = 1e-5) {
const int num_batches = input.size();
EXPECT_GT(num_batches, 0);
const int num_inputs = lstm->num_inputs();
EXPECT_GT(num_inputs, 0);
const int input_sequence_size = input[0].size() / num_inputs;
EXPECT_GT(input_sequence_size, 0);
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data() + i * num_inputs;
const float* batch_end = batch_start + num_inputs;
lstm->SetInput(b * lstm->num_inputs(), batch_start, batch_end);
}
ASSERT_EQ(lstm->Invoke(), kTfLiteOk);
const int num_outputs = lstm->num_outputs();
std::vector<float> expected;
for (int b = 0; b < num_batches; ++b) {
const float* golden_start_batch = output[b].data() + i * num_outputs;
const float* golden_end_batch = golden_start_batch + num_outputs;
expected.insert(expected.end(), golden_start_batch, golden_end_batch);
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
}
};
class NoCifgNoPeepholeNoProjectionNoClippingLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_input_weights_ = {-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524};
input_to_cell_weights_ = {-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113, -0.29909778};
input_to_forget_weights_ = {0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212};
input_to_output_weights_ = {-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578};
input_gate_bias_ = {0., 0., 0., 0.};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_input_weights_ = {
-0.0063535, -0.2042388, 0.31454784, -0.35746509,
0.28902304, 0.08183324, -0.16555229, 0.02286911,
-0.13566875, 0.03034258, 0.48091322, -0.12528998,
0.24077177, -0.51332325, -0.33502164, 0.10629296};
recurrent_to_cell_weights_ = {
-0.3407414, 0.24443203, -0.2078532, 0.26320225,
0.05695659, -0.00123841, -0.4744786, -0.35869038,
-0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064};
recurrent_to_forget_weights_ = {
-0.48684245, -0.06655136, 0.42224967, 0.2112639,
0.27654213, 0.20864892, -0.07646349, 0.45877004,
0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004};
recurrent_to_output_weights_ = {
0.43385774, -0.17194885, 0.2718237, 0.09215671,
0.24107647, -0.39835793, 0.18212086, 0.01301402,
0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792}};
}
};
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
                   /*use_cifg=*/false, /*use_peephole=*/false,
                   /*use_projection_weights=*/false,
                   /*use_projection_bias=*/false,
                   /*cell_clip=*/0.0, /*proj_clip=*/0.0,
                   {
                       {n_batch, n_input},  // input tensor
                       {n_cell, n_input},   // input_to_input_weight tensor
                       {n_cell, n_input},   // input_to_forget_weight tensor
                       {n_cell, n_input},   // input_to_cell_weight tensor
                       {n_cell, n_input},   // input_to_output_weight tensor
                       {n_cell, n_output},  // recurrent_to_input_weight tensor
                       {n_cell, n_output},  // recurrent_to_forget_weight tensor
                       {n_cell, n_output},  // recurrent_to_cell_weight tensor
                       {n_cell, n_output},  // recurrent_to_output_weight tensor
                       {0},  // cell_to_input_weight tensor
                       {0},  // cell_to_forget_weight tensor
                       {0},  // cell_to_output_weight tensor
                       {n_cell},  // input_gate_bias tensor
                       {n_cell},  // forget_gate_bias tensor
                       {n_cell},  // cell_bias tensor
                       {n_cell},  // output_gate_bias tensor
                       {0, 0},  // projection_weight tensor
                       {0},     // projection_bias tensor
                       {n_batch, n_output},  // activation_state tensor
                       {n_batch, n_cell},    // cell_state tensor
                   },
TensorType_FLOAT32);
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
class NoCifgNoPeepholeNoProjectionNoClippingOmittedLayerNormLstmTest
: public NoCifgNoPeepholeNoProjectionNoClippingLstmTest {};
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingOmittedLayerNormLstmTest,
LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
                   /*use_cifg=*/false, /*use_peephole=*/false,
                   /*use_projection_weights=*/false,
                   /*use_projection_bias=*/false,
                   /*cell_clip=*/0.0, /*proj_clip=*/0.0,
                   {
                       {n_batch, n_input},  // input tensor
                       {n_cell, n_input},   // input_to_input_weight tensor
                       {n_cell, n_input},   // input_to_forget_weight tensor
                       {n_cell, n_input},   // input_to_cell_weight tensor
                       {n_cell, n_input},   // input_to_output_weight tensor
                       {n_cell, n_output},  // recurrent_to_input_weight tensor
                       {n_cell, n_output},  // recurrent_to_forget_weight tensor
                       {n_cell, n_output},  // recurrent_to_cell_weight tensor
                       {n_cell, n_output},  // recurrent_to_output_weight tensor
                       {0},  // cell_to_input_weight tensor
                       {0},  // cell_to_forget_weight tensor
                       {0},  // cell_to_output_weight tensor
                       {n_cell},  // input_gate_bias tensor
                       {n_cell},  // forget_gate_bias tensor
                       {n_cell},  // cell_bias tensor
                       {n_cell},  // output_gate_bias tensor
                       {0, 0},  // projection_weight tensor
                       {0},     // projection_bias tensor
                       {n_batch, n_output},  // activation_state tensor
                       {n_batch, n_cell},    // cell_state tensor
                       {0},  // input_layer_norm_coefficient tensor (omitted)
                       {0},  // forget_layer_norm_coefficient tensor (omitted)
                       {0},  // cell_layer_norm_coefficient tensor (omitted)
                       {0},  // output_layer_norm_coefficient tensor (omitted)
                   },
TensorType_FLOAT32);
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
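// CIFG LSTM test. Note that despite the "NoPeephole" in the class name,
// peephole connections to the forget and output gates are enabled: the test
// passes use_peephole=true and sets cell_to_forget/cell_to_output weights.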
class CifgNoPeepholeNoProjectionNoClippingLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
0.05100781, 0.04717243, 0.48944736,
-0.38535351, -0.17212132};
input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698,
0.24407166, 0.33826375};
input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_cell_weights_ = {
0.54066205, -0.32668582, -0.43562764, -0.56094903,
0.42957711, 0.01841056, -0.32764608, -0.33027974,
-0.10826075, 0.20675004, 0.19069612, -0.03026325,
-0.54532051, 0.33003211, 0.44901288, 0.21193194};
recurrent_to_forget_weights_ = {
-0.13832897, -0.0515101, -0.2359007, -0.16661474,
-0.14340827, 0.36986142, 0.23414481, 0.55899,
0.10798943, -0.41174671, 0.17751795, -0.34484994,
-0.35874045, -0.11352962, 0.27268326, 0.54058349};
recurrent_to_output_weights_ = {
0.41613156, 0.42610586, -0.16495961, -0.5663873,
0.30579174, -0.05115908, -0.33941799, 0.23364776,
0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987};
cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
0.31544167};
cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
-0.77109635};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.36444446, -0.00352185, 0.12886585, -0.05163646,
-0.42312205, -0.01218222, 0.24201041, -0.08124574,
-0.358325, -0.04621704, 0.21641694, -0.06471302}};
}
};
TEST_F(CifgNoPeepholeNoProjectionNoClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
                   /*use_cifg=*/true, /*use_peephole=*/true,
                   /*use_projection_weights=*/false,
                   /*use_projection_bias=*/false,
                   /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
                       {n_batch, n_input},  // input tensor
                       {0, 0},              // input_to_input_weight tensor (CIFG)
                       {n_cell, n_input},   // input_to_forget_weight tensor
                       {n_cell, n_input},   // input_to_cell_weight tensor
                       {n_cell, n_input},   // input_to_output_weight tensor
                       {0, 0},              // recurrent_to_input_weight tensor (CIFG)
                       {n_cell, n_output},  // recurrent_to_forget_weight tensor
                       {n_cell, n_output},  // recurrent_to_cell_weight tensor
                       {n_cell, n_output},  // recurrent_to_output_weight tensor
                       {0},       // cell_to_input_weight tensor (CIFG)
                       {n_cell},  // cell_to_forget_weight tensor
                       {n_cell},  // cell_to_output_weight tensor
                       {0},       // input_gate_bias tensor (CIFG)
                       {n_cell},  // forget_gate_bias tensor
                       {n_cell},  // cell_gate_bias tensor
                       {n_cell},  // output_gate_bias tensor
                       {0, 0},  // projection_weight tensor (no projection)
                       {0},     // projection_bias tensor
                       {n_batch, n_output},  // activation_state tensor
                       {n_batch, n_cell},    // cell_state tensor
},
TensorType_FLOAT32);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
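// Full LSTM with a separate input gate, peephole connections and a
// projection layer, exercised over two batches with n_cell=20 / n_output=16.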
class NoCifgPeepholeProjectionClippingLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_input_weights_ = {
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
input_to_forget_weights_ = {
-0.0018401089, -0.004852237, 0.03698424, 0.014181704,
0.028273236, -0.016726194, -0.05249759, -0.10204261,
0.00861066, -0.040979505, -0.009899187, 0.01923892,
-0.028177269, -0.08535103, -0.14585495, 0.10662567,
-0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395,
0.0814421, -0.12257899, -0.033945758, -0.031303465,
0.045630626, 0.06843887, -0.13492945, -0.012480007,
-0.0811829, -0.07224499, -0.09628791, 0.045100946,
0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997,
0.052625068, 0.12784666, 0.07077897, 0.025725935,
0.04165009, 0.07241905, 0.018668644, -0.037377294,
-0.06277783, -0.08833636, -0.040120605, -0.011405586,
-0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839,
0.13396506, -0.08402166, -0.01901462, -0.044678304,
-0.07720565, 0.014350063, -0.11757958, -0.0652038,
-0.08185733, -0.076754324, -0.092614375, 0.10405491,
0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447,
-0.054523353, 0.02582715, 0.02327355, -0.011857179,
-0.0011980024, -0.034641717, -0.026125094, -0.17582615,
-0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
-8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
input_to_cell_weights_ = {
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
input_to_output_weights_ = {
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
0.053110216, -0.06928846, -0.13942584, -0.11816189,
0.19483899, 0.03652339, -0.10250295, 0.036714908,
-0.18426876, 0.036065217, 0.21810818, 0.02383196,
-0.043370757, 0.08690144, -0.04444982, 0.00030581196};
forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739};
cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027};
output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
0.027195795, 0.35373217, -0.018957434, 0.008907322,
-0.0762701, 0.12018895, 0.04216877, 0.0022856654,
0.040952638, 0.3147856, 0.08225149, -0.057416286,
-0.14995944, -0.008040261, 0.13208859, 0.029760877};
recurrent_to_input_weights_ = {
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447};
recurrent_to_cell_weights_ = {
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404};
recurrent_to_forget_weights_ = {
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027};
recurrent_to_output_weights_ = {
0.025825322, -0.05813119, 0.09495884, -0.045984812,
-0.01255415, -0.0026479573, -0.08196161, -0.054914974,
-0.0046604523, -0.029587349, -0.044576716, -0.07480124,
-0.082868785, 0.023254942, 0.027502948, -0.0039728214,
-0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307,
-0.08829125, -0.005139627, -0.08989442, -0.0555066,
0.13596267, -0.025062224, -0.048351806, -0.03850004,
0.07266485, -0.022414139, 0.05940088, 0.075114764,
0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916,
0.014416728, 0.043229222, 0.034178585, -0.07530371,
0.035837382, -0.085607, -0.007721233, -0.03287832,
-0.043848954, -0.06404588, -0.06632928, -0.073643476,
0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091,
-0.030063879, 0.008801774, -0.023021035, -0.019558564,
0.05158114, -0.010947698, -0.011825728, 0.0075720972,
0.0699727, -0.0039981045, 0.069350146, 0.08799282,
0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699,
-0.00924166, 0.0046702605, -0.036598757, -0.08811812,
0.10522024, -0.032441203, 0.008176899, -0.04454919,
0.07058152, 0.0067963637, 0.039206743, 0.03259838,
0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724,
0.036879618, 0.043357447, 0.028362012, -0.05908629,
0.0059240665, -0.04995891, -0.019187413, 0.0276265,
-0.01628143, 0.0025863599, 0.08800015, 0.035250366,
-0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454,
-0.009660886, 0.019076364, 0.018299393, -0.046004917,
0.08891175, 0.0431396, -0.026327137, -0.051502608,
0.08979574, -0.051670972, 0.04940282, -0.07491107,
-0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988,
-0.035936575, -0.011681591, 0.064818054, 0.0073146066,
-0.021745546, -0.043124277, -0.06471268, -0.07053354,
-0.029321948, -0.05330136, 0.016933719, -0.053782392,
0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434,
-0.07924483, 0.06936997, 0.0034815092, -0.007305279,
-0.037325785, -0.07251102, -0.033633437, -0.08677009,
0.091591336, -0.14165086, 0.021752775, 0.019683983,
0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828,
0.1183656, -0.0010731248, -0.023590032, -0.072285876,
-0.0724771, -0.026382286, -0.0014920527, 0.042667855,
0.0018776858, 0.02986552, 0.009814309, 0.0733756,
0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798,
-0.010036754, 0.02576849, -0.08307328, 0.010112348,
0.042521734, -0.05869831, -0.071689695, 0.03876447,
-0.13275425, -0.0352966, -0.023077697, 0.10285965,
0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767,
-0.08271222, -0.0030240538, -0.016368777, 0.1070414,
0.042672627, 0.013456989, -0.0437609, -0.022309763,
0.11576483, 0.04108048, 0.061026827, -0.0190714,
-0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893,
-0.023771819, -0.01965048, 0.007955471, -0.043740474,
0.03346837, -0.10549954, 0.090567775, 0.042013682,
-0.03176985, 0.12569028, -0.02421228, -0.029526481,
0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367,
-0.06861939, -0.021256343, -0.041093912, -0.06669611,
0.035498552, 0.021757556, -0.09302526, -0.015403468,
-0.06614931, -0.051798206, -0.013874718, 0.03630673,
0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424,
-0.020674974, -0.03944324, -0.008110165, -0.11113267,
0.08484226, 0.043586485, 0.040582247, 0.0968012,
-0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844,
-0.11768019, 0.085926116, -0.08251791, -0.045081906,
0.0948852, 0.068401024, 0.024856757, 0.06978981,
-0.057309967, -0.012775832, -0.0032452994, 0.01977615,
-0.041040014, -0.024264973, 0.063464895, 0.05431621,
};
cell_to_input_weights_ = {
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
cell_to_forget_weights_ = {
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
cell_to_output_weights_ = {
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
projection_weights_ = {
-0.009802181, 0.09401916, 0.0717386, -0.13895074,
0.09641832, 0.060420845, 0.08539281, 0.054285463,
0.061395317, 0.034448683, -0.042991187, 0.019801661,
-0.16840284, -0.015726732, -0.23041931, -0.024478018,
-0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914,
0.16169067, 0.22465782, -0.03993472, -0.004017731,
0.08633481, -0.28869787, 0.08682067, 0.17240396,
0.014975425, 0.056431185, 0.031037588, 0.16702051,
0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434,
0.014777949, -0.20203483, 0.094781205, 0.19100232,
0.13987629, -0.036132768, -0.06426278, -0.05108664,
0.13221376, 0.009441198, -0.16715929, 0.15859416,
-0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123,
0.0046453946, 0.050794356, 0.10770313, -0.20790008,
-0.07149004, -0.11425117, 0.008225835, -0.035802525,
0.14374903, 0.15262283, 0.048710253, 0.1847461,
-0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216,
0.016261552, 0.022461696, 0.12689082, -0.043589946,
-0.12035478, -0.08361797, -0.050666027, -0.1248618,
-0.1275799, -0.071875185, 0.07377272, 0.09944291,
-0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444,
0.041546922, -0.20424393, 0.06907816, 0.050412357,
0.00724631, 0.039827548, 0.12449835, 0.10747581,
0.13708383, 0.09134148, -0.12617786, -0.06428341,
0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063,
0.022913318, -0.042050496, 0.16842307, -0.060597885,
0.10531834, -0.06411776, -0.07451711, -0.03410368,
-0.13393489, 0.06534304, 0.003620307, 0.04490757,
0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227,
-0.024575593, -0.036445823, 0.07155557, 0.009672501,
-0.02328883, 0.009533515, -0.03606021, -0.07421458,
-0.028082801, -0.2678904, -0.13221288, 0.18419984,
-0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874,
0.14494097, -0.12522776, -0.098633975, -0.10766018,
-0.08317623, 0.08594209, 0.07749552, 0.039474737,
0.1776665, -0.07409566, -0.0477268, 0.29323658,
0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189,
0.10034707, 0.045594677, 0.0635285, -0.0715442,
-0.089667566, -0.10811871, 0.00026344223, 0.08298446,
-0.009525053, 0.006585689, -0.24567553, -0.09450807,
0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363,
0.067035615, 0.19271925, -0.0032889997, -0.043264326,
0.09663576, -0.057112187, -0.10100678, 0.0628376,
0.04447668, 0.017961001, -0.10094388, -0.10190601,
0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151,
-0.1947263, 0.02251204, 0.11216432, -0.10307853,
0.17351969, -0.039091777, 0.08066188, -0.00561982,
0.12633002, 0.11335965, -0.0088127935, -0.019777594,
0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895,
-0.07468996, -0.0855457, 0.099339016, -0.07580735,
-0.13775392, 0.08434318, 0.08330512, -0.12131499,
0.031935584, 0.09180414, -0.08876437, -0.08049874,
0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513,
0.04331237, 0.04299654, -0.036394123, -0.12915532,
0.09793732, 0.07512415, -0.11319543, -0.032502122,
0.15661901, 0.07671967, -0.005491124, -0.19379048,
-0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725,
-0.09334311, 0.15026465, -0.15493552, -0.057762887,
-0.11604192, -0.262013, -0.01391798, 0.012185008,
0.11156489, -0.07483202, 0.06693364, -0.26151478,
0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075,
0.030635102, 0.010969227, 0.11109743, 0.010919218,
0.027526086, 0.13519906, 0.01891392, -0.046839405,
-0.040167913, 0.017953383, -0.09700955, 0.0061885654,
-0.07000971, 0.026893595, -0.038844477, 0.14543656};
lstm_input_ = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058,
0.596268, 0.998386, 0.568695, 0.864524, 0.571277,
0.073204, 0.296072, 0.743333, 0.069199, 0.045348,
0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181,
0.642421, 0.524260, 0.134799, 0.003639, 0.162482,
0.640394, 0.930399, 0.050782, 0.432485, 0.988078,
0.082922, 0.563329, 0.865614, 0.333232, 0.259916}
};
lstm_golden_output_ = {
{
-0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
-0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
-0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
-0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
-0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
-0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
-0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169},
{
-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
-0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
-0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
-0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
-0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
-0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009}};
}
};
TEST_F(NoCifgPeepholeProjectionClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
                   /*use_cifg=*/false, /*use_peephole=*/true,
                   /*use_projection_weights=*/true,
                   /*use_projection_bias=*/false,
                   /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
                       {n_batch, n_input},  // input tensor
                       {n_cell, n_input},   // input_to_input_weight tensor
                       {n_cell, n_input},   // input_to_forget_weight tensor
                       {n_cell, n_input},   // input_to_cell_weight tensor
                       {n_cell, n_input},   // input_to_output_weight tensor
                       {n_cell, n_output},  // recurrent_to_input_weight tensor
                       {n_cell, n_output},  // recurrent_to_forget_weight tensor
                       {n_cell, n_output},  // recurrent_to_cell_weight tensor
                       {n_cell, n_output},  // recurrent_to_output_weight tensor
                       {n_cell},  // cell_to_input_weight tensor
                       {n_cell},  // cell_to_forget_weight tensor
                       {n_cell},  // cell_to_output_weight tensor
                       {n_cell},  // input_gate_bias tensor
                       {n_cell},  // forget_gate_bias tensor
                       {n_cell},  // cell_gate_bias tensor
                       {n_cell},  // output_gate_bias tensor
                       {n_output, n_cell},  // projection_weight tensor
                       {0},                 // projection_bias tensor
                       {n_batch, n_output},  // activation_state tensor
                       {n_batch, n_cell},    // cell_state tensor
},
TensorType_FLOAT32);
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
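// Layer-normalized LSTM with a separate input gate, peephole connections and
// a projection layer; per-gate layer-norm coefficients are supplied.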
class NoCifgPeepholeProjectionNoClippingLayerNormLstmTest
: public BaseLstmTest {
void SetUp() override {
input_to_input_weights_ = {0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2,
0.3, -0.4, 0.5, -0.8, 0.7, -0.6, 0.5,
-0.4, -0.5, -0.4, -0.3, -0.2, -0.1};
input_to_forget_weights_ = {-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2,
-0.4, 0.3, -0.8, -0.4, 0.3, -0.5, -0.4,
-0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
input_to_cell_weights_ = {-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2,
-0.3, -0.2, -0.6, 0.6, -0.1, -0.4, -0.3,
-0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
input_to_output_weights_ = {-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3,
-0.3, -0.8, -0.2, 0.6, -0.2, 0.4, -0.7,
-0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
input_gate_bias_ = {0.03, 0.15, 0.22, 0.38};
forget_gate_bias_ = {0.1, -0.3, -0.2, 0.1};
cell_gate_bias_ = {-0.05, 0.72, 0.25, 0.08};
output_gate_bias_ = {0.05, -0.01, 0.2, 0.1};
recurrent_to_input_weights_ = {-0.2, -0.3, 0.4, 0.1, -0.5, 0.9,
-0.2, -0.3, -0.7, 0.05, -0.2, -0.6};
recurrent_to_cell_weights_ = {-0.3, 0.2, 0.1, -0.3, 0.8, -0.08,
-0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
recurrent_to_forget_weights_ = {-0.5, -0.3, -0.5, -0.2, 0.6, 0.4,
0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
recurrent_to_output_weights_ = {0.3, -0.1, 0.1, -0.2, -0.5, -0.7,
-0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
cell_to_input_weights_ = {0.05, 0.1, 0.25, 0.15};
cell_to_forget_weights_ = {-0.02, -0.15, -0.25, -0.03};
cell_to_output_weights_ = {0.1, -0.1, -0.5, 0.05};
input_layer_norm_coefficients_ = {0.1, 0.2, 0.3, 0.5};
forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3};
cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8};
output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5};
projection_weights_ = {-0.1, 0.2, 0.01, -0.2, 0.1, 0.5,
0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
lstm_input_ = {
{
0.7, 0.8, 0.1, 0.2, 0.3,
0.8, 0.1, 0.2, 0.4, 0.5,
0.2, 0.7, 0.7, 0.1, 0.7},
{
0.3, 0.2, 0.9, 0.8, 0.1,
0.1, 0.5, 0.2, 0.4, 0.2,
0.6, 0.9, 0.2, 0.5, 0.7},
};
}
};
TEST_F(NoCifgPeepholeProjectionNoClippingLayerNormLstmTest,
LayerNormLstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
  const float cell_clip = 0.0;
  const float proj_clip = 0.0;
LSTMOpModel layer_norm_lstm(
n_batch, n_input, n_cell, n_output,
      /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false, cell_clip, proj_clip,
{
          {n_batch, n_input},  // input tensor
          {n_cell, n_input},   // input_to_input_weight tensor
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {n_cell, n_output},  // recurrent_to_input_weight tensor
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {n_cell},  // cell_to_input_weight tensor
          {n_cell},  // cell_to_forget_weight tensor
          {n_cell},  // cell_to_output_weight tensor
          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {n_output, n_cell},  // projection_weight tensor
          {0},                 // projection_bias tensor
          {n_batch, n_output},  // activation_state tensor
          {n_batch, n_cell},    // cell_state tensor
          {n_cell},  // input_layer_norm_coefficient tensor
          {n_cell},  // forget_layer_norm_coefficient tensor
          {n_cell},  // cell_layer_norm_coefficient tensor
          {n_cell},  // output_layer_norm_coefficient tensor
},
TensorType_FLOAT32);
layer_norm_lstm.SetInputToInputWeights(input_to_input_weights_);
layer_norm_lstm.SetInputToCellWeights(input_to_cell_weights_);
layer_norm_lstm.SetInputToForgetWeights(input_to_forget_weights_);
layer_norm_lstm.SetInputToOutputWeights(input_to_output_weights_);
layer_norm_lstm.SetInputGateBias(input_gate_bias_);
layer_norm_lstm.SetCellBias(cell_gate_bias_);
layer_norm_lstm.SetForgetGateBias(forget_gate_bias_);
layer_norm_lstm.SetOutputGateBias(output_gate_bias_);
layer_norm_lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
layer_norm_lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
layer_norm_lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
layer_norm_lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
layer_norm_lstm.SetCellToInputWeights(cell_to_input_weights_);
layer_norm_lstm.SetCellToForgetWeights(cell_to_forget_weights_);
layer_norm_lstm.SetCellToOutputWeights(cell_to_output_weights_);
layer_norm_lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients_);
layer_norm_lstm.SetForgetLayerNormCoefficients(
forget_layer_norm_coefficients_);
layer_norm_lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_);
layer_norm_lstm.SetOutputLayerNormCoefficients(
output_layer_norm_coefficients_);
layer_norm_lstm.SetProjectionWeights(projection_weights_);
const std::vector<std::vector<float>> layer_norm_lstm_golden_output = {
{
0.0244077, 0.128027, -0.00170918,
0.0137642, 0.140751, 0.0395835,
-0.00459231, 0.155278, 0.0837377,
},
{
-0.00692428, 0.0848741, 0.063445,
-0.00403912, 0.139963, 0.072681,
0.00752706, 0.161903, 0.0561371,
}};
VerifyGoldens(lstm_input_, layer_norm_lstm_golden_output, &layer_norm_lstm);
}
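// Layer-normalized CIFG LSTM: the input gate is coupled to the forget gate,
// so input-gate weights, bias and layer-norm coefficients are omitted.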
class CifgPeepholeProjectionNoClippingLayerNormLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_forget_weights_ = {-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2,
-0.4, 0.3, -0.8, -0.4, 0.3, -0.5, -0.4,
-0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
input_to_cell_weights_ = {-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2,
-0.3, -0.2, -0.6, 0.6, -0.1, -0.4, -0.3,
-0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
input_to_output_weights_ = {-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3,
-0.3, -0.8, -0.2, 0.6, -0.2, 0.4, -0.7,
-0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
forget_gate_bias_ = {0.1, -0.3, -0.2, 0.1};
cell_gate_bias_ = {-0.05, 0.72, 0.25, 0.08};
output_gate_bias_ = {0.05, -0.01, 0.2, 0.1};
recurrent_to_cell_weights_ = {-0.3, 0.2, 0.1, -0.3, 0.8, -0.08,
-0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
recurrent_to_forget_weights_ = {-0.5, -0.3, -0.5, -0.2, 0.6, 0.4,
0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
recurrent_to_output_weights_ = {0.3, -0.1, 0.1, -0.2, -0.5, -0.7,
-0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
cell_to_forget_weights_ = {-0.02, -0.15, -0.25, -0.03};
cell_to_output_weights_ = {0.1, -0.1, -0.5, 0.05};
forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3};
cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8};
output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5};
projection_weights_ = {-0.1, 0.2, 0.01, -0.2, 0.1, 0.5,
0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
lstm_input_ = {
{
0.7, 0.8, 0.1, 0.2, 0.3,
0.8, 0.1, 0.2, 0.4, 0.5,
0.2, 0.7, 0.7, 0.1, 0.7},
{
0.3, 0.2, 0.9, 0.8, 0.1,
0.1, 0.5, 0.2, 0.4, 0.2,
0.6, 0.9, 0.2, 0.5, 0.7},
};
}
};
TEST_F(CifgPeepholeProjectionNoClippingLayerNormLstmTest,
LayerNormLstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
  const float cell_clip = 0.0;
  const float proj_clip = 0.0;
LSTMOpModel layer_norm_lstm(
n_batch, n_input, n_cell, n_output,
      /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false, cell_clip, proj_clip,
{
          {n_batch, n_input},  // input tensor
          {0, 0},              // input_to_input_weight tensor (CIFG)
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {0, 0},              // recurrent_to_input_weight tensor (CIFG)
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},       // cell_to_input_weight tensor (CIFG)
          {n_cell},  // cell_to_forget_weight tensor
          {n_cell},  // cell_to_output_weight tensor
          {0},       // input_gate_bias tensor (CIFG)
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {n_output, n_cell},  // projection_weight tensor
          {0},                 // projection_bias tensor
          {n_batch, n_output},  // activation_state tensor
          {n_batch, n_cell},    // cell_state tensor
          {0},       // input_layer_norm_coefficient tensor (CIFG)
          {n_cell},  // forget_layer_norm_coefficient tensor
          {n_cell},  // cell_layer_norm_coefficient tensor
          {n_cell},  // output_layer_norm_coefficient tensor
},
TensorType_FLOAT32);
layer_norm_lstm.SetInputToCellWeights(input_to_cell_weights_);
layer_norm_lstm.SetInputToForgetWeights(input_to_forget_weights_);
layer_norm_lstm.SetInputToOutputWeights(input_to_output_weights_);
layer_norm_lstm.SetCellBias(cell_gate_bias_);
layer_norm_lstm.SetForgetGateBias(forget_gate_bias_);
layer_norm_lstm.SetOutputGateBias(output_gate_bias_);
layer_norm_lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
layer_norm_lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
layer_norm_lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
layer_norm_lstm.SetCellToForgetWeights(cell_to_forget_weights_);
layer_norm_lstm.SetCellToOutputWeights(cell_to_output_weights_);
layer_norm_lstm.SetForgetLayerNormCoefficients(
forget_layer_norm_coefficients_);
layer_norm_lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_);
layer_norm_lstm.SetOutputLayerNormCoefficients(
output_layer_norm_coefficients_);
layer_norm_lstm.SetProjectionWeights(projection_weights_);
const std::vector<std::vector<float>> layer_norm_lstm_golden_output = {
{
0.02129706, 0.140816242, 0.0112733059,
0.0132302344, 0.152308047, 0.0346313119,
-0.0123688057, 0.165790111, 0.0893077999,
},
{
-0.0226350538, 0.0916948169, 0.0769175813,
-0.0269966982, 0.149707705, 0.094149217,
-0.0103429332, 0.173016444, 0.0720508844,
}};
VerifyGoldens(lstm_input_, layer_norm_lstm_golden_output, &layer_norm_lstm);
}
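// Model wrapper for reduction ops (MEAN) run through the NNAPI delegate.
// Provides helpers to populate the axis/input tensors and to read back the
// output, either directly or dequantized from uint8.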
class BaseReduceOpModel : public SingleOpModelWithNNAPI {
public:
void SetAxis(const std::vector<int>& data) { PopulateTensor(axis_, data); }
template <class T>
void SetInput(const std::vector<T>& data) {
PopulateTensor(input_, data);
}
template <class T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
int Input() { return input_; }
protected:
int input_;
int axis_;
int output_;
};
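// MEAN with the reduction axis supplied as a regular (non-constant) input.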
class MeanOpDynamicModel : public BaseReduceOpModel {
public:
MeanOpDynamicModel(const TensorData& input, const TensorData& output,
const TensorData& axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddInput(axis);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MEAN, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
};
TEST(DynamicFloatMeanOpTest, NotKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
false);
std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({12, 13})));
}
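// MEAN with the reduction axis baked into the model as a constant tensor.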
class MeanOpConstModel : public BaseReduceOpModel {
public:
MeanOpConstModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis_shape,
std::initializer_list<int> axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MEAN, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
};
TEST(NNAPIDelegate, MeanFloatNotKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
{4}, {1, 0, -3, -3}, false);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({12, 13})));
}
TEST(NNAPIDelegate, MeanFloatKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
{2}, {0, 2}, true);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({10.5, 12.5, 14.5})));
}
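// EMBEDDING_LOOKUP model: an int32 index tensor selects rows from a weight
// tensor; the output is always float32.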
class BaseEmbeddingLookupOpModel : public SingleOpModelWithNNAPI {
public:
BaseEmbeddingLookupOpModel(std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
TensorType weight_type = TensorType_FLOAT32) {
input_ = AddInput(TensorType_INT32);
weight_ = AddInput(weight_type);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_EMBEDDING_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({index_shape, weight_shape});
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int weight_;
int output_;
};
class EmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
using BaseEmbeddingLookupOpModel::BaseEmbeddingLookupOpModel;
void Set3DWeightMatrix(const std::function<float(int, int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(weight_);
int rows = tensor->dims->data[0];
int columns = tensor->dims->data[1];
int features = tensor->dims->data[2];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < columns; j++) {
for (int k = 0; k < features; k++) {
tensor->data.f[(i * columns + j) * features + k] = function(i, j, k);
}
}
}
}
};
TEST(NNAPIDelegate, EmbeddingLookupSimpleTest) {
EmbeddingLookupOpModel m({3}, {3, 2, 4});
m.SetInput({1, 0, 2});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
})));
}
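// HASHTABLE_LOOKUP model: each entry of the lookup tensor is searched for in
// the key tensor and the matching row of the value tensor is gathered; a
// uint8 hit mask marks which lookups were found.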
class HashtableLookupOpModel : public SingleOpModelWithNNAPI {
public:
HashtableLookupOpModel(std::initializer_list<int> lookup_shape,
std::initializer_list<int> key_shape,
std::initializer_list<int> value_shape,
TensorType type) {
lookup_ = AddInput(TensorType_INT32);
key_ = AddInput(TensorType_INT32);
value_ = AddInput(type);
output_ = AddOutput(type);
hit_ = AddOutput(TensorType_UINT8);
SetBuiltinOp(BuiltinOperator_HASHTABLE_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({lookup_shape, key_shape, value_shape});
}
void SetLookup(std::initializer_list<int> data) {
PopulateTensor<int>(lookup_, data);
}
void SetHashtableKey(std::initializer_list<int> data) {
PopulateTensor<int>(key_, data);
}
void SetHashtableValue(const std::vector<string>& content) {
PopulateStringTensor(value_, content);
}
void SetHashtableValue(const std::function<float(int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
for (int i = 0; i < rows; i++) {
tensor->data.f[i] = function(i);
}
}
void SetHashtableValue(const std::function<float(int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
int features = tensor->dims->data[1];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < features; j++) {
tensor->data.f[i * features + j] = function(i, j);
}
}
}
std::vector<string> GetStringOutput() {
TfLiteTensor* output = interpreter_->tensor(output_);
int num = GetStringCount(output);
std::vector<string> result(num);
for (int i = 0; i < num; i++) {
auto ref = GetString(output, i);
result[i] = string(ref.str, ref.len);
}
return result;
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<uint8_t> GetHit() { return ExtractVector<uint8_t>(hit_); }
private:
int lookup_;
int key_;
int value_;
int output_;
int hit_;
};
TEST(NNAPIDelegate, HashtableLookupTest2DInput) {
HashtableLookupOpModel m({4}, {3}, {3, 2}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i, int j) { return i + j / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
2.0, 2.1,
0, 0,
0.0, 0.1,
1.0, 1.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
TEST(NNAPIDelegate, HashtableLookupTest1DInput) {
HashtableLookupOpModel m({4}, {3}, {3}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i) { return i * i / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
0.4,
0,
0.0,
0.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
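// PRELU model; alpha is broadcast against the input. The same wrapper is
// reused for float and quantized tensor types via input_type_.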
class PReluOpModel : public SingleOpModelWithNNAPI {
public:
PReluOpModel(const TensorData& input, const TensorData& alpha)
: input_type_(input.type) {
input_ = AddInput(input);
alpha_ = AddInput(alpha);
output_ = AddOutput({input.type, input.shape, input.min, input.max});
SetBuiltinOp(BuiltinOperator_PRELU, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({GetShape(input_), GetShape(alpha_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetAlpha(std::initializer_list<float> data) {
SetData(alpha_, input_type_, data);
}
std::vector<float> GetOutput() {
std::vector<float> output;
GetData(output_, input_type_, &output);
return output;
}
protected:
int input_;
int alpha_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, PReluFloat) {
PReluOpModel m({TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 1, 3}});
m.SetInput({
0.0f, 0.0f, 0.0f,
1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, -1.0f,
-2.0f, -2.0f, -2.0f,
});
m.SetAlpha({0.0f, 1.0f, 2.0f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0f, 0.0f, 0.0f,
1.0f, 1.0f, 1.0f,
0.0f, -1.0f, -2.0f,
0.0f, -2.0f, -4.0f,
}));
}
TEST(NNAPIDelegate, PReluQuantized) {
const float kMin = -1;
const float kMax = 127.f / 128.f;
PReluOpModel m({TensorType_UINT8, {1, 2, 2, 3}, kMin, kMax},
{TensorType_UINT8, {1, 1, 3}, kMin, kMax});
m.SetInput({
0.0f, 0.0f, 0.0f,
0.5f, 0.5f, 0.5f,
-1.0f, -1.0f, -1.0f,
-0.25f, -0.25f, -0.25f,
});
m.SetAlpha({0.0f, 0.5f, -0.5f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
0.0f, 0.0f, 0.0f,
0.5f, 0.5f, 0.5f,
0.0f, -0.5f, 0.5f,
0.0f, -0.125f, 0.125f,
},
kQuantizedTolerance)));
}
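// PADV2 with the paddings (and, in the first overload, the constant pad
// value) provided as constant tensors.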
template <typename T1>
class PadV2OpConstModel : public PadOpModel<T1> {
public:
PadV2OpConstModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings, T1 constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ =
this->AddConstInput(TensorType_INT32, paddings, paddings_shape);
this->constant_values_ =
this->AddConstInput(GetTensorType<T1>(), {constant_values}, {1});
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape});
}
PadV2OpConstModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings,
const TensorData& constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ =
this->AddConstInput(TensorType_INT32, paddings, paddings_shape);
this->constant_values_ = this->AddInput(constant_values);
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape});
}
};
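// PADV2 with the paddings provided as a regular input tensor, so they can be
// set after the interpreter is built.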
template <typename RegularInputOutput>
class PadV2OpDynamicModel : public PadOpModel<RegularInputOutput> {
public:
PadV2OpDynamicModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
RegularInputOutput constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ = this->AddInput(TensorType_INT32);
this->constant_values_ = this->AddConstInput(
GetTensorType<RegularInputOutput>(), {constant_values}, {1});
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape, paddings_shape});
}
PadV2OpDynamicModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
const TensorData& constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ = this->AddInput(TensorType_INT32);
this->constant_values_ = this->AddInput(constant_values);
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape, paddings_shape});
}
};
TEST(PadV2OpTest, SimpleConstTest) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, 0.0,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, SimpleConstFloat32ValuedTestUint8) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, 5, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, Simple4DConstFloat32ValuedTest) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 1, 2, 1}}, {4, 2},
{0, 1, 0, 0, 0, 0, 0, 1}, 5, {TensorType_FLOAT32});
m.SetInput({3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 5, 3, 5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 1, 2, 2}));
}
TEST(PadV2OpTest, SimpleDynamicTest) {
PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2}, 0.0,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, SimpleDynamicValuedTest) {
PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2}, 5,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, AdvancedConstTest) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
{0, 0, 0, 2, 1, 3, 0, 0}, 0, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(PadV2OpTest, AdvancedDynamicTest) {
PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2}, 0,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6});
m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
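// Returns matchers with a tolerance of one quantization step over [min, max].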
std::vector<testing::Matcher<float>> DequantizedArrayNear(
const std::vector<float>& values, const float min, const float max) {
const float quantization_tolerance = (max - min) / 255.0;
return ArrayFloatNear(values, quantization_tolerance);
}
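// The quantized PADV2 scenarios below are written as templates so that each
// one can be instantiated for both uint8 and int8 tensor types.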
template <typename integer_type, TensorType tensor_dtype>
void SimpleConstTestV2() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleConstTest) {
SimpleConstTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleConstTest) {
SimpleConstTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleDynamicTestV2() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(0);
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleDynamicTest) {
SimpleDynamicTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleDynamicTest) {
SimpleDynamicTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedConstTestV2() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedConstTest) {
AdvancedConstTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedConstTest) {
AdvancedConstTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedDynamicTestV2() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(0);
m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedDynamicTest) {
AdvancedDynamicTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedDynamicTest) {
AdvancedDynamicTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleConstValuedTest() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(-0.5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.5, -0.5, -0.5, -0.5, -0.8, 0.2, -0.5, -0.5, 0.9,
0.7, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleConstValuedTest) {
SimpleConstValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleConstValuedTest) {
SimpleConstValuedTest<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleDynamicValuedTest() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(-0.5);
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.5, -0.5, -0.5, -0.5, -0.8, 0.2, -0.5, -0.5, 0.9,
0.7, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleDynamicValuedTest) {
SimpleDynamicValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleDynamicValuedTest) {
SimpleDynamicValuedTest<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedConstValuedTest() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(-0.5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.8, 0.2, 0.9, -0.5, -0.5, -0.5, -0.5, 0.7, 0.1,
-0.3, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedConstValuedTest) {
AdvancedConstValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedConstValuedTest) {
AdvancedConstValuedTest<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedDynamicValuedTest() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(-0.5);
m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.8, 0.2, 0.9, -0.5, -0.5, -0.5, -0.5, 0.7, 0.1,
-0.3, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedDynamicValuedTest) {
AdvancedDynamicValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedDynamicValuedTest) {
AdvancedDynamicValuedTest<int8_t, TensorType_INT8>();
}
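// LeakyRelu model wrapper. The input tensor type is stored so that SetInput
// and GetOutput can dispatch on it (float or quantized) through the shared
// SetData / GetData helpers.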
class LeakyReluOpModel : public SingleOpModelWithNNAPI {
public:
LeakyReluOpModel(const TensorData& input, const float alpha)
: input_type_(input.type) {
input_ = AddInput(input);
output_ = AddOutput({input.type, input.shape, input.min, input.max});
SetBuiltinOp(BuiltinOperator_LEAKY_RELU, BuiltinOptions_LeakyReluOptions,
CreateLeakyReluOptions(builder_, alpha).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
std::vector<float> GetOutput() {
std::vector<float> output;
GetData(output_, input_type_, &output);
return output;
}
protected:
int input_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, LeakyReluFloat) {
LeakyReluOpModel m({TensorType_FLOAT32, {2, 3}}, 0.5f);
m.SetInput({
0.0f, 1.0f, 3.0f,
1.0f, -1.0f, -2.0f,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0f, 1.0f, 3.0f,
1.0f, -0.5f, -1.0f,
}));
}
TEST(NNAPIDelegate, LeakyReluQuantized) {
const float kMin = -1;
const float kMax = 127.f / 128.f;
LeakyReluOpModel m({TensorType_UINT8, {2, 3}, 8 * kMin, 8 * kMax}, 0.5f);
m.SetInput({
0.0f, 1.0f, 3.0f,
1.0f, -1.0f, -2.0f,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
0.0f, 1.0f, 3.0f,
1.0f, -0.5f, -1.0f,
},
kQuantizedTolerance)));
}
}
namespace ops {
namespace builtin {
TfLiteRegistration* Register_FLOOR();
}
}
namespace {
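// Converts a TfLiteTensor's dimensions into the uint32_t form expected by
// NNAPI. When the tensor has a dims_signature consistent with dims, dynamic
// dimensions (-1) are mapped to 0; otherwise dims is used verbatim.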
std::vector<uint32_t> GetNNAPIDimensions(const TfLiteTensor* tensor) {
std::vector<uint32_t> dimensions;
dimensions.reserve(tensor->dims->size);
if (tensor->dims_signature != nullptr &&
tensor->dims_signature->size == tensor->dims->size) {
for (auto d : TfLiteIntArrayView(tensor->dims_signature)) {
uint32_t nnapi_dim = (d == -1) ? 0 : static_cast<uint32_t>(d);
dimensions.push_back(nnapi_dim);
}
} else {
dimensions.assign(tensor->dims->data,
tensor->dims->data + tensor->dims->size);
}
return dimensions;
}
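// Test vendor plugin: it accepts only the "nnapi-custom-op" custom op with a
// single float32 input and output, and lowers it to the built-in
// ANEURALNETWORKS_FLOOR operation. This exercises the delegate's vendor
// plugin hooks end to end.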
static const char kTestCustomOp[] = "nnapi-custom-op";
class NnapiTestVendorPlugin : public NnapiDelegateVendorPlugin {
public:
NnapiTestVendorPlugin() {
ValidateNode = DoValidateNode;
MapNode = DoMapNode;
ConfigureCompilationHints = DoConfigureCompilationHints;
ConfigureExecutionHints = DoConfigureExecutionHints;
}
static bool DoValidateNode(const TfLiteContext* context,
const TfLiteRegistration* registration,
const TfLiteNode* node) {
if (strcmp(kTestCustomOp, registration->custom_name) != 0) {
return false;
}
if (node->inputs->size != 1 || node->outputs->size != 1) {
return false;
}
    if (context->tensors[node->inputs->data[0]].type != kTfLiteFloat32 ||
        context->tensors[node->outputs->data[0]].type != kTfLiteFloat32) {
return false;
}
return true;
}
static TfLiteStatus AddFloat32Tensor(const TfLiteContext* context,
int tensor_index,
NnapiMappingUtilCInterface* mapping,
std::vector<uint32_t>* indices,
ANeuralNetworksModel* model) {
int ann_tensor_index = mapping->TfLiteIndexToNnIndex(mapping, tensor_index);
if (ann_tensor_index != -1) {
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
ann_tensor_index = mapping->AddNewNnTensorIndex(mapping, tensor_index);
TfLiteTensor* tensor = &context->tensors[tensor_index];
auto dimensions = GetNNAPIDimensions(tensor);
ANeuralNetworksOperandType operand_type{
.type = ANEURALNETWORKS_TENSOR_FLOAT32,
.dimensionCount = static_cast<uint32_t>(dimensions.size()),
.dimensions = dimensions.data(),
.scale = 0.0f,
.zeroPoint = 0,
};
EXPECT_EQ(NnApiImplementation()->ANeuralNetworksModel_addOperand(
model, &operand_type),
ANEURALNETWORKS_NO_ERROR);
if (tensor->allocation_type == kTfLiteMmapRo) {
EXPECT_EQ(NnApiImplementation()->ANeuralNetworksModel_setOperandValue(
model, ann_tensor_index, tensor->data.data, tensor->bytes),
ANEURALNETWORKS_NO_ERROR);
}
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
static TfLiteStatus DoMapNode(TfLiteContext* context, const TfLiteNode* node,
int node_index,
NnapiMappingUtilCInterface* mapping,
ANeuralNetworksModel* model) {
std::vector<uint32_t> input_indices;
std::vector<uint32_t> output_indices;
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
const auto input_index = node->inputs->data[input_pos];
EXPECT_EQ(AddFloat32Tensor(context, input_index, mapping, &input_indices,
model),
kTfLiteOk);
}
for (int output_pos = 0; output_pos < node->outputs->size; ++output_pos) {
const auto output_index = node->outputs->data[output_pos];
EXPECT_EQ(AddFloat32Tensor(context, output_index, mapping,
&output_indices, model),
kTfLiteOk);
}
EXPECT_EQ(
NnApiImplementation()->ANeuralNetworksModel_addOperation(
model, ANEURALNETWORKS_FLOOR,
static_cast<uint32_t>(input_indices.size()), input_indices.data(),
static_cast<uint32_t>(output_indices.size()),
output_indices.data()),
ANEURALNETWORKS_NO_ERROR);
mapping->AddNnapiToTfliteOpMapping(mapping, node_index);
return kTfLiteOk;
}
static TfLiteStatus DoConfigureCompilationHints(
const char* compilation_hints, ANeuralNetworksCompilation* compilation) {
return kTfLiteOk;
}
static TfLiteStatus DoConfigureExecutionHints(
const char* execution_hints, ANeuralNetworksExecution* execution) {
return kTfLiteOk;
}
};
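// Single-op model whose custom op uses the builtin FLOOR kernel as its TFLite
// reference implementation, so results produced through the vendor plugin
// path can be compared against the expected floor outputs.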
class CustomFloorOpModel : public SingleOpModelWithNNAPI {
public:
CustomFloorOpModel(const StatefulNnApiDelegate::Options& options,
const TensorData& input, const TensorData& output,
bool allow_fp32_relax_to_fp16 = false,
bool apply_delegate = true)
: SingleOpModelWithNNAPI(options) {
Init(input, output, allow_fp32_relax_to_fp16, apply_delegate);
}
int input() { return input_; }
int output() { return output_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
private:
void Init(const TensorData& input, const TensorData& output,
bool allow_fp32_relax_to_fp16 = false, bool apply_delegate = true) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetCustomOp(kTestCustomOp, {}, tflite::ops::builtin::Register_FLOOR);
BuildInterpreterWithNNAPI({GetShape(input_)}, allow_fp32_relax_to_fp16,
apply_delegate);
}
};
TEST(NNAPIDelegate, CustomFloorVendorExtension) {
auto vendor_plugin = std::make_unique<NnapiTestVendorPlugin>();
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.vendor_plugin = vendor_plugin.get();
CustomFloorOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}});
m.PopulateTensor<float>(m.input(), {0, 0.2, 1.7, 2.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.0, 1.0, 2.0}));
}
TEST(NNAPIDelegate, DISABLED_CustomFloorVendorExtensionDynamic) {
if (NnApiImplementation()->android_sdk_version <
delegate::nnapi::kMinSdkVersionForNNAPI12) {
GTEST_SKIP();
}
auto vendor_plugin = std::make_unique<NnapiTestVendorPlugin>();
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.vendor_plugin = vendor_plugin.get();
options.allow_dynamic_dimensions = true;
auto tensor_data = TensorData{TensorType_FLOAT32,
{1, 2, 2, 1},
0.0f,
0.0f,
0.0f,
0,
false,
{},
{},
0,
{},
{},
{},
{},
{-1, 2, 2, 1}};
size_t max_batch_size = 2;
size_t tensor_max_size = max_batch_size * 2 * 2 * 1 * sizeof(float);
  CustomFloorOpModel m(options, tensor_data, tensor_data,
                       /*allow_fp32_relax_to_fp16=*/false,
                       /*apply_delegate=*/false);
m.SetTensorMaxSize(m.input(), tensor_max_size);
m.SetTensorMaxSize(m.output(), tensor_max_size);
m.ApplyNNAPIDelegate();
EXPECT_EQ(m.ResizeInputTensor(m.input(), {2, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input(), {0, 0.2, 1.7, 2.8, 3.4, 4.1, 5.9, 6.3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0}));
EXPECT_EQ(m.ResizeInputTensor(m.input(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input(), {1.7, 2.8, 3.4, 4.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 2.0, 3.0, 4.0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3affe1cf-fcfc-4263-9a25-ad26d0fba32e | cpp | tensorflow/tensorflow | quant_lstm_sup | tensorflow/lite/delegates/nnapi/quant_lstm_sup.cc | tensorflow/lite/delegates/nnapi/quant_lstm_sup_test.cc | #include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include <algorithm>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegate {
namespace nnapi {
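// Copies a [submatrix_rows x submatrix_cols] block of the row-major `weights`
// matrix, starting at (offset_row, offset_column), into `submatrix`. The
// output vector is resized to hold exactly NumElements(submatrix_dims)
// values.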
void ExtractQuantLstmWeightsSubmatrix(const TfLiteIntArray* submatrix_dims,
const int32_t offset_row,
const int32_t offset_column,
const TfLiteIntArray* weight_dims,
const uint8_t* weights,
std::vector<uint8_t>* submatrix) {
auto const& submatrix_rows = submatrix_dims->data[0];
auto const& submatrix_cols = submatrix_dims->data[1];
auto const& weight_cols = weight_dims->data[1];
submatrix->resize(NumElements(submatrix_dims));
for (uint32_t i = 0, end = submatrix_rows * submatrix_cols; i < end; ++i) {
const uint32_t row = i / submatrix_cols;
const uint32_t column = i % submatrix_cols;
(*submatrix)[i] =
weights[(row + offset_row) * weight_cols + column + offset_column];
}
}
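// The quantized LSTM stores its gate weights as one concatenated tensor of
// shape [4 * output_depth, output_depth + input_depth], so both depths can be
// recovered from the concatenated tensor's dimensions alone.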
inline int OutputDepth(const TfLiteIntArray* weight_dims) {
return weight_dims->data[0] / 4;
}
inline int InputDepth(const TfLiteIntArray* weight_dims) {
return weight_dims->data[1] - OutputDepth(weight_dims);
}
void SetWeightSubmatrixDims(const TfLiteIntArray* weight_dims,
TfLiteIntArray* recurrent_submatrix_dims,
TfLiteIntArray* input_submatrix_dims) {
const auto input_depth = InputDepth(weight_dims);
const auto output_depth = OutputDepth(weight_dims);
recurrent_submatrix_dims->data[0] = output_depth;
recurrent_submatrix_dims->data[1] = output_depth;
input_submatrix_dims->data[0] = output_depth;
input_submatrix_dims->data[1] = input_depth;
}
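// Splits the concatenated quantized LSTM weight tensor into its eight
// per-gate submatrices. The four row blocks are ordered input, cell, forget,
// output; within each block the recurrent weights occupy the first
// output_depth columns and the input weights the remaining input_depth
// columns.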
void DecomposeQuantLstmWeightsTensor(const uint8_t* concat_weights,
const TfLiteIntArray* weight_dims,
std::vector<uint8_t>* recurrent_to_input,
std::vector<uint8_t>* input_to_input,
std::vector<uint8_t>* recurrent_to_cell,
std::vector<uint8_t>* input_to_cell,
std::vector<uint8_t>* recurrent_to_forget,
std::vector<uint8_t>* input_to_forget,
std::vector<uint8_t>* recurrent_to_output,
std::vector<uint8_t>* input_to_output) {
const auto output_depth = OutputDepth(weight_dims);
TfLiteIntArray* recurrent_submatrix_dims = TfLiteIntArrayCreate(2);
TfLiteIntArray* input_submatrix_dims = TfLiteIntArrayCreate(2);
SetWeightSubmatrixDims(weight_dims, recurrent_submatrix_dims,
input_submatrix_dims);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 0 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_input);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 0 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_input);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 1 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_cell);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 1 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_cell);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 2 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_forget);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 2 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_forget);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 3 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_output);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 3 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_output);
TfLiteIntArrayFree(recurrent_submatrix_dims);
TfLiteIntArrayFree(input_submatrix_dims);
}
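// Splits the concatenated bias vector, laid out as input, cell, forget and
// output gate biases of `bias_size` elements each, into four separate
// vectors.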
void DecomposeBiasTensor(const int32_t* biases, int bias_size,
std::vector<int32_t>* input_bias,
std::vector<int32_t>* cell_bias,
std::vector<int32_t>* forget_bias,
std::vector<int32_t>* output_bias) {
input_bias->resize(bias_size);
std::copy(biases, biases + bias_size, input_bias->begin());
cell_bias->resize(bias_size);
std::copy(biases + bias_size, biases + 2 * bias_size, cell_bias->begin());
forget_bias->resize(bias_size);
std::copy(biases + 2 * bias_size, biases + 3 * bias_size,
forget_bias->begin());
output_bias->resize(bias_size);
std::copy(biases + 3 * bias_size, biases + 4 * bias_size,
output_bias->begin());
}
}
}
} | #include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace {
using ::testing::ElementsAreArray;
using ::testing::Test;
class DimsAllocatingTest : public Test {
protected:
DimsAllocatingTest() : allocated_dims_() {}
~DimsAllocatingTest() override {
for (TfLiteIntArray* dim : allocated_dims_) {
TfLiteIntArrayFree(dim);
}
}
TfLiteIntArray* CreateDimArray(int size,
std::initializer_list<int> dimensions) {
TfLiteIntArray* dims = TfLiteIntArrayCreate(size);
allocated_dims_.push_back(dims);
int i = 0;
for (const int dimension : dimensions) {
dims->data[i++] = dimension;
}
return dims;
}
private:
std::vector<TfLiteIntArray*> allocated_dims_;
};
using tflite::delegate::nnapi::ExtractQuantLstmWeightsSubmatrix;
class ExtractQuantLstmWeightsSubmatrixTest : public DimsAllocatingTest {};
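// The 8x5 weight matrix used in these tests corresponds to output_depth = 2
// and input_depth = 3: four row blocks of two rows each, with recurrent
// weights in the first two columns and input weights in the last three.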
TEST_F(ExtractQuantLstmWeightsSubmatrixTest, TopLeftSubmatrixIsExtracted) {
std::vector<uint8_t> weights = {1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235};
const TfLiteIntArray* weight_dims = CreateDimArray(2, {8, 5});
std::vector<uint8_t> submatrix;
const TfLiteIntArray* submatrix_dims = CreateDimArray(2, {2, 3});
  ExtractQuantLstmWeightsSubmatrix(submatrix_dims, /*offset_row=*/0,
                                   /*offset_column=*/0, weight_dims,
                                   weights.data(), &submatrix);
EXPECT_THAT(submatrix, ElementsAreArray({1, 2, 3, 11, 12, 13}));
}
TEST_F(ExtractQuantLstmWeightsSubmatrixTest, TopRightSubmatrixIsExtracted) {
std::vector<uint8_t> weights = {1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235};
const TfLiteIntArray* weight_dims = CreateDimArray(2, {8, 5});
std::vector<uint8_t> submatrix;
const TfLiteIntArray* submatrix_dims = CreateDimArray(2, {2, 2});
  ExtractQuantLstmWeightsSubmatrix(submatrix_dims, /*offset_row=*/0,
                                   /*offset_column=*/3, weight_dims,
                                   weights.data(), &submatrix);
EXPECT_THAT(submatrix, ElementsAreArray({4, 5, 14, 15}));
}
TEST_F(ExtractQuantLstmWeightsSubmatrixTest, RightCentralSubmatrixIsExtracted) {
std::vector<uint8_t> weights = {1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235};
const TfLiteIntArray* weight_dims = CreateDimArray(2, {8, 5});
std::vector<uint8_t> submatrix;
const TfLiteIntArray* submatrix_dims = CreateDimArray(2, {2, 2});
  ExtractQuantLstmWeightsSubmatrix(
      submatrix_dims, /*offset_row=*/1 * submatrix_dims->data[0],
      /*offset_column=*/3, weight_dims, weights.data(), &submatrix);
EXPECT_THAT(submatrix, ElementsAreArray({104, 105, 114, 115}));
}
using tflite::delegate::nnapi::DecomposeQuantLstmWeightsTensor;
class QuantLstmWeightDecompTest : public DimsAllocatingTest {
protected:
QuantLstmWeightDecompTest()
: weights_({1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235}),
recurrent_to_input_(),
input_to_input_(),
recurrent_to_cell_(),
input_to_cell_(),
recurrent_to_forget_(),
input_to_forget_(),
recurrent_to_output_(),
input_to_output_() {
weight_dims_ = CreateDimArray(2, {8, 5});
}
const std::vector<uint8_t> weights_;
const TfLiteIntArray* weight_dims_;
std::vector<uint8_t> recurrent_to_input_;
std::vector<uint8_t> input_to_input_;
std::vector<uint8_t> recurrent_to_cell_;
std::vector<uint8_t> input_to_cell_;
std::vector<uint8_t> recurrent_to_forget_;
std::vector<uint8_t> input_to_forget_;
std::vector<uint8_t> recurrent_to_output_;
std::vector<uint8_t> input_to_output_;
};
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToInput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_input_, ElementsAreArray({1, 2,
11, 12}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToInput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_input_, ElementsAreArray({3, 4, 5,
13, 14, 15}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToCell) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_cell_, ElementsAreArray({101, 102,
111, 112}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToCell) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_cell_, ElementsAreArray({103, 104, 105,
113, 114, 115}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToForget) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_forget_, ElementsAreArray({201, 202,
211, 212}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToForget) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_forget_, ElementsAreArray({203, 204, 205,
213, 214, 215}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToOutput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_output_, ElementsAreArray({221, 222,
231, 232}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToOutput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_output_, ElementsAreArray({223, 224, 225,
233, 234, 235}));
}
using tflite::delegate::nnapi::DecomposeBiasTensor;
TEST(DecomposeBiasTensor, ExtractInputBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(input_bias, ElementsAreArray({-7876, 13488, -726, 32839}));
}
TEST(DecomposeBiasTensor, ExtractCellBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(cell_bias, ElementsAreArray({39481, 48624, 48976, -21419}));
}
TEST(DecomposeBiasTensor, ExtractForgetBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(forget_bias, ElementsAreArray({9206, -46884, -11693, -38724}));
}
TEST(DecomposeBiasTensor, ExtractOutputBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(output_bias, ElementsAreArray({-58999, -17050, -41852, -40538}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/quant_lstm_sup.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/quant_lstm_sup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c4bfa72-7d6e-4c9a-b6e8-32a2e923bbb6 | cpp | tensorflow/tensorflow | nnapi_delegate_c_api | tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.cc | tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api_test.cc | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
TfLiteDelegate* TfLiteNnapiDelegateCreate(
const TfLiteNnapiDelegateOptions* options) {
tflite::StatefulNnApiDelegate::StatefulNnApiDelegate::Options
internal_options;
internal_options.execution_preference =
static_cast<tflite::StatefulNnApiDelegate::StatefulNnApiDelegate::
Options::ExecutionPreference>(
options->execution_preference);
internal_options.accelerator_name = options->accelerator_name;
internal_options.cache_dir = options->cache_dir;
internal_options.model_token = options->model_token;
internal_options.disallow_nnapi_cpu = options->disallow_nnapi_cpu;
internal_options.max_number_delegated_partitions =
options->max_number_delegated_partitions;
internal_options.allow_fp16 = options->allow_fp16;
tflite::StatefulNnApiDelegate* delegate = nullptr;
if (options->nnapi_support_library_handle) {
delegate = new tflite::StatefulNnApiDelegate(
static_cast<NnApiSLDriverImplFL5*>(
options->nnapi_support_library_handle),
internal_options);
} else {
delegate = new tflite::StatefulNnApiDelegate(internal_options);
}
return delegate;
}
TfLiteNnapiDelegateOptions TfLiteNnapiDelegateOptionsDefault() {
TfLiteNnapiDelegateOptions result = {};
tflite::StatefulNnApiDelegate::Options options;
result.execution_preference =
static_cast<TfLiteNnapiDelegateOptions::ExecutionPreference>(
options.execution_preference);
result.accelerator_name = options.accelerator_name;
result.cache_dir = options.cache_dir;
result.model_token = options.model_token;
result.disallow_nnapi_cpu = options.disallow_nnapi_cpu;
result.max_number_delegated_partitions =
options.max_number_delegated_partitions;
result.allow_fp16 = options.allow_fp16;
result.nnapi_support_library_handle = nullptr;
return result;
}
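// Destroys a delegate previously returned by TfLiteNnapiDelegateCreate.
// Typical usage (sketch):
//   TfLiteNnapiDelegateOptions opts = TfLiteNnapiDelegateOptionsDefault();
//   TfLiteDelegate* delegate = TfLiteNnapiDelegateCreate(&opts);
//   ... attach the delegate to an interpreter and run inference ...
//   TfLiteNnapiDelegateDelete(delegate);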
void TfLiteNnapiDelegateDelete(TfLiteDelegate* delegate) {
if (delegate == nullptr) return;
delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
} | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.h"
#include <sys/mman.h>
#include <algorithm>
#include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class SingleOpModelWithNnapiDelegateCApi : public SingleOpModel {
public:
SingleOpModelWithNnapiDelegateCApi() {
options_ = TfLiteNnapiDelegateOptionsDefault();
options_.disallow_nnapi_cpu = false;
}
explicit SingleOpModelWithNnapiDelegateCApi(
const TfLiteNnapiDelegateOptions& options) {
options_ = options;
options_.disallow_nnapi_cpu = false;
}
~SingleOpModelWithNnapiDelegateCApi() {
if (nnapi_delegate_) {
TfLiteNnapiDelegateDelete(nnapi_delegate_);
}
nnapi_delegate_ = nullptr;
}
protected:
void BuildInterpreterWithNNAPI(std::vector<std::vector<int>> input_shapes) {
if (nnapi_delegate_) {
TfLiteNnapiDelegateDelete(nnapi_delegate_);
}
nnapi_delegate_ = TfLiteNnapiDelegateCreate(&options_);
SetDelegate(nnapi_delegate_);
BuildInterpreter(input_shapes, -1, options_.allow_fp16,
true, true);
}
private:
TfLiteNnapiDelegateOptions options_;
TfLiteDelegate* nnapi_delegate_ = nullptr;
};
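// Minimal ADD model used to drive the delegate through the C API. The
// delegate is created from TfLiteNnapiDelegateOptions and attached before the
// interpreter is built.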
class FloatAddOpModel : public SingleOpModelWithNnapiDelegateCApi {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
Init(input1, input2, output, activation_type);
}
FloatAddOpModel(const TfLiteNnapiDelegateOptions& options,
const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type)
: SingleOpModelWithNnapiDelegateCApi(options) {
Init(input1, input2, output, activation_type);
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
private:
void Init(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
};
TEST(NNAPIDelegate, C_API) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, C_API_WithAcceleratorName) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
options.accelerator_name = "nnapi-reference";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, C_API_WithCompilationCaching) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
options.cache_dir = "/data/local/tmp";
options.model_token = "NNAPIDelegate.C_API_WithCompilationCaching";
{
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
{
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-1.0, 0.1, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.2, 0.2, 0.4, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-0.8, 0.3, 1.1, 1.0}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1f3949ba-c6eb-45b8-8bf4-d4a5c74d3fa7 | cpp | tensorflow/tensorflow | weight_cache | tensorflow/lite/delegates/xnnpack/weight_cache.cc | tensorflow/lite/delegates/xnnpack/weight_cache_test.cc | #include "tensorflow/lite/delegates/xnnpack/weight_cache.h"
#include <fcntl.h>
#include <sys/stat.h>
#if defined(_MSC_VER)
#include <io.h>
#define F_OK 0
#else
#include <sys/mman.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "xnnpack.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/file_util.h"
#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#define XNNPACK_ABORT_CHECK(TEST, ...) \
if (!(TEST)) { \
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, __VA_ARGS__); \
std::abort(); \
}
#define XNNPACK_VAR_ARG_HEAD(FIRST, ...) FIRST
#define XNNPACK_RETURN_CHECK(TEST, ...) \
if (!(TEST)) { \
if (sizeof(XNNPACK_VAR_ARG_HEAD("" __VA_ARGS__)) > sizeof("")) { \
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, \
"XNNPack weight cache: " __VA_ARGS__); \
} \
return false; \
}
namespace tflite::xnnpack {
namespace {
constexpr size_t kMinAlignment = 128;
bool IsInMemoryCachePath(const char* path) {
return !strncmp(path, kInMemoryCachePath, sizeof(kInMemoryCachePath) - 1);
}
bool IsInMemoryCachePath(const std::string& path) {
return IsInMemoryCachePath(path.c_str());
}
size_t Align(size_t offset, const size_t alignment) {
const size_t misalign = offset % alignment;
return offset + (misalign ? alignment - misalign : 0);
}
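// Small RAII helper that runs `callback` when it goes out of scope unless
// Deactivate() has been called. It is used below to undo partial work on
// error paths.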
template <class F>
class ScopeGuard {
public:
explicit ScopeGuard(F&& callback) : callback_(std::forward<F>(callback)) {}
ScopeGuard(const ScopeGuard&) = delete;
ScopeGuard& operator=(const ScopeGuard&) = delete;
ScopeGuard(ScopeGuard&& other)
: active_(other.active_), callback_(std::move(other.callback_)) {
other.Deactivate();
}
  ScopeGuard& operator=(ScopeGuard&& other) {
    if (this != &other) {
      active_ = std::move(other.active_);
      callback_ = std::move(other.callback_);
      other.Deactivate();
    }
    return *this;
  }
~ScopeGuard() {
if (active_) {
callback_();
}
}
void Deactivate() { active_ = false; }
private:
F callback_;
bool active_ = true;
};
template <class F>
ScopeGuard(F&&) -> ScopeGuard<F>;
[[nodiscard]]
bool FileExists(const char* path) {
return access(path, F_OK) != -1;
}
}
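// MMapHandle wraps a read-only mapping of (part of) a file. On POSIX the
// requested offset is rounded down to a page boundary and the difference is
// tracked in offset_page_adjustment_; on Windows the relevant bytes are
// simply read into a heap-allocated buffer.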
void swap(MMapHandle& a, MMapHandle& b) {
using std::swap;
swap(a.size_, b.size_);
swap(a.offset_, b.offset_);
swap(a.offset_page_adjustment_, b.offset_page_adjustment_);
swap(a.data_, b.data_);
}
MMapHandle::~MMapHandle() { UnMap(); }
MMapHandle::MMapHandle(MMapHandle&& other) { swap(*this, other); }
MMapHandle& MMapHandle::operator=(MMapHandle&& other) {
swap(*this, other);
return *this;
}
bool MMapHandle::Map(const char* path, const size_t offset) {
return this->Map(FileDescriptor::Open(path, O_RDONLY), offset, path);
}
bool MMapHandle::Map(const FileDescriptor& fd, const size_t offset,
const char* const path) {
this->UnMap();
XNNPACK_RETURN_CHECK(fd.IsValid(),
"cannot mmap invalid file descriptor %d ('%s').",
fd.Value(), path);
struct stat file_stats;
XNNPACK_RETURN_CHECK(fstat(fd.Value(), &file_stats) == 0,
"could not access file stats to get size ('%s'): %s.",
path, strerror(errno));
ScopeGuard unmap_on_error([this] { UnMap(); });
size_ = file_stats.st_size - offset;
offset_ = offset;
#if defined(_MSC_VER)
data_ = new uint8_t[size_];
fd.SetPos(offset);
XNNPACK_RETURN_CHECK(fd.Read(data_, size_), "could not read file ('%s'): %s.",
path, strerror(errno));
#else
offset_page_adjustment_ = offset_ % getpagesize();
data_ = static_cast<uint8_t*>(
mmap(nullptr, size_ + offset_page_adjustment_, PROT_READ,
MAP_SHARED, fd.Value(), offset_ - offset_page_adjustment_));
XNNPACK_RETURN_CHECK(data_ != MAP_FAILED, "could not mmap file (%s): %s.",
path, strerror(errno));
#endif
unmap_on_error.Deactivate();
return true;
}
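// Grows the mapping in place with mremap where available (Linux/Android).
// Returns false when the mapping cannot be extended, in which case the caller
// falls back to creating an additional mapping for the new file segment.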
bool MMapHandle::Resize(size_t new_size) {
#if defined(__linux__) || defined(__ANDROID__)
void* const remapped_data =
mremap(data_, size_ + offset_page_adjustment_,
new_size + offset_page_adjustment_, 0);
if (remapped_data == MAP_FAILED) {
XNNPACK_RETURN_CHECK(errno == ENOMEM, "remap failed: %s", strerror(errno));
return false;
}
size_ = new_size;
return true;
#else
return false;
#endif
}
void MMapHandle::UnMap() {
if (data_) {
#if defined(_MSC_VER)
delete[] data_;
#else
munmap(data_, size_);
#endif
}
data_ = nullptr;
offset_ = 0;
offset_page_adjustment_ = 0;
size_ = 0;
}
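// Cache file layout: an XNNPackCacheHeader, followed by the packed weight
// buffers (each aligned to kMinAlignment) and, after every build step, a
// flatbuffer BufferList describing all buffers written so far. The header is
// rewritten at the end of each step so that it always points at the latest
// BufferList.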
#define XNN_MOVE_CONSTRUCT_MEMBER(x) x(std::move(other.x))
WeightCacheBuilder::WeightCacheBuilder(WeightCacheBuilder&& other)
: XNN_MOVE_CONSTRUCT_MEMBER(data_),
XNN_MOVE_CONSTRUCT_MEMBER(schema_),
XNN_MOVE_CONSTRUCT_MEMBER(capacity_),
XNN_MOVE_CONSTRUCT_MEMBER(build_segment_size_),
XNN_MOVE_CONSTRUCT_MEMBER(build_segment_start_),
XNN_MOVE_CONSTRUCT_MEMBER(first_write_done_),
XNN_MOVE_CONSTRUCT_MEMBER(fd_),
XNN_MOVE_CONSTRUCT_MEMBER(file_path_) {}
#undef XNN_MOVE_CONSTRUCT_MEMBER
WeightCacheBuilder& WeightCacheBuilder::operator=(WeightCacheBuilder&& other) {
#define XNN_MOVE_MEMBER(x) x = std::move(other.x)
XNN_MOVE_MEMBER(data_);
XNN_MOVE_MEMBER(schema_);
XNN_MOVE_MEMBER(capacity_);
XNN_MOVE_MEMBER(build_segment_size_);
XNN_MOVE_MEMBER(build_segment_start_);
XNN_MOVE_MEMBER(first_write_done_);
XNN_MOVE_MEMBER(fd_);
XNN_MOVE_MEMBER(file_path_);
#undef XNN_MOVE_MEMBER
return *this;
}
bool WeightCacheBuilder::Start(const char* path) {
XNNPACK_RETURN_CHECK(!IsStarted());
file_path_ = path;
if (IsInMemoryCachePath(file_path_)) {
fd_ = CreateInMemoryFileDescriptor("XNNPack in-memory weight cache");
} else {
fd_ = FileDescriptor::Open(file_path_.c_str(), O_CREAT | O_TRUNC | O_RDWR,
0644);
}
XNNPACK_RETURN_CHECK(fd_.IsValid(), "could not open file ('%s'): %s.",
file_path_.c_str(), strerror(errno));
XNNPackCacheHeader header{XNNPackCacheHeader::kInvalidHeader};
header.buffer_list_offset = sizeof(header);
XNNPACK_RETURN_CHECK(fd_.Write(&header, sizeof(header)),
"could not write initial cache header in %s.",
file_path_.c_str());
schema_.base_offset = Align(sizeof(header), kMinAlignment);
return true;
}
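// Reloads the buffer list written by a previous build step (if any) so that
// new buffers are appended to the existing schema, then moves the write
// cursor back to the start of the previous buffer list: appended data
// overwrites it and StopBuildStep writes a fresh list afterwards.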
bool WeightCacheBuilder::StartBuildStep() {
XNNPACK_RETURN_CHECK(IsStarted());
XNNPackCacheHeader header;
fd_.SetPos(0);
XNNPACK_RETURN_CHECK(fd_.Read(&header, sizeof(header)),
"could not read cache file header.");
if (header.buffer_list_size) {
MMapHandle buffer_list_data;
XNNPACK_RETURN_CHECK(buffer_list_data.Map(fd_, header.buffer_list_offset),
"could not map buffer list mapping");
cache::schema::GetBufferList(buffer_list_data.data())->UnPackTo(&schema_);
}
build_segment_size_ = 0;
build_segment_start_ = fd_.SetPos(header.buffer_list_offset);
XNNPACK_RETURN_CHECK(build_segment_start_ != -1);
is_build_step_ = true;
return true;
}
void WeightCacheBuilder::Reset() { *this = WeightCacheBuilder(); }
void* WeightCacheBuilder::Reserve(size_t size) {
if (size > capacity_) {
data_.reset(nullptr);
data_ = std::make_unique<uint8_t[]>(size + kMinAlignment);
capacity_ = size;
}
return reinterpret_cast<void*>(
Align(reinterpret_cast<size_t>(data_.get()), kMinAlignment));
}
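// Writes `size` bytes of packed data at the next kMinAlignment-aligned file
// position and records the (pack_id -> location) mapping in the in-memory
// schema. The returned offset is relative to schema_.base_offset.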
BufferLocation WeightCacheBuilder::Append(PackIdentifier pack_id,
const void* data, uint64_t size) {
XNNPACK_ABORT_CHECK(is_build_step_,
"cannot append data to an unstarted builder.");
const size_t offset = Align(fd_.GetPos(), kMinAlignment);
if (fd_.SetPos(offset) == -1) {
return BufferLocation::Invalid();
}
BufferLocation loc{offset - schema_.base_offset, size};
cache::schema::BufferT buffer;
buffer.packing_algorithm_id = pack_id.pack_algorithm_id;
buffer.weights_id = pack_id.weights_id;
buffer.bias_id = pack_id.bias_id;
buffer.offset = loc.offset;
buffer.size = loc.size;
schema_.buffers.push_back(std::make_unique<cache::schema::BufferT>(buffer));
if (!fd_.Write(data, size)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: cannot append buffer to cache file");
return BufferLocation::Invalid();
}
return loc;
}
bool WeightCacheBuilder::StopBuildStep() {
XNNPACK_RETURN_CHECK(fd_.IsValid(),
"cache file ('%s') is not open for writing: %s.",
file_path_.c_str(), strerror(errno));
is_build_step_ = false;
if (fd_.GetPos() == build_segment_start_ && first_write_done_) {
return true;
}
flatbuffers::FlatBufferBuilder builder;
cache::schema::FinishBufferListBuffer(
builder, cache::schema::BufferList::Pack(builder, &schema_));
const size_t layout_offset = Align(fd_.GetPos(), kMinAlignment);
XNNPACK_RETURN_CHECK(fd_.SetPos(layout_offset) != -1,
"could not move in the file: %s", strerror(errno));
XNNPACK_RETURN_CHECK(
sizeof(XNNPackCacheHeader::xnnpack_build_identifier) ==
xnn_experimental_get_build_identifier_size(),
"cache file ('%s') header cannot hold XNNPack's build identifier: %s.",
file_path_.c_str(), strerror(errno));
XNNPackCacheHeader header{XNNPackCacheHeader::kVersion};
memcpy(header.xnnpack_build_identifier,
xnn_experimental_get_build_identifier_data(),
xnn_experimental_get_build_identifier_size());
header.buffer_list_offset = fd_.GetPos();
header.buffer_list_size = builder.GetSize();
XNNPACK_RETURN_CHECK(fd_.Write(builder.GetBufferPointer(), builder.GetSize()),
"cannot write buffer list to '%s'.", file_path_.c_str());
build_segment_size_ = fd_.GetPos() - build_segment_start_;
XNNPACK_RETURN_CHECK(fd_.SetPos(0) != -1,
"could not move in the file to write header to %s",
strerror(errno));
XNNPACK_RETURN_CHECK(fd_.Write(&header, sizeof(header)),
"cannot write cache header to %s.", file_path_.c_str());
TFLITE_LOG_PROD(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache: written to '%s'.", file_path_.c_str());
first_write_done_ = true;
return true;
}
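// MMapWeightCacheProvider connects the builder and the mmap-ed cache file to
// XNNPack's weights-cache callbacks. Look-ups are keyed by a PackIdentifier
// derived from the packing algorithm seed and the stable identifiers of the
// kernel and bias buffers.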
MMapWeightCacheProvider::MMapWeightCacheProvider(
MMapWeightCacheProvider&& other) {
*this = std::move(other);
}
MMapWeightCacheProvider& MMapWeightCacheProvider::operator=(
MMapWeightCacheProvider&& other) {
using std::swap;
swap(cache_provider_, other.cache_provider_);
cache_provider_.context = this;
other.cache_provider_.context = &other;
swap(file_path_, other.file_path_);
swap(buffer_address_to_identifier_, other.buffer_address_to_identifier_);
swap(cache_key_to_offset_, other.cache_key_to_offset_);
swap(mmap_handles_, other.mmap_handles_);
swap(mmap_buffer_base_offset_, other.mmap_buffer_base_offset_);
swap(builder_, other.builder_);
return *this;
}
void MMapWeightCacheProvider::SetFilePath(const char* path) {
XNNPACK_ABORT_CHECK(
!IsBuilding(),
"Cannot change the path of a cache that has already been loaded.");
if (file_path_ != path) {
file_path_ = path;
}
}
bool MMapWeightCacheProvider::LoadOrStartBuild(const char* path) {
if (!IsInMemoryCachePath(path) && Load(path)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache loaded from '%s'.", path);
return true;
} else if (StartBuild(path)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache build for '%s' started.", path);
return true;
}
return false;
}
bool MMapWeightCacheProvider::StartBuild(const char* path) {
SetFilePath(path);
building_run_ = builder_.Start(path);
if (IsInMemoryCachePath(file_path_)) {
temporary_file_descriptor_ = builder_.GetFileDescriptor().Duplicate();
}
return building_run_;
}
bool MMapWeightCacheProvider::Load(const std::string& path) {
SetFilePath(path.c_str());
return Load();
}
bool MMapWeightCacheProvider::Load() {
mmap_buffer_base_offset_ = 0;
cache_key_to_offset_.clear();
mmap_handles_.resize(1);
MMapHandle& mmap_handle = mmap_handles_.front();
ScopeGuard unmap_on_fail([this] { mmap_handles_.clear(); });
if (temporary_file_descriptor_.IsValid()) {
    XNNPACK_RETURN_CHECK(mmap_handle.Map(temporary_file_descriptor_,
                                         /*offset=*/0, file_path_.c_str()));
} else {
XNNPACK_ABORT_CHECK(!file_path_.empty(),
"Path wasn't provided to weight cache provider.");
if (!FileExists(file_path_.c_str())) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"XNNPack weight cache: could not load '%s': %s.",
file_path_.c_str(), strerror(errno));
return false;
}
XNNPACK_RETURN_CHECK(mmap_handle.Map(file_path_.c_str()));
}
XNNPACK_RETURN_CHECK(mmap_handle.size() >= sizeof(XNNPackCacheHeader),
"invalid cache file size.");
const XNNPackCacheHeader header = [&mmap_handle] {
XNNPackCacheHeader header;
memcpy(&header, mmap_handle.data(), sizeof(header));
return header;
}();
XNNPACK_RETURN_CHECK(header.version == XNNPackCacheHeader::kVersion,
"incompatible header version. Got %zd, expected %zd. "
"Cache needs to be built again.",
header.version, XNNPackCacheHeader::kVersion);
XNNPACK_RETURN_CHECK(xnn_experimental_check_build_identifier(
header.xnnpack_build_identifier,
sizeof(header.xnnpack_build_identifier)),
"XNNPack weight cache: incompatible XNNPack version. "
"Cache needs to be built again.");
XNNPACK_RETURN_CHECK(header.buffer_list_offset < mmap_handle.size(),
"invalid offset for buffer list descriptor.");
XNNPACK_RETURN_CHECK(
header.buffer_list_size == mmap_handle.size() - header.buffer_list_offset,
"invalid size for buffer list descriptor.");
flatbuffers::Verifier verifier(mmap_handle.data() + header.buffer_list_offset,
header.buffer_list_size);
XNNPACK_RETURN_CHECK(cache::schema::VerifyBufferListBuffer(verifier),
"buffer list validation failed.");
const cache::schema::BufferList* buffer_list = cache::schema::GetBufferList(
mmap_handle.data() + header.buffer_list_offset);
XNNPACK_RETURN_CHECK(buffer_list,
"could not get packed weights from flatbuffer.");
mmap_buffer_base_offset_ = buffer_list->base_offset();
if (const auto buffers = buffer_list->buffers(); buffers) {
for (auto* buffer : *buffers) {
XNNPACK_RETURN_CHECK(buffer, "invalid buffer address in buffer list.");
cache_key_to_offset_.emplace(
PackIdentifier{buffer->packing_algorithm_id(),
buffer->weights_id(),
buffer->bias_id()},
BufferLocation{buffer->offset(), buffer->size()});
offset_to_addr_.insert(
{buffer->offset(),
mmap_handle.data() + mmap_buffer_base_offset_ + buffer->offset()});
}
}
unmap_on_fail.Deactivate();
return true;
}
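// Maps the data written by the last build step. The existing mapping is
// extended in place when possible; otherwise a new mapping starting at the
// segment's file offset is added and buffer offsets are translated relative
// to it.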
bool MMapWeightCacheProvider::LoadLastBuildStep() {
if (mmap_handles_.empty()) {
return Load();
}
if (builder_.LastBuildStepSize() == 0) {
return true;
}
const XNNPackCacheHeader header = [this] {
XNNPackCacheHeader header;
memcpy(&header, mmap_handles_.front().data(), sizeof(header));
return header;
}();
{
MMapHandle& last_mmap_handle = mmap_handles_.back();
const int last_mmap_size = last_mmap_handle.size();
if (!last_mmap_handle.Resize(last_mmap_size +
builder_.LastBuildStepSize())) {
mmap_handles_.emplace_back();
if (temporary_file_descriptor_.IsValid()) {
XNNPACK_RETURN_CHECK(
mmap_handles_.back().Map(temporary_file_descriptor_,
builder_.LastBuildStepStart()),
"could not map last build step");
} else {
XNNPACK_RETURN_CHECK(
mmap_handles_.back().Map(file_path_.c_str(),
builder_.LastBuildStepStart()),
"could not map last build step");
}
}
}
MMapHandle& segment_mmap_handle = mmap_handles_.back();
const size_t buffer_list_offset =
header.buffer_list_offset - segment_mmap_handle.offset();
flatbuffers::Verifier verifier(
segment_mmap_handle.data() + buffer_list_offset, header.buffer_list_size);
XNNPACK_RETURN_CHECK(cache::schema::VerifyBufferListBuffer(verifier),
"buffer list validation failed.");
const cache::schema::BufferList* buffer_list = cache::schema::GetBufferList(
segment_mmap_handle.data() + buffer_list_offset);
XNNPACK_RETURN_CHECK(buffer_list,
"could not get packed weights from flatbuffer.");
const ptrdiff_t offset_modifier =
buffer_list->base_offset() - segment_mmap_handle.offset();
for (const auto* buffer : *(buffer_list->buffers())) {
const size_t offset = buffer->offset();
if (!offset_to_addr_.count(offset)) {
offset_to_addr_.insert(
{offset, segment_mmap_handle.data() + offset + offset_modifier});
}
}
return true;
}
bool MMapWeightCacheProvider::StartBuildStep() {
XNNPACK_RETURN_CHECK(CanStartBuildStep(),
"cannot append data to an existing cache file.");
if (IsBuilding()) {
return true;
}
is_build_step_ = builder_.StartBuildStep();
return is_build_step_;
}
bool MMapWeightCacheProvider::StopBuildStep() {
XNNPACK_RETURN_CHECK(builder_.StopBuildStep());
is_build_step_ = false;
return LoadLastBuildStep();
}
void MMapWeightCacheProvider::MapTensorIdentifiers(
const TfLiteTensor* tensors, const size_t size,
const std::unordered_map<size_t, size_t>& tensor_index_to_identifier) {
for (const auto [index, identifier] : tensor_index_to_identifier) {
XNNPACK_ABORT_CHECK(index < size,
"Tensor index corresponds to a non existing tensor.");
buffer_address_to_identifier_[tensors[index].data.data] = identifier;
}
}
void MMapWeightCacheProvider::RemapDataBuffer(const void* const buffer,
const void* const new_buffer) {
buffer_remaps_[new_buffer] = buffer;
}
size_t MMapWeightCacheProvider::LookUp(
const xnn_weights_cache_look_up_key* cache_key) {
if (!cache_key) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: a null cache key was provided.");
return SIZE_MAX;
}
const PackIdentifier pack_id = BuildPackIdentifier(*cache_key);
if (auto offset_it = cache_key_to_offset_.find(pack_id);
offset_it != cache_key_to_offset_.end()) {
return offset_it->second.offset;
}
return SIZE_MAX;
}
void* MMapWeightCacheProvider::ReserveSpace(size_t size) {
XNNPACK_ABORT_CHECK(IsBuilding(),
"Cannot reserve space in a cache that isn't building.");
return builder_.Reserve(size);
}
size_t MMapWeightCacheProvider::LookUpOrInsert(
const xnn_weights_cache_look_up_key* cache_key, void* ptr, size_t size) {
XNNPACK_ABORT_CHECK(cache_key, "A null cache key was provided.");
const PackIdentifier pack_id = BuildPackIdentifier(*cache_key);
if (auto offset_it = cache_key_to_offset_.find(pack_id);
offset_it != cache_key_to_offset_.end()) {
return offset_it->second.offset;
}
XNNPACK_ABORT_CHECK(
IsBuilding(), "Cannot insert a buffer in a cache that is not building.");
const BufferLocation location = builder_.Append(pack_id, ptr, size);
XNNPACK_ABORT_CHECK(!location.IsInvalid(),
"Inserting data in the cache failed.");
cache_key_to_offset_.emplace(pack_id, location);
return location.offset;
}
void* MMapWeightCacheProvider::OffsetToAddr(const size_t offset) {
XNNPACK_ABORT_CHECK(
!IsBuilding(),
"Cannot get the address of a buffer in a cache during a building step.");
return offset_to_addr_[offset];
}
void MMapWeightCacheProvider::Release() {
buffer_address_to_identifier_.clear();
cache_key_to_offset_.clear();
mmap_handles_.clear();
mmap_buffer_base_offset_ = 0;
builder_ = WeightCacheBuilder();
}
size_t MMapWeightCacheProvider::look_up(
void* context, const xnn_weights_cache_look_up_key* cache_key) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->LookUp(cache_key);
}
void* MMapWeightCacheProvider::reserve_space(void* context, size_t n) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->ReserveSpace(n);
}
size_t MMapWeightCacheProvider::look_up_or_insert(
void* context, const xnn_weights_cache_look_up_key* cache_key, void* ptr,
size_t size) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->LookUpOrInsert(
cache_key, ptr, size);
}
bool MMapWeightCacheProvider::is_finalized(void* context) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->IsActive();
}
void* MMapWeightCacheProvider::offset_to_addr(void* context, size_t offset) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->OffsetToAddr(
offset);
}
enum xnn_status MMapWeightCacheProvider::delete_cache(void* context) {
reinterpret_cast<MMapWeightCacheProvider*>(context)->Release();
return xnn_status_success;
}
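// Resolves the kernel and bias data pointers to their stable buffer
// identifiers, following any chain registered through RemapDataBuffer, and
// combines them with the packing algorithm seed into a PackIdentifier.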
PackIdentifier MMapWeightCacheProvider::BuildPackIdentifier(
const xnn_weights_cache_look_up_key& key) {
const auto get_buffer_id = [&](const void* buffer) -> size_t {
if (buffer) {
const auto identifier_it = buffer_address_to_identifier_.find(buffer);
if (identifier_it != buffer_address_to_identifier_.end()) {
return identifier_it->second;
}
auto remapped_it = buffer_remaps_.find(buffer);
while (remapped_it != buffer_remaps_.end()) {
const auto remapped_identifier_it =
buffer_address_to_identifier_.find(remapped_it->second);
if (remapped_identifier_it != buffer_address_to_identifier_.end()) {
return remapped_identifier_it->second;
}
remapped_it = buffer_remaps_.find(remapped_it->second);
}
XNNPACK_ABORT_CHECK(
remapped_it != buffer_remaps_.end(),
"Unknown constant buffer passed to BuildPackIdentifier.");
}
return PackIdentifier::kNoId;
};
return PackIdentifier{key.seed,
get_buffer_id(key.kernel),
get_buffer_id(key.bias)};
}
} | #include "tensorflow/lite/delegates/xnnpack/weight_cache.h"
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iterator>
#include <map>
#include <ostream>
#include <random>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xnnpack.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/file_util.h"
#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite::xnnpack {
std::ostream& operator<<(std::ostream& os, const PackIdentifier& p) {
return os << "PackIdentifier{pack_algo: " << p.pack_algorithm_id
<< ", weights_id: " << p.weights_id << ", bias_id: " << p.bias_id
<< "}";
}
namespace {
using testing::ElementsAreArray;
using testing::Ge;
std::string GenerateRandomString(const size_t size) {
constexpr char chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz";
std::mt19937 rg{std::random_device{}()};
  std::uniform_int_distribution<std::string::size_type> pick(
      0, sizeof(chars) - 2);  // -2: never pick the terminating '\0'.
std::string str(size, 'a');
std::generate(begin(str), end(str), [&] { return pick(rg); });
return str;
};
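// Minimal non-owning typed view over a raw buffer, used to compare cached
// packed data against reference payloads in the tests below.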
template <class T>
class LightSpan {
public:
using value_type = T;
LightSpan(const void* data, const size_t size)
: ptr_(reinterpret_cast<T*>(data)), size_(size) {}
  size_t size() const { return size_; }
const T* begin() const { return ptr_; }
const T* end() const { return ptr_ + size_; }
friend std::ostream& operator<<(std::ostream& os, const LightSpan<T>& s) {
os << '[';
auto it = s.begin();
    if (it != s.end()) {
      os << +*it;
      ++it;
    }
for (; it != s.end(); ++it) {
os << ", " << +*it;
}
return os << ']';
}
private:
T* ptr_;
size_t size_;
};
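// RAII helper managing a temporary file and its descriptor. The file is
// created with mkstemp on POSIX and with tmpnam_s + open when building with
// MSVC.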
class TempFileDesc {
public:
static constexpr struct AutoClose {
} kAutoClose{};
#if defined(_MSC_VER)
TempFileDesc() : fd_() {
char filename[L_tmpnam_s];
errno_t err = tmpnam_s(filename, L_tmpnam_s);
if (err) {
fprintf(stderr, "Could not create temporary filename.\n");
std::abort();
}
path_ = filename;
fd_ = open(path_.c_str(), O_CREAT | O_EXCL | O_RDWR, 0644);
if (fd_ < 0) {
fprintf(stderr, "Could not create temporary filename.\n");
std::abort();
}
}
#else
TempFileDesc() : fd_(mkstemp(path_.data())) {
if (GetFd() < 0) {
perror("Could not create temporary file");
}
}
#endif
explicit TempFileDesc(AutoClose) : TempFileDesc() { Close(); }
TempFileDesc(const TempFileDesc&) = delete;
TempFileDesc& operator=(const TempFileDesc&) = delete;
friend void swap(TempFileDesc& a, TempFileDesc& b) {
std::swap(a.path_, b.path_);
std::swap(a.fd_, b.fd_);
}
TempFileDesc(TempFileDesc&& other) { swap(*this, other); }
TempFileDesc& operator=(TempFileDesc&& other) {
swap(*this, other);
return *this;
}
~TempFileDesc() { Close(); }
void Close() {
if (GetFd() >= 0) {
close(fd_);
fd_ = -1;
}
}
const std::string& GetPath() const { return path_; }
const char* GetCPath() const { return path_.c_str(); }
int GetFd() const { return fd_; }
bool IsOpen() const { return fd_ >= 0; }
private:
std::string path_ = testing::TempDir() + "/weight_cache_test_file.XXXXXX";
int fd_ = -1;
};
TEST(MMapHandleTest, DefaultConstructs) {
MMapHandle handle;
EXPECT_FALSE(handle.IsMapped());
EXPECT_EQ(handle.data(), nullptr);
EXPECT_EQ(handle.size(), 0);
}
TEST(MMapHandleTest, MapNonExistingFileFails) {
const char* file_path = "sdbgfd";
MMapHandle handle;
EXPECT_FALSE(handle.Map(file_path));
}
TEST(MMapHandleTest, MapExistingFileWorks) {
using std::size;
const std::string payload = "This is some data in the file.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
tmp_file.Close();
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
EXPECT_TRUE(handle.IsMapped());
EXPECT_NE(handle.data(), nullptr);
EXPECT_THAT(handle.size(), Ge(size(payload)));
EXPECT_THAT(handle, ElementsAreArray(payload));
handle.UnMap();
EXPECT_FALSE(handle.IsMapped());
EXPECT_EQ(handle.data(), nullptr);
EXPECT_EQ(handle.size(), 0);
}
TEST(MMapHandleTest, MoveConstructs) {
const std::string payload = "This is some data in the file.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
tmp_file.Close();
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
MMapHandle handle2(std::move(handle));
EXPECT_FALSE(handle.IsMapped());
EXPECT_EQ(handle.data(), nullptr);
EXPECT_EQ(handle.size(), 0);
EXPECT_TRUE(handle2.IsMapped());
EXPECT_NE(handle2.data(), nullptr);
EXPECT_THAT(handle2.size(), Ge(size(payload)));
EXPECT_THAT(handle2, ElementsAreArray(payload));
}
TEST(MMapHandleTest, Resize) {
const std::string payload = "This is some data in the file.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
tmp_file.Close();
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
#if defined(__linux__) || defined(__ANDROID__)
const size_t kMaxResizeTestCount = 20;
bool was_resized = true;
for (size_t i = 0; i < kMaxResizeTestCount && was_resized; ++i) {
was_resized = handle.Resize(payload.size() * 2);
EXPECT_TRUE(was_resized || errno == ENOMEM);
}
#else
EXPECT_FALSE(handle.Resize(payload.size()));
#endif
}
TEST(MMapHandleTest, MapWithOffset) {
const std::string payload = "This is some data in the file.";
  const std::string payload2 = "Some other data appended to the offset.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
ASSERT_EQ(write(tmp_file.GetFd(), payload2.c_str(), size(payload2)),
size(payload2));
tmp_file.Close();
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath(), size(payload)));
EXPECT_EQ(handle.size(), size(payload2));
EXPECT_THAT(std::string((const char*)handle.data(), handle.size()),
testing::StrEq(payload2));
}
TEST(MMapHandleTest, ResizeMapWithOffset) {
const std::string payload = "This is some data in the file.";
  const std::string payload2 = "Some other data appended to the offset.";
const std::string payload3 =
"Yet some other data written after the initial mapping.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
ASSERT_EQ(write(tmp_file.GetFd(), payload2.c_str(), size(payload2)),
size(payload2));
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath(), size(payload)));
ASSERT_EQ(write(tmp_file.GetFd(), payload3.c_str(), size(payload3)),
size(payload3));
tmp_file.Close();
#if defined(__linux__) || defined(__ANDROID__)
bool was_resized = handle.Resize(payload2.size() + payload3.size());
if (was_resized) {
EXPECT_THAT(std::string((const char*)handle.data(), handle.size()),
testing::StrEq(payload2 + payload3));
} else {
GTEST_SKIP()
<< "This run did not end up in a resize of the mmaped interval.";
}
#else
GTEST_SKIP() << "Resize is not supported for this build.";
#endif
}
TEST(WeightCacheBuilderTest, ReserveAppendWriteWorks) {
using std::size;
const std::string payload = "This is some data in the file.";
const PackIdentifier dummy_id{1, 2, 3};
WeightCacheBuilder builder;
const std::string cache_path = testing::TempDir() + "/cache";
ASSERT_TRUE(builder.Start(cache_path.c_str()));
ASSERT_TRUE(builder.StartBuildStep());
const size_t payload_size = size(payload);
void* buffer = builder.Reserve(payload_size);
std::memcpy(buffer, payload.c_str(), payload_size);
auto loc = builder.Append(dummy_id, buffer, payload_size);
EXPECT_EQ(loc.size, payload_size);
EXPECT_GE(builder.capacity(), payload_size);
ASSERT_TRUE(builder.StopBuildStep());
MMapHandle handle;
ASSERT_TRUE(handle.Map(cache_path.c_str()));
const XNNPackCacheHeader& header =
*reinterpret_cast<const XNNPackCacheHeader*>(handle.data());
ASSERT_EQ(header.version, XNNPackCacheHeader::kVersion);
ASSERT_NE(header.buffer_list_offset, 0);
ASSERT_NE(header.buffer_list_size, 0);
ASSERT_LE(header.buffer_list_offset + header.buffer_list_size, handle.size());
const cache::schema::BufferList* const packed_weights =
cache::schema::GetBufferList(handle.data() + header.buffer_list_offset);
ASSERT_NE(packed_weights, nullptr);
ASSERT_NE(packed_weights->buffers(), nullptr);
ASSERT_EQ(packed_weights->buffers()->size(), 1);
ASSERT_NE(packed_weights->buffers()->Get(0), nullptr);
ASSERT_EQ(packed_weights->buffers()->Get(0)->size(), size(payload));
ASSERT_EQ(packed_weights->buffers()->Get(0)->packing_algorithm_id(),
dummy_id.pack_algorithm_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->weights_id(),
dummy_id.weights_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->bias_id(), dummy_id.bias_id);
flatbuffers::Verifier verifier(handle.data() + header.buffer_list_offset,
header.buffer_list_size);
EXPECT_TRUE(cache::schema::VerifyBufferListBuffer(verifier));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset(),
size(handle));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset() +
packed_weights->buffers()->Get(0)->size(),
size(handle));
std::tuple<const char*, size_t> cache_data(
reinterpret_cast<const char*>(
handle.data() + packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset()),
packed_weights->buffers()->Get(0)->size());
EXPECT_THAT(cache_data, ElementsAreArray(payload));
}
TEST(WeightCacheBuilderTest, AppendWithoutReserveWriteWorks) {
using std::size;
const std::string payload = "This is some data in the file.";
const PackIdentifier dummy_id{1, 2, 3};
const std::string cache_path = testing::TempDir() + "/cache";
WeightCacheBuilder builder;
ASSERT_TRUE(builder.Start(cache_path.c_str()));
ASSERT_TRUE(builder.StartBuildStep());
const size_t payload_size = size(payload);
auto loc = builder.Append(dummy_id, payload.c_str(), payload_size);
EXPECT_EQ(loc.size, payload_size);
ASSERT_TRUE(builder.StopBuildStep());
MMapHandle handle;
ASSERT_TRUE(handle.Map(cache_path.c_str()));
const XNNPackCacheHeader& header =
*reinterpret_cast<const XNNPackCacheHeader*>(handle.data());
ASSERT_EQ(header.version, XNNPackCacheHeader::kVersion);
ASSERT_NE(header.buffer_list_offset, 0);
ASSERT_NE(header.buffer_list_size, 0);
ASSERT_LE(header.buffer_list_offset + header.buffer_list_size, handle.size());
const cache::schema::BufferList* const packed_weights =
cache::schema::GetBufferList(handle.data() + header.buffer_list_offset);
ASSERT_NE(packed_weights, nullptr);
ASSERT_NE(packed_weights->buffers(), nullptr);
ASSERT_EQ(packed_weights->buffers()->size(), 1);
ASSERT_NE(packed_weights->buffers()->Get(0), nullptr);
ASSERT_EQ(packed_weights->buffers()->Get(0)->size(), size(payload));
ASSERT_EQ(packed_weights->buffers()->Get(0)->packing_algorithm_id(),
dummy_id.pack_algorithm_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->weights_id(),
dummy_id.weights_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->bias_id(), dummy_id.bias_id);
flatbuffers::Verifier verifier(handle.data() + header.buffer_list_offset,
header.buffer_list_size);
EXPECT_TRUE(cache::schema::VerifyBufferListBuffer(verifier));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset(),
size(handle));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset() +
packed_weights->buffers()->Get(0)->size(),
size(handle));
std::tuple<const char*, size_t> cache_data(
reinterpret_cast<const char*>(
handle.data() + packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset()),
packed_weights->buffers()->Get(0)->size());
EXPECT_THAT(cache_data, ElementsAreArray(payload));
}
TEST(WeightCacheBuilderTest, NonExistingPathFails) {
WeightCacheBuilder builder;
EXPECT_FALSE(builder.Start(""));
EXPECT_FALSE(builder.Start("/seldf/sedsft"));
}
TEST(WeightCacheBuilderTest, InMemoryCacheTriggeredByCorrectPrefix) {
if (!TfLiteXNNPackDelegateCanUseInMemoryWeightCacheProvider()) {
GTEST_SKIP() << "In-memory weight cache isn't enabled for this build or "
"isn't supported by the current system, skipping test.";
}
{
WeightCacheBuilder builder;
EXPECT_TRUE(builder.Start(kInMemoryCachePath));
EXPECT_TRUE(builder.IsStarted());
const FileDescriptor file_fd(open(kInMemoryCachePath, O_RDONLY));
EXPECT_FALSE(file_fd.IsValid());
EXPECT_EQ(errno, ENOENT);
}
{
WeightCacheBuilder builder;
const std::string path_with_in_memory_prefix =
std::string(kInMemoryCachePath) + "/slkdjfsldf";
EXPECT_TRUE(builder.Start(path_with_in_memory_prefix.c_str()));
EXPECT_TRUE(builder.IsStarted());
const FileDescriptor file_fd(open(kInMemoryCachePath, O_RDONLY));
EXPECT_FALSE(file_fd.IsValid());
EXPECT_EQ(errno, ENOENT);
}
}
TEST(WeightCacheBuilderTest, MultipleStepBuild) {
using std::size;
const std::string payload1 = "This is some data in the file.";
const PackIdentifier dummy_id1{1, 2, 3};
const std::string payload2 = "Other data in the file.";
const PackIdentifier dummy_id2{2, 3, 4};
const std::string payload3 =
      GenerateRandomString(10 * 1024 * 1024);
const PackIdentifier dummy_id3{3, 4, 5};
TempFileDesc tmp_file{TempFileDesc::kAutoClose};
WeightCacheBuilder builder;
ASSERT_TRUE(builder.Start(tmp_file.GetCPath()));
ASSERT_TRUE(builder.StartBuildStep());
{
const size_t payload_size = size(payload1);
void* buffer = builder.Reserve(payload_size);
std::memcpy(buffer, payload1.c_str(), payload_size);
const auto loc = builder.Append(dummy_id1, buffer, payload_size);
EXPECT_EQ(loc.size, payload_size);
EXPECT_GE(builder.capacity(), payload_size);
}
{
const size_t payload_size = size(payload3);
void* buffer = builder.Reserve(payload_size);
std::memcpy(buffer, payload3.c_str(), payload_size);
const auto loc = builder.Append(dummy_id3, buffer, payload_size);
(void)loc;
}
ASSERT_TRUE(builder.StopBuildStep());
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
ASSERT_TRUE(builder.StartBuildStep());
{
const size_t payload_size = size(payload2);
void* buffer = builder.Reserve(payload_size);
std::memcpy(buffer, payload2.c_str(), payload_size);
const auto loc = builder.Append(dummy_id2, buffer, payload_size);
EXPECT_EQ(loc.size, payload_size);
EXPECT_GE(builder.capacity(), payload_size);
}
ASSERT_TRUE(builder.StopBuildStep());
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
const XNNPackCacheHeader& header =
*reinterpret_cast<const XNNPackCacheHeader*>(handle.data());
ASSERT_EQ(header.version, XNNPackCacheHeader::kVersion);
ASSERT_NE(header.buffer_list_offset, 0);
ASSERT_NE(header.buffer_list_size, 0);
ASSERT_LE(header.buffer_list_offset + header.buffer_list_size, handle.size());
const cache::schema::BufferList* const packed_weights =
cache::schema::GetBufferList(handle.data() + header.buffer_list_offset);
ASSERT_NE(packed_weights, nullptr);
ASSERT_NE(packed_weights->buffers(), nullptr);
ASSERT_EQ(packed_weights->buffers()->size(), 3);
const auto* buffer1 = packed_weights->buffers()->Get(0);
ASSERT_NE(buffer1, nullptr);
ASSERT_EQ(buffer1->size(), size(payload1));
ASSERT_EQ(buffer1->packing_algorithm_id(), dummy_id1.pack_algorithm_id);
ASSERT_EQ(buffer1->weights_id(), dummy_id1.weights_id);
ASSERT_EQ(buffer1->bias_id(), dummy_id1.bias_id);
const auto* buffer3 = packed_weights->buffers()->Get(1);
ASSERT_NE(buffer3, nullptr);
ASSERT_EQ(buffer3->size(), size(payload3));
ASSERT_EQ(buffer3->packing_algorithm_id(), dummy_id3.pack_algorithm_id);
ASSERT_EQ(buffer3->weights_id(), dummy_id3.weights_id);
ASSERT_EQ(buffer3->bias_id(), dummy_id3.bias_id);
const auto* buffer2 = packed_weights->buffers()->Get(2);
ASSERT_NE(buffer2, nullptr);
ASSERT_EQ(buffer2->size(), size(payload2));
ASSERT_EQ(buffer2->packing_algorithm_id(), dummy_id2.pack_algorithm_id);
ASSERT_EQ(buffer2->weights_id(), dummy_id2.weights_id);
ASSERT_EQ(buffer2->bias_id(), dummy_id2.bias_id);
flatbuffers::Verifier verifier(handle.data() + header.buffer_list_offset,
header.buffer_list_size);
EXPECT_TRUE(cache::schema::VerifyBufferListBuffer(verifier));
ASSERT_LE(packed_weights->base_offset() + buffer1->offset(), size(handle));
ASSERT_LE(packed_weights->base_offset() + buffer1->offset() + buffer1->size(),
size(handle));
ASSERT_LE(packed_weights->base_offset() + buffer2->offset(), size(handle));
ASSERT_LE(packed_weights->base_offset() + buffer2->offset() + buffer2->size(),
size(handle));
ASSERT_LE(packed_weights->base_offset() + buffer3->offset(), size(handle));
ASSERT_LE(packed_weights->base_offset() + buffer3->offset() + buffer3->size(),
size(handle));
auto GetBufferData = [&handle, &packed_weights](const auto* buffer) {
return std::tuple<const char*, size_t>(
reinterpret_cast<const char*>(
handle.data() + packed_weights->base_offset() + buffer->offset()),
buffer->size());
};
EXPECT_THAT(GetBufferData(buffer1), ElementsAreArray(payload1));
EXPECT_THAT(GetBufferData(buffer2), ElementsAreArray(payload2));
EXPECT_THAT(GetBufferData(buffer3), ElementsAreArray(payload3));
}
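// Fake tensor/packing environment: tensors are plain byte buffers and
// "packing" simply XORs the participating buffers together, which is enough to
// verify that cached packed data round-trips through the provider.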
struct FakeContext {
void AddTensor(int buffer_identifier, size_t size) {
buffers.emplace_back(size, buffer_identifier);
tensors.push_back({});
tensors.back().allocation_type = kTfLiteMmapRo;
tensor_buffer_identifiers[tensors.size() - 1] = buffer_identifier;
}
void FinalizeTensors() {
for (size_t i = 0; i < tensors.size(); ++i) {
tensors[i].data.data = buffers[i].data();
tensors[i].bytes = buffers[i].size();
}
}
xnn_weights_cache_look_up_key LookUpKey(const uint32_t algorithm_seed,
const int weights_index) const {
return {.seed = algorithm_seed,
.kernel = buffers[weights_index].data(),
.bias = nullptr};
}
xnn_weights_cache_look_up_key LookUpKey(const uint32_t algorithm_seed,
const int weights_index,
const int bias_index) const {
return {.seed = algorithm_seed,
.kernel = buffers[weights_index].data(),
.bias = buffers[bias_index].data()};
}
void AddTensorToPack(std::vector<uint8_t>& pack_buffer, int index) {
const std::vector<uint8_t>& buffer = buffers[index];
pack_buffer.resize(std::max(size(pack_buffer), size(buffer)));
for (size_t i = 0; i < size(buffer); ++i) {
pack_buffer[i] ^= buffer[i];
}
}
template <class... Ids>
PackIdentifier PackTensors(xnn_weights_cache_t weight_cache,
const uint32_t algorithm_seed,
const Ids... tensor_indices) {
PackIdentifier pack_id{algorithm_seed,
tensor_buffer_identifiers[tensor_indices]...};
PackedBuffer& packed =
packed_buffers.emplace(pack_id, PackedBuffer{})->second;
(AddTensorToPack(packed.buffer, tensor_indices), ...);
xnn_weights_cache_look_up_key look_up_key =
LookUpKey(algorithm_seed, tensor_indices...);
packed.offset = weight_cache->look_up_or_insert(
weight_cache->context, &look_up_key, packed.buffer.data(),
packed.buffer.size());
return pack_id;
}
struct PackedBuffer {
size_t offset;
std::vector<uint8_t> buffer;
};
std::vector<TfLiteTensor> tensors;
std::vector<std::vector<uint8_t>> buffers;
std::unordered_multimap<PackIdentifier, PackedBuffer, PackIdentifier::Hash>
packed_buffers;
std::unordered_map<size_t, size_t> tensor_buffer_identifiers;
};
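// Fixture that registers four fake tensors with the provider and starts a
// cache build backed by a temporary file.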
struct BuildMMapWeightCacheProviderTest : testing::Test {
enum { kAlgoSeed1, kAlgoSeed2, kAlgoSeed3 };
enum { kBufferId1, kBufferId2, kBufferId3, kBufferId4 };
void SetUp() override {
AddTensors();
EndSetup();
}
void AddTensors() {
ctx.AddTensor(kBufferId1, 12);
ctx.AddTensor(kBufferId2, 43);
ctx.AddTensor(kBufferId3, 64);
ctx.AddTensor(kBufferId4, 8);
}
void EndSetup() {
ctx.FinalizeTensors();
cache_provider.MapTensorIdentifiers(ctx.tensors.data(), ctx.tensors.size(),
ctx.tensor_buffer_identifiers);
ASSERT_TRUE(cache_provider.StartBuild(tmp_file.GetCPath()));
}
FakeContext ctx;
MMapWeightCacheProvider cache_provider;
TempFileDesc tmp_file{TempFileDesc::kAutoClose};
};
TEST_F(BuildMMapWeightCacheProviderTest, LookUpFailsIfKeyDoesntMatch) {
xnn_weights_cache_look_up_key look_up_key{};
EXPECT_EQ(cache_provider.LookUp(&look_up_key), SIZE_MAX);
}
TEST_F(BuildMMapWeightCacheProviderTest, LookUpSucceeds) {
enum { kWeightIndex, kBiasIndex };
ASSERT_TRUE(cache_provider.StartBuildStep());
const auto pack_id = ctx.PackTensors(&cache_provider.GetCacheProvider(),
kAlgoSeed1, kWeightIndex, kBiasIndex);
EXPECT_TRUE(cache_provider.StopBuildStep());
const xnn_weights_cache_look_up_key look_up_key =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex, kBiasIndex);
EXPECT_EQ(cache_provider.LookUp(&look_up_key),
ctx.packed_buffers.find(pack_id)->second.offset);
}
TEST_F(BuildMMapWeightCacheProviderTest,
DifferentAlgoSeedsSameTensorsDontConflict) {
enum { kWeightIndex, kBiasIndex };
ASSERT_TRUE(cache_provider.StartBuildStep());
const auto pack_id_1 = ctx.PackTensors(&cache_provider.GetCacheProvider(),
kAlgoSeed1, kWeightIndex, kBiasIndex);
const auto pack_id_2 = ctx.PackTensors(&cache_provider.GetCacheProvider(),
kAlgoSeed2, kWeightIndex, kBiasIndex);
EXPECT_TRUE(cache_provider.StopBuildStep());
const xnn_weights_cache_look_up_key look_up_key_1 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex, kBiasIndex);
const xnn_weights_cache_look_up_key look_up_key_2 =
ctx.LookUpKey(kAlgoSeed2, kWeightIndex, kBiasIndex);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_1),
ctx.packed_buffers.find(pack_id_1)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_2),
ctx.packed_buffers.find(pack_id_2)->second.offset);
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_2));
}
TEST_F(BuildMMapWeightCacheProviderTest,
SameAlgoSeedDifferentTensorsDontConflict) {
enum { kWeightIndex1, kWeightIndex2, kBiasIndex1, kBiasIndex2 };
ASSERT_TRUE(cache_provider.StartBuildStep());
const auto pack_id_1 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex1, kBiasIndex1);
const auto pack_id_2 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex2, kBiasIndex1);
const auto pack_id_3 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex1, kBiasIndex2);
const auto pack_id_4 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex2, kBiasIndex2);
EXPECT_TRUE(cache_provider.StopBuildStep());
const xnn_weights_cache_look_up_key look_up_key_1 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex1);
const xnn_weights_cache_look_up_key look_up_key_2 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex2, kBiasIndex1);
const xnn_weights_cache_look_up_key look_up_key_3 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex2);
const xnn_weights_cache_look_up_key look_up_key_4 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex2, kBiasIndex2);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_1),
ctx.packed_buffers.find(pack_id_1)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_2),
ctx.packed_buffers.find(pack_id_2)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_3),
ctx.packed_buffers.find(pack_id_3)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_4),
ctx.packed_buffers.find(pack_id_4)->second.offset);
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_2));
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_3));
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_4))
<< pack_id_1 << " " << pack_id_4;
EXPECT_NE(cache_provider.LookUp(&look_up_key_2),
cache_provider.LookUp(&look_up_key_3));
EXPECT_NE(cache_provider.LookUp(&look_up_key_2),
cache_provider.LookUp(&look_up_key_4));
EXPECT_NE(cache_provider.LookUp(&look_up_key_3),
cache_provider.LookUp(&look_up_key_4));
}
TEST_F(BuildMMapWeightCacheProviderTest, BuildStepSequenceWorks) {
enum { kWeightIndex1, kBiasIndex, kWeightIndex2 };
ASSERT_TRUE(cache_provider.StartBuildStep());
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, kWeightIndex1,
kBiasIndex);
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed2,
kWeightIndex2);
EXPECT_TRUE(cache_provider.IsActive());
EXPECT_TRUE(cache_provider.IsBuilding());
ASSERT_TRUE(cache_provider.StopBuildStep());
ASSERT_TRUE(cache_provider.IsActive());
EXPECT_FALSE(cache_provider.IsBuilding());
}
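// Extends the build fixture by packing two buffers and completing the build
// step, so the look-ups below run against a populated cache.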
struct LoadMMapWeightCacheProviderTest : BuildMMapWeightCacheProviderTest {
enum { kWeightIndex1, kBiasIndex, kWeightIndex2 };
void SetUp() override {
BuildMMapWeightCacheProviderTest::SetUp();
ASSERT_TRUE(cache_provider.StartBuildStep());
pack_id_1 = ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex1, kBiasIndex);
pack_id_2 = ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed2,
kWeightIndex2);
ASSERT_TRUE(cache_provider.StopBuildStep());
}
xnn_weights_cache_look_up_key LookUpKey1() const {
return ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex);
}
xnn_weights_cache_look_up_key LookUpKey2() const {
return ctx.LookUpKey(kAlgoSeed2, kWeightIndex2);
}
PackIdentifier pack_id_1;
PackIdentifier pack_id_2;
};
TEST_F(LoadMMapWeightCacheProviderTest, LookUpFailsIfKeyDoesntMatch) {
xnn_weights_cache_look_up_key look_up_key{};
EXPECT_EQ(cache_provider.LookUp(&look_up_key), SIZE_MAX);
}
TEST_F(LoadMMapWeightCacheProviderTest, LookUpSucceeds) {
const auto& reference_1 = ctx.packed_buffers.find(pack_id_1)->second;
const auto& reference_2 = ctx.packed_buffers.find(pack_id_2)->second;
const xnn_weights_cache_look_up_key look_up_key_1 = LookUpKey1();
const xnn_weights_cache_look_up_key look_up_key_2 = LookUpKey2();
const uint64_t offset_1 = cache_provider.LookUp(&look_up_key_1);
const uint64_t offset_2 = cache_provider.LookUp(&look_up_key_2);
ASSERT_EQ(offset_1, reference_1.offset);
ASSERT_EQ(offset_2, reference_2.offset);
const void* const addr_1 = cache_provider.OffsetToAddr(offset_1);
const void* const addr_2 = cache_provider.OffsetToAddr(offset_2);
ASSERT_NE(addr_1, nullptr);
ASSERT_NE(addr_2, nullptr);
EXPECT_THAT(LightSpan<const uint8_t>(addr_1, reference_1.buffer.size()),
ElementsAreArray(reference_1.buffer));
EXPECT_THAT(LightSpan<const uint8_t>(addr_2, reference_2.buffer.size()),
ElementsAreArray(reference_2.buffer));
}
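// End-to-end scenario through the raw xnn_weights_cache_t C API: build a cache
// over two build steps, check look-ups, then reload the same file in a fresh
// provider and verify the packed data again.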
TEST(MMapWeightCacheProviderTest, XnnpackCApiJourney) {
using std::size;
TempFileDesc temp_fd(TempFileDesc::kAutoClose);
const int32_t fake_packing_algo_seed = 0xBA0BAB;
const char packed_data_ref_1[] = "abcdefghij";
const char packed_data_ref_2[] = "klmnopqr";
const std::string packed_data_ref_3 =
      GenerateRandomString(10 * 1024 * 1024);
auto bytes = [](const auto& array) { return size(array) * sizeof(array[0]); };
constexpr int kBufferCount = 10;
char fake_buffer_pointer[kBufferCount] = {0};
{
TfLiteTensor tensors[kBufferCount];
std::unordered_map<size_t, size_t> tensor_buffer_identifiers;
for (int i = 0; i < kBufferCount; ++i) {
tensors[i].data.data = (void*)(fake_buffer_pointer + i);
tensor_buffer_identifiers[i] = i;
}
MMapWeightCacheProvider cache_provider;
ASSERT_TRUE(cache_provider.StartBuild(temp_fd.GetCPath()));
ASSERT_TRUE(cache_provider.StartBuildStep());
xnn_weights_cache_t cache = &cache_provider.GetCacheProvider();
cache_provider.MapTensorIdentifiers(tensors, size(tensors),
tensor_buffer_identifiers);
const xnn_weights_cache_look_up_key look_up_key_1{
.seed = fake_packing_algo_seed,
.kernel = tensors[0].data.data,
.bias = tensors[1].data.data};
const xnn_weights_cache_look_up_key look_up_key_3{
.seed = fake_packing_algo_seed,
.kernel = tensors[3].data.data,
.bias = tensors[4].data.data};
ASSERT_EQ(cache->look_up(cache, &look_up_key_1), SIZE_MAX);
void* const reserved_ptr =
cache->reserve_space(cache, bytes(packed_data_ref_1));
ASSERT_NE(reserved_ptr, nullptr);
std::memcpy(reserved_ptr, packed_data_ref_1, bytes(packed_data_ref_1));
const size_t build_offset_1 = cache->look_up_or_insert(
cache, &look_up_key_1, reserved_ptr, bytes(packed_data_ref_1));
const size_t build_offset_redundant = cache->look_up_or_insert(
cache, &look_up_key_1, reserved_ptr, bytes(packed_data_ref_1));
EXPECT_EQ(build_offset_1, build_offset_redundant);
ASSERT_EQ(cache->look_up(cache, &look_up_key_3), SIZE_MAX);
void* const reserved_ptr_3 =
cache->reserve_space(cache, bytes(packed_data_ref_3));
ASSERT_NE(reserved_ptr_3, nullptr);
std::memcpy(reserved_ptr_3, packed_data_ref_3.data(),
bytes(packed_data_ref_3));
const size_t build_offset_3 = cache->look_up_or_insert(
cache, &look_up_key_3, reserved_ptr_3, bytes(packed_data_ref_3));
ASSERT_TRUE(cache_provider.StopBuildStep());
ASSERT_EQ(cache->look_up(cache, &look_up_key_1), build_offset_1);
ASSERT_EQ(cache->look_up(cache, &look_up_key_3), build_offset_3);
ASSERT_TRUE(cache_provider.StartBuildStep());
const xnn_weights_cache_look_up_key look_up_key_2{
.seed = fake_packing_algo_seed,
.kernel = tensors[2].data.data,
.bias = tensors[3].data.data};
const size_t build_offset_2 = cache->look_up_or_insert(
cache, &look_up_key_2, (void*)packed_data_ref_2,
bytes(packed_data_ref_2));
EXPECT_EQ(cache->look_up(cache, &look_up_key_3), build_offset_3);
EXPECT_EQ(cache->look_up_or_insert(cache, &look_up_key_3, reserved_ptr_3,
bytes(packed_data_ref_3)),
build_offset_3);
ASSERT_TRUE(cache_provider.StopBuildStep());
ASSERT_TRUE(cache->is_finalized(cache));
const size_t reload_offset_1 = cache->look_up(cache, &look_up_key_1);
ASSERT_EQ(reload_offset_1, build_offset_1);
const void* const loaded_packed_data_1 =
cache->offset_to_addr(cache, reload_offset_1);
ASSERT_NE(loaded_packed_data_1, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_1, size(packed_data_ref_1)),
ElementsAreArray(packed_data_ref_1));
const size_t reload_offset_2 = cache->look_up(cache, &look_up_key_2);
ASSERT_EQ(reload_offset_2, build_offset_2);
const void* const loaded_packed_data_2 =
cache->offset_to_addr(cache, reload_offset_2);
ASSERT_NE(loaded_packed_data_2, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_2, size(packed_data_ref_2)),
ElementsAreArray(packed_data_ref_2));
const size_t reload_offset_3 = cache->look_up(cache, &look_up_key_3);
ASSERT_EQ(reload_offset_3, build_offset_3);
const void* const loaded_packed_data_3 =
cache->offset_to_addr(cache, reload_offset_3);
ASSERT_NE(loaded_packed_data_3, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_3, size(packed_data_ref_3)),
ElementsAreArray(packed_data_ref_3));
}
{
TfLiteTensor tensors[kBufferCount];
std::unordered_map<size_t, size_t> tensor_buffer_identifiers;
for (int i = 0; i < kBufferCount; ++i) {
tensors[i].data.data = (void*)(fake_buffer_pointer + i);
tensor_buffer_identifiers[i] = i;
}
MMapWeightCacheProvider cache_provider;
ASSERT_TRUE(cache_provider.Load(temp_fd.GetCPath()));
xnn_weights_cache_t cache = &cache_provider.GetCacheProvider();
cache_provider.MapTensorIdentifiers(tensors, size(tensors),
tensor_buffer_identifiers);
const xnn_weights_cache_look_up_key look_up_key_1{
.seed = fake_packing_algo_seed,
.kernel = tensors[0].data.data,
.bias = tensors[1].data.data};
const xnn_weights_cache_look_up_key look_up_key_2{
.seed = fake_packing_algo_seed,
.kernel = tensors[2].data.data,
.bias = tensors[3].data.data};
const xnn_weights_cache_look_up_key look_up_key_3{
.seed = fake_packing_algo_seed,
.kernel = tensors[3].data.data,
.bias = tensors[4].data.data};
ASSERT_TRUE(cache->is_finalized(cache));
const size_t offset_1 = cache->look_up(cache, &look_up_key_1);
const void* const loaded_packed_data_1 =
cache->offset_to_addr(cache, offset_1);
ASSERT_NE(loaded_packed_data_1, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_1, size(packed_data_ref_1)),
ElementsAreArray(packed_data_ref_1));
const size_t offset_2 = cache->look_up(cache, &look_up_key_2);
ASSERT_NE(offset_2, SIZE_MAX);
const void* const loaded_packed_data_2 =
cache->offset_to_addr(cache, offset_2);
ASSERT_NE(loaded_packed_data_2, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_2, size(packed_data_ref_2)),
ElementsAreArray(packed_data_ref_2));
const size_t offset_3 = cache->look_up(cache, &look_up_key_3);
const void* const loaded_packed_data_3 =
cache->offset_to_addr(cache, offset_3);
ASSERT_NE(loaded_packed_data_3, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_3, size(packed_data_ref_3)),
ElementsAreArray(packed_data_ref_3));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/weight_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/weight_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e0e8be79-3ac2-41fa-9213-6eda04617265 | cpp | tensorflow/tensorflow | file_util | tensorflow/lite/delegates/xnnpack/file_util.cc | tensorflow/lite/delegates/xnnpack/file_util_test.cc | #include "tensorflow/lite/delegates/xnnpack/file_util.h"
#include <fcntl.h>
#if defined(_MSC_VER)
#include <io.h>
#define F_OK 0
#else
#include <unistd.h>
#endif
#if defined(__linux__) || defined(__ANDROID__)
#ifndef TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
#include <sys/syscall.h>
#ifdef SYS_memfd_create
#define TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED 1
#endif
#endif
#endif
#include <cstdio>
#if !TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#endif
namespace tflite {
namespace xnnpack {
FileDescriptor FileDescriptor::Duplicate() const {
if (!IsValid()) {
return FileDescriptor(-1);
}
return FileDescriptor(dup(fd_));
}
void FileDescriptor::Reset(int new_fd) {
if (fd_ == new_fd) {
return;
}
if (IsValid()) {
close(fd_);
}
fd_ = new_fd;
}
off_t FileDescriptor::GetPos() const { return lseek(fd_, 0, SEEK_CUR); }
off_t FileDescriptor::SetPos(off_t position) const {
return lseek(fd_, position, SEEK_SET);
}
off_t FileDescriptor::SetPosFromEnd(off_t offset) const {
return lseek(fd_, offset, SEEK_END);
}
off_t FileDescriptor::MovePos(off_t offset) const {
return lseek(fd_, offset, SEEK_CUR);
}
FileDescriptor FileDescriptor::Open(const char* path, int flags, mode_t mode) {
return FileDescriptor(open(path, flags, mode));
}
void FileDescriptor::Close() { Reset(-1); }
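// Reads up to `count` bytes into `dst`, retrying on partial reads. Returns
// false only on a read error; hitting EOF before `count` bytes still returns
// true.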
bool FileDescriptor::Read(void* dst, size_t count) const {
char* dst_it = reinterpret_cast<char*>(dst);
while (count > 0) {
const auto bytes = read(fd_, dst_it, count);
if (bytes == -1) {
return false;
} else if (bytes == 0) {
break;
}
count -= bytes;
dst_it += bytes;
}
return true;
}
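// Writes `count` bytes from `src`, retrying on partial writes. Returns false
// on any write error.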
bool FileDescriptor::Write(const void* src, size_t count) const {
const char* src_it = reinterpret_cast<const char*>(src);
while (count > 0) {
const auto bytes = write(fd_, src_it, count);
if (bytes == -1) {
return false;
}
count -= bytes;
src_it += bytes;
}
return true;
}
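// Returns true if this build has in-memory file support enabled and the
// running kernel accepts the memfd_create syscall.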
bool InMemoryFileDescriptorAvailable() {
#if TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
const int test_fd = syscall(SYS_memfd_create, "test fd", 0);
if (test_fd != -1) {
close(test_fd);
return true;
}
#endif
return false;
}
FileDescriptor CreateInMemoryFileDescriptor(const char* path) {
#ifdef TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
return FileDescriptor(
syscall(SYS_memfd_create, "XNNPack in-memory weight cache", 0));
#else
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: in-memory cache is not enabled for "
"this build.");
return FileDescriptor(-1);
#endif
}
}
} | #include "tensorflow/lite/delegates/xnnpack/file_util.h"
#include <fcntl.h>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
namespace tflite::xnnpack {
namespace {
TEST(FileDescriptorTest, DefaultConstructedIsInvalid) {
FileDescriptor fd;
EXPECT_FALSE(fd.IsValid());
}
TEST(FileDescriptorTest, ConstructAndRelease) {
const int kFd = 53;
FileDescriptor fd(kFd);
EXPECT_TRUE(fd.IsValid());
EXPECT_EQ(fd.Value(), kFd);
FileDescriptor fd2(std::move(fd));
EXPECT_FALSE(fd.IsValid());
EXPECT_TRUE(fd2.IsValid());
EXPECT_EQ(fd2.Value(), kFd);
EXPECT_EQ(fd2.Release(), kFd);
EXPECT_FALSE(fd2.IsValid());
EXPECT_FALSE(std::is_copy_constructible_v<FileDescriptor>);
}
TEST(FileDescriptorTest, OpenWriteRewindAndReadWorks) {
const std::string tmp_file = testing::TempDir() + __FUNCTION__;
FileDescriptor fd =
FileDescriptor::Open(tmp_file.c_str(), O_CREAT | O_TRUNC | O_RDWR, 0644);
ASSERT_TRUE(fd.IsValid());
const std::string src_data = "The quick brown fox jumps over the lazy dog.";
EXPECT_TRUE(fd.Write(src_data.data(), src_data.size()));
EXPECT_EQ(fd.SetPos(0), 0);
std::string dst_data(src_data.size(), ' ');
EXPECT_TRUE(fd.Read(dst_data.data(), src_data.size()));
EXPECT_EQ(dst_data, src_data);
}
TEST(FileDescriptorTest, WriteFailureReturnsFalse) {
const std::string tmp_file = testing::TempDir() + __FUNCTION__;
FileDescriptor fd = FileDescriptor::Open(tmp_file.c_str(),
O_CREAT | O_TRUNC | O_RDONLY, 0644);
ASSERT_TRUE(fd.IsValid());
const std::string src_data = "The quick brown fox jumps over the lazy dog.";
EXPECT_FALSE(fd.Write(src_data.data(), src_data.size()));
}
TEST(FileDescriptorTest, ReadFailureReturnsFalse) {
const std::string tmp_file = testing::TempDir() + __FUNCTION__;
FileDescriptor fd = FileDescriptor::Open(tmp_file.c_str(),
O_CREAT | O_TRUNC | O_WRONLY, 0644);
ASSERT_TRUE(fd.IsValid());
std::string dst_data(5, ' ');
EXPECT_FALSE(fd.Read(dst_data.data(), dst_data.size()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/file_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/file_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1fbbc63d-427e-4986-9d90-d77ea560e790 | cpp | tensorflow/tensorflow | delegate_data | tensorflow/lite/delegates/flex/delegate_data.cc | tensorflow/lite/delegates/flex/delegate_data_test.cc | #include "tensorflow/lite/delegates/flex/delegate_data.h"
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
namespace {
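// Builds a FunctionDef wrapping a TFLite subgraph: the body is a single
// TfLiteSubgraphExecute node whose first input is a string constant holding
// the subgraph's resource key and whose remaining inputs are the subgraph
// inputs.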
void BuildFunctionDefProto(const std::string& function_name,
const Subgraph& subgraph,
tensorflow::FunctionDef& fdef) {
std::vector<std::string> inputs, outputs;
inputs.reserve(subgraph.inputs().size());
outputs.reserve(subgraph.outputs().size());
for (int i = 0; i < subgraph.inputs().size(); ++i) {
inputs.push_back(absl::StrCat(
"args_", i, ": ",
TfLiteTypeToTfTypeName(subgraph.tensor(subgraph.inputs()[i])->type)));
}
for (int i = 0; i < subgraph.outputs().size(); ++i) {
outputs.push_back(absl::StrCat(
"res_", i, ": ",
TfLiteTypeToTfTypeName(subgraph.tensor(subgraph.outputs()[i])->type)));
}
std::vector<tensorflow::FunctionDefHelper::Node> nodes;
nodes.push_back(tensorflow::FunctionDefHelper::Const<tensorflow::tstring>(
"SubgraphResourceKey", function_name));
tensorflow::FunctionDefHelper::Node execute_node;
execute_node.ret.push_back("InvokeTfLite");
execute_node.op = "TfLiteSubgraphExecute";
execute_node.arg.push_back("SubgraphResourceKey:output:0");
for (int i = 0; i < subgraph.inputs().size(); ++i) {
execute_node.arg.push_back(absl::StrCat("args_", i));
}
nodes.push_back(execute_node);
std::vector<std::pair<std::string, std::string>> ret_def;
ret_def.reserve(subgraph.outputs().size());
for (int i = 0; i < subgraph.outputs().size(); ++i) {
ret_def.emplace_back(absl::StrCat("res_", i),
absl::StrCat("InvokeTfLite:output:", i));
}
fdef = tensorflow::FunctionDefHelper::Create(function_name, inputs, outputs,
{}, nodes, ret_def);
tensorflow::AttrValue tin_attrs, tout_attrs;
for (int i = 0; i < subgraph.inputs().size(); ++i) {
TF_DataType dtype = tflite::flex::GetTensorFlowDataType(
subgraph.tensor(subgraph.inputs()[i])->type);
tin_attrs.mutable_list()->add_type(tensorflow::DataType(dtype));
}
for (int i = 0; i < subgraph.outputs().size(); ++i) {
TF_DataType dtype = tflite::flex::GetTensorFlowDataType(
subgraph.tensor(subgraph.outputs()[i])->type);
tout_attrs.mutable_list()->add_type(tensorflow::DataType(dtype));
}
fdef.mutable_node_def(1)->mutable_attr()->insert({"Tin", tin_attrs});
fdef.mutable_node_def(1)->mutable_attr()->insert({"Tout", tout_attrs});
}
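// Walks all subgraphs looking for flex custom ops and collects the names of
// the TF functions referenced by their attributes, i.e. the subgraphs that may
// be invoked as functions during flex execution.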
tensorflow::Status GetSubgraphNamesForFunctionExecution(
const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
std::set<std::string>* result) {
tensorflow::NodeDef node_def;
for (const auto& subgraph : subgraphs) {
for (const auto& node_and_reg : subgraph->nodes_and_registration()) {
if (node_and_reg.second.builtin_code != tflite::BuiltinOperator_CUSTOM) {
continue;
}
const std::string custom_name = node_and_reg.second.custom_name;
if (custom_name.substr(0, strlen(tflite::kFlexCustomCodePrefix)) !=
tflite::kFlexCustomCodePrefix) {
continue;
}
const flexbuffers::Vector& v =
flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(
node_and_reg.first.custom_initial_data),
node_and_reg.first.custom_initial_data_size)
.AsVector();
if (!node_def.ParseFromString(v[1].AsString().str())) {
return tensorflow::Status(absl::StatusCode::kInternal,
"could not parse NodeDef");
}
for (const auto& attr : node_def.attr()) {
if (attr.second.has_func()) {
result->insert(attr.second.func().name());
}
}
}
}
return absl::OkStatus();
}
}
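// For every subgraph selected by `select_subgraphs_to_register`, registers a
// TFLiteSubgraphResource under the "flex" container and adds the matching
// FunctionDef to the eager context so TF can call back into the subgraph.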
tensorflow::Status RegisterFunctionDefForSubgraphs(
Subgraph& main_subgraph,
const std::function<tensorflow::Status(
const std::vector<std::unique_ptr<Subgraph>>&, std::set<std::string>*)>&
select_subgraphs_to_register,
tensorflow::ResourceMgr* resource_mgr,
tensorflow::EagerContext* eager_context, TfLiteDelegate* flex_delegate) {
std::vector<std::unique_ptr<Subgraph>>* subgraphs =
main_subgraph.GetSubgraphs();
if (!subgraphs) {
return absl::OkStatus();
}
std::set<std::string> function_subgraphs;
TF_RETURN_IF_ERROR(
select_subgraphs_to_register(*subgraphs, &function_subgraphs));
for (int i = 0; i < subgraphs->size(); ++i) {
if (subgraphs->at(i)->GetName() == "main") {
continue;
}
const std::string subgraph_name = subgraphs->at(i)->GetName();
if (!function_subgraphs.count(subgraph_name)) {
continue;
}
auto* subgraph_resource =
new TFLiteSubgraphResource(*(subgraphs->at(i)), flex_delegate);
TF_RETURN_IF_ERROR(resource_mgr->Create<TFLiteSubgraphResource>(
"flex", subgraph_name, subgraph_resource));
tensorflow::FunctionDef fdef;
BuildFunctionDefProto(subgraph_name, *(subgraphs->at(i)), fdef);
TF_RETURN_IF_ERROR(eager_context->AddFunctionDef(fdef));
}
return absl::OkStatus();
}
DelegateData::DelegateData() {}
DelegateData::~DelegateData() {
if (eager_context_) {
eager_context_->HostCPU()->ClearResourceMgr();
eager_context_->Unref();
}
}
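// Lazily creates the EagerContext (and its device manager) and, when a main
// subgraph is given, registers its eligible subgraphs as TF functions.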
tensorflow::Status DelegateData::Prepare(
const tensorflow::SessionOptions& session_options, Subgraph* main_subgraph,
TfLiteDelegate* flex_delegate) {
if (eager_context_) {
return tensorflow::Status();
}
if (flex_delegate == nullptr && main_subgraph != nullptr) {
return tensorflow::Status(
absl::StatusCode::kFailedPrecondition,
"flex_delegate must be non-null when main_subgraph is provided.");
}
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto device_mgr =
std::make_unique<tensorflow::StaticDeviceMgr>(std::move(devices));
auto rendezvous = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
eager_context_ = new tensorflow::EagerContext(
session_options,
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr.release(), true,
std::move(rendezvous), nullptr);
if (main_subgraph) {
TF_RETURN_IF_ERROR(RegisterFunctionDefForSubgraphs(
*main_subgraph, GetSubgraphNamesForFunctionExecution,
eager_context_->HostCPU()->resource_manager(), eager_context_,
flex_delegate));
}
return tensorflow::Status();
}
}
} | #include "tensorflow/lite/delegates/flex/delegate_data.h"
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace flex {
namespace {
using ::tensorflow::protobuf::TextFormat;
using ::tensorflow::protobuf::util::MessageDifferencer;
TEST(DelegateDataTest, Basic) {
DelegateData data;
tensorflow::SessionOptions session_options;
session_options.config.set_intra_op_parallelism_threads(2);
EXPECT_TRUE(data.Prepare(session_options).ok());
TfLiteContext dummy_context1 = {};
TfLiteContext dummy_context2 = {};
ASSERT_NE(data.GetEagerContext(), nullptr);
EXPECT_NE(data.GetBufferMap(&dummy_context1), nullptr);
EXPECT_NE(data.GetBufferMap(&dummy_context1),
data.GetBufferMap(&dummy_context2));
}
TEST(DelegateDataTest, CheckFunctionDef) {
tensorflow::StaticDeviceMgr device_mgr(tensorflow::DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
tensorflow::SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr, false, nullptr,
nullptr);
auto select_subgraphs_to_register =
[](const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
std::set<std::string>* result) {
result->insert("add_subgraph");
result->insert("mul_subgraph");
return absl::OkStatus();
};
subgraph_test_util::SubgraphBuilder builder;
std::unique_ptr<ErrorReporter> error_reporter =
std::make_unique<TestErrorReporter>();
auto add_subgraph = std::make_unique<Subgraph>(
error_reporter.get(), nullptr,
nullptr, nullptr, nullptr,
nullptr);
add_subgraph->SetName("add_subgraph");
auto mul_subgraph = std::make_unique<Subgraph>(
error_reporter.get(), nullptr,
nullptr, nullptr, nullptr,
nullptr);
mul_subgraph->SetName("mul_subgraph");
builder.BuildAddSubgraph(add_subgraph.get());
builder.BuildMulSubgraph(mul_subgraph.get());
std::vector<std::unique_ptr<Subgraph>> subgraphs;
subgraphs.push_back(std::move(add_subgraph));
subgraphs.push_back(std::move(mul_subgraph));
Subgraph main_subgraph(error_reporter.get(), nullptr, &subgraphs,
nullptr, nullptr,
nullptr);
main_subgraph.SetName("main");
TF_ASSERT_OK(RegisterFunctionDefForSubgraphs(
main_subgraph, select_subgraphs_to_register,
eager_context->HostCPU()->resource_manager(), eager_context,
nullptr));
const string add_fdef_txt = R"pb(
signature {
name: "add_subgraph"
input_arg { name: "args_0" type: DT_INT32 }
input_arg { name: "args_1" type: DT_INT32 }
output_arg { name: "res_0" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "SubgraphResourceKey"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "add_subgraph"
}
}
}
}
node_def {
name: "InvokeTfLite"
op: "TfLiteSubgraphExecute"
input: "SubgraphResourceKey:output:0"
input: "args_0"
input: "args_1"
attr {
key: "Tin"
value { list { type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
}
ret { key: "res_0" value: "InvokeTfLite:output:0" })pb";
const string mul_fdef_txt = R"pb(
signature {
name: "mul_subgraph"
input_arg { name: "args_0" type: DT_INT32 }
input_arg { name: "args_1" type: DT_INT32 }
output_arg { name: "res_0" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "SubgraphResourceKey"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "mul_subgraph"
}
}
}
}
node_def {
name: "InvokeTfLite"
op: "TfLiteSubgraphExecute"
input: "SubgraphResourceKey:output:0"
input: "args_0"
input: "args_1"
attr {
key: "Tin"
value { list { type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
}
ret { key: "res_0" value: "InvokeTfLite:output:0" })pb";
tensorflow::FunctionDef add_fdef, mul_fdef;
ASSERT_TRUE(TextFormat::ParseFromString(add_fdef_txt, &add_fdef));
ASSERT_TRUE(TextFormat::ParseFromString(mul_fdef_txt, &mul_fdef));
EXPECT_EQ(eager_context->GetFunctionDef("main"), nullptr);
ASSERT_NE(eager_context->GetFunctionDef("add_subgraph"), nullptr);
ASSERT_NE(eager_context->GetFunctionDef("mul_subgraph"), nullptr);
EXPECT_TRUE(MessageDifferencer::Equals(
*(eager_context->GetFunctionDef("add_subgraph")), add_fdef));
EXPECT_TRUE(MessageDifferencer::Equals(
*(eager_context->GetFunctionDef("mul_subgraph")), mul_fdef));
eager_context->Unref();
}
TEST(DelegateDataTest, CheckFunctionDefWithOnlyMainGraph) {
tensorflow::StaticDeviceMgr device_mgr(tensorflow::DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
tensorflow::SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr, false, nullptr,
nullptr);
auto select_subgraphs_to_register =
[](const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
std::set<std::string>* result) {
result->insert("add_subgraph");
result->insert("mul_subgraph");
return absl::OkStatus();
};
subgraph_test_util::SubgraphBuilder builder;
std::unique_ptr<ErrorReporter> error_reporter =
std::make_unique<TestErrorReporter>();
Subgraph main_subgraph(error_reporter.get(), nullptr,
nullptr, nullptr,
nullptr,
nullptr);
main_subgraph.SetName("main");
TF_ASSERT_OK(RegisterFunctionDefForSubgraphs(
main_subgraph, select_subgraphs_to_register,
eager_context->HostCPU()->resource_manager(), eager_context,
nullptr));
EXPECT_EQ(eager_context->GetFunctionDef("main"), nullptr);
eager_context->Unref();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate_data.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate_data_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ddb0fc7-9ef8-4b75-ae3c-efd61e630587 | cpp | tensorflow/tensorflow | buffer_map | tensorflow/lite/delegates/flex/buffer_map.cc | tensorflow/lite/delegates/flex/buffer_map_test.cc | #include "tensorflow/lite/delegates/flex/buffer_map.h"
#include <utility>
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace flex {
BufferMap::BufferMap() {}
BufferMap::~BufferMap() {}
bool BufferMap::HasTensor(int tensor_index) const {
return id_to_tensor_.count(tensor_index) != 0;
}
tensorflow::Tensor BufferMap::GetTensor(int tensor_index) const {
return id_to_tensor_.at(tensor_index);
}
const tensorflow::Tensor* BufferMap::GetTensorPtr(int tensor_index) const {
auto& tensor = id_to_tensor_.at(tensor_index);
return &tensor;
}
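// Stores the TfLite tensor contents as a tensorflow::Tensor under
// `tensor_index`; `allow_reusing` controls whether the TfLite buffer may be
// reused without a copy.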
void BufferMap::SetFromTfLite(int tensor_index, const TfLiteTensor* tensor,
bool allow_reusing) {
TFLITE_CHECK(
SetTfTensorFromTfLite(tensor, &id_to_tensor_[tensor_index], allow_reusing)
.ok());
}
void BufferMap::SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor) {
id_to_tensor_[tensor_index] = std::move(tensor);
}
}
} | #include "tensorflow/lite/delegates/flex/buffer_map.h"
#include <sys/types.h>
#include <cstring>
#include <functional>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
namespace {
using ::testing::ElementsAre;
using UniqueTfLiteTensor =
std::unique_ptr<TfLiteTensor, std::function<void(TfLiteTensor*)>>;
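// Creates a dynamically allocated TfLite tensor of the given shape filled with
// `data`; the returned unique_ptr frees both the tensor data and the dims
// array.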
template <typename T>
UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
const std::vector<T>& data) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
});
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<T>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
return tensor;
}
template <>
UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
const std::vector<string>& data) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
});
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<string>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
TfLiteTensorRealloc(data.size() * sizeof(string), tensor.get());
DynamicBuffer b;
for (const string& s : data) {
b.AddString(s.data(), s.size());
}
b.WriteToTensor(tensor.get(), ConvertVectorToTfLiteIntArray(shape));
return tensor;
}
template <typename T>
tensorflow::Tensor MakeTensor(const std::vector<int64_t>& shape,
const std::vector<T>& data,
tensorflow::DataType dtype) {
tensorflow::Tensor tensor(dtype, tensorflow::TensorShape(shape));
memcpy(tensor.data(), data.data(), data.size() * sizeof(T));
return tensor;
}
std::vector<int64_t> GetTensorShape(const tensorflow::Tensor& t) {
std::vector<int64_t> shape(t.dims());
for (int i = 0; i < t.dims(); ++i) {
shape[i] = t.dim_size(i);
}
return shape;
}
template <typename T>
std::vector<T> GetTensorData(const tensorflow::Tensor& t) {
const T* data = t.flat<T>().data();
return std::vector<T>(data, data + t.NumElements());
}
TEST(BufferMapTest, EmptyBuffer) {
BufferMap buffer_map;
EXPECT_FALSE(buffer_map.HasTensor(0));
}
TEST(BufferMapTest, SetFromTfLite) {
BufferMap buffer_map;
UniqueTfLiteTensor t =
MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
buffer_map.SetFromTfLite(0, t.get());
ASSERT_TRUE(buffer_map.HasTensor(0));
EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 0.123f, 0, 0));
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
ASSERT_EQ(out_tensor.NumElements(), 6);
ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
TEST(BufferMapTest, SetFromTfLiteString) {
BufferMap buffer_map;
UniqueTfLiteTensor t =
MakeLiteTensor<string>({1, 2, 1, 3}, {"", "", "", "str1", "", ""});
buffer_map.SetFromTfLite(0, t.get());
ASSERT_TRUE(buffer_map.HasTensor(0));
EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
ElementsAre("", "", "", "str1", "", ""));
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_STRING);
ASSERT_EQ(out_tensor.NumElements(), 6);
ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
TEST(BufferMapTest, SetFromTfLiteTwice) {
UniqueTfLiteTensor t1 =
MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
UniqueTfLiteTensor t2 =
MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
BufferMap buffer_map;
buffer_map.SetFromTfLite(0, t1.get());
buffer_map.SetFromTfLite(0, t2.get());
EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
TEST(BufferMapTest, SetFromTfLiteStringTwice) {
UniqueTfLiteTensor t1 =
MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
UniqueTfLiteTensor t2 =
MakeLiteTensor<string>({1, 2, 4}, {"", "", "", "s3", "", "", "s1", "s2"});
BufferMap buffer_map;
buffer_map.SetFromTfLite(0, t1.get());
buffer_map.SetFromTfLite(0, t2.get());
EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
ElementsAre("", "", "", "s3", "", "", "s1", "s2"));
}
TEST(BufferMapTest, SetFromTfLiteBuiltinResource) {
BufferMap buffer_map;
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
});
tensor->allocation_type = kTfLiteDynamic;
tensor->type = kTfLiteResource;
tensor->dims = ConvertVectorToTfLiteIntArray({1});
TfLiteTensorRealloc(sizeof(int32_t), tensor.get());
tensor->delegate = nullptr;
tensor->data.i32[0] = 1;
buffer_map.SetFromTfLite(0, tensor.get());
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_RESOURCE);
ASSERT_EQ(out_tensor.NumElements(), 1);
tensorflow::ResourceHandle handle =
out_tensor.flat<tensorflow::ResourceHandle>()(0);
EXPECT_EQ(handle.name(), "tflite_resource_variable:1");
}
TEST(BufferMapTest, SetFromTensorFlow) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
BufferMap buffer_map;
buffer_map.SetFromTensorFlow(0, t1);
EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 0.123f, 0, 0));
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
ASSERT_EQ(out_tensor.NumElements(), 6);
ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
TEST(BufferMapTest, SetFromTensorFlowTwice) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
tensorflow::Tensor t2 = MakeTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2},
tensorflow::DT_INT32);
BufferMap buffer_map;
buffer_map.SetFromTensorFlow(0, t1);
buffer_map.SetFromTensorFlow(0, t2);
EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
TEST(BufferMapTest, TfLiteOverwritesTensorFlow) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
UniqueTfLiteTensor t2 =
MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
BufferMap buffer_map;
buffer_map.SetFromTensorFlow(0, t1);
buffer_map.SetFromTfLite(0, t2.get());
EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
TEST(BufferMapTest, TensorFlowOverwritesTfLite) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
UniqueTfLiteTensor t2 =
MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
BufferMap buffer_map;
buffer_map.SetFromTfLite(0, t2.get());
buffer_map.SetFromTensorFlow(0, t1);
EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 0.123f, 0, 0));
}
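// Wrapping a dynamically allocated TfLiteTensor in a TfLiteTensorBuffer should
// reuse the tensor's buffer (same data pointer) rather than copying it.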
TEST(BufferMapTest, TensorflowBufferReuse) {
const int kAllocationSize = 1000;
TfLiteTensor tensor;
tensor.allocation_type = kTfLiteDynamic;
tensor.data.raw = nullptr;
TfLiteTensorRealloc(kAllocationSize, &tensor);
CHECK(tensor.data.raw);
EXPECT_EQ(tensor.bytes, kAllocationSize);
TfLiteTensorBuffer* tensor_buffer_reused = new TfLiteTensorBuffer(&tensor);
EXPECT_TRUE(tensor_buffer_reused->BufferReusedFromTfLiteTensor());
EXPECT_EQ(tensor_buffer_reused->data(), tensor.data.raw);
tensor_buffer_reused->Unref();
TfLiteTensorDataFree(&tensor);
}
TEST(BufferMapTest, ExplicitlyDisableBufferReuse) {
TfLiteTensor tensor;
tensor.allocation_type = kTfLiteDynamic;
tensor.data.raw = nullptr;
TfLiteTensorRealloc(10, &tensor);
CHECK(tensor.data.raw);
EXPECT_EQ(tensor.bytes, 10);
TfLiteTensorBuffer* tensor_buffer =
new TfLiteTensorBuffer(&tensor, false);
EXPECT_FALSE(tensor_buffer->BufferReusedFromTfLiteTensor());
EXPECT_NE(tensor_buffer->data(), tensor.data.raw);
tensor_buffer->Unref();
TfLiteTensorDataFree(&tensor);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/buffer_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/buffer_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c2483977-5d43-4a8a-b8d1-d284e3bc2329 | cpp | tensorflow/tensorflow | c_api_opaque_internal | tensorflow/lite/c/c_api_opaque_internal.cc | tensorflow/lite/c/c_api_opaque_internal_test.cc | #include "tensorflow/lite/c/c_api_opaque_internal.h"
#include <memory>
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/c/operator.h"
#include "tensorflow/lite/core/subgraph.h"
namespace tflite {
namespace internal {
namespace {
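// Wraps a TfLiteRegistration in a freshly allocated TfLiteOperator and records
// the index of the node it was created for.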
TfLiteOperator* MakeOperator(const TfLiteRegistration* registration,
int node_index) {
auto* registration_external = TfLiteOperatorCreate(
static_cast<TfLiteBuiltinOperator>(registration->builtin_code),
registration->custom_name, registration->version,
nullptr);
registration_external->node_index = node_index;
return registration_external;
}
}
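// Returns the TfLiteOperator cached under the registration's
// (builtin_code, custom_name, version) key, creating and caching one on a miss.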
TfLiteOperator* CommonOpaqueConversionUtil::CachedObtainOperator(
OperatorsCache* registration_externals_cache,
const TfLiteRegistration* registration, int node_index) {
OpResolver::OpId op_id{registration->builtin_code, registration->custom_name,
registration->version};
auto it = registration_externals_cache->find(op_id);
if (it != registration_externals_cache->end()) {
return it->second.get();
}
auto* registration_external = MakeOperator(registration, node_index);
registration_externals_cache->insert(
it, std::make_pair(op_id, registration_external));
return registration_external;
}
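// Lazily creates the subgraph's operator cache, then resolves the registration
// through CachedObtainOperator.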
TfLiteOperator* CommonOpaqueConversionUtil::ObtainOperator(
TfLiteContext* context, const TfLiteRegistration* registration,
int node_index) {
auto* subgraph = static_cast<tflite::Subgraph*>(context->impl_);
if (!subgraph->registration_externals_) {
subgraph->registration_externals_ = std::make_shared<OperatorsCache>();
}
return CachedObtainOperator(subgraph->registration_externals_.get(),
registration, node_index);
}
}
} | #include "tensorflow/lite/c/c_api_opaque_internal.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
using tflite::FlatBufferModel;
using tflite::Interpreter;
using tflite::InterpreterBuilder;
using tflite::internal::CommonOpaqueConversionUtil;
using tflite::ops::builtin::BuiltinOpResolver;
TEST(ObtainRegistrationFromContext, ProducesValidResult) {
BuiltinOpResolver op_resolver;
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<FlatBufferModel> model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
InterpreterBuilder builder(*model, op_resolver);
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
TfLiteContext* context = interpreter->primary_subgraph().context();
const TfLiteRegistration* registration = tflite::ops::builtin::Register_ADD();
TfLiteOperator* registration_external =
CommonOpaqueConversionUtil::ObtainOperator(context, registration, 42);
ASSERT_EQ(registration_external->builtin_code, kTfLiteBuiltinAdd);
ASSERT_EQ(registration_external->version, registration->version);
ASSERT_EQ(registration_external->custom_name, registration->custom_name);
ASSERT_EQ(registration_external->node_index, 42);
}
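// Repeated lookups for the same registration should return the same cached
// TfLiteOperator instance, regardless of the node index passed in.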
TEST(ObtainRegistrationFromContext, CachingWorks) {
BuiltinOpResolver op_resolver;
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<FlatBufferModel> model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
InterpreterBuilder builder(*model, op_resolver);
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
TfLiteContext* context = interpreter->primary_subgraph().context();
const TfLiteRegistration* registration = tflite::ops::builtin::Register_ADD();
TfLiteOperator* registration_external1 =
CommonOpaqueConversionUtil::ObtainOperator(context, registration, 0);
TfLiteOperator* registration_external2 =
CommonOpaqueConversionUtil::ObtainOperator(context, registration, 1);
ASSERT_EQ(registration_external1, registration_external2);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/c/c_api_opaque_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/c/c_api_opaque_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44a027b4-1a91-443c-bdaf-d979b02d5afa | cpp | tensorflow/tensorflow | tooling_util | tensorflow/lite/toco/tooling_util.cc | tensorflow/lite/toco/tooling_util_test.cc | #include "tensorflow/lite/toco/tooling_util.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "re2/re2.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/dump_graphviz.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
namespace toco {
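// Returns a view into 'a' covering the longest prefix shared by 'a' and 'b',
// e.g. FindLongestCommonPrefix("conv/weights", "conv/bias") yields "conv/".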
absl::string_view FindLongestCommonPrefix(absl::string_view a,
absl::string_view b) {
if (a.empty() || b.empty()) return absl::string_view();
const char* pa = a.data();
const char* pb = b.data();
size_t count = 0;
const size_t limit = std::min(a.size(), b.size());
while (count < limit && *pa == *pb) {
++pa;
++pb;
++count;
}
return absl::string_view(a.data(), count);
}
std::string LogName(const Operator& op) {
const std::string& opname = HelpfulOperatorTypeName(op);
if (op.outputs.empty()) {
return toco::port::StringF("{%s operator}", opname);
} else {
return toco::port::StringF("{%s operator with output %s}", opname,
op.outputs[0]);
}
}
std::string ArrayDataTypeName(ArrayDataType data_type) {
switch (data_type) {
case ArrayDataType::kFloat:
return "float";
case ArrayDataType::kInt8:
return "int8";
case ArrayDataType::kUint8:
return "uint8";
case ArrayDataType::kInt16:
return "int16";
case ArrayDataType::kUint16:
return "uint16";
case ArrayDataType::kInt32:
return "int32";
case ArrayDataType::kUint32:
return "uint32";
case ArrayDataType::kInt64:
return "int64";
case ArrayDataType::kUint64:
return "uint64";
case ArrayDataType::kString:
return "string";
case ArrayDataType::kBool:
return "bool";
case ArrayDataType::kComplex64:
return "complex64";
case ArrayDataType::kNone:
return "None";
default:
LOG(FATAL) << "Unhandled array data type " << static_cast<int>(data_type);
}
}
bool IsInputArray(const Model& model, const std::string& array_name) {
for (const auto& input_array : model.flags.input_arrays()) {
if (array_name == input_array.name()) {
return true;
}
}
return false;
}
bool IsOutputArray(const Model& model, const std::string& array_name) {
for (const auto& output_array : model.flags.output_arrays()) {
if (array_name == output_array) {
return true;
}
}
return false;
}
bool IsArrayConsumed(const Model& model, const std::string& name) {
if (GetOpWithInput(model, name)) {
return true;
}
if (IsOutputArray(model, name)) {
return true;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (rnn_state.back_edge_source_array() == name) {
return true;
}
}
return false;
}
int CountTrueOutputs(const Model& model, const Operator& op) {
int count = 0;
for (const std::string& output : op.outputs) {
if (IsArrayConsumed(model, output)) {
++count;
}
}
return count;
}
int CountOpsWithInput(const Model& model, const std::string& array_name) {
int count = 0;
for (const auto& op : model.operators) {
for (auto& input : op->inputs) {
if (input == array_name) {
count++;
break;
}
}
}
return count;
}
bool DeleteArrayIfUnused(const std::string& array_name, Model* model) {
if (IsDiscardableArray(*model, array_name) &&
CountOpsWithInput(*model, array_name) == 0 &&
GetOpWithOutput(*model, array_name) == nullptr) {
model->EraseArray(array_name);
return true;
}
return false;
}
bool DeleteArrayIfUnusedOutsideOfOp(const std::string& array_name,
const Operator* op, Model* model) {
if (!IsDiscardableArray(*model, array_name)) {
return false;
}
if (CountOpsWithInput(*model, array_name) > 1) {
return false;
}
const Operator* op_having_this_as_input = GetOpWithInput(*model, array_name);
if (op_having_this_as_input && op_having_this_as_input != op) {
return false;
}
const Operator* op_having_this_as_output =
GetOpWithOutput(*model, array_name);
if (op_having_this_as_output && op_having_this_as_output != op) {
return false;
}
model->EraseArray(array_name);
return true;
}
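// Removes 'op' from the model and erases any of its input/output arrays that
// are discardable and no longer referenced by another operator.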
void DeleteOpAndArrays(Model* model, const Operator* op) {
for (const std::string& array_name : op->inputs) {
DeleteArrayIfUnusedOutsideOfOp(array_name, op, model);
}
for (const std::string& array_name : op->outputs) {
DeleteArrayIfUnusedOutsideOfOp(array_name, op, model);
}
auto op_it = FindOp(*model, op);
CHECK(op_it != model->operators.end());
model->operators.erase(op_it);
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithOutput(
const Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& output : it->get()->outputs) {
if (output == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithOutput(
Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& output : it->get()->outputs) {
if (output == array_name) {
return it;
}
}
}
return model.operators.end();
}
Operator* GetOpWithOutput(const Model& model, const std::string& array_name) {
auto it = FindOpWithOutput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithInput(
const Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithInput(
Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOp(
const Model& model, const Operator* op) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
if (it->get() == op) {
return it;
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOp(Model& model,
const Operator* op) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
if (it->get() == op) {
return it;
}
}
return model.operators.end();
}
Operator* GetOpWithInput(const Model& model, const std::string& array_name) {
auto it = FindOpWithInput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
Operator* GetFirstOpWithInput(const Model& model,
const std::string& array_name) {
auto it = FindOpWithInput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
void ReplaceArrayUsage(Model* model, const std::string& old_array_name,
const std::string& new_array_name) {
for (auto& op_it : model->operators) {
Operator* op = op_it.get();
for (size_t i = 0; i < op->inputs.size(); ++i) {
if (op->inputs[i] == old_array_name) {
op->inputs[i] = new_array_name;
}
}
for (size_t i = 0; i < op->outputs.size(); ++i) {
if (op->outputs[i] == old_array_name) {
op->outputs[i] = new_array_name;
}
}
}
}
std::string FormatArraysList(const Model& model,
const std::vector<std::string>& list) {
if (list.empty()) {
return "[]";
}
std::string result = "";
if (list.size() > 1) {
result += "[ ";
}
for (std::size_t i = 0; i < list.size(); i++) {
if (i > 0) {
result += ", ";
}
result += list[i];
}
if (list.size() > 1) {
result += " ]";
}
return result;
}
const char* OperatorTypeName(OperatorType type) {
switch (type) {
#define HANDLE_OPERATORTYPENAME_CASE(c) \
case OperatorType::k##c: \
return #c;
HANDLE_OPERATORTYPENAME_CASE(Abs)
HANDLE_OPERATORTYPENAME_CASE(Add)
HANDLE_OPERATORTYPENAME_CASE(AddN)
HANDLE_OPERATORTYPENAME_CASE(AveragePool)
HANDLE_OPERATORTYPENAME_CASE(BatchMatMul)
HANDLE_OPERATORTYPENAME_CASE(BatchNormalization)
HANDLE_OPERATORTYPENAME_CASE(Conv)
HANDLE_OPERATORTYPENAME_CASE(Concatenation)
HANDLE_OPERATORTYPENAME_CASE(DepthwiseConv)
HANDLE_OPERATORTYPENAME_CASE(DepthToSpace)
HANDLE_OPERATORTYPENAME_CASE(SpaceToDepth)
HANDLE_OPERATORTYPENAME_CASE(FullyConnected)
HANDLE_OPERATORTYPENAME_CASE(HardSwish)
HANDLE_OPERATORTYPENAME_CASE(Dequantize)
HANDLE_OPERATORTYPENAME_CASE(L2Normalization)
HANDLE_OPERATORTYPENAME_CASE(LocalResponseNormalization)
HANDLE_OPERATORTYPENAME_CASE(Log)
HANDLE_OPERATORTYPENAME_CASE(Logistic)
HANDLE_OPERATORTYPENAME_CASE(LstmCell)
HANDLE_OPERATORTYPENAME_CASE(MaxPool)
HANDLE_OPERATORTYPENAME_CASE(L2Pool)
HANDLE_OPERATORTYPENAME_CASE(FakeQuant)
HANDLE_OPERATORTYPENAME_CASE(Mul)
HANDLE_OPERATORTYPENAME_CASE(RandomUniform)
HANDLE_OPERATORTYPENAME_CASE(Elu)
HANDLE_OPERATORTYPENAME_CASE(Relu)
HANDLE_OPERATORTYPENAME_CASE(Relu1)
HANDLE_OPERATORTYPENAME_CASE(Relu6)
HANDLE_OPERATORTYPENAME_CASE(PRelu)
HANDLE_OPERATORTYPENAME_CASE(ReorderAxes)
HANDLE_OPERATORTYPENAME_CASE(Softmax)
HANDLE_OPERATORTYPENAME_CASE(LogSoftmax)
HANDLE_OPERATORTYPENAME_CASE(Div)
HANDLE_OPERATORTYPENAME_CASE(Tanh)
HANDLE_OPERATORTYPENAME_CASE(Sin)
HANDLE_OPERATORTYPENAME_CASE(All)
HANDLE_OPERATORTYPENAME_CASE(Assert)
HANDLE_OPERATORTYPENAME_CASE(ExpandDims)
HANDLE_OPERATORTYPENAME_CASE(Fill)
HANDLE_OPERATORTYPENAME_CASE(FloorMod)
HANDLE_OPERATORTYPENAME_CASE(FloorDiv)
HANDLE_OPERATORTYPENAME_CASE(Greater)
HANDLE_OPERATORTYPENAME_CASE(GreaterEqual)
HANDLE_OPERATORTYPENAME_CASE(Identity)
HANDLE_OPERATORTYPENAME_CASE(Less)
HANDLE_OPERATORTYPENAME_CASE(LessEqual)
HANDLE_OPERATORTYPENAME_CASE(MatMul)
HANDLE_OPERATORTYPENAME_CASE(ReduceMax)
HANDLE_OPERATORTYPENAME_CASE(Maximum)
HANDLE_OPERATORTYPENAME_CASE(Merge)
HANDLE_OPERATORTYPENAME_CASE(ReduceMin)
HANDLE_OPERATORTYPENAME_CASE(Minimum)
HANDLE_OPERATORTYPENAME_CASE(Neg)
HANDLE_OPERATORTYPENAME_CASE(OneHot)
HANDLE_OPERATORTYPENAME_CASE(Pack)
HANDLE_OPERATORTYPENAME_CASE(Pad)
HANDLE_OPERATORTYPENAME_CASE(PadV2)
HANDLE_OPERATORTYPENAME_CASE(StridedSlice)
HANDLE_OPERATORTYPENAME_CASE(Range)
HANDLE_OPERATORTYPENAME_CASE(Rank)
HANDLE_OPERATORTYPENAME_CASE(Reshape)
HANDLE_OPERATORTYPENAME_CASE(Squeeze)
HANDLE_OPERATORTYPENAME_CASE(Rsqrt)
HANDLE_OPERATORTYPENAME_CASE(SegmentSum)
HANDLE_OPERATORTYPENAME_CASE(Shape)
HANDLE_OPERATORTYPENAME_CASE(Slice)
HANDLE_OPERATORTYPENAME_CASE(Split)
HANDLE_OPERATORTYPENAME_CASE(SplitV)
HANDLE_OPERATORTYPENAME_CASE(Sqrt)
HANDLE_OPERATORTYPENAME_CASE(Square)
HANDLE_OPERATORTYPENAME_CASE(Switch)
HANDLE_OPERATORTYPENAME_CASE(Sub)
HANDLE_OPERATORTYPENAME_CASE(Sum)
HANDLE_OPERATORTYPENAME_CASE(Tile)
HANDLE_OPERATORTYPENAME_CASE(Transpose)
HANDLE_OPERATORTYPENAME_CASE(TransposeConv)
HANDLE_OPERATORTYPENAME_CASE(Concat)
HANDLE_OPERATORTYPENAME_CASE(ConcatV2)
HANDLE_OPERATORTYPENAME_CASE(Cast)
HANDLE_OPERATORTYPENAME_CASE(Floor)
HANDLE_OPERATORTYPENAME_CASE(Ceil)
HANDLE_OPERATORTYPENAME_CASE(Round)
HANDLE_OPERATORTYPENAME_CASE(Gather)
HANDLE_OPERATORTYPENAME_CASE(GatherNd)
HANDLE_OPERATORTYPENAME_CASE(ResizeBilinear)
HANDLE_OPERATORTYPENAME_CASE(SpaceToBatchND)
HANDLE_OPERATORTYPENAME_CASE(BatchToSpaceND)
HANDLE_OPERATORTYPENAME_CASE(Mean)
HANDLE_OPERATORTYPENAME_CASE(ReduceProd)
HANDLE_OPERATORTYPENAME_CASE(Svdf)
HANDLE_OPERATORTYPENAME_CASE(ArgMax)
HANDLE_OPERATORTYPENAME_CASE(ArgMin)
HANDLE_OPERATORTYPENAME_CASE(TopK_V2)
HANDLE_OPERATORTYPENAME_CASE(Unsupported)
HANDLE_OPERATORTYPENAME_CASE(Exp)
HANDLE_OPERATORTYPENAME_CASE(DynamicPartition)
HANDLE_OPERATORTYPENAME_CASE(DynamicStitch)
HANDLE_OPERATORTYPENAME_CASE(Select)
HANDLE_OPERATORTYPENAME_CASE(SparseToDense)
HANDLE_OPERATORTYPENAME_CASE(Equal)
HANDLE_OPERATORTYPENAME_CASE(NotEqual)
HANDLE_OPERATORTYPENAME_CASE(Pow)
HANDLE_OPERATORTYPENAME_CASE(Any)
HANDLE_OPERATORTYPENAME_CASE(LogicalAnd)
HANDLE_OPERATORTYPENAME_CASE(LogicalNot)
HANDLE_OPERATORTYPENAME_CASE(LogicalOr)
HANDLE_OPERATORTYPENAME_CASE(CTCBeamSearchDecoder)
HANDLE_OPERATORTYPENAME_CASE(Unpack)
HANDLE_OPERATORTYPENAME_CASE(ZerosLike)
HANDLE_OPERATORTYPENAME_CASE(UnidirectionalSequenceLstm)
HANDLE_OPERATORTYPENAME_CASE(BidirectionalSequenceLstm)
HANDLE_OPERATORTYPENAME_CASE(BidirectionalSequenceRnn)
HANDLE_OPERATORTYPENAME_CASE(ResizeNearestNeighbor)
HANDLE_OPERATORTYPENAME_CASE(LeakyRelu)
HANDLE_OPERATORTYPENAME_CASE(SquaredDifference)
HANDLE_OPERATORTYPENAME_CASE(MirrorPad)
HANDLE_OPERATORTYPENAME_CASE(Unique)
HANDLE_OPERATORTYPENAME_CASE(UnidirectionalSequenceRnn)
HANDLE_OPERATORTYPENAME_CASE(ReverseV2)
HANDLE_OPERATORTYPENAME_CASE(Cos)
HANDLE_OPERATORTYPENAME_CASE(Where)
HANDLE_OPERATORTYPENAME_CASE(ReverseSequence)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiag)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiag)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiagV2)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiagV2)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiagV3)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiagV3)
HANDLE_OPERATORTYPENAME_CASE(ScatterNd)
default:
LOG(FATAL) << "Unhandled op type";
#undef HANDLE_OPERATORTYPENAME_CASE
}
}
std::string HelpfulOperatorTypeName(const Operator& op) {
if (op.type == OperatorType::kUnsupported) {
return toco::port::StringF(
"(Unsupported TensorFlow op: %s)",
static_cast<const TensorFlowUnsupportedOperator&>(op).tensorflow_op);
}
return OperatorTypeName(op.type);
}
bool OperatorSupportsFusedActivation(OperatorType type) {
switch (type) {
case OperatorType::kAdd:
case OperatorType::kAveragePool:
case OperatorType::kBatchNormalization:
case OperatorType::kConv:
case OperatorType::kDepthwiseConv:
case OperatorType::kDiv:
case OperatorType::kFullyConnected:
case OperatorType::kL2Pool:
case OperatorType::kMaxPool:
case OperatorType::kMul:
case OperatorType::kSub:
case OperatorType::kSquaredDifference:
return true;
default:
return false;
}
}
void LogSummary(int log_level, const Model& model) {
VLOG(log_level) << "Operators summary (" << model.operators.size()
<< " operators):";
std::unordered_multiset<OperatorType> ops_by_type;
for (const auto& op : model.operators) {
ops_by_type.insert(op->type);
}
auto it = ops_by_type.begin();
while (it != ops_by_type.end()) {
int count = ops_by_type.count(*it);
VLOG(log_level) << " " << OperatorTypeName(*it) << ": " << count;
std::advance(it, count);
}
}
void LogArray(int log_level, const Model& model, const std::string& name) {
VLOG(log_level) << "Array: " << name;
if (!model.HasArray(name)) {
VLOG(log_level) << " DOES NOT EXIST";
return;
}
const auto& array = model.GetArray(name);
VLOG(log_level) << " Data type: " << ArrayDataTypeName(array.data_type);
VLOG(log_level) << " Final type: "
<< ArrayDataTypeName(array.final_data_type);
if (array.buffer) {
VLOG(log_level) << " Constant Buffer";
}
if (array.alloc) {
VLOG(log_level) << " Transient Alloc";
}
if (array.has_shape()) {
const Shape& array_shape = array.shape();
if (array_shape.dimensions_count() == 0) {
VLOG(log_level) << " (Zero dimensions)";
} else {
std::string message = " Dims: ";
bool first = true;
for (const int dim : array_shape.dims()) {
if (!first) {
message += ", ";
}
first = false;
toco::port::AppendF(&message, "%d", dim);
}
VLOG(log_level) << message;
}
}
if (array.minmax) {
VLOG(log_level) << " MinMax: " << array.minmax->min << " .. "
<< array.minmax->max;
}
if (array.quantization_params) {
VLOG(log_level) << " QuantizationParams: zero_point="
<< static_cast<int>(array.quantization_params->zero_point)
<< ", scale=" << array.quantization_params->scale;
}
}
void DumpGraphvizVideoFrame(const Model& model) {
namespace port = toco::port;
const auto& dump_options = *GraphVizDumpOptions::singleton();
if (!dump_options.dump_graphviz_video) {
return;
}
CHECK(!dump_options.dump_graphviz.empty());
static int dump_id = 0;
static std::unordered_set<std::size_t> dump_hashes;
std::string graphviz_dump;
DumpGraphviz(model, &graphviz_dump,
toco::port::StringF("VIDEO frame:%05d", dump_id));
std::size_t hash = std::hash<std::string>{}(graphviz_dump);
if (!dump_hashes.count(hash)) {
LOG(INFO) << "DUMPING GRAPHVIZ VIDEO FRAME: " << dump_id;
dump_hashes.insert(hash);
const auto result = port::file::SetContents(
port::file::JoinPath(
dump_options.dump_graphviz,
toco::port::StringF("toco_video_%05d.dot", dump_id)),
graphviz_dump, port::file::Defaults());
QCHECK(result.ok()) << result.message();
dump_id++;
}
}
void LogDump(int log_level, const std::string& message, const Model& model) {
namespace port = toco::port;
const auto& dump_options = *GraphVizDumpOptions::singleton();
DumpGraphvizVideoFrame(model);
if (!dump_options.dump_graphviz.empty()) {
std::string graphviz_dump;
DumpGraphviz(model, &graphviz_dump, message);
const auto result = port::file::SetContents(
port::file::JoinPath(
dump_options.dump_graphviz,
absl::StrCat("toco_", absl::StrReplaceAll(message, {{" ", "_"}}),
".dot")),
graphviz_dump, port::file::Defaults());
QCHECK(result.ok()) << result.message();
}
if (!VLOG_IS_ON(log_level)) {
return;
}
VLOG(log_level) << "BEGIN DUMP OF TOCO MODEL (" << message << ")";
LogSummary(log_level, model);
std::unordered_set<std::string> already_printed_arrays;
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
if (!already_printed_arrays.count(input)) {
already_printed_arrays.insert(input);
LogArray(log_level, model, input);
}
}
VLOG(log_level) << HelpfulOperatorTypeName(*op) << " :";
VLOG(log_level) << " " << FormatArraysList(model, op->inputs) << " -> "
<< FormatArraysList(model, op->outputs);
if (op->fused_activation_function != FusedActivationFunctionType::kNone) {
VLOG(log_level) << " (with fused activation function)";
}
for (const auto& output : op->outputs) {
if (!already_printed_arrays.count(output)) {
already_printed_arrays.insert(output);
LogArray(log_level, model, output);
}
}
}
VLOG(log_level) << "END DUMP OF TOCO MODEL (" << message << ")";
}
void ExtendShape(Shape* shape, int new_shape_size) {
CHECK_GE(new_shape_size, shape->dimensions_count());
const int size_increase = new_shape_size - shape->dimensions_count();
auto* shape_dims = shape->mutable_dims();
shape_dims->insert(shape_dims->begin(), size_increase, 1);
}
void UnextendShape(Shape* shape, int new_shape_size) {
CHECK_LE(new_shape_size, shape->dimensions_count());
const int size_reduction = shape->dimensions_count() - new_shape_size;
for (int i = 0; i < size_reduction; i++) {
CHECK_EQ(shape->dims(i), 1);
}
std::vector<int>& shape_dims = *shape->mutable_dims();
shape_dims.erase(shape_dims.begin(), shape_dims.begin() + size_reduction);
}
template <typename Dims>
void CheckValidShapeDimensions(const Dims& dims) {
if (dims.size() == 1 && dims[0] == 0) {
return;
}
for (const auto& dim : dims) {
CHECK_GE(dim, 1);
}
}
void CheckValidShape(const Shape& shape) {
CheckValidShapeDimensions(shape.dims());
}
bool IsNonEmpty(const Shape& shape) {
for (int i = 0; i < shape.dimensions_count(); ++i) {
if (shape.dims(i) < 1) return false;
}
return true;
}
void CheckNonEmptyShapeDimensions(const Shape& shape) {
for (int i = 0; i < shape.dimensions_count(); ++i) {
    CHECK_GE(shape.dims()[i], 1) << "shape has dimension 0 at index " << i
<< ". shape = " << ShapeToString(shape);
}
}
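// Numpy-style broadcasting check: dimensions are compared from the trailing
// end, and each pair must be equal or contain a 1. For example, {8, 1, 6, 1}
// and {7, 1, 5} agree, while {2, 3} and {4, 3} do not.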
bool ShapesAgreeUpToBroadcasting(const Shape& shape0, const Shape& shape1) {
CheckNonEmptyShapeDimensions(shape0);
CheckNonEmptyShapeDimensions(shape1);
const Shape* longer = &shape0;
const Shape* shorter = &shape1;
if (shape1.dimensions_count() > shape0.dimensions_count()) {
longer = &shape1;
shorter = &shape0;
}
int longer_index = longer->dimensions_count() - 1;
int shorter_index = shorter->dimensions_count() - 1;
while (shorter_index >= 0) {
const int d_long = longer->dims(longer_index);
const int d_short = shorter->dims(shorter_index);
if ((d_long != d_short) && (d_long != 1) && (d_short != 1)) {
return false;
}
longer_index--;
shorter_index--;
}
return true;
}
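// Stricter than broadcasting: trailing dimensions must match exactly and any
// extra leading dimensions of the longer shape must all be 1, so {1, 1, 3, 4}
// agrees with {3, 4} but {2, 3, 4} does not.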
bool ShapesAgreeUpToExtending(const Shape& shape0, const Shape& shape1) {
CheckNonEmptyShapeDimensions(shape0);
CheckNonEmptyShapeDimensions(shape1);
const Shape* longer = &shape0;
const Shape* shorter = &shape1;
if (shape1.dimensions_count() > shape0.dimensions_count()) {
longer = &shape1;
shorter = &shape0;
}
int longer_index = longer->dimensions_count() - 1;
int shorter_index = shorter->dimensions_count() - 1;
while (shorter_index >= 0) {
const int d_long = longer->dims(longer_index);
const int d_short = shorter->dims(shorter_index);
if (d_long != d_short) {
return false;
}
longer_index--;
shorter_index--;
}
while (longer_index >= 0) {
const int d_long = longer->dims(longer_index);
if (d_long != 1) {
return false;
}
longer_index--;
}
return true;
}
int RequiredBufferSizeForShape(const Shape& shape) {
CheckValidShape(shape);
int max_offset = 1;
for (const auto& dim : shape.dims()) {
max_offset *= dim;
}
return max_offset;
}
bool IsConstantParameterArray(const Model& model, const std::string& name) {
if (!model.HasArray(name)) {
return false;
}
return !!model.GetArray(name).buffer;
}
namespace {
template <ArrayDataType A>
bool CompareArrayBuffers(const Array& lhs_array, const Array& rhs_array) {
CHECK(lhs_array.data_type == rhs_array.data_type) << "Data types must match";
CHECK(lhs_array.buffer) << "LHS must be constant";
CHECK(rhs_array.buffer) << "RHS must be constant";
const auto& lhs_data = lhs_array.GetBuffer<A>().data;
const auto& rhs_data = rhs_array.GetBuffer<A>().data;
CHECK_EQ(lhs_data.size(), rhs_data.size())
<< "Buffer sizes must match in element count";
for (int i = 0; i < lhs_data.size(); ++i) {
if (lhs_data[i] != rhs_data[i]) {
return false;
}
}
return true;
}
bool HaveSameMinMax(const Array& lhs_array, const Array& rhs_array) {
if (lhs_array.minmax || rhs_array.minmax) {
if (!lhs_array.minmax || !rhs_array.minmax) {
return false;
}
if (!(*lhs_array.minmax == *rhs_array.minmax)) {
return false;
}
}
return true;
}
bool HaveSameQuantizationParams(const Array& lhs_array,
const Array& rhs_array) {
if (lhs_array.quantization_params || rhs_array.quantization_params) {
if (!lhs_array.quantization_params || !rhs_array.quantization_params) {
return false;
}
if (!(*lhs_array.quantization_params == *rhs_array.quantization_params)) {
return false;
}
}
return true;
}
}
bool CompareConstantArrays(const Array& lhs_array, const Array& rhs_array) {
bool attrs_equal = lhs_array.shape() == rhs_array.shape() &&
lhs_array.data_type == rhs_array.data_type &&
lhs_array.final_data_type == rhs_array.final_data_type &&
HaveSameMinMax(lhs_array, rhs_array) &&
HaveSameQuantizationParams(lhs_array, rhs_array) &&
lhs_array.narrow_range == rhs_array.narrow_range;
if (!attrs_equal) {
return false;
}
switch (lhs_array.data_type) {
case ArrayDataType::kBool:
return CompareArrayBuffers<ArrayDataType::kBool>(lhs_array, rhs_array);
case ArrayDataType::kFloat:
return CompareArrayBuffers<ArrayDataType::kFloat>(lhs_array, rhs_array);
case ArrayDataType::kInt8:
return CompareArrayBuffers<ArrayDataType::kInt8>(lhs_array, rhs_array);
case ArrayDataType::kUint8:
return CompareArrayBuffers<ArrayDataType::kUint8>(lhs_array, rhs_array);
case ArrayDataType::kInt16:
return CompareArrayBuffers<ArrayDataType::kInt16>(lhs_array, rhs_array);
case ArrayDataType::kUint16:
return CompareArrayBuffers<ArrayDataType::kUint16>(lhs_array, rhs_array);
case ArrayDataType::kInt32:
return CompareArrayBuffers<ArrayDataType::kInt32>(lhs_array, rhs_array);
case ArrayDataType::kUint32:
return CompareArrayBuffers<ArrayDataType::kUint32>(lhs_array, rhs_array);
case ArrayDataType::kInt64:
return CompareArrayBuffers<ArrayDataType::kInt64>(lhs_array, rhs_array);
case ArrayDataType::kUint64:
return CompareArrayBuffers<ArrayDataType::kUint64>(lhs_array, rhs_array);
case ArrayDataType::kString:
return CompareArrayBuffers<ArrayDataType::kString>(lhs_array, rhs_array);
case ArrayDataType::kComplex64:
return CompareArrayBuffers<ArrayDataType::kComplex64>(lhs_array,
rhs_array);
default:
LOG(FATAL) << "Unsupported data type: "
<< ArrayDataTypeName(lhs_array.data_type);
return false;
}
}
namespace {
std::string SanitizeNameForTFNode(const std::string& array_name) {
auto node_name = array_name;
std::replace(node_name.begin(), node_name.end(), ':', '_');
return node_name;
}
void CheckInputArraysAreNotOutputArrays(const ModelFlags& model_flags) {
for (const auto& input_array : model_flags.input_arrays()) {
for (const std::string& output_array : model_flags.output_arrays()) {
QCHECK_NE(input_array.name(), output_array)
<< "The array " << output_array
<< " is listed in both --input_arrays and --output_arrays.";
}
}
}
bool IsAsciiPrintable(const std::string& name) {
for (char c : name) {
if (!absl::ascii_isprint(c)) {
return false;
}
}
return true;
}
std::string DumpAscii(const std::string& name) {
std::string result;
port::AppendF(&result, "ASCII | Hex\n");
port::AppendF(&result, "------+----\n");
for (char c : name) {
if (absl::ascii_isprint(c)) {
port::AppendF(&result, "%c | %x\n", c, c);
} else {
port::AppendF(&result, " | %x Not ASCII printable!\n", c);
}
}
return result;
}
void CheckNonAsciiIOArrays(const ModelFlags& model_flags) {
if (model_flags.allow_nonascii_arrays()) {
return;
}
for (const auto& input_array : model_flags.input_arrays()) {
QCHECK(IsAsciiPrintable(input_array.name()))
<< "Non-ASCII-printable character found in --input_arrays: "
<< input_array.name()
<< ". Pass --allow_nonascii_arrays to allow that. "
<< "Here is a dump of the string:\n\n"
<< DumpAscii(input_array.name());
}
for (const std::string& output_array : model_flags.output_arrays()) {
QCHECK(IsAsciiPrintable(output_array))
<< "Non-ASCII-printable character found in --output_arrays: "
<< output_array << ". Pass --allow_nonascii_arrays to allow that. "
<< "Here is a dump of the string:\n\n"
<< DumpAscii(output_array);
}
}
void CheckNonExistentIOArrays(const Model& model) {
if (model.flags.allow_nonexistent_arrays()) {
return;
}
static constexpr char general_comment[] =
"Is it a typo? This should not happen. If you trigger this error "
"please send a bug report (with code to reproduce this error), to the "
"TensorFlow Lite team.";
for (const std::string& output_array : model.flags.output_arrays()) {
if (IsConstantParameterArray(model, output_array)) {
continue;
}
QCHECK(GetOpWithOutput(model, output_array))
<< "Specified output array \"" << output_array
<< "\" is not produced by any op in this graph. " << general_comment;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (!rnn_state.discardable()) {
QCHECK(GetOpWithInput(model, rnn_state.state_array()))
<< "Specified RNN state \"" << rnn_state.state_array()
<< "\" is not consumed by any op in this graph. " << general_comment;
QCHECK(GetOpWithOutput(model, rnn_state.back_edge_source_array()))
<< "Specified RNN back-edge source array \""
<< rnn_state.back_edge_source_array()
<< "\" is not produced by any op in this graph. " << general_comment;
}
}
}
}
void CheckNoMissingArray(const Model& model) {
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
CHECK(model.HasArray(input) || model.optional_arrays.count(input))
<< "Input: " << input << " missing for op: " << op->outputs[0] << ".";
}
for (const auto& output : op->outputs) {
CHECK(model.HasArray(output)) << "Output: " << output << " missing.";
}
}
CheckNonExistentIOArrays(model);
}
void FixNoMissingArray(Model* model) {
for (const auto& op : model->operators) {
for (const auto& input : op->inputs) {
if (!model->HasArray(input) && !model->IsOptionalArray(input)) {
model->GetOrCreateArray(input);
}
}
for (const auto& output : op->outputs) {
if (!model->HasArray(output) && !model->IsOptionalArray(output)) {
model->GetOrCreateArray(output);
}
}
}
if (model->flags.allow_nonexistent_arrays()) {
for (const std::string& output_array : model->flags.output_arrays()) {
model->GetOrCreateArray(output_array);
}
for (const auto& rnn_state : model->flags.rnn_states()) {
model->GetOrCreateArray(rnn_state.state_array());
model->GetOrCreateArray(rnn_state.back_edge_source_array());
}
}
}
void CheckNoOrphanedArray(const Model& model) {
std::unordered_set<std::string> arrays_without_known_use;
for (const auto& array : model.GetArrayMap()) {
if (IsDiscardableArray(model, array.first)) {
arrays_without_known_use.insert(array.first);
}
}
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
arrays_without_known_use.erase(input);
}
for (const auto& output : op->outputs) {
arrays_without_known_use.erase(output);
}
}
for (const auto& rnn_state : model.flags.rnn_states()) {
arrays_without_known_use.erase(rnn_state.state_array());
arrays_without_known_use.erase(rnn_state.back_edge_source_array());
}
if (!arrays_without_known_use.empty()) {
for (const auto& array : arrays_without_known_use) {
LOG(INFO) << "Error: Orphaned array: " << array;
}
}
CHECK(arrays_without_known_use.empty());
}
void FixNoOrphanedArray(Model* model) {
std::unordered_set<std::string> arrays_without_known_use;
for (const auto& array : model->GetArrayMap()) {
arrays_without_known_use.insert(array.first);
}
for (const auto& op : model->operators) {
for (const auto& input : op->inputs) {
arrays_without_known_use.erase(input);
}
for (const auto& output : op->outputs) {
arrays_without_known_use.erase(output);
}
}
for (const auto& rnn_state : model->flags.rnn_states()) {
arrays_without_known_use.erase(rnn_state.state_array());
arrays_without_known_use.erase(rnn_state.back_edge_source_array());
}
for (const auto& array : arrays_without_known_use) {
if (IsDiscardableArray(*model, array)) {
model->EraseArray(array);
}
}
}
void CheckEachArray(const Model& model) {
for (const auto& array_entry : model.GetArrayMap()) {
const auto& array = array_entry.second;
CHECK(!array->buffer || !array->alloc) << "Tensor: " << array_entry.first;
if (array->buffer) {
CHECK(array->buffer->type == array->data_type)
<< "Tensor: " << array_entry.first;
CHECK(array->has_shape()) << array_entry.first;
CheckValidShape(array->shape());
CHECK_EQ(array->buffer->Length(),
RequiredBufferSizeForShape(array->shape()))
<< "Tensor: " << array_entry.first;
}
const std::string& name = array_entry.first;
auto colon_pos = name.find_first_of(':');
if (colon_pos != std::string::npos) {
CHECK_EQ(name.substr(colon_pos + 1).find_first_not_of("0123456789"),
std::string::npos)
<< "Array '" << name << "' has non-digit characters after colon.";
}
CHECK_GT(colon_pos, 0) << "Array '" << name
<< "' must not start with a colon.";
}
}
void CheckOperatorOrdering(const Model& model) {
std::unordered_set<std::string> arrays_behind_us;
for (const auto& array_entry : model.GetArrayMap()) {
if (!GetOpWithOutput(model, array_entry.first)) {
arrays_behind_us.insert(array_entry.first);
}
}
arrays_behind_us.insert(model.optional_arrays.begin(),
model.optional_arrays.end());
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
if (!IsConstantParameterArray(model, input)) {
CHECK(arrays_behind_us.count(input));
}
}
for (const auto& output : op->outputs) {
CHECK(!arrays_behind_us.count(output));
arrays_behind_us.insert(output);
}
}
for (const std::string& output_array : model.flags.output_arrays()) {
CHECK(arrays_behind_us.count(output_array));
}
}
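// Topologically re-sorts model->operators by repeatedly emitting any operator
// whose non-constant inputs have already been produced. If no full ordering
// exists, logs a trace back to the first array that is never produced (or to a
// cycle) and aborts.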
void FixOperatorOrdering(Model* model) {
std::unordered_set<std::string> arrays_behind_us;
for (const auto& array_entry : model->GetArrayMap()) {
if (!GetOpWithOutput(*model, array_entry.first)) {
arrays_behind_us.insert(array_entry.first);
}
}
arrays_behind_us.insert(model->optional_arrays.begin(),
model->optional_arrays.end());
std::vector<std::unique_ptr<Operator>> old_operators;
std::swap(old_operators, model->operators);
std::set<std::size_t> remaining;
for (std::size_t i = 0; i < old_operators.size(); i++) {
remaining.insert(i);
}
std::unordered_map<std::string, std::string> reason_why_leftover;
while (true) {
bool inserted_something = false;
for (const auto& i : remaining) {
bool can_insert = true;
auto& op = old_operators[i];
CHECK(op);
for (const auto& input : op->inputs) {
if (!IsConstantParameterArray(*model, input) &&
!arrays_behind_us.count(input)) {
for (const std::string& output : op->outputs) {
reason_why_leftover[output] = input;
}
can_insert = false;
break;
}
}
if (can_insert) {
model->operators.emplace_back(nullptr);
for (const auto& output : op->outputs) {
arrays_behind_us.insert(output);
}
std::swap(op, model->operators.back());
remaining.erase(i);
inserted_something = true;
break;
}
}
if (!inserted_something) {
break;
}
}
if (!remaining.empty()) {
LOG(ERROR)
<< "No viable ordering of operators was found. "
<< "Here is a 'backtrace' of at least one part of the graph that is "
<< "problematic. It starts with the first operator that has as "
<< "problematic input array, and then walks back the graph to "
<< "the operator that produced that input array, etc., until we find "
<< "the root cause:";
LOG(ERROR) << "BEGIN TRACE OF OPERATOR WITH BAD INPUT";
LOG(ERROR) << "Here is the first-encountered operator with a bad input: ";
const Operator* bad_op = old_operators[*remaining.begin()].get();
std::unordered_set<std::string> bad_inputs_already_traced;
while (true) {
LOG(ERROR) << HelpfulOperatorTypeName(*bad_op) << " : "
<< FormatArraysList(*model, bad_op->inputs) << " -> "
<< FormatArraysList(*model, bad_op->outputs);
bool found_bad_output = false;
std::string bad_output;
for (const std::string& output : bad_op->outputs) {
if (reason_why_leftover.count(output)) {
found_bad_output = true;
bad_output = output;
break;
}
}
CHECK(found_bad_output);
const std::string& bad_input = reason_why_leftover[bad_output];
LOG(ERROR) << "The bad input here is: " << bad_input;
if (bad_inputs_already_traced.count(bad_input)) {
LOG(FATAL)
<< "Cycle found! We already encountered that "
<< "input array, " << bad_input << ", earlier in the "
<< "above trace! We expect graphs to be acyclic, even "
<< "RNNs. Let us know if some graph actually needs to have "
<< "cycles, but first, please check if it really is "
<< "an *inference* graph. *Training* graphs are out-of-scope "
<< "for toco.";
}
bad_inputs_already_traced.insert(bad_input);
bad_op = nullptr;
for (const auto& i : remaining) {
const Operator* op = old_operators[i].get();
for (const std::string& output : op->outputs) {
if (bad_input == output) {
bad_op = op;
break;
}
}
if (bad_op) {
break;
}
}
if (!bad_op) {
LOG(ERROR) << "And that's the root cause: "
<< "that array, " << bad_input << ", isn't produced by any "
<< "operator, or provided in any other way.";
LOG(ERROR) << "END TRACE OF OPERATOR WITH BAD INPUT";
LOG(FATAL) << "(The above was a multi-line fatal error)";
}
LOG(ERROR) << "And that array is the output of the following operator:";
}
}
CHECK(remaining.empty())
<< "Should never get here! In case of bad graph, "
<< "the above code should have generated a FATAL error already!";
}
void CheckInvariants(const Model& model) {
CheckInputArraysAreNotOutputArrays(model.flags);
CheckNonAsciiIOArrays(model.flags);
CheckNoMissingArray(model);
CheckNoOrphanedArray(model);
CheckEachArray(model);
CheckOperatorOrdering(model);
}
void CheckCountInRange(const ::toco::ModelFlags::ModelCheck& model_check,
const int count, const std::string& count_description) {
if (model_check.count_min() >= 0) {
CHECK_GE(count, model_check.count_min())
<< "Mismatch in " << count_description << ": count was " << count
<< ", but the specified "
<< (model_check.count_max() > model_check.count_min() ? "minimum"
: "value")
<< " was " << model_check.count_min() << ".";
}
if (model_check.count_max() > model_check.count_min()) {
CHECK_LE(count, model_check.count_max())
<< "Mismatch in " << count_description << ": count was " << count
<< ", but the specified maximum was " << model_check.count_max() << ".";
}
}
void CheckModelCounts(const Model& model) {
std::unordered_multiset<OperatorType> ops_by_type;
std::unordered_map<std::string, OperatorType> op_type_by_name;
if (model.flags.model_checks_size() == 0) {
return;
}
for (const auto& op : model.operators) {
ops_by_type.insert(op->type);
op_type_by_name[OperatorTypeName(op->type)] = op->type;
}
for (const auto& model_check : model.flags.model_checks()) {
std::string count_type = model_check.count_type();
if (count_type == "None") {
continue;
} else if (count_type == "Arrays") {
CheckCountInRange(model_check, model.GetArrayMap().size(),
"count of arrays");
} else if (count_type == "Total") {
CheckCountInRange(model_check, model.operators.size(),
"count of all operator instances");
} else {
const int found_count =
op_type_by_name.count(count_type) > 0
? ops_by_type.count(op_type_by_name[count_type])
: 0;
CheckCountInRange(model_check, found_count,
"count of instances of " + count_type + " operator");
}
}
}
void FixEdgeArrays(Model* model) {
for (const std::string& output_array_name : model->flags.output_arrays()) {
if (!GetOpWithOutput(*model, output_array_name)) {
LOG(WARNING) << "Fixing constant output array " << output_array_name
<< " by inserting a copy. This is not optimal.";
std::string intermediate_array_name =
AvailableArrayName(*model, output_array_name + "_copy");
CloneArray(model, output_array_name, intermediate_array_name);
InsertCopyOperator(model, intermediate_array_name, output_array_name);
}
}
}
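// Merges duplicate constant arrays of at least 'min_size' bytes (string arrays
// are always considered): each discardable duplicate is redirected to the
// first occurrence and then erased from the model.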
void DedupeConstantArrays(Model* model, size_t min_size) {
const auto& array_map = model->GetArrayMap();
for (auto lhs_array_it = array_map.begin(); lhs_array_it != array_map.end();
++lhs_array_it) {
const auto& lhs_array_name = lhs_array_it->first;
const auto& lhs_array = *lhs_array_it->second;
if (!IsConstantParameterArray(*model, lhs_array_name)) {
continue;
}
ArrayDataType final_data_type =
lhs_array.final_data_type != ArrayDataType::kNone
? lhs_array.final_data_type
: lhs_array.data_type;
if (final_data_type != ArrayDataType::kString) {
size_t array_byte_size =
lhs_array.buffer->Length() * ElementSize(final_data_type);
if (array_byte_size < min_size) {
continue;
}
}
auto next_lhs_array_it = lhs_array_it;
++next_lhs_array_it;
for (auto rhs_array_it = next_lhs_array_it;
rhs_array_it != array_map.end();) {
const auto& rhs_array_name = rhs_array_it->first;
const auto& rhs_array = *rhs_array_it->second;
++rhs_array_it;
if (!IsConstantParameterArray(*model, rhs_array_name)) {
continue;
}
if (!IsDiscardableArray(*model, rhs_array_name)) {
continue;
}
if (!CompareConstantArrays(lhs_array, rhs_array)) {
continue;
}
VLOG(1) << "Deduplicating arrays; using " << lhs_array_name
<< " in place of " << rhs_array_name;
ReplaceArrayUsage(model, rhs_array_name, lhs_array_name);
model->EraseArray(rhs_array_name);
}
}
}
namespace {
void CopyArrayAttribs(const Array& source_array, Array* target_array) {
target_array->data_type = source_array.data_type;
target_array->final_data_type = source_array.final_data_type;
if (source_array.has_shape()) {
target_array->copy_shape(source_array.shape());
}
if (source_array.minmax) {
target_array->GetOrCreateMinMax() = source_array.GetMinMax();
} else {
target_array->minmax.reset();
}
if (source_array.quantization_params) {
target_array->GetOrCreateQuantizationParams() =
source_array.GetQuantizationParams();
} else {
target_array->quantization_params.reset();
}
}
}
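// Implements the copy as a Reshape operator whose second input is a newly
// created constant int32 array holding the source array's shape.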
void InsertCopyOperator(Model* model, const std::string& source_array_name,
const std::string& target_array_name) {
const Array& source_array = model->GetArray(source_array_name);
std::vector<int> shape = source_array.shape().dims();
Array& target_array = model->GetOrCreateArray(target_array_name);
target_array.buffer.reset();
CopyArrayAttribs(source_array, &target_array);
auto* copy_op = new TensorFlowReshapeOperator;
copy_op->inputs = {
source_array_name,
CreateInt32Array(
model, AvailableArrayName(*model, target_array_name + "_copy_shape"),
shape)};
copy_op->outputs = {target_array_name};
if (target_array.has_shape()) {
copy_op->shape = target_array.shape().dims();
}
model->operators.emplace_back(copy_op);
}
void CloneArray(Model* model, const std::string& source_array_name,
const std::string& target_array_name) {
CHECK(!model->HasArray(target_array_name));
const Array& source_array = model->GetArray(source_array_name);
Array& target_array = model->GetOrCreateArray(target_array_name);
CopyArrayAttribs(source_array, &target_array);
if (!source_array.buffer) {
return;
}
switch (source_array.data_type) {
case ArrayDataType::kBool:
CopyArrayBuffer<ArrayDataType::kBool>(source_array, &target_array);
break;
case ArrayDataType::kFloat:
CopyArrayBuffer<ArrayDataType::kFloat>(source_array, &target_array);
break;
case ArrayDataType::kInt8:
CopyArrayBuffer<ArrayDataType::kInt8>(source_array, &target_array);
break;
case ArrayDataType::kUint8:
CopyArrayBuffer<ArrayDataType::kUint8>(source_array, &target_array);
break;
case ArrayDataType::kInt16:
CopyArrayBuffer<ArrayDataType::kInt16>(source_array, &target_array);
break;
case ArrayDataType::kUint16:
CopyArrayBuffer<ArrayDataType::kUint16>(source_array, &target_array);
break;
case ArrayDataType::kInt32:
CopyArrayBuffer<ArrayDataType::kInt32>(source_array, &target_array);
break;
case ArrayDataType::kUint32:
CopyArrayBuffer<ArrayDataType::kUint32>(source_array, &target_array);
break;
case ArrayDataType::kInt64:
CopyArrayBuffer<ArrayDataType::kInt64>(source_array, &target_array);
break;
case ArrayDataType::kUint64:
CopyArrayBuffer<ArrayDataType::kUint64>(source_array, &target_array);
break;
case ArrayDataType::kString:
CopyArrayBuffer<ArrayDataType::kString>(source_array, &target_array);
break;
case ArrayDataType::kComplex64:
CopyArrayBuffer<ArrayDataType::kComplex64>(source_array, &target_array);
break;
default:
LOG(FATAL) << "Unsupported data type: "
<< ArrayDataTypeName(source_array.data_type);
return;
}
}
void MakeArrayDims(int num_dims, int batch, int height, int width, int depth,
std::vector<int>* out_dims) {
CHECK(out_dims->empty());
if (num_dims == 0) {
return;
} else if (num_dims == 1) {
CHECK_EQ(batch, 1);
*out_dims = {depth};
} else if (num_dims == 2) {
*out_dims = {batch, depth};
} else if (num_dims == 3) {
CHECK_EQ(batch, 1);
*out_dims = {height, width, depth};
} else if (num_dims == 4) {
*out_dims = {batch, height, width, depth};
} else {
LOG(FATAL) << "Should not get here: " << num_dims;
}
}
void CreateOrCheckRnnStateArray(const std::string& name, int size,
int state_num_dims, Model* model) {
int batch = 1;
int num_dims = -1;
if (state_num_dims > 0) {
num_dims = state_num_dims;
} else {
for (const auto& input_array : model->flags.input_arrays()) {
if (input_array.name() == name || num_dims == -1) {
num_dims = input_array.shape().dims_size();
if (num_dims > 0) {
batch = input_array.shape().dims(0);
}
}
}
}
Array& array = model->GetOrCreateArray(name);
if (array.has_shape()) {
num_dims = array.shape().dimensions_count();
}
if (!array.has_shape() && num_dims >= 0) {
Shape* shape = array.mutable_shape();
std::vector<int> dims;
MakeArrayDims(num_dims, batch, 1, 1, size, &dims);
*shape->mutable_dims() = dims;
}
}
void ResolveModelFlags(const ModelFlags& model_flags, Model* model) {
for (const auto& specified_input_array : model_flags.input_arrays()) {
toco::InputArray* dst_input_array = nullptr;
for (int i = 0; i < model->flags.input_arrays_size(); i++) {
toco::InputArray* candidate_dst_input_array =
model->flags.mutable_input_arrays(i);
if (candidate_dst_input_array->name() == specified_input_array.name()) {
dst_input_array = candidate_dst_input_array;
break;
}
}
if (!dst_input_array) {
if (model->flags.input_arrays_size() == 1 &&
model_flags.input_arrays_size() == 1 &&
!specified_input_array.has_name()) {
dst_input_array = model->flags.mutable_input_arrays(0);
}
}
if (!dst_input_array) {
dst_input_array = model->flags.add_input_arrays();
dst_input_array->set_name(specified_input_array.name());
}
#define RESOLVE_MODEL_FLAG(field_name) \
if (specified_input_array.has_##field_name()) { \
if (dst_input_array->has_##field_name()) { \
QCHECK_EQ(dst_input_array->field_name(), \
specified_input_array.field_name()) \
<< "For input array '" << dst_input_array->name() << "', " \
<< "specified " #field_name " flag with value: " \
<< specified_input_array.field_name() \
<< " does not agree with already defined " #field_name \
" of this model, with value: " \
<< specified_input_array.field_name(); \
} else { \
dst_input_array->set_##field_name(specified_input_array.field_name()); \
} \
}
RESOLVE_MODEL_FLAG(std_value);
RESOLVE_MODEL_FLAG(mean_value);
#undef RESOLVE_MODEL_FLAG
if (specified_input_array.has_shape()) {
if (dst_input_array->has_shape()) {
QCHECK_EQ(specified_input_array.shape().dims_size(),
dst_input_array->shape().dims_size())
<< "For input array '" << specified_input_array.name() << "', "
<< "size of specified input shape flag with size: "
<< specified_input_array.shape().dims_size()
<< " does not agree with already defined input shape"
" of this model, with size: "
<< dst_input_array->shape().dims_size();
for (int i = 1; i < specified_input_array.shape().dims_size(); i++) {
QCHECK_EQ(specified_input_array.shape().dims(i),
dst_input_array->shape().dims(i))
<< "At dimension number " << i << " of input array "
<< specified_input_array.name() << ", the specified shape's "
<< "dimension flag with dimension: "
<< specified_input_array.shape().dims(i)
<< " does not agree with already defined shape"
<< " of this model, with dimension: "
<< dst_input_array->shape().dims(i);
}
} else {
*dst_input_array->mutable_shape() = specified_input_array.shape();
}
}
if (specified_input_array.has_data_type()) {
QCHECK(!dst_input_array->has_data_type());
dst_input_array->set_data_type(specified_input_array.data_type());
}
}
if (model_flags.output_arrays_size() > 0) {
model->flags.mutable_output_arrays()->CopyFrom(model_flags.output_arrays());
}
#define RESOLVE_MODEL_FLAG(name) \
if (model_flags.has_##name()) { \
if (model->flags.has_##name()) { \
QCHECK_EQ(model_flags.name(), model->flags.name()) \
<< "Specified " #name " flag with value: " << model_flags.name() \
<< " does not agree with already defined " #name \
" of this model, with value: " \
<< model->flags.name(); \
} else { \
model->flags.set_##name(model_flags.name()); \
} \
}
RESOLVE_MODEL_FLAG(variable_batch)
#undef RESOLVE_MODEL_FLAG
if (!model_flags.rnn_states().empty()) {
model->flags.mutable_rnn_states()->CopyFrom(model_flags.rnn_states());
}
if (model->flags.model_checks_size() == 0) {
model->flags.mutable_model_checks()->CopyFrom(model_flags.model_checks());
}
QCHECK_GT(model->flags.output_arrays_size(), 0)
<< "This model does not define output arrays, so a "
"--output_arrays flag must be given on the command-line.";
for (auto& input_array_proto : *model->flags.mutable_input_arrays()) {
auto& input_array = model->GetOrCreateArray(input_array_proto.name());
if (input_array_proto.has_data_type()) {
const ArrayDataType specified_type =
ConvertIODataTypeToArrayDataType(input_array_proto.data_type());
QCHECK(specified_type != ArrayDataType::kNone);
if (input_array.data_type != ArrayDataType::kNone) {
QCHECK(specified_type == input_array.data_type)
<< "For input array " << input_array_proto.name()
<< " the specified input data type "
<< IODataType_Name(input_array_proto.data_type())
<< " conflicts with the existing type.";
}
input_array.data_type = specified_type;
}
if (input_array.data_type == ArrayDataType::kNone) {
input_array.data_type = ArrayDataType::kFloat;
}
if (!input_array.has_shape()) {
if (input_array_proto.has_shape()) {
auto& input_array_dims = *input_array.mutable_shape()->mutable_dims();
CheckValidShapeDimensions(input_array_proto.shape().dims());
for (const auto& dim : input_array_proto.shape().dims()) {
input_array_dims.push_back(dim);
}
}
} else {
if (input_array_proto.has_shape()) {
const auto& input_array_dims =
*input_array.mutable_shape()->mutable_dims();
CHECK_EQ(input_array_dims.size(),
input_array_proto.shape().dims_size());
for (int i = 0; i < input_array_dims.size(); i++) {
CHECK_EQ(input_array_dims[i], input_array_proto.shape().dims(i));
}
} else {
for (int i = 0; i < input_array.shape().dimensions_count(); i++) {
input_array_proto.mutable_shape()->add_dims(
input_array.shape().dims(i));
}
}
}
const float mean_value = input_array_proto.mean_value();
const float std_value = input_array_proto.std_value();
MinMax input_minmax;
float qmin = 0, qmax = 255;
if (input_array.data_type == ArrayDataType::kInt16) {
qmin = -32768;
qmax = 32767;
}
input_minmax.min = (qmin - mean_value) / std_value;
input_minmax.max = (qmax - mean_value) / std_value;
if (!input_array.minmax) {
input_array.GetOrCreateMinMax() = input_minmax;
}
}
for (const auto& rnn_state : model->flags.rnn_states()) {
CreateOrCheckRnnStateArray(rnn_state.state_array(), rnn_state.size(),
rnn_state.num_dims(), model);
}
model->flags.set_change_concat_input_ranges(
model_flags.change_concat_input_ranges());
model->flags.set_allow_nonascii_arrays(model_flags.allow_nonascii_arrays());
model->flags.set_allow_nonexistent_arrays(
model_flags.allow_nonexistent_arrays());
CHECK(!model->flags.has_arrays_extra_info());
*model->flags.mutable_arrays_extra_info() = model_flags.arrays_extra_info();
}
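// Every float, non-constant input array feeding an operator must carry MinMax
// information before quantization can proceed; otherwise this aborts with
// guidance on how to provide ranges.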
void CheckIsReadyForQuantization(const Model& model) {
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
const auto& input_array = model.GetArray(input);
if (input_array.data_type != ArrayDataType::kFloat) {
continue;
}
if (input_array.minmax) {
continue;
}
if (input_array.buffer) {
continue;
}
LOG(FATAL)
<< "Array " << input << ", which is an input to the "
<< HelpfulOperatorTypeName(*op) << " operator producing the output "
<< "array " << op->outputs[0] << ", is lacking min/max data, "
<< "which is necessary for quantization. If accuracy matters, either "
<< "target a non-quantized output format, or run quantized training "
<< "with your model from a floating point checkpoint to change the "
<< "input graph to contain min/max information. If you don't care "
<< "about accuracy, you can pass --default_ranges_min= and "
<< "--default_ranges_max= for easy experimentation.";
}
}
}
int ElementSize(ArrayDataType data_type) {
switch (data_type) {
case ArrayDataType::kBool:
return sizeof(bool);
case ArrayDataType::kFloat:
return 4;
case ArrayDataType::kInt8:
return 1;
case ArrayDataType::kUint8:
return 1;
case ArrayDataType::kInt16:
return 2;
case ArrayDataType::kUint16:
return 2;
case ArrayDataType::kInt32:
return 4;
case ArrayDataType::kUint32:
return 4;
case ArrayDataType::kInt64:
return 8;
case ArrayDataType::kUint64:
return 8;
case ArrayDataType::kComplex64:
return 8;
case ArrayDataType::kComplex128:
return 16;
case ArrayDataType::kFloat64:
return 8;
case ArrayDataType::kString:
LOG(FATAL) << "Transient arrays with strings are not supported yet";
return 0;
default:
LOG(FATAL) << "Unknown data_type = " << static_cast<int>(data_type);
return 0;
}
}
void DropMinMax(Model* model, const std::string& array_name) {
auto& array = model->GetArray(array_name);
if (!!array.minmax) {
LOG(WARNING) << "Dropping MinMax information in array " << array_name
<< ". Expect inaccuracy in quantized inference.";
array.minmax = nullptr;
}
}
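// An array may be allocated as a transient buffer only if it is not optional,
// not a model input or output, has no constant buffer, has a known shape, and
// does not hold strings.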
bool IsAllocatableTransientArray(const Model& model,
const std::string& array_name) {
if (model.IsOptionalArray(array_name)) return false;
if (IsInputArray(model, array_name) || IsOutputArray(model, array_name)) {
return false;
}
const auto& array = &model.GetArray(array_name);
if (!!array->buffer) {
return false;
}
if (!array->has_shape()) {
return false;
}
if (array->final_data_type == ArrayDataType::kString ||
array->data_type == ArrayDataType::kString) {
return false;
}
return true;
}
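// Returns a sanitized version of `name` that does not collide with any
// existing or optional array, appending a numeric suffix if needed.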
std::string AvailableArrayName(const Model& model, const std::string& name) {
std::string sanitized_name = SanitizeNameForTFNode(name);
if (!model.HasArray(sanitized_name) &&
!model.IsOptionalArray(sanitized_name)) {
return sanitized_name;
}
const int kNumSuffixesToTry = 1000;
for (int i = 0; i < kNumSuffixesToTry; i++) {
const std::string& name_with_suffix =
toco::port::StringF("%s_%d", sanitized_name, i);
if (!model.HasArray(name_with_suffix) &&
!model.IsOptionalArray(name_with_suffix)) {
return name_with_suffix;
}
}
LOG(FATAL) << "Could not find an available array name starting with "
<< sanitized_name << ". Tried " << kNumSuffixesToTry
<< " suffixes, all were taken!";
return "";
}
std::string ShapeToString(const Shape& shape) {
if (shape.dimensions_count() == 0) {
return "[]";
}
return absl::StrCat("[ ", absl::StrJoin(shape.dims(), ", "), " ]");
}
void PrintArrayShape(Model* model, const std::string& name) {
if (!model->GetArray(name).has_shape()) {
LOG(INFO) << name << " has no shape";
return;
}
LOG(INFO) << name
<< " has shape: " << ShapeToString(model->GetArray(name).shape());
}
bool IsArrayFullyConnectedWeights(const Model& model, const std::string& name) {
bool is_fc_weights = false;
bool is_something_else = false;
for (const auto& op : model.operators) {
for (int input_index = 0; input_index < op->inputs.size(); input_index++) {
if (op->inputs[input_index] == name) {
if (op->type == OperatorType::kFullyConnected && input_index == 1) {
is_fc_weights = true;
} else {
is_something_else = true;
}
}
}
}
CHECK(!(is_fc_weights && is_something_else));
return is_fc_weights;
}
std::string CreateInt32Array(Model* model, const std::string& param_name,
const std::vector<int>& value) {
auto param_array_name = AvailableArrayName(*model, param_name);
auto& param_array = model->GetOrCreateArray(param_array_name);
param_array.mutable_shape()->ReplaceDims({static_cast<int>(value.size())});
param_array.data_type = ArrayDataType::kInt32;
auto& param_array_data =
param_array.GetMutableBuffer<ArrayDataType::kInt32>().data;
param_array_data.resize(RequiredBufferSizeForShape(param_array.shape()));
for (int i = 0; i < value.size(); ++i) {
param_array_data[i] = value[i];
}
return param_array_name;
}
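// Rough per-operator estimate of the number of arithmetic operations
// performed (e.g. 2 * weights size per output column for fully-connected and
// convolution ops). Returns false when the shapes needed for the estimate are
// not yet known.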
bool EstimateArithmeticOpsCount(const Model& model, const Operator& op,
int64_t* result) {
switch (op.type) {
case OperatorType::kFullyConnected:
case OperatorType::kConv:
case OperatorType::kDepthwiseConv: {
const auto& output_array = model.GetArray(op.outputs[0]);
const auto& weights_array = model.GetArray(op.inputs[1]);
if (!output_array.has_shape() || !weights_array.has_shape()) {
return false;
}
int64_t cols = 1;
for (int i = 0; i < output_array.shape().dimensions_count() - 1; i++) {
cols *= output_array.shape().dims(i);
}
const int64_t cost_per_col =
2 * RequiredBufferSizeForShape(weights_array.shape());
*result = cost_per_col * cols;
if (op.inputs.size() > 2) {
*result += RequiredBufferSizeForShape(output_array.shape());
}
break;
}
case OperatorType::kTransposeConv: {
const auto& input_array = model.GetArray(op.inputs[2]);
const auto& weights_array = model.GetArray(op.inputs[1]);
if (!input_array.has_shape() || !weights_array.has_shape()) {
return false;
}
const Shape& input = input_array.shape();
const Shape& weights = weights_array.shape();
*result = 2 * input.dims(0) * input.dims(1) * input.dims(2) *
input.dims(3) * weights.dims(1) * weights.dims(2) *
weights.dims(0);
break;
}
case OperatorType::kAdd:
case OperatorType::kSub:
case OperatorType::kMul: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = RequiredBufferSizeForShape(output_array.shape());
break;
}
case OperatorType::kAddN: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
const int64_t num_adds = op.inputs.size() - 1;
*result = num_adds * RequiredBufferSizeForShape(output_array.shape());
break;
}
case OperatorType::kLogistic:
case OperatorType::kSoftmax:
case OperatorType::kLogSoftmax:
case OperatorType::kTanh: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = 64 * RequiredBufferSizeForShape(output_array.shape());
break;
}
case OperatorType::kMaxPool: {
const auto& maxpool = *static_cast<const MaxPoolOperator*>(&op);
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = RequiredBufferSizeForShape(output_array.shape()) *
maxpool.kheight * maxpool.kwidth;
break;
}
case OperatorType::kAveragePool: {
const auto& avgpool = *static_cast<const AveragePoolOperator*>(&op);
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = RequiredBufferSizeForShape(output_array.shape()) *
avgpool.kheight * avgpool.kwidth;
break;
}
case OperatorType::kL2Pool: {
const auto* maxpool = static_cast<const MaxPoolOperator*>(&op);
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
const int64_t cost_per_val = 2 * maxpool->kheight * maxpool->kwidth + 32;
*result = RequiredBufferSizeForShape(output_array.shape()) * cost_per_val;
break;
}
case OperatorType::kL2Normalization: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = 3 * RequiredBufferSizeForShape(output_array.shape());
break;
}
default:
*result = 0;
break;
}
return true;
}
bool EstimateArithmeticOpsCount(const Model& model, int64_t* result) {
int64_t total = 0;
for (const auto& op : model.operators) {
int64_t num_ops;
if (!EstimateArithmeticOpsCount(model, *op, &num_ops)) {
return false;
}
total += num_ops;
}
*result = total;
return true;
}
std::string FormattedNumber(int64_t x) {
const int64_t million = 1000000;
const int64_t billion = 1000000000;
if (x < 10000) {
return toco::port::StringF("%d ", x);
} else if (x < billion) {
return toco::port::StringF("%.3f M", static_cast<double>(x) / million);
} else {
return toco::port::StringF("%.3f G", static_cast<double>(x) / billion);
}
}
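// Computes the axis permutation taking input_axes_order to output_axes_order:
// identity when the orders match, a swap for 2-D orders, and fixed 4-D
// permutations for the supported weight layouts (OHWI/HWIO/HWOI).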
void GetShuffleShape(AxesOrder input_axes_order, AxesOrder output_axes_order,
std::vector<int>* shuffle) {
CHECK_EQ(AxesCount(input_axes_order), AxesCount(output_axes_order));
shuffle->resize(4);
for (int i = 0; i < 4; i++) {
(*shuffle)[i] = i;
}
if (input_axes_order == output_axes_order) {
} else if (AxesCount(input_axes_order) == 2) {
shuffle->resize(2);
(*shuffle)[0] = 1;
(*shuffle)[1] = 0;
} else if (input_axes_order == AxesOrder::kOHWI &&
output_axes_order == AxesOrder::kHWIO) {
*shuffle = {1, 2, 3, 0};
} else if (input_axes_order == AxesOrder::kHWIO &&
output_axes_order == AxesOrder::kOHWI) {
*shuffle = {3, 0, 1, 2};
} else if (input_axes_order == AxesOrder::kOHWI &&
output_axes_order == AxesOrder::kHWOI) {
*shuffle = {1, 2, 0, 3};
} else {
LOG(FATAL) << "Bad shuffle";
}
}
void ExtendShuffle(const std::vector<int>& input_shuffle, int newdim,
std::vector<int>* extended_shuffle) {
*extended_shuffle = input_shuffle;
CHECK(newdim >= input_shuffle.size());
const int pad_size = newdim - input_shuffle.size();
extended_shuffle->resize(newdim);
for (int i = 0; i < pad_size; i++) {
(*extended_shuffle)[i] = i;
}
for (int i = pad_size; i < newdim; i++) {
(*extended_shuffle)[i] = input_shuffle[i - pad_size] + pad_size;
}
}
void ShuffleDims(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, Shape* output_shape) {
if (input_axes_order == AxesOrder::kHWIM &&
output_axes_order == AxesOrder::k1HWO) {
*output_shape = Shape({1, input_shape.dims(0), input_shape.dims(1),
input_shape.dims(3) * input_shape.dims(2)});
} else {
std::vector<int> shuffle;
GetShuffleShape(input_axes_order, output_axes_order, &shuffle);
std::vector<int>* output_dims = output_shape->mutable_dims();
output_dims->resize(input_shape.dimensions_count());
for (int i = 0; i < input_shape.dimensions_count(); i++) {
(*output_dims)[i] = input_shape.dims(shuffle[i]);
}
}
}
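// Reorders tensor data from input_axes_order to output_axes_order. Shapes are
// extended to 4-D and the copy walks the output using precomputed input
// strides; the HWIM -> 1HWO case is a pure reshape, so a memcpy suffices.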
template <typename T>
void ShuffleArrayTemplate(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order,
const Shape& output_shape, const T* input_data,
T* output_data) {
if (input_axes_order == AxesOrder::kHWIM &&
output_axes_order == AxesOrder::k1HWO) {
memcpy(output_data, input_data,
RequiredBufferSizeForShape(input_shape) * sizeof(output_data[0]));
return;
}
CHECK(input_shape.dimensions_count() == output_shape.dimensions_count());
const int dim = input_shape.dimensions_count();
CHECK_LE(dim, 4);
std::vector<int> shuffle;
GetShuffleShape(input_axes_order, output_axes_order, &shuffle);
CHECK(shuffle.size() >= dim);
for (int i = 0; i < dim; i++) {
CHECK(shuffle[i] >= 0 && shuffle[i] < dim);
CHECK(input_shape.dims(shuffle[i]) == output_shape.dims(i));
}
Shape extended_input_shape = input_shape;
ExtendShape(&extended_input_shape, 4);
Shape extended_output_shape = output_shape;
ExtendShape(&extended_output_shape, 4);
std::vector<int> extended_shuffle;
ExtendShuffle(shuffle, 4, &extended_shuffle);
const std::vector<int>& extended_input_dims = extended_input_shape.dims();
const std::vector<int>& extended_output_dims = extended_output_shape.dims();
int input_strides[4];
input_strides[3] = 1;
input_strides[2] = extended_input_dims[3];
input_strides[1] = input_strides[2] * extended_input_dims[2];
input_strides[0] = input_strides[1] * extended_input_dims[1];
const int input_stride_0 = input_strides[extended_shuffle[3]];
const int input_stride_1 = input_strides[extended_shuffle[2]];
const int input_stride_2 = input_strides[extended_shuffle[1]];
const int input_stride_3 = input_strides[extended_shuffle[0]];
const int output_size_0 = extended_output_dims[3];
const int output_size_1 = extended_output_dims[2];
const int output_size_2 = extended_output_dims[1];
const int output_size_3 = extended_output_dims[0];
const int output_stride_0 = 1;
const int output_stride_1 = output_size_0;
const int output_stride_2 = output_stride_1 * output_size_1;
const int output_stride_3 = output_stride_2 * output_size_2;
for (int i3 = 0; i3 < output_size_3; i3++) {
const T* const input_ptr_3 = input_data + i3 * input_stride_3;
T* const output_ptr_3 = output_data + i3 * output_stride_3;
for (int i2 = 0; i2 < output_size_2; i2++) {
const T* const input_ptr_2 = input_ptr_3 + i2 * input_stride_2;
T* const output_ptr_2 = output_ptr_3 + i2 * output_stride_2;
for (int i1 = 0; i1 < output_size_1; i1++) {
const T* input_ptr = input_ptr_2 + i1 * input_stride_1;
T* output_ptr = output_ptr_2 + i1 * output_stride_1;
T* const output_ptr_end = output_ptr + output_size_0 * output_stride_0;
while (output_ptr != output_ptr_end) {
*output_ptr = *input_ptr;
input_ptr += input_stride_0;
output_ptr += output_stride_0;
}
}
}
}
}
void ShuffleArray(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, const Shape& output_shape,
const uint8* input_data, uint8* output_data) {
ShuffleArrayTemplate<uint8>(input_shape, input_axes_order, output_axes_order,
output_shape, input_data, output_data);
}
void ShuffleArray(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, const Shape& output_shape,
const float* input_data, float* output_data) {
ShuffleArrayTemplate<float>(input_shape, input_axes_order, output_axes_order,
output_shape, input_data, output_data);
}
int AxesCount(AxesOrder axes_order) {
switch (axes_order) {
case AxesOrder::kOneAxis:
return 1;
case AxesOrder::kRC:
return 2;
case AxesOrder::kCR:
return 2;
case AxesOrder::kHWIO:
return 4;
case AxesOrder::kOHWI:
return 4;
case AxesOrder::kHWIM:
return 4;
case AxesOrder::k1HWO:
return 4;
case AxesOrder::kNHWC:
return 4;
case AxesOrder::kHWOI:
return 4;
default:
LOG(FATAL) << "Bad AxesOrder";
return 0;
}
}
bool IsDiscardableArray(const Model& model, const std::string& array_name) {
if (IsInputArray(model, array_name) || IsOutputArray(model, array_name)) {
return false;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (!rnn_state.discardable()) {
if (array_name == rnn_state.state_array()) {
return false;
}
if (array_name == rnn_state.back_edge_source_array()) {
return false;
}
}
}
return true;
}
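// A Reshape is equivalent to a trivial Transpose when stripping all size-1
// dimensions from the input and output shapes leaves identical dimension
// lists.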
bool ReshapeIsEquivalentToTranspose(const Model& model,
const TensorFlowReshapeOperator* op,
bool allow_extra_unary_dims) {
CHECK(!op->shape.empty());
CHECK(model.HasArray(op->inputs[0]));
CHECK(model.HasArray(op->outputs[0]));
const auto& input_array = model.GetArray(op->inputs[0]);
const auto& output_array = model.GetArray(op->outputs[0]);
CHECK(input_array.has_shape());
CHECK(output_array.has_shape());
std::vector<int> in_shape = input_array.shape().dims();
std::vector<int> out_shape = output_array.shape().dims();
if (!allow_extra_unary_dims && in_shape.size() != out_shape.size()) {
return false;
}
in_shape.erase(std::remove(in_shape.begin(), in_shape.end(), 1),
in_shape.end());
out_shape.erase(std::remove(out_shape.begin(), out_shape.end(), 1),
out_shape.end());
return in_shape == out_shape;
}
void CheckFinalDataTypesSatisfied(const Model& model) {
for (const auto& array_entry : model.GetArrayMap()) {
const auto& array = *array_entry.second;
if (array.data_type == ArrayDataType::kBool) {
continue;
}
if (array.final_data_type != ArrayDataType::kNone &&
array.final_data_type != ArrayDataType::kInt16) {
CHECK(array.data_type == array.final_data_type)
<< "Array \"" << array_entry.first
<< "\" has mis-matching actual and final data types (data_type="
<< ArrayDataTypeName(array.data_type)
<< ", final_data_type=" << ArrayDataTypeName(array.final_data_type)
<< ").";
}
}
}
ArrayDataType ConvertIODataTypeToArrayDataType(IODataType type) {
switch (type) {
case FLOAT:
return ArrayDataType::kFloat;
case UINT8:
case QUANTIZED_UINT8:
return ArrayDataType::kUint8;
case INT8:
case QUANTIZED_INT8:
return ArrayDataType::kInt8;
case INT16:
case QUANTIZED_INT16:
return ArrayDataType::kInt16;
case UINT16:
return ArrayDataType::kUint16;
case INT32:
return ArrayDataType::kInt32;
case UINT32:
return ArrayDataType::kUint32;
case INT64:
return ArrayDataType::kInt64;
case UINT64:
return ArrayDataType::kUint64;
case BOOL:
return ArrayDataType::kBool;
case STRING:
return ArrayDataType::kString;
case COMPLEX64:
return ArrayDataType::kComplex64;
case COMPLEX128:
return ArrayDataType::kComplex128;
case FLOAT16:
return ArrayDataType::kFloat16;
case FLOAT64:
return ArrayDataType::kFloat64;
case RESOURCE:
case VARIANT:
default:
return ArrayDataType::kNone;
}
}
void FinishBuildingRNNStates(Model* model) {
for (const auto& rnn_state : model->flags.rnn_states()) {
if (!model->HasArray(rnn_state.back_edge_source_array()) ||
!model->HasArray(rnn_state.state_array())) {
CHECK(model->HasArray(rnn_state.back_edge_source_array()));
CHECK(model->HasArray(rnn_state.state_array()));
continue;
}
const auto& src_array = model->GetArray(rnn_state.back_edge_source_array());
auto& dst_array = model->GetArray(rnn_state.state_array());
if (src_array.data_type == ArrayDataType::kNone &&
dst_array.data_type == ArrayDataType::kNone) {
dst_array.data_type = ArrayDataType::kFloat;
}
}
}
std::unordered_set<std::string> ScanArrayNames(
const Model& model, const toco::ArraysExtraInfo_Entry& entry) {
std::unordered_set<std::string> matches;
if (model.HasArray(entry.name())) {
matches.insert(entry.name());
}
if (!entry.name_regexp().empty()) {
const auto& arrays = model.GetArrayMap();
const RE2 name_regexp = {entry.name_regexp()};
for (auto it = arrays.begin(); it != arrays.end(); ++it) {
if (RE2::FullMatch(it->first, name_regexp)) {
matches.insert(it->first);
}
}
}
return matches;
}
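// Applies entries of the arrays_extra_info proto to each matching array
// (matched by name or regexp): min/max, final data type when quantizing,
// shape override, and an optional constant float fill value.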
void UseArraysExtraInfo(Model* model, bool quantize_output) {
for (const auto& entry : model->flags.arrays_extra_info().entries()) {
const auto matches = ScanArrayNames(*model, entry);
if (matches.empty()) {
LOG(ERROR) << "arrays_extra_info_file: No matching arrays found for "
<< (entry.has_name() ? entry.name() : "")
<< (entry.has_name_regexp() ? entry.name_regexp() : "");
continue;
}
for (const auto& matched_name : matches) {
auto& array = model->GetArray(matched_name);
if (entry.has_min() || entry.has_max()) {
CHECK_EQ(entry.has_min(), entry.has_max());
auto& minmax = array.GetOrCreateMinMax();
minmax.min = entry.min();
minmax.max = entry.max();
}
if (entry.has_data_type() && quantize_output) {
array.final_data_type =
ConvertIODataTypeToArrayDataType(entry.data_type());
}
if (entry.has_shape()) {
array.clear_shape();
array.mutable_shape();
for (const auto& dim : entry.shape().dims()) {
array.mutable_shape()->mutable_dims()->push_back(dim);
}
}
if (entry.has_constant_float_value()) {
CHECK(array.has_shape());
if (array.data_type == ArrayDataType::kFloat) {
auto& data = array.GetMutableBuffer<ArrayDataType::kFloat>().data;
data.resize(RequiredBufferSizeForShape(array.shape()));
for (float& f : data) {
f = entry.constant_float_value();
}
}
}
}
}
}
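// Restores shuffled FullyConnected uint8 weights to the default layout:
// undoes the 4x16 block interleaving and XORs each value with 0x80 to undo
// the signedness shift applied by the shuffled format.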
void UndoWeightsShuffling(Model* model) {
for (const auto& op : model->operators) {
if (op->type != toco::OperatorType::kFullyConnected) {
continue;
}
const auto& fc_op = static_cast<toco::FullyConnectedOperator&>(*op);
if (fc_op.weights_format == FullyConnectedWeightsFormat::kDefault) {
continue;
}
const std::string& weights_name = fc_op.inputs[1];
QCHECK_EQ(CountOpsWithInput(*model, weights_name), 1);
auto& weights_array = model->GetArray(weights_name);
QCHECK(weights_array.data_type == ArrayDataType::kUint8);
auto& weights_data =
weights_array.GetMutableBuffer<toco::ArrayDataType::kUint8>().data;
const auto& weights_shape = weights_array.shape();
QCHECK_EQ(weights_shape.dimensions_count(), 2);
const int rows = weights_shape.dims(0);
const int cols = weights_shape.dims(1);
QCHECK_EQ(rows % 4, 0);
QCHECK_EQ(cols % 16, 0);
CHECK_EQ(rows * cols, weights_data.size());
std::vector<uint8> deshuffled_data(weights_data.size());
uint8* shuffled_data_ptr = weights_data.data();
for (int r = 0; r < rows; r += 4) {
for (int c = 0; c < cols; c += 16) {
for (int i = 0; i < 4; i++) {
uint8* deshuffled_data_ptr =
deshuffled_data.data() + (r + i) * cols + c;
for (int j = 0; j < 16; j++) {
uint8 shuffled_val = *shuffled_data_ptr++;
uint8 deshuffled_val = shuffled_val ^ 0x80;
*deshuffled_data_ptr++ = deshuffled_val;
}
}
}
}
CHECK_EQ(shuffled_data_ptr, weights_data.data() + rows * cols);
weights_data = std::move(deshuffled_data);
}
}
void CopyMinMaxAndQuantizationRelatedFields(const Array& src, Array* dst) {
if (src.minmax) {
dst->GetOrCreateMinMax() = src.GetMinMax();
}
if (src.quantization_params) {
dst->GetOrCreateQuantizationParams() = src.GetQuantizationParams();
}
dst->narrow_range = src.narrow_range;
}
} | #include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
enum class Agreement { kBroadcast, kExtend, kBroadcastNotExtend, kNeither };
struct ShapePair {
Shape left;
Shape right;
Agreement agreement;
};
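// Shape pairs with their expected agreement. kExtend implies agreement under
// both extending and broadcasting; kBroadcastNotExtend only under
// broadcasting; kNeither under neither.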
std::vector<ShapePair> CreateShapePairs() {
return std::vector<ShapePair>(
{
{Shape({3}), Shape({3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcast},
{Shape({8, 1, 6, 1}), Shape({7, 1, 5}), Agreement::kBroadcast},
{Shape({}), Shape({3}), Agreement::kBroadcast},
{Shape({}), Shape({3, 1}), Agreement::kBroadcast},
{Shape({3}), Shape({3}), Agreement::kExtend},
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({1, 1, 3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({1, 3}), Agreement::kExtend},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcastNotExtend},
{Shape({5, 4}), Shape({1}), Agreement::kBroadcastNotExtend},
{Shape({5, 4}), Shape({4}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({15, 1, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 1}), Agreement::kBroadcastNotExtend},
{Shape({3, 1}), Shape({}), Agreement::kBroadcastNotExtend},
{Shape({3}), Shape({4}), Agreement::kNeither},
{Shape({2, 1}), Shape({8, 4, 3}), Agreement::kNeither}});
}
class ShapeTest : public ::testing::TestWithParam<ShapePair> {};
TEST_P(ShapeTest, Agrees) {
const ShapePair& param = GetParam();
switch (param.agreement) {
case Agreement::kBroadcast: {
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
case Agreement::kExtend: {
EXPECT_TRUE(ShapesAgreeUpToExtending(param.left, param.right));
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
case Agreement::kBroadcastNotExtend: {
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
EXPECT_FALSE(ShapesAgreeUpToExtending(param.left, param.right));
break;
}
case Agreement::kNeither: {
EXPECT_FALSE(ShapesAgreeUpToExtending(param.left, param.right));
EXPECT_FALSE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
}
}
INSTANTIATE_TEST_SUITE_P(AgreeBroadcast, ShapeTest,
::testing::ValuesIn(CreateShapePairs()));
static const char kNegativeValuesMessage[] =
"Tensor shape should not include negative values";
static const char kLargeTensorMessage[] = "Tensor shape is too large";
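// NumElements must flag negative dimensions and products that overflow the
// destination integer type.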
TEST(NumElementsTest, Int) {
int count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int>{1024, 1024, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 2146435072);
status = NumElements(std::vector<int>{1024, 0, 2048}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 0);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int>{1024, 1024, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Int32) {
int32_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int32_t>{1024, 1024, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 2146435072);
status = NumElements(std::vector<int32_t>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int32_t>{1024, 1024, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Int64) {
int64_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int64_t>{16777216, 16777216, 32767}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 9223090561878065152LL);
status = NumElements(std::vector<int64_t>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int64_t>{16777216, 16777216, 32768}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, UnsignedInt32) {
uint32_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<uint32_t>{1024, 2048, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 4292870144);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<uint32_t>{1024, 2048, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, UnsignedInt64) {
uint64_t count;
tensorflow::Status status = absl::OkStatus();
status =
NumElements(std::vector<uint64_t>{16777216, 16777216, 65535}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 18446462598732840960ULL);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status =
NumElements(std::vector<uint64_t>{16777216, 16777216, 65536}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Scalar) {
tensorflow::Status status = absl::OkStatus();
int32_t count;
status = NumElements(std::vector<int32_t>{}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 1);
uint64_t countu64;
status = NumElements(std::vector<uint64_t>{}, &countu64);
EXPECT_TRUE(status.ok());
EXPECT_EQ(countu64, 1ULL);
}
TEST(FusedActivationTest, DefaultsToUnfused) {
EXPECT_TRUE(OperatorSupportsFusedActivation(OperatorType::kAdd));
EXPECT_FALSE(OperatorSupportsFusedActivation(OperatorType::kNone));
EXPECT_FALSE(OperatorSupportsFusedActivation(static_cast<OperatorType>(255)));
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tooling_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tooling_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6916f821-7c75-4936-99fa-2bea3e19eb07 | cpp | tensorflow/tensorflow | toco_cmdline_flags | tensorflow/lite/toco/toco_cmdline_flags.cc | tensorflow/lite/toco/toco_cmdline_flags_test.cc | #include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
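// Declares the supported TOCO command-line flags and parses argv into
// *parsed_toco_flags_ptr. Returns false after filling *msg with usage text
// when --help is requested, or when flag parsing fails.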
bool ParseTocoFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedTocoFlags* parsed_toco_flags_ptr) {
using tensorflow::Flag;
ParsedTocoFlags& parsed_flags = *parsed_toco_flags_ptr;
std::vector<tensorflow::Flag> flags = {
Flag("input_file", parsed_flags.input_file.bind(),
parsed_flags.input_file.default_value(),
"Input file (model of any supported format). For Protobuf "
"formats, both text and binary are supported regardless of file "
"extension."),
Flag("savedmodel_directory", parsed_flags.savedmodel_directory.bind(),
parsed_flags.savedmodel_directory.default_value(),
"Deprecated. Full path to the directory containing the SavedModel."),
Flag("output_file", parsed_flags.output_file.bind(),
parsed_flags.output_file.default_value(),
"Output file. "
"For Protobuf formats, the binary format will be used."),
Flag("input_format", parsed_flags.input_format.bind(),
parsed_flags.input_format.default_value(),
"Input file format. One of: TENSORFLOW_GRAPHDEF, TFLITE."),
Flag("output_format", parsed_flags.output_format.bind(),
parsed_flags.output_format.default_value(),
"Output file format. "
"One of TENSORFLOW_GRAPHDEF, TFLITE, GRAPHVIZ_DOT."),
Flag("savedmodel_tagset", parsed_flags.savedmodel_tagset.bind(),
parsed_flags.savedmodel_tagset.default_value(),
"Deprecated. Comma-separated set of tags identifying the "
"MetaGraphDef within the SavedModel to analyze. All tags in the tag "
"set must be specified."),
Flag("default_ranges_min", parsed_flags.default_ranges_min.bind(),
parsed_flags.default_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_ranges_max", parsed_flags.default_ranges_max.bind(),
parsed_flags.default_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_int16_ranges_min",
parsed_flags.default_int16_ranges_min.bind(),
parsed_flags.default_int16_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("default_int16_ranges_max",
parsed_flags.default_int16_ranges_max.bind(),
parsed_flags.default_int16_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("inference_type", parsed_flags.inference_type.bind(),
parsed_flags.inference_type.default_value(),
"Target data type of arrays in the output file (for input_arrays, "
"this may be overridden by inference_input_type). "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("inference_input_type", parsed_flags.inference_input_type.bind(),
parsed_flags.inference_input_type.default_value(),
"Target data type of input arrays. "
"If not specified, inference_type is used. "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("input_type", parsed_flags.input_type.bind(),
parsed_flags.input_type.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type."),
Flag("input_types", parsed_flags.input_types.bind(),
parsed_flags.input_types.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type. Was meant to be a "
"comma-separated list, but this was deprecated before "
"multiple-input-types was ever properly supported."),
Flag("drop_fake_quant", parsed_flags.drop_fake_quant.bind(),
parsed_flags.drop_fake_quant.default_value(),
"Ignore and discard FakeQuant nodes. For instance, to "
"generate plain float code without fake-quantization from a "
"quantized graph."),
Flag(
"reorder_across_fake_quant",
parsed_flags.reorder_across_fake_quant.bind(),
parsed_flags.reorder_across_fake_quant.default_value(),
"Normally, FakeQuant nodes must be strict boundaries for graph "
"transformations, in order to ensure that quantized inference has "
"the exact same arithmetic behavior as quantized training --- which "
"is the whole point of quantized training and of FakeQuant nodes in "
"the first place. "
"However, that entails subtle requirements on where exactly "
"FakeQuant nodes must be placed in the graph. Some quantized graphs "
"have FakeQuant nodes at unexpected locations, that prevent graph "
"transformations that are necessary in order to generate inference "
"code for these graphs. Such graphs should be fixed, but as a "
"temporary work-around, setting this reorder_across_fake_quant flag "
"allows TOCO to perform necessary graph transformaitons on them, "
"at the cost of no longer faithfully matching inference and training "
"arithmetic."),
Flag("allow_custom_ops", parsed_flags.allow_custom_ops.bind(),
parsed_flags.allow_custom_ops.default_value(),
"If true, allow TOCO to create TF Lite Custom operators for all the "
"unsupported TensorFlow ops."),
Flag("custom_opdefs", parsed_flags.custom_opdefs.bind(),
parsed_flags.custom_opdefs.default_value(),
"List of strings representing custom ops OpDefs that are included "
"in the GraphDef."),
Flag("allow_dynamic_tensors", parsed_flags.allow_dynamic_tensors.bind(),
parsed_flags.allow_dynamic_tensors.default_value(),
"Boolean flag indicating whether the converter should allow models "
"with dynamic Tensor shape. When set to False, the converter will "
"generate runtime memory offsets for activation Tensors (with 128 "
"bits alignment) and error out on models with undetermined Tensor "
"shape. (Default: True)"),
Flag(
"drop_control_dependency",
parsed_flags.drop_control_dependency.bind(),
parsed_flags.drop_control_dependency.default_value(),
"If true, ignore control dependency requirements in input TensorFlow "
"GraphDef. Otherwise an error will be raised upon control dependency "
"inputs."),
Flag("debug_disable_recurrent_cell_fusion",
parsed_flags.debug_disable_recurrent_cell_fusion.bind(),
parsed_flags.debug_disable_recurrent_cell_fusion.default_value(),
"If true, disable fusion of known identifiable cell subgraphs into "
"cells. This includes, for example, specific forms of LSTM cell."),
Flag("propagate_fake_quant_num_bits",
parsed_flags.propagate_fake_quant_num_bits.bind(),
parsed_flags.propagate_fake_quant_num_bits.default_value(),
"If true, use FakeQuant* operator num_bits attributes to adjust "
"array data_types."),
Flag("allow_nudging_weights_to_use_fast_gemm_kernel",
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel.bind(),
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel
.default_value(),
"Some fast uint8 GEMM kernels require uint8 weights to avoid the "
"value 0. This flag allows nudging them to 1 to allow proceeding, "
"with moderate inaccuracy."),
Flag("dedupe_array_min_size_bytes",
parsed_flags.dedupe_array_min_size_bytes.bind(),
parsed_flags.dedupe_array_min_size_bytes.default_value(),
"Minimum size of constant arrays to deduplicate; arrays smaller "
"will not be deduplicated."),
Flag("split_tflite_lstm_inputs",
parsed_flags.split_tflite_lstm_inputs.bind(),
parsed_flags.split_tflite_lstm_inputs.default_value(),
"Split the LSTM inputs from 5 tensors to 18 tensors for TFLite. "
"Ignored if the output format is not TFLite."),
Flag("quantize_to_float16", parsed_flags.quantize_to_float16.bind(),
parsed_flags.quantize_to_float16.default_value(),
"Used in conjunction with post_training_quantize. Specifies that "
"the weights should be quantized to fp16 instead of the default "
"(int8)"),
Flag("quantize_weights", parsed_flags.quantize_weights.bind(),
parsed_flags.quantize_weights.default_value(),
"Deprecated. Please use --post_training_quantize instead."),
Flag("post_training_quantize", parsed_flags.post_training_quantize.bind(),
parsed_flags.post_training_quantize.default_value(),
"Boolean indicating whether to quantize the weights of the "
"converted float model. Model size will be reduced and there will "
"be latency improvements (at the cost of accuracy)."),
Flag("enable_select_tf_ops", parsed_flags.enable_select_tf_ops.bind(),
parsed_flags.enable_select_tf_ops.default_value(), ""),
Flag("force_select_tf_ops", parsed_flags.force_select_tf_ops.bind(),
parsed_flags.force_select_tf_ops.default_value(), ""),
Flag("unfold_batchmatmul", parsed_flags.unfold_batchmatmul.bind(),
parsed_flags.unfold_batchmatmul.default_value(), ""),
Flag("accumulation_type", parsed_flags.accumulation_type.bind(),
parsed_flags.accumulation_type.default_value(),
"Accumulation type to use with quantize_to_float16"),
Flag("allow_bfloat16", parsed_flags.allow_bfloat16.bind(),
parsed_flags.allow_bfloat16.default_value(), "")};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
return tensorflow::Flags::Parse(argc, argv, flags);
}
}
namespace {
enum class FlagRequirement {
kNone,
kMustBeSpecified,
kMustNotBeSpecified,
kUseDefault,
};
template <typename T>
void EnforceFlagRequirement(const T& flag, const std::string& flag_name,
FlagRequirement requirement) {
if (requirement == FlagRequirement::kMustBeSpecified) {
QCHECK(flag.specified()) << "Missing required flag " << flag_name;
}
if (requirement == FlagRequirement::kMustNotBeSpecified) {
QCHECK(!flag.specified())
<< "Given other flags, this flag should not have been specified: "
<< flag_name;
}
}
template <typename T>
std::optional<T> GetFlagValue(const Arg<T>& flag, FlagRequirement requirement) {
if (flag.specified()) return flag.value();
if (requirement == FlagRequirement::kUseDefault) return flag.default_value();
return std::optional<T>();
}
}
void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
TocoFlags* toco_flags) {
namespace port = toco::port;
port::CheckInitGoogleIsDone("InitGoogle is not done yet");
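// READ_TOCO_FLAG copies a flag value straight into the TocoFlags proto;
// PARSE_TOCO_FLAG additionally parses the string value into the named proto
// enum type.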
#define READ_TOCO_FLAG(name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
toco_flags->set_##name(flag_value.value()); \
} \
} while (false)
#define PARSE_TOCO_FLAG(Type, name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
Type x; \
QCHECK(Type##_Parse(flag_value.value(), &x)) \
<< "Unrecognized " << #Type << " value " \
<< parsed_toco_flags.name.value(); \
toco_flags->set_##name(x); \
} \
} while (false)
PARSE_TOCO_FLAG(FileFormat, input_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(FileFormat, output_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, inference_input_type, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(reorder_across_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_custom_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_control_dependency, FlagRequirement::kNone);
READ_TOCO_FLAG(debug_disable_recurrent_cell_fusion, FlagRequirement::kNone);
READ_TOCO_FLAG(propagate_fake_quant_num_bits, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_nudging_weights_to_use_fast_gemm_kernel,
FlagRequirement::kNone);
READ_TOCO_FLAG(dedupe_array_min_size_bytes, FlagRequirement::kNone);
READ_TOCO_FLAG(split_tflite_lstm_inputs, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_weights, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_to_float16, FlagRequirement::kNone);
READ_TOCO_FLAG(post_training_quantize, FlagRequirement::kNone);
READ_TOCO_FLAG(enable_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(force_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(unfold_batchmatmul, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, accumulation_type, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_bfloat16, FlagRequirement::kNone);
if (parsed_toco_flags.force_select_tf_ops.value() &&
!parsed_toco_flags.enable_select_tf_ops.value()) {
LOG(WARNING) << "--force_select_tf_ops should always be used with "
"--enable_select_tf_ops.";
}
if (parsed_toco_flags.input_type.specified()) {
LOG(WARNING)
<< "--input_type is deprecated. It was an ambiguous flag that set both "
"--input_data_types and --inference_input_type. If you are trying "
"to complement the input file with information about the type of "
"input arrays, use --input_data_type. If you are trying to control "
"the quantization/dequantization of real-numbers input arrays in "
"the output file, use --inference_input_type.";
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(parsed_toco_flags.input_type.value(),
&input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.input_types.specified()) {
LOG(WARNING)
<< "--input_types is deprecated. It was an ambiguous flag that set "
"both --input_data_types and --inference_input_type. If you are "
"trying to complement the input file with information about the "
"type of input arrays, use --input_data_type. If you are trying to "
"control the quantization/dequantization of real-numbers input "
"arrays in the output file, use --inference_input_type.";
std::vector<std::string> input_types =
absl::StrSplit(parsed_toco_flags.input_types.value(), ',');
QCHECK(!input_types.empty());
for (size_t i = 1; i < input_types.size(); i++) {
QCHECK_EQ(input_types[i], input_types[0]);
}
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(input_types[0], &input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.quantize_weights.value()) {
LOG(WARNING)
<< "--quantize_weights is deprecated. Falling back to "
"--post_training_quantize. Please switch --post_training_quantize.";
toco_flags->set_post_training_quantize(
parsed_toco_flags.quantize_weights.value());
}
if (parsed_toco_flags.quantize_weights.value()) {
if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
LOG(WARNING)
<< "--post_training_quantize quantizes a graph of inference_type "
"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.";
toco_flags->set_inference_type(IODataType::FLOAT);
}
}
#undef READ_TOCO_FLAG
#undef PARSE_TOCO_FLAG
}
} | #include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
namespace toco {
namespace {
TEST(TocoCmdlineFlagsTest, DefaultValue) {
int argc = 1;
const char* args[] = {"toco", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), true);
}
TEST(TocoCmdlineFlagsTest, ParseFlags) {
int argc = 2;
const char* args[] = {"toco", "--allow_dynamic_tensors=false", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), false);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_cmdline_flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_cmdline_flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c94ebb6-18e0-4adb-b0a7-957d43bac92d | cpp | tensorflow/tensorflow | import_tensorflow | tensorflow/lite/toco/import_tensorflow.cc | tensorflow/lite/toco/import_tensorflow_test.cc | #include "tensorflow/lite/toco/import_tensorflow.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/map.h"
#include "google/protobuf/text_format.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_cluster.h"
#include "tensorflow/lite/toco/tensorflow_util.h"
#include "tensorflow/lite/toco/tooling_util.h"
using tensorflow::AttrValue;
using tensorflow::DT_BOOL;
using tensorflow::DT_COMPLEX64;
using tensorflow::DT_FLOAT;
using tensorflow::DT_INT16;
using tensorflow::DT_INT32;
using tensorflow::DT_INT64;
using tensorflow::DT_QUINT8;
using tensorflow::DT_STRING;
using tensorflow::DT_UINT16;
using tensorflow::DT_UINT32;
using tensorflow::DT_UINT8;
using tensorflow::GraphDef;
using tensorflow::NodeDef;
using tensorflow::TensorProto;
using tensorflow::TensorShapeProto;
namespace toco {
namespace {
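// Typed accessors for NodeDef attributes. Each Get*Attr helper CHECK-fails if
// the attribute is missing or stored with a different value case.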
bool HasAttr(const NodeDef& node, const std::string& attr_name) {
return node.attr().count(attr_name) > 0;
}
bool HasWildcardDimension(const TensorShapeProto& shape) {
for (const auto& dim : shape.dim()) {
if (dim.size() == -1) return true;
}
return false;
}
const std::string& GetStringAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kS);
return attr.s();
}
int64_t GetIntAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name)) << attr_name << " not found in:\n"
<< node.DebugString();
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kI);
return attr.i();
}
float GetFloatAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kF);
return attr.f();
}
bool GetBoolAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kB);
return attr.b();
}
tensorflow::DataType GetDataTypeAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kType);
return attr.type();
}
const TensorShapeProto& GetShapeAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kShape);
return attr.shape();
}
const TensorProto& GetTensorAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name)) << "No attr named '" << attr_name << "'";
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kTensor);
return attr.tensor();
}
const AttrValue::ListValue& GetListAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kList);
return attr.list();
}
tensorflow::Status CheckOptionalAttr(const NodeDef& node,
const std::string& attr_name,
const std::string& expected_value) {
if (HasAttr(node, attr_name)) {
const std::string& value = GetStringAttr(node, attr_name);
if (value != expected_value) {
return tensorflow::errors::InvalidArgument(
"Unexpected value for attribute '" + attr_name + "'. Expected '" +
expected_value + "'");
}
}
return absl::OkStatus();
}
tensorflow::Status CheckOptionalAttr(
const NodeDef& node, const std::string& attr_name,
const tensorflow::DataType& expected_value) {
if (HasAttr(node, attr_name)) {
const tensorflow::DataType& value = GetDataTypeAttr(node, attr_name);
if (value != expected_value) {
return tensorflow::errors::InvalidArgument(
"Unexpected value for attribute '" + attr_name + "'. Expected '" +
tensorflow::DataType_Name(expected_value) + "'");
}
}
return absl::OkStatus();
}
template <typename T1, typename T2>
tensorflow::Status ExpectValue(const T1& v1, const T2& v2,
const std::string& description) {
if (v1 == v2) return absl::OkStatus();
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Unexpected ", description, ": got ", v1, ", expected ", v2));
}
ArrayDataType ConvertDataType(tensorflow::DataType dtype) {
if (dtype == DT_UINT8)
return ArrayDataType::kUint8;
else if (dtype == DT_FLOAT)
return ArrayDataType::kFloat;
else if (dtype == DT_BOOL)
return ArrayDataType::kBool;
else if (dtype == DT_INT16)
return ArrayDataType::kInt16;
else if (dtype == DT_UINT16)
return ArrayDataType::kUint16;
else if (dtype == DT_INT32)
return ArrayDataType::kInt32;
else if (dtype == DT_UINT32)
return ArrayDataType::kUint32;
else if (dtype == DT_INT64)
return ArrayDataType::kInt64;
else if (dtype == DT_STRING)
return ArrayDataType::kString;
else if (dtype == DT_COMPLEX64)
return ArrayDataType::kComplex64;
else
LOG(INFO) << "Unsupported data type in placeholder op: " << dtype;
return ArrayDataType::kNone;
}
tensorflow::Status ImportShape(
const TFLITE_PROTO_NS::RepeatedPtrField<tensorflow::TensorShapeProto_Dim>&
input_dims,
int* input_flat_size, Shape* shape) {
std::vector<int> input_dims_only_sizes;
bool zero_sized_shape = false;
for (auto& d : input_dims) {
if (d.size() > std::numeric_limits<int>::max()) {
return tensorflow::errors::InvalidArgument("Shape element overflows");
}
if (d.size() == 0) {
zero_sized_shape = true;
}
input_dims_only_sizes.push_back(d.size());
}
if (zero_sized_shape) {
shape->mutable_dims()->clear();
if (input_flat_size != nullptr) *input_flat_size = 0;
return absl::OkStatus();
}
*shape->mutable_dims() = input_dims_only_sizes;
if (input_flat_size == nullptr) return absl::OkStatus();
return NumElements(input_dims_only_sizes, input_flat_size);
}
template <typename T>
struct TensorTraits;
template <>
struct TensorTraits<float> {
static int size(const TensorProto& p) { return p.float_val_size(); }
static float get(const TensorProto& p, int i) { return p.float_val(i); }
static std::string accessor_name() { return "float_val"; }
static std::string type_name() { return "float"; }
static void CopyFromContent(const TensorProto& p, std::vector<float>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<uint8_t> {
static int size(const TensorProto& p) { return p.int_val_size(); }
static uint8_t get(const TensorProto& p, int i) { return p.int_val(i); }
static std::string accessor_name() { return "int_val"; }
static std::string type_name() { return "uint8"; }
static void CopyFromContent(const TensorProto& p,
std::vector<uint8_t>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<std::complex<float>> {
static int size(const TensorProto& p) { return p.scomplex_val_size() / 2; }
static std::complex<float> get(const TensorProto& p, int i) {
return std::complex<float>(p.scomplex_val(2 * i),
p.scomplex_val(2 * i + 1));
}
static std::string accessor_name() { return "scomplex_val"; }
static std::string type_name() { return "complex64"; }
static void CopyFromContent(const TensorProto& p,
std::vector<std::complex<float>>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<int32> {
static int size(const TensorProto& p) { return p.int_val_size(); }
static int32 get(const TensorProto& p, int i) { return p.int_val(i); }
static std::string accessor_name() { return "int_val"; }
static std::string type_name() { return "int32"; }
static void CopyFromContent(const TensorProto& p, std::vector<int32>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<uint32> {
static int size(const TensorProto& p) { return p.uint32_val_size(); }
static int32 get(const TensorProto& p, int i) { return p.uint32_val(i); }
static std::string accessor_name() { return "uint32_val"; }
static std::string type_name() { return "uint32"; }
static void CopyFromContent(const TensorProto& p, std::vector<uint32>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<int64_t> {
static int size(const TensorProto& p) { return p.int64_val_size(); }
static int64_t get(const TensorProto& p, int i) { return p.int64_val(i); }
static std::string accessor_name() { return "int64_val"; }
static std::string type_name() { return "int64"; }
static void CopyFromContent(const TensorProto& p,
std::vector<int64_t>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<bool> {
static int size(const TensorProto& p) { return p.bool_val_size(); }
static bool get(const TensorProto& p, int i) { return p.bool_val(i); }
static std::string accessor_name() { return "bool_val"; }
static std::string type_name() { return "bool"; }
static void CopyFromContent(const TensorProto& p, std::vector<bool>* data) {
std::vector<char> buf(p.tensor_content().size());
toco::port::CopyToBuffer(p.tensor_content(), buf.data());
for (int i = 0; i < p.tensor_content().size(); i++) {
(*data)[i] = static_cast<bool>(buf[i]);
}
}
};
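// Fills output_data from the proto, accepting either a fully populated typed
// field, raw bytes in tensor_content, or a shorter typed field whose last
// value (or 0) is repeated to pad out to input_flat_size.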
template <typename T>
tensorflow::Status ImportTensorData(const TensorProto& input_tensor,
int input_flat_size,
std::vector<T>* output_data) {
CHECK_GE(output_data->size(), input_flat_size);
int num_elements_in_tensor = TensorTraits<T>::size(input_tensor);
if (num_elements_in_tensor == input_flat_size) {
for (int i = 0; i < num_elements_in_tensor; i++) {
(*output_data)[i] = TensorTraits<T>::get(input_tensor, i);
}
} else if (input_tensor.tensor_content().size() ==
input_flat_size * sizeof(T)) {
TensorTraits<T>::CopyFromContent(input_tensor, output_data);
} else if (num_elements_in_tensor >= 0 &&
num_elements_in_tensor < input_flat_size) {
int i = 0;
for (; i < num_elements_in_tensor; ++i) {
(*output_data)[i] = TensorTraits<T>::get(input_tensor, i);
}
auto last = i == 0 ? T(0) : (*output_data)[i - 1];
for (; i < input_flat_size; ++i) {
(*output_data)[i] = last;
}
} else {
std::string accessor_name = TensorTraits<T>::accessor_name();
std::string type_name = TensorTraits<T>::type_name();
return tensorflow::errors::InvalidArgument(
absl::StrCat("Neither input_content (",
input_tensor.tensor_content().size() / sizeof(T), ") nor ",
accessor_name, " (", num_elements_in_tensor,
") have the right dimensions (", input_flat_size,
") for this ", type_name, " tensor"));
}
return absl::OkStatus();
}
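// Each Import*Array helper converts a TensorProto of the matching dtype into
// a toco Array: import the shape, size the buffer, then copy the element
// data.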
tensorflow::Status ImportFloatArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_FLOAT);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_float_data =
output_array->GetMutableBuffer<ArrayDataType::kFloat>().data;
output_float_data.resize(RequiredBufferSizeForShape(output_array->shape()),
0.f);
return ImportTensorData<float>(input_tensor, input_flat_size,
&output_float_data);
}
tensorflow::Status ImportComplex64Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_COMPLEX64);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 4);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_complex_data =
output_array->GetMutableBuffer<ArrayDataType::kComplex64>().data;
output_complex_data.resize(RequiredBufferSizeForShape(output_array->shape()),
std::complex<float>(0.f, 0.f));
return ImportTensorData<std::complex<float>>(input_tensor, input_flat_size,
&output_complex_data);
}
tensorflow::Status ImportQuint8Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_QUINT8);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kUint8>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<uint8_t>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportInt32Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_INT32);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kInt32>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<int32>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportUint32Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_UINT32);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kUint32>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<uint32>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportInt64Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_INT64);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kInt64>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<int64_t>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportBoolArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_BOOL);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_bool_data =
output_array->GetMutableBuffer<ArrayDataType::kBool>().data;
output_bool_data.resize(RequiredBufferSizeForShape(output_array->shape()),
false);
status =
ImportTensorData<bool>(input_tensor, input_flat_size, &output_bool_data);
if (!status.ok() && output_bool_data.size() == 1) {
output_bool_data[0] = false;
return absl::OkStatus();
}
return status;
}
tensorflow::Status ImportStringArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_STRING);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
if (input_flat_size != input_tensor.string_val_size()) {
return tensorflow::errors::InvalidArgument(
"Input_content string_val doesn't have the right dimensions "
"for this string tensor");
}
auto& output_string_data =
output_array->GetMutableBuffer<ArrayDataType::kString>().data;
output_string_data.resize(RequiredBufferSizeForShape(output_array->shape()));
CHECK_GE(output_string_data.size(), input_flat_size);
for (int i = 0; i < input_flat_size; ++i) {
output_string_data[i] = input_tensor.string_val(i);
}
return absl::OkStatus();
}
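// Returns the number of inputs on `node` to import. When control dependencies
// are being dropped, inputs are counted only up to the first one whose name
// starts with '^'.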
int GetInputsCount(const NodeDef& node,
const TensorFlowImportFlags& tf_import_flags) {
if (tf_import_flags.drop_control_dependency) {
for (size_t i = 0; i < node.input_size(); ++i) {
if (node.input(i)[0] == '^') {
return i;
}
}
}
return node.input_size();
}
tensorflow::Status CheckInputsCount(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
int expected_input_count) {
if (GetInputsCount(node, tf_import_flags) != expected_input_count) {
return tensorflow::errors::FailedPrecondition(
node.op(), " node expects ", expected_input_count,
" input(s) other than control dependencies: ", node.DebugString());
}
return absl::OkStatus();
}
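// Creates a 1-D constant array of type T in `model` containing `data`, under
// a name derived from `name` that does not collide with any existing array,
// and returns that name.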
template <ArrayDataType T>
std::string CreateConstArray(
Model* model, std::string const& name,
std::vector<typename toco::DataType<T>> const& data) {
std::string array_name = toco::AvailableArrayName(*model, name);
auto& array = model->GetOrCreateArray(array_name);
array.data_type = T;
array.mutable_shape()->mutable_dims()->emplace_back(
static_cast<int>(data.size()));
array.GetMutableBuffer<T>().data = data;
return array_name;
}
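// Serializes the original NodeDef into the operator, preserving it for ops
// that may later be re-emitted as TensorFlow (Flex) ops.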
void RetainTensorFlowNodeDef(const NodeDef& node, Operator* op) {
node.SerializeToString(&op->tensorflow_node_def);
}
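// Derives output array names for an op from its OpDef: the first output uses
// the node name, later outputs use "<node name>:<index>". Output arguments
// declared via number_attr or type_list_attr expand into multiple outputs.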
void GetOutputNamesFromNodeDef(const NodeDef& node,
const tensorflow::OpDef& op_def,
TensorFlowUnsupportedOperator* op) {
int next_output = 0;
auto add_output = [&node, &next_output, op]() {
if (next_output == 0) {
op->outputs.push_back(node.name());
} else {
op->outputs.push_back(absl::StrCat(node.name(), ":", next_output));
}
++next_output;
};
for (int i = 0; i < op_def.output_arg_size(); ++i) {
std::string multiples = op_def.output_arg(i).number_attr();
if (!multiples.empty()) {
CHECK(HasAttr(node, multiples)) << "No attr named " << multiples;
int num_outputs = GetIntAttr(node, multiples);
for (int j = 0; j < num_outputs; ++j) {
add_output();
}
} else {
std::string list = op_def.output_arg(i).type_list_attr();
if (!list.empty()) {
CHECK(HasAttr(node, list)) << "No attr named " << list;
const AttrValue::ListValue& list_value = GetListAttr(node, list);
for (int j = 0; j < list_value.type_size(); ++j) {
add_output();
}
} else {
add_output();
}
}
}
}
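// Derives output data types for an op from its OpDef, resolving type
// attributes on the node when the argument type is not fixed. If any type
// cannot be determined, all collected output types are cleared.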
void GetOutputTypesFromNodeDef(const NodeDef& node,
const tensorflow::OpDef& op_def,
TensorFlowUnsupportedOperator* op) {
auto add_type = [&node, op](tensorflow::DataType type) {
if (type == tensorflow::DT_INVALID) {
LOG(WARNING) << "Op node missing output type attribute: " << node.name();
op->output_data_types.clear();
} else {
op->output_data_types.push_back(ConvertDataType(type));
}
};
auto get_type = [&node](const tensorflow::OpDef::ArgDef& a) {
if (a.type() != tensorflow::DT_INVALID) {
return a.type();
} else if (HasAttr(node, a.type_attr())) {
return GetDataTypeAttr(node, a.type_attr());
} else {
return tensorflow::DT_INVALID;
}
};
for (int i = 0; i < op_def.output_arg_size(); ++i) {
std::string multiples = op_def.output_arg(i).number_attr();
if (!multiples.empty()) {
CHECK(HasAttr(node, multiples)) << "No attr named " << multiples;
int num_outputs = GetIntAttr(node, multiples);
auto type = get_type(op_def.output_arg(i));
for (int j = 0; j < num_outputs; ++j) {
add_type(type);
}
} else {
std::string list = op_def.output_arg(i).type_list_attr();
if (!list.empty()) {
CHECK(HasAttr(node, list)) << "No attr named " << list;
const AttrValue::ListValue& list_value = GetListAttr(node, list);
for (int j = 0; j < list_value.type_size(); ++j) {
add_type(list_value.type(j));
}
} else {
add_type(get_type(op_def.output_arg(i)));
}
}
}
}
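// Fallback conversion for nodes with no dedicated converter: wraps the node
// in a TensorFlowUnsupportedOperator, keeping its serialized NodeDef and
// recovering output names, types and shapes from the OpDef and the
// _output_types / _output_shapes attributes where available.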
tensorflow::Status ConvertUnsupportedOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
static constexpr char kAttrOutputQuantized[] = "_output_quantized";
static constexpr char kAttrOutputTypes[] = "_output_types";
static constexpr char kAttrOutputShapes[] = "_output_shapes";
static constexpr char kAttrSupportOutputTypeFloatInQuantizedOp[] =
"_support_output_type_float_in_quantized_op";
LOG(INFO) << "Converting unsupported operation: " << node.op();
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = node.op();
RetainTensorFlowNodeDef(node, op);
model->operators.emplace_back(op);
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
const tensorflow::OpDef* op_def = nullptr;
if (tensorflow::OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok()) {
GetOutputNamesFromNodeDef(node, *op_def, op);
} else {
op->outputs.push_back(node.name());
}
if (HasAttr(node, kAttrOutputQuantized)) {
op->quantized = GetBoolAttr(node, kAttrOutputQuantized);
}
if (HasAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp)) {
op->support_output_type_float_in_quantized_op =
GetBoolAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp);
}
if (HasAttr(node, kAttrOutputTypes)) {
const auto& output_types = GetListAttr(node, kAttrOutputTypes);
for (int i = 0; i < output_types.type_size(); ++i) {
op->output_data_types.push_back(ConvertDataType(output_types.type(i)));
}
} else if (HasAttr(node, "Tout")) {
const auto& output_type = GetDataTypeAttr(node, "Tout");
op->output_data_types.push_back(ConvertDataType(output_type));
} else if (op_def != nullptr) {
GetOutputTypesFromNodeDef(node, *op_def, op);
} else {
LOG(INFO) << "Unable to determine output type for op: " << node.op();
}
if (HasAttr(node, kAttrOutputShapes)) {
const auto& output_shapes = GetListAttr(node, kAttrOutputShapes);
Shape output_shape;
for (int i = 0; i < output_shapes.shape_size(); ++i) {
const auto& shape = output_shapes.shape(i);
if (HasWildcardDimension(shape)) {
LOG(INFO) << "Skipping wildcard output shape(s) for node: "
<< node.name();
op->output_shapes.clear();
break;
}
const auto status =
ImportShape(shape.dim(), nullptr, &output_shape);
if (!status.ok()) {
return status;
}
op->output_shapes.push_back(output_shape);
}
}
return absl::OkStatus();
}
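// Converts a Const node into a constant array in the model, dispatching on
// the "dtype" attribute to the matching Import*Array helper. Unsupported
// dtypes yield an array with data_type kNone and an empty buffer.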
tensorflow::Status ConvertConstOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Const");
const auto& tensor = GetTensorAttr(node, "value");
const auto dtype = GetDataTypeAttr(node, "dtype");
tensorflow::Status status = absl::OkStatus();
auto& array = model->GetOrCreateArray(node.name());
switch (dtype) {
case DT_FLOAT:
array.data_type = ArrayDataType::kFloat;
status = ImportFloatArray(tensor, &array);
break;
case DT_INT32:
array.data_type = ArrayDataType::kInt32;
status = ImportInt32Array(tensor, &array);
break;
case DT_UINT32:
array.data_type = ArrayDataType::kUint32;
status = ImportUint32Array(tensor, &array);
break;
case DT_QUINT8:
array.data_type = ArrayDataType::kUint8;
status = ImportQuint8Array(tensor, &array);
break;
case DT_INT64:
array.data_type = ArrayDataType::kInt64;
status = ImportInt64Array(tensor, &array);
break;
case DT_STRING:
array.data_type = ArrayDataType::kString;
status = ImportStringArray(tensor, &array);
break;
case DT_BOOL:
array.data_type = ArrayDataType::kBool;
status = ImportBoolArray(tensor, &array);
break;
case DT_COMPLEX64:
array.data_type = ArrayDataType::kComplex64;
status = ImportComplex64Array(tensor, &array);
break;
default:
array.data_type = ArrayDataType::kNone;
array.GetMutableBuffer<ArrayDataType::kNone>();
break;
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
status, " (while processing node '" + node.name() + "')");
return absl::OkStatus();
}
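// Converts Conv2D. TensorFlow stores conv weights in HWIO order while toco
// expects OHWI, so a ReorderAxes op is inserted (or an existing one reused)
// on the weights input before the Conv operator itself.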
tensorflow::Status ConvertConvOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Conv2D");
TF_RETURN_IF_ERROR(CheckInputsCount(node, tf_import_flags, 2));
TF_RETURN_IF_ERROR(CheckOptionalAttr(node, "data_format", "NHWC"));
TF_RETURN_IF_ERROR(CheckOptionalAttr(node, "T", DT_FLOAT));
const auto& input_name = node.input(0);
const auto& weights_name = node.input(1);
const auto& reordered_weights_name =
AvailableArrayName(*model, weights_name + "_reordered");
const Operator* existing_reorder =
GetOpWithOutput(*model, reordered_weights_name);
if (existing_reorder) {
CHECK(existing_reorder->type == OperatorType::kReorderAxes);
} else {
auto* reorder = new ReorderAxesOperator;
reorder->inputs = {weights_name};
reorder->outputs = {reordered_weights_name};
reorder->input_axes_order = AxesOrder::kHWIO;
reorder->output_axes_order = AxesOrder::kOHWI;
model->operators.emplace_back(reorder);
}
if (!HasAttr(node, "strides")) {
return tensorflow::errors::InvalidArgument("Missing attribute 'strides'");
}
const auto& strides = GetListAttr(node, "strides");
TF_RETURN_IF_ERROR(ExpectValue(strides.i_size(), 4, "number of strides"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(0), 1, "strides(0)"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(3), 1, "strides(3)"));
int dilation_height_factor;
int dilation_width_factor;
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
TF_RETURN_IF_ERROR(
ExpectValue(dilations.i_size(), 4, "number of dilations"));
if (dilations.i(0) != 1 || dilations.i(3) != 1) {
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Can only import Conv ops with dilation along the height "
"(1st) or width (2nd) axis. TensorFlow op \"",
node.name(), "\" had dilations:[ ", dilations.i(0), ", ",
dilations.i(1), ", ", dilations.i(2), ", ", dilations.i(3), "]."));
}
dilation_height_factor = dilations.i(1);
dilation_width_factor = dilations.i(2);
} else {
dilation_height_factor = 1;
dilation_width_factor = 1;
}
const auto& padding = GetStringAttr(node, "padding");
PaddingType padding_type;
if (padding == "SAME") {
padding_type = PaddingType::kSame;
} else if (padding == "VALID") {
padding_type = PaddingType::kValid;
} else {
return tensorflow::errors::InvalidArgument(
"Bad padding (only SAME and VALID are supported)");
}
auto* conv = new ConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
conv->dilation_height_factor = dilation_height_factor;
conv->dilation_width_factor = dilation_width_factor;
conv->padding.type = padding_type;
model->operators.emplace_back(conv);
return absl::OkStatus();
}
tensorflow::Status ConvertDepthwiseConvOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "DepthwiseConv2dNative");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
if (HasAttr(node, "data_format")) {
CHECK_EQ(GetStringAttr(node, "data_format"), "NHWC");
}
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
const auto& input_name = node.input(0);
const auto& weights_name = node.input(1);
const auto& reordered_weights_name = weights_name + "_reordered";
const Operator* existing_reorder =
GetOpWithOutput(*model, reordered_weights_name);
if (existing_reorder) {
CHECK(existing_reorder->type == OperatorType::kReorderAxes);
} else {
auto* reorder = new ReorderAxesOperator;
reorder->inputs = {weights_name};
reorder->outputs = {reordered_weights_name};
reorder->input_axes_order = AxesOrder::kHWIM;
reorder->output_axes_order = AxesOrder::k1HWO;
model->operators.emplace_back(reorder);
}
const auto& strides = GetListAttr(node, "strides");
TF_RETURN_IF_ERROR(ExpectValue(strides.i_size(), 4, "number of strides"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(0), 1, "strides(0)"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(3), 1, "strides(3)"));
int dilation_height_factor;
int dilation_width_factor;
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
TF_RETURN_IF_ERROR(
ExpectValue(dilations.i_size(), 4, "number of dilations"));
if (dilations.i(0) != 1 || dilations.i(3) != 1) {
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Can only import Conv ops with dilation along the height "
"(1st) or width (2nd) axis. TensorFlow op \"",
node.name(), "\" had dilations:[ ", dilations.i(0), ", ",
dilations.i(1), ", ", dilations.i(2), ", ", dilations.i(3), "]."));
}
dilation_height_factor = dilations.i(1);
dilation_width_factor = dilations.i(2);
} else {
dilation_height_factor = 1;
dilation_width_factor = 1;
}
const auto& padding = GetStringAttr(node, "padding");
PaddingType padding_type;
if (padding == "SAME") {
padding_type = PaddingType::kSame;
} else if (padding == "VALID") {
padding_type = PaddingType::kValid;
} else {
return tensorflow::errors::InvalidArgument(
"Bad padding (only SAME and VALID are supported)");
}
auto* conv = new DepthwiseConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
conv->dilation_height_factor = dilation_height_factor;
conv->dilation_width_factor = dilation_width_factor;
conv->padding.type = padding_type;
model->operators.emplace_back(conv);
return absl::OkStatus();
}
tensorflow::Status ConvertDepthToSpaceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "DepthToSpace");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
tensorflow::DataType dtype = GetDataTypeAttr(node, "T");
if (dtype != DT_FLOAT && dtype != DT_UINT8 && dtype != DT_INT32 &&
dtype != DT_INT64) {
const auto* enum_descriptor = tensorflow::DataType_descriptor();
LOG(FATAL) << "TFLite does not support DepthToSpace with type T:"
<< enum_descriptor->FindValueByNumber(dtype)->name() << ". "
<< "T must be one of {DT_FLOAT, DT_UINT8, DT_INT32, DT_INT64}.";
}
auto* op = new DepthToSpaceOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
op->block_size = GetIntAttr(node, "block_size");
QCHECK_GE(op->block_size, 2);
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSpaceToDepthOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SpaceToDepth");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
tensorflow::DataType dtype = GetDataTypeAttr(node, "T");
if (dtype != DT_FLOAT && dtype != DT_UINT8 && dtype != DT_INT32 &&
dtype != DT_INT64) {
const auto* enum_descriptor = tensorflow::DataType_descriptor();
LOG(FATAL) << "TFLite does not support SpaceToDepth with type T:"
<< enum_descriptor->FindValueByNumber(dtype)->name() << ". "
<< "T must be one of {DT_FLOAT, DT_UINT8, DT_INT32, DT_INT64}.";
}
auto* op = new SpaceToDepthOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
op->block_size = GetIntAttr(node, "block_size");
QCHECK_GE(op->block_size, 2);
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertBiasAddOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "BiasAdd");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto& input_name = node.input(0);
const auto& bias_name = node.input(1);
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
auto* biasadd = new AddOperator;
biasadd->inputs.push_back(input_name);
biasadd->inputs.push_back(bias_name);
biasadd->outputs.push_back(node.name());
model->operators.emplace_back(biasadd);
return absl::OkStatus();
}
tensorflow::Status ConvertRandomUniform(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "RandomUniform");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_INT32);
auto op = std::make_unique<RandomUniformOperator>();
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
op->dtype = ConvertDataType(GetDataTypeAttr(node, "dtype"));
op->seed = GetIntAttr(node, "seed");
op->seed2 = GetIntAttr(node, "seed2");
CHECK(model != nullptr);
model->operators.emplace_back(std::move(op));
return absl::OkStatus();
}
tensorflow::Status ConvertIdentityOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "Identity" || node.op() == "CheckNumerics" ||
node.op() == "PlaceholderWithDefault" || node.op() == "StopGradient" ||
node.op() == "Snapshot" || node.op() == "EnsureShape");
auto* op = new TensorFlowIdentityOperator;
QCHECK_GE(node.input_size(), 1)
<< node.op()
<< " node expects at least 1 input other than control dependencies: "
<< node.DebugString();
const auto& input_name = node.input(0);
op->inputs.push_back(input_name);
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertIdentityNOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "IdentityN");
for (int i = 0; i < node.input_size(); ++i) {
auto* op = new TensorFlowIdentityOperator;
const auto& input_name = node.input(i);
std::string output_name = node.name();
if (i > 0) {
output_name = output_name + ":" + std::to_string(i);
}
op->inputs.push_back(input_name);
op->outputs.push_back(output_name);
model->operators.emplace_back(op);
}
return absl::OkStatus();
}
tensorflow::Status ConvertFakeQuantWithMinMaxArgs(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "FakeQuantWithMinMaxArgs");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
auto* op = new FakeQuantOperator;
op->inputs.push_back(node.input(0));
op->minmax = std::make_unique<MinMax>();
auto& minmax = *op->minmax;
minmax.min = GetFloatAttr(node, "min");
minmax.max = GetFloatAttr(node, "max");
op->outputs.push_back(node.name());
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
if (HasAttr(node, "narrow_range")) {
op->narrow_range = GetBoolAttr(node, "narrow_range");
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertFakeQuantWithMinMaxVars(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "FakeQuantWithMinMaxVars");
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK(num_inputs == 3 || num_inputs == 4)
<< "FakeQuantWithMinMaxVars node expects 3 or 4 inputs other than "
"control dependencies: "
<< node.DebugString();
auto* op = new FakeQuantOperator;
for (int i = 0; i < 3; i++) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
if (HasAttr(node, "narrow_range")) {
op->narrow_range = GetBoolAttr(node, "narrow_range");
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSqueezeOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Squeeze");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
auto* op = new SqueezeOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
if (HasAttr(node, "squeeze_dims")) {
const auto& squeeze_dims = GetListAttr(node, "squeeze_dims");
for (int i = 0; i < squeeze_dims.i_size(); ++i) {
op->squeeze_dims.push_back(squeeze_dims.i(i));
}
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSplitOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Split");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new TensorFlowSplitOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
const int num_split = GetIntAttr(node, "num_split");
op->outputs.push_back(node.name());
for (int i = 1; i < num_split; i++) {
op->outputs.push_back(absl::StrCat(node.name(), ":", i));
}
op->num_split = num_split;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSplitVOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SplitV");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
auto* op = new TensorFlowSplitVOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
const int num_split = GetIntAttr(node, "num_split");
op->outputs.push_back(node.name());
for (int i = 1; i < num_split; i++) {
op->outputs.push_back(absl::StrCat(node.name(), ":", i));
}
op->num_split = num_split;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSwitchOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Switch");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new TensorFlowSwitchOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
op->outputs.push_back(node.name() + ":1");
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSoftmaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Softmax");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
auto* softmax = new SoftmaxOperator;
softmax->inputs.push_back(input_name);
softmax->outputs.push_back(node.name());
CHECK(!node.attr().count("beta"));
if (node.attr().count("_softmax_beta")) {
softmax->beta = GetFloatAttr(node, "_softmax_beta");
} else {
softmax->beta = 1.f;
}
model->operators.emplace_back(softmax);
return absl::OkStatus();
}
tensorflow::Status ConvertLRNOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "LRN");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
auto* lrn = new LocalResponseNormalizationOperator;
lrn->inputs.push_back(input_name);
lrn->outputs.push_back(node.name());
lrn->range = GetIntAttr(node, "depth_radius");
lrn->bias = GetFloatAttr(node, "bias");
lrn->alpha = GetFloatAttr(node, "alpha");
lrn->beta = GetFloatAttr(node, "beta");
model->operators.emplace_back(lrn);
return absl::OkStatus();
}
tensorflow::Status ConvertMaxPoolOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "MaxPool");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
if (node.attr().count("data_format")) {
CHECK_EQ(GetStringAttr(node, "data_format"), "NHWC");
}
if (HasAttr(node, "T")) {
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
} else {
LOG(WARNING) << "Found MaxPool operator missing 'T' attribute";
}
auto* maxpool = new MaxPoolOperator;
maxpool->inputs.push_back(input_name);
maxpool->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4);
CHECK_EQ(strides.i(0), 1);
CHECK_EQ(strides.i(3), 1);
maxpool->stride_height = strides.i(1);
maxpool->stride_width = strides.i(2);
const auto& ksize = GetListAttr(node, "ksize");
CHECK_EQ(ksize.i_size(), 4);
CHECK_EQ(ksize.i(0), 1);
CHECK_EQ(ksize.i(3), 1);
maxpool->kheight = ksize.i(1);
maxpool->kwidth = ksize.i(2);
const auto& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
maxpool->padding.type = PaddingType::kSame;
} else if (padding == "VALID") {
maxpool->padding.type = PaddingType::kValid;
} else {
LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
}
model->operators.emplace_back(maxpool);
return absl::OkStatus();
}
tensorflow::Status ConvertAvgPoolOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "AvgPool");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
if (node.attr().count("data_format")) {
CHECK_EQ(GetStringAttr(node, "data_format"), "NHWC");
}
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
auto* avgpool = new AveragePoolOperator;
avgpool->inputs.push_back(input_name);
avgpool->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4);
CHECK_EQ(strides.i(0), 1);
CHECK_EQ(strides.i(3), 1);
avgpool->stride_height = strides.i(1);
avgpool->stride_width = strides.i(2);
const auto& ksize = GetListAttr(node, "ksize");
CHECK_EQ(ksize.i_size(), 4);
CHECK_EQ(ksize.i(0), 1);
CHECK_EQ(ksize.i(3), 1);
avgpool->kheight = ksize.i(1);
avgpool->kwidth = ksize.i(2);
const auto& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
avgpool->padding.type = PaddingType::kSame;
} else if (padding == "VALID") {
avgpool->padding.type = PaddingType::kValid;
} else {
LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
}
model->operators.emplace_back(avgpool);
return absl::OkStatus();
}
tensorflow::Status ConvertBatchMatMulOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* batch_matmul = new BatchMatMulOperator;
if (HasAttr(node, "adj_x")) {
batch_matmul->adj_x = GetBoolAttr(node, "adj_x");
}
if (HasAttr(node, "adj_y")) {
batch_matmul->adj_y = GetBoolAttr(node, "adj_y");
}
batch_matmul->inputs = {node.input(0), node.input(1)};
batch_matmul->outputs = {node.name()};
RetainTensorFlowNodeDef(node, batch_matmul);
model->operators.emplace_back(batch_matmul);
return absl::OkStatus();
}
tensorflow::Status ConvertMatMulOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
CHECK(!HasAttr(node, "adjoint_a") ||
(GetBoolAttr(node, "adjoint_a") == false));
CHECK(!HasAttr(node, "adjoint_b") ||
(GetBoolAttr(node, "adjoint_b") == false));
auto* matmul = new TensorFlowMatMulOperator;
if (HasAttr(node, "transpose_a")) {
matmul->transpose_a = GetBoolAttr(node, "transpose_a");
}
if (HasAttr(node, "transpose_b")) {
matmul->transpose_b = GetBoolAttr(node, "transpose_b");
}
matmul->inputs = {node.input(0), node.input(1)};
matmul->outputs = {node.name()};
model->operators.emplace_back(matmul);
return absl::OkStatus();
}
tensorflow::Status ConvertConcatOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
Operator* op = nullptr;
if (node.op() == "Concat") {
op = new TensorFlowConcatOperator;
} else if (node.op() == "ConcatV2") {
op = new TensorFlowConcatV2Operator;
} else {
LOG(FATAL) << "Expected Concat or ConcatV2";
}
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_GE(num_inputs, 2)
<< node.op()
<< " node expects at least 2 inputs other than control dependencies: "
<< node.DebugString();
CHECK_EQ(num_inputs, 1 + GetIntAttr(node, "N"));
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertMirrorPadOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
if (node.op() != "MirrorPad") {
LOG(FATAL) << "Expected MirrorPad.";
}
const int num_inputs = GetInputsCount(node, tf_import_flags);
CHECK_EQ(num_inputs, 2);
auto* op = new MirrorPadOperator;
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
const auto mode = GetStringAttr(node, "mode");
if (mode == "REFLECT") {
op->mode = toco::MirrorPadMode::kReflect;
} else if (mode == "SYMMETRIC") {
op->mode = toco::MirrorPadMode::kSymmetric;
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
static constexpr int kAnyNumInputs = -1;
enum FlexSupport { kFlexOk, kFlexNotOk };
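// Generic converter for ops that map directly onto a single toco operator
// with a fixed number of inputs and outputs. When `flex` is kFlexOk the
// original NodeDef is retained so the op can later be exported as a Flex op.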
template <typename Op, int NumInputs, int NumOutputs, FlexSupport flex>
tensorflow::Status ConvertSimpleOperatorGeneric(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
if (NumInputs != kAnyNumInputs) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, NumInputs));
}
auto* op = new Op;
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
if (NumOutputs > 1) {
for (int i = 1; i < NumOutputs; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i));
}
}
if (flex == kFlexOk) {
RetainTensorFlowNodeDef(node, op);
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
template <typename Op, int NumInputs, int NumOutputs>
tensorflow::Status ConvertSimpleOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
return ConvertSimpleOperatorGeneric<Op, NumInputs, NumOutputs, kFlexNotOk>(
node, tf_import_flags, model_flags, model);
}
template <typename Op, int NumInputs, int NumOutputs>
tensorflow::Status ConvertSimpleOperatorFlexOk(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
return ConvertSimpleOperatorGeneric<Op, NumInputs, NumOutputs, kFlexOk>(
node, tf_import_flags, model_flags, model);
}
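// Converts a Const node only when its shape is fully specified and its dtype
// has a dedicated importer; otherwise it is routed through the
// unsupported-operator fallback.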
tensorflow::Status ConditionallyConvertConstOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
const auto& tensor = GetTensorAttr(node, "value");
const auto& shape = tensor.tensor_shape();
for (const auto& dim : shape.dim()) {
if (dim.size() <= 0) {
return ConvertUnsupportedOperator(node, tf_import_flags, model_flags,
model);
}
}
switch (GetDataTypeAttr(node, "dtype")) {
case DT_FLOAT:
case DT_INT32:
case DT_QUINT8:
case DT_INT64:
case DT_STRING:
case DT_BOOL:
case DT_COMPLEX64:
return ConvertConstOperator(node, tf_import_flags, model_flags, model);
default:
return ConvertUnsupportedOperator(node, tf_import_flags, model_flags,
model);
}
}
tensorflow::Status ConvertStridedSliceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "StridedSlice");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 4));
auto* op = new StridedSliceOperator;
for (const auto& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name());
op->begin_mask =
HasAttr(node, "begin_mask") ? GetIntAttr(node, "begin_mask") : 0;
op->ellipsis_mask =
HasAttr(node, "ellipsis_mask") ? GetIntAttr(node, "ellipsis_mask") : 0;
op->end_mask = HasAttr(node, "end_mask") ? GetIntAttr(node, "end_mask") : 0;
op->new_axis_mask =
HasAttr(node, "new_axis_mask") ? GetIntAttr(node, "new_axis_mask") : 0;
op->shrink_axis_mask = HasAttr(node, "shrink_axis_mask")
? GetIntAttr(node, "shrink_axis_mask")
: 0;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertPlaceholderOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "Placeholder" || node.op() == "LegacyFedInput");
if (node.op() == "Placeholder") {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 0));
}
bool inside_input_arrays = false;
for (const auto& input_array : model_flags.input_arrays()) {
if (node.name() == input_array.name()) {
inside_input_arrays = true;
break;
}
}
if (!inside_input_arrays) {
model->AddInvalidInputArray(node.name());
}
auto& array = model->GetOrCreateArray(node.name());
if (node.attr().count("dtype")) {
array.data_type = ConvertDataType(GetDataTypeAttr(node, "dtype"));
}
if (node.attr().count("shape")) {
const auto& shape = GetShapeAttr(node, "shape");
auto num_dims = shape.dim_size();
if (num_dims > 0 && !HasWildcardDimension(shape)) {
auto& dst_array_dims = *array.mutable_shape()->mutable_dims();
dst_array_dims.resize(num_dims);
for (std::size_t i = 0; i < num_dims; i++) {
dst_array_dims[i] = shape.dim(i).size();
}
}
}
return absl::OkStatus();
}
tensorflow::Status ConvertNoOpOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
return absl::OkStatus();
}
tensorflow::Status ConvertCastOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Cast");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto tf_src_dtype = GetDataTypeAttr(node, "SrcT");
const auto tf_dst_dtype = GetDataTypeAttr(node, "DstT");
auto* op = new CastOperator;
op->src_data_type = ConvertDataType(tf_src_dtype);
op->dst_data_type = ConvertDataType(tf_dst_dtype);
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertFloorOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Floor");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto data_type = GetDataTypeAttr(node, "T");
CHECK(data_type == DT_FLOAT);
auto* op = new FloorOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertCeilOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Ceil");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto data_type = GetDataTypeAttr(node, "T");
CHECK(data_type == DT_FLOAT);
auto* op = new CeilOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertRoundOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Round");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto data_type = GetDataTypeAttr(node, "T");
CHECK(data_type == DT_FLOAT);
auto* op = new RoundOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertGatherOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "Gather" || node.op() == "GatherV2");
if (node.op() == "Gather")
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
if (node.op() == "GatherV2")
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
const auto indices_data_type = GetDataTypeAttr(node, "Tindices");
CHECK(indices_data_type == DT_INT32 || indices_data_type == DT_INT64);
auto* op = new GatherOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
if (node.input_size() >= 3) {
const auto axis_data_type = GetDataTypeAttr(node, "Taxis");
CHECK(axis_data_type == DT_INT32 || axis_data_type == DT_INT64);
op->inputs.push_back(node.input(2));
} else {
op->axis = {0};
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertGatherNdOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "GatherNd");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto indices_data_type = GetDataTypeAttr(node, "Tindices");
CHECK(indices_data_type == DT_INT32 || indices_data_type == DT_INT64);
auto* op = new GatherNdOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
template <typename Op>
tensorflow::Status ConvertArgMinMaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto axis_data_type =
HasAttr(node, "Tidx") ? GetDataTypeAttr(node, "Tidx") : DT_INT32;
const auto output_type = HasAttr(node, "output_type")
? GetDataTypeAttr(node, "output_type")
: DT_INT64;
CHECK(axis_data_type == DT_INT64 || axis_data_type == DT_INT32);
CHECK(output_type == DT_INT64 || output_type == DT_INT32);
auto* op = new Op;
op->output_data_type = ConvertDataType(output_type);
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertArgMaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ArgMax");
return ConvertArgMinMaxOperator<ArgMaxOperator>(node, tf_import_flags,
model_flags, model);
}
tensorflow::Status ConvertArgMinOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ArgMin");
return ConvertArgMinMaxOperator<ArgMinOperator>(node, tf_import_flags,
model_flags, model);
}
tensorflow::Status ConvertResizeBilinearOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ResizeBilinear");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new ResizeBilinearOperator;
op->align_corners = false;
op->half_pixel_centers = false;
if (HasAttr(node, "align_corners")) {
op->align_corners = GetBoolAttr(node, "align_corners");
}
if (HasAttr(node, "half_pixel_centers")) {
op->half_pixel_centers = GetBoolAttr(node, "half_pixel_centers");
}
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertResizeNearestNeighborOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ResizeNearestNeighbor");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new ResizeNearestNeighborOperator;
op->align_corners = false;
op->half_pixel_centers = false;
if (HasAttr(node, "align_corners")) {
op->align_corners = GetBoolAttr(node, "align_corners");
}
if (HasAttr(node, "half_pixel_centers")) {
op->half_pixel_centers = GetBoolAttr(node, "half_pixel_centers");
}
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertBatchNormWithGlobalNormalizationOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "BatchNormWithGlobalNormalization");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 5));
std::string multiplier = node.name() + "_mul";
if (GetBoolAttr(node, "scale_after_normalization")) {
std::string rsqrt = node.name() + "_rsqrt";
auto* rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs.push_back(node.input(2));
rsqrt_op->outputs.push_back(rsqrt);
model->operators.emplace_back(rsqrt_op);
auto* mul_op = new MulOperator;
mul_op->inputs.push_back(rsqrt);
mul_op->inputs.push_back(node.input(4));
mul_op->outputs.push_back(multiplier);
model->operators.emplace_back(mul_op);
} else {
auto* rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs.push_back(node.input(2));
rsqrt_op->outputs.push_back(multiplier);
model->operators.emplace_back(rsqrt_op);
}
auto* op = new BatchNormalizationOperator;
op->global_normalization = true;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(multiplier);
op->inputs.push_back(node.input(3));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
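// Decomposes FusedBatchNorm(V3): builds add(moving_variance, epsilon) ->
// rsqrt -> mul(gamma) to form the multiplier, then emits a
// BatchNormalizationOperator taking the input, moving mean, multiplier and
// beta.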
tensorflow::Status ConvertFusedBatchNormOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK((node.op() == "FusedBatchNorm") || (node.op() == "FusedBatchNormV3"));
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 5));
const std::string& gamma_input = node.input(1);
const std::string& beta_input = node.input(2);
const std::string& moving_mean_input = node.input(3);
const std::string& moving_variance_input = node.input(4);
const std::string epsilon_array_name =
CreateConstArray<ArrayDataType::kFloat>(model,
node.name() + "_epsilon_array",
{GetFloatAttr(node, "epsilon")});
const std::string epsilon_add_op_name = node.name() + "_epsilon";
auto* epsilon_add_op = new AddOperator;
epsilon_add_op->inputs.push_back(moving_variance_input);
epsilon_add_op->inputs.push_back(epsilon_array_name);
epsilon_add_op->outputs.push_back(epsilon_add_op_name);
model->operators.emplace_back(epsilon_add_op);
const std::string rsqrt_op_name = node.name() + "_rsqrt";
auto* rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs.push_back(epsilon_add_op_name);
rsqrt_op->outputs.push_back(rsqrt_op_name);
model->operators.emplace_back(rsqrt_op);
const std::string multiplier = node.name() + "_mul";
auto* mul_op = new MulOperator;
mul_op->inputs.push_back(rsqrt_op_name);
mul_op->inputs.push_back(gamma_input);
mul_op->outputs.push_back(multiplier);
model->operators.emplace_back(mul_op);
auto* op = new BatchNormalizationOperator;
op->global_normalization = true;
op->inputs.push_back(node.input(0));
op->inputs.push_back(moving_mean_input);
op->inputs.push_back(multiplier);
op->inputs.push_back(beta_input);
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSpaceToBatchNDOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SpaceToBatchND");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
CHECK_EQ(GetDataTypeAttr(node, "Tblock_shape"), DT_INT32);
CHECK_EQ(GetDataTypeAttr(node, "Tpaddings"), DT_INT32);
auto* op = new SpaceToBatchNDOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertBatchToSpaceNDOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "BatchToSpaceND");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
CHECK_EQ(GetDataTypeAttr(node, "Tblock_shape"), DT_INT32);
CHECK_EQ(GetDataTypeAttr(node, "Tcrops"), DT_INT32);
auto* op = new BatchToSpaceNDOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
template <typename T>
tensorflow::Status ConvertReduceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new T;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
if (HasAttr(node, "keepdims")) {
op->keep_dims = GetBoolAttr(node, "keepdims");
} else if (HasAttr(node, "keep_dims")) {
op->keep_dims = GetBoolAttr(node, "keep_dims");
}
return absl::OkStatus();
}
tensorflow::Status ConvertSvdfOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Svdf");
const int input_size = GetInputsCount(node, tf_import_flags);
QCHECK(input_size == 4 || input_size == 5)
<< "Svdf node expects 3 or 4 inputs other than control dependencies: "
<< node.DebugString();
bool has_bias = (input_size == 5);
auto* op = new SvdfOperator;
int index = 0;
op->inputs.push_back(node.input(index++));
op->inputs.push_back(node.input(index++));
op->inputs.push_back(node.input(index++));
if (has_bias) {
op->inputs.push_back(node.input(index++));
}
op->inputs.push_back(node.input(index));
op->outputs.push_back(node.name());
if (node.attr().at("ActivationFunction").s() == "Relu") {
op->fused_activation_function = FusedActivationFunctionType::kRelu;
} else {
op->fused_activation_function = FusedActivationFunctionType::kNone;
}
op->rank = node.attr().at("Rank").i();
model->operators.emplace_back(op);
return absl::OkStatus();
}
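// Converts Conv2DBackpropInput into TransposeConv. The weights are routed
// through a Transpose op with permutation {2, 0, 1, 3} so their layout
// matches what toco's TransposeConv expects.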
tensorflow::Status ConvertTransposeConvOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Conv2DBackpropInput");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
auto* op = new TransposeConvOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4)
<< "Can only import TransposeConv ops with 4D strides. TensorFlow op \""
<< node.name() << "\" has " << strides.i_size() << "D strides.";
CHECK((strides.i(0) == 1) && (strides.i(3) == 1))
<< "Can only import TransposeConv ops with striding along the height "
"(1st) or width (2nd) axis. TensorFlow op \""
<< node.name() << "\" had strides:[ " << strides.i(0) << ", "
<< strides.i(1) << ", " << strides.i(2) << ", " << strides.i(3) << "].";
op->stride_height = strides.i(1);
op->stride_width = strides.i(2);
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
CHECK_EQ(dilations.i_size(), 4)
<< "Dilation unsupported in TransposeConv. TensorFlow op \""
<< node.name() << "\" had dilations";
CHECK((dilations.i(0) == 1) && (dilations.i(1) == 1) &&
(dilations.i(2) == 1) && (dilations.i(3) == 1))
<< "Dilation unsupported in TransposeConv. TensorFlow op \""
<< node.name() << "\" had dilations:[ " << dilations.i(0) << ", "
<< dilations.i(1) << ", " << dilations.i(2) << ", " << dilations.i(3)
<< "].";
}
const std::string& weights_name = node.input(TransposeConvOperator::WEIGHTS);
const std::string& transposed_weights_name = weights_name + "_transposed";
const Operator* existing_transpose =
GetOpWithOutput(*model, transposed_weights_name);
if (existing_transpose) {
CHECK(existing_transpose->type == OperatorType::kTranspose);
} else {
TransposeOperator* transpose = new TransposeOperator;
std::string perm_array = CreateConstArray<ArrayDataType::kInt32>(
model, node.name() + "_transpose_perm", {2, 0, 1, 3});
transpose->inputs = {weights_name, perm_array};
transpose->outputs = {transposed_weights_name};
model->operators.emplace_back(transpose);
}
op->inputs[1] = transposed_weights_name;
auto const& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
op->padding.type = PaddingType::kSame;
} else if (padding == "VALID") {
op->padding.type = PaddingType::kValid;
} else {
LOG(FATAL) << "Only SAME and VALID padding supported on "
"Conv2DBackpropInput nodes.";
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertRangeOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Range");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
auto* op = new RangeOperator;
if (HasAttr(node, "Tidx")) {
const auto dtype = toco::GetDataTypeAttr(node, "Tidx");
CHECK(dtype == DT_UINT8 || dtype == DT_INT32 || dtype == DT_INT64 ||
dtype == DT_FLOAT);
op->dtype = ConvertDataType(dtype);
}
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertPackOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Pack");
auto op = std::make_unique<PackOperator>();
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_GE(num_inputs, 1)
<< node.op()
<< " node expects at least 1 input other than control dependencies: "
<< node.DebugString();
CHECK_EQ(num_inputs, GetIntAttr(node, "N"));
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->values_count = HasAttr(node, "N") ? GetIntAttr(node, "N") : num_inputs;
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : 0;
op->dtype = ConvertDataType(toco::GetDataTypeAttr(node, "T"));
op->outputs.push_back(node.name());
model->operators.emplace_back(std::move(op));
return absl::OkStatus();
}
tensorflow::Status ConvertUnpackOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Unpack");
auto op = std::make_unique<UnpackOperator>();
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_EQ(num_inputs, 1);
op->inputs.push_back(node.input(0));
op->num = GetIntAttr(node, "num");
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : 0;
op->dtype = ConvertDataType(toco::GetDataTypeAttr(node, "T"));
op->outputs.push_back(node.name());
for (int i = 1; i < op->num; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i));
}
model->operators.emplace_back(std::move(op));
return absl::OkStatus();
}
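// NextIteration nodes do not become operators; they are recorded as
// discardable RNN back edges in the model flags instead.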
tensorflow::Status ConvertOperatorSpecialCasedAsRNNBackEdge(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "NextIteration");
CHECK_EQ(node.input_size(), 1);
auto* rnn_state = model->flags.add_rnn_states();
rnn_state->set_discardable(true);
rnn_state->set_state_array(node.name());
rnn_state->set_back_edge_source_array(node.input(0));
rnn_state->set_size(1);
return absl::OkStatus();
}
tensorflow::Status ConvertShapeOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Shape");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto out_type =
HasAttr(node, "out_type") ? GetDataTypeAttr(node, "out_type") : DT_INT32;
CHECK(out_type == DT_INT64 || out_type == DT_INT32);
auto op = std::make_unique<TensorFlowShapeOperator>();
op->output_data_type = ConvertDataType(out_type);
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.push_back(std::move(op));
return absl::OkStatus();
}
tensorflow::Status ConvertReverseSequenceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ReverseSequence");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto op = std::make_unique<ReverseSequenceOperator>();
if (HasAttr(node, "seq_dim")) {
op->seq_dim = GetIntAttr(node, "seq_dim");
}
op->batch_dim =
HasAttr(node, "batch_dim") ? GetIntAttr(node, "batch_dim") : 0;
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
model->operators.push_back(std::move(op));
return absl::OkStatus();
}
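// Strips the '^' control-dependency prefix from every operator input and
// output name, then checks that no array name still begins with '^'.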
void StripCaretFromArrayNames(Model* model) {
for (auto& op : model->operators) {
for (auto& input : op->inputs) {
input = std::string(absl::StripPrefix(input, "^"));
}
for (auto& output : op->outputs) {
output = std::string(absl::StripPrefix(output, "^"));
}
}
for (auto& array : model->GetArrayMap()) {
if (absl::StartsWith(array.first, "^")) {
LOG(FATAL) << "What?";
}
}
}
void StripZeroOutputIndexFromInputs(NodeDef* node) {
for (auto& input : *node->mutable_input()) {
input = std::string(absl::StripSuffix(input, ":0"));
}
}
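// Some arrays are consumed as "name:index" outputs that the producing
// operator never declared (only its first output was recorded). This pass
// appends the missing ":index" outputs to the producer's output list.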
void AddExtraOutputs(Model* model) {
std::vector<std::string> consumed_arrays;
for (const auto& consumer_op : model->operators) {
for (const std::string& input : consumer_op->inputs) {
consumed_arrays.push_back(input);
}
}
for (const std::string& output_array : model->flags.output_arrays()) {
consumed_arrays.push_back(output_array);
}
for (const auto& rnn_state : model->flags.rnn_states()) {
consumed_arrays.push_back(rnn_state.back_edge_source_array());
}
for (const std::string& consumed_array : consumed_arrays) {
if (GetOpWithOutput(*model, consumed_array)) {
continue;
}
const std::vector<std::string>& split = absl::StrSplit(consumed_array, ':');
if (split.size() != 2) {
continue;
}
int output_index = 0;
if (!absl::SimpleAtoi(split[1], &output_index)) {
continue;
}
auto* producer_op = GetOpWithOutput(*model, split[0]);
if (!producer_op) {
continue;
}
while (producer_op->outputs.size() <= output_index) {
using toco::port::StringF;
producer_op->outputs.push_back(
StringF("%s:%d", split[0], producer_op->outputs.size()));
}
}
}
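// Inlines all functions from the GraphDef's function library into the main
// graph, clearing any no-inline attributes first and using a temporary
// CPU-only function library runtime. Returns true if the graph was modified.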
bool InlineAllFunctions(GraphDef* graphdef) {
if (graphdef->library().function().empty()) {
VLOG(kLogLevelModelUnchanged) << "No functions to inline.";
return false;
}
GraphDef graphdef_copy(*graphdef);
for (auto& function :
(*graphdef_copy.mutable_library()->mutable_function())) {
auto* attributes = function.mutable_attr();
if (attributes->count(tensorflow::kNoInlineAttr) != 0) {
(*attributes)[tensorflow::kNoInlineAttr].set_b(false);
}
}
tensorflow::SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 1});
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_CHECK_OK(tensorflow::DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
tensorflow::FunctionLibraryDefinition fld(tensorflow::OpRegistry::Global(),
graphdef_copy.library());
tensorflow::StaticDeviceMgr device_mgr(std::move(devices));
tensorflow::ProcessFunctionLibraryRuntime pflr(
&device_mgr, tensorflow::Env::Default(), &options.config,
TF_GRAPH_DEF_VERSION, &fld,
options.config.graph_options().optimizer_options(), nullptr);
tensorflow::FunctionLibraryRuntime* flr;
flr = pflr.GetFLR("/job:localhost/replica:0/task:0/cpu:0");
tensorflow::Graph graph(fld);
tensorflow::ImportGraphDefOptions gc_opts;
gc_opts.validate_shape = false;
const auto& tf_convert_status = tensorflow::ImportGraphDef(
gc_opts, graphdef_copy, &graph, nullptr, nullptr);
if (!tf_convert_status.ok()) {
LOG(ERROR) << "tensorflow::ImportGraphDef failed with status: "
<< tf_convert_status.ToString();
return false;
}
bool graph_modified = false;
while (tensorflow::ExpandInlineFunctions(flr, &graph)) {
graph_modified = true;
}
if (graph_modified) {
LOG(INFO) << "Found and inlined TensorFlow functions.";
graph.ToGraphDef(graphdef);
}
return graph_modified;
}
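// Handles both TopK and TopKV2. The legacy TopK form carries "k" as an
// attribute, which is materialized here as a constant int32 array input.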
tensorflow::Status ConvertTopKV2Operator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK((node.op() == "TopK") || (node.op() == "TopKV2"));
auto op = std::make_unique<TopKV2Operator>();
op->inputs.push_back(node.input(0));
if (HasAttr(node, "k")) {
std::string k_array = CreateConstArray<ArrayDataType::kInt32>(
model, node.name() + "k", {static_cast<int32>(GetIntAttr(node, "k"))});
op->inputs.push_back(k_array);
} else {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
op->inputs.push_back(node.input(1));
}
op->outputs.push_back(node.name());
op->outputs.push_back(node.name() + ":1");
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
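// DynamicPartition emits one output per partition; outputs beyond the first
// are named "node_name:i".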
tensorflow::Status ConvertDynamicPartitionOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
auto op = std::make_unique<DynamicPartitionOperator>();
CHECK(HasAttr(node, "num_partitions"));
op->num_partitions = GetIntAttr(node, "num_partitions");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
CHECK_GT(op->num_partitions, 1);
op->outputs.push_back(node.name());
for (int i = 1; i < op->num_partitions; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i));
}
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
tensorflow::Status ConvertDynamicStitchOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "DynamicStitch" || node.op() == "ParallelDynamicStitch");
auto op = std::make_unique<DynamicStitchOperator>();
CHECK(HasAttr(node, "N"));
op->num_partitions = GetIntAttr(node, "N");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, op->num_partitions * 2));
for (int i = 0; i < op->num_partitions * 2; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
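// SparseToDense takes exactly four inputs; "validate_indices" defaults to
// true when the attribute is absent.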
tensorflow::Status ConvertSparseToDenseOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SparseToDense");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 4));
auto* op = new SparseToDenseOperator;
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name());
op->validate_indices = HasAttr(node, "validate_indices")
? GetBoolAttr(node, "validate_indices")
: true;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertOneHotOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "OneHot");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 4));
const auto dtype = GetDataTypeAttr(node, "T");
CHECK(dtype == DT_INT32 || dtype == DT_INT64 || dtype == DT_FLOAT ||
dtype == DT_BOOL);
auto op = std::make_unique<OneHotOperator>();
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : -1;
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
tensorflow::Status ConvertCTCBeamSearchDecoderOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "CTCBeamSearchDecoder");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new CTCBeamSearchDecoderOperator;
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->beam_width =
HasAttr(node, "beam_width") ? GetIntAttr(node, "beam_width") : 1;
op->top_paths =
HasAttr(node, "top_paths") ? GetIntAttr(node, "top_paths") : 1;
op->merge_repeated = HasAttr(node, "merge_repeated")
? GetBoolAttr(node, "merge_repeated")
: true;
op->outputs.push_back(node.name());
for (int i = 0; i < op->top_paths; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i + 1));
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
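// The fused UnidirectionalSequenceLstm op always has 20 input slots. The
// "_tflite_input_indices" attribute tells which slots the node actually
// provides; the remaining slots are filled with newly created optional
// (empty) arrays.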
tensorflow::Status ConvertUnidirectionalSequenceLstm(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
DCHECK_EQ(node.op(), "UnidirectionalSequenceLstm");
const auto& indices = GetListAttr(node, "_tflite_input_indices");
auto* op = new UnidirectionalSequenceLstmOperator();
const int kInputsSize = 20;
op->inputs.resize(kInputsSize);
if (indices.i_size() != node.input().size()) {
int count = 0;
for (int idx = 0; idx < kInputsSize; ++idx) {
if (count < indices.i_size() && indices.i(count) == idx) {
op->inputs[idx] = node.input(idx);
count++;
} else {
std::string optional_name = node.name() + "_" + std::to_string(idx);
model->CreateOptionalArray(optional_name);
op->inputs[idx] = optional_name;
}
}
} else {
std::vector<bool> done(kInputsSize);
int idx = 0;
for (const std::string& input : node.input()) {
int real_index = indices.i(idx);
op->inputs[real_index] = (input);
done[real_index] = true;
idx++;
}
for (int idx = 0; idx < done.size(); idx++) {
if (!done[idx]) {
std::string optional_name = node.name() + "_" + std::to_string(idx);
model->CreateOptionalArray(optional_name);
op->inputs[idx] = optional_name;
}
}
}
op->outputs.push_back(node.name() + ":2");
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertLeakyReluOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "LeakyRelu");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
const auto& input_name = node.input(0);
auto* op = new LeakyReluOperator;
op->inputs.push_back(input_name);
op->outputs.push_back(node.name());
op->alpha = GetFloatAttr(node, "alpha");
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertUnidirectionalSequenceRnn(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
DCHECK_EQ(node.op(), "UnidirectionalSequenceRnn");
const auto& indices = GetListAttr(node, "_tflite_input_indices");
if (indices.i_size() != node.input().size()) {
return tensorflow::errors::InvalidArgument("Input size does not match.");
}
auto* op = new UnidirectionalSequenceRnnOperator();
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name() + ":1");
model->operators.emplace_back(op);
return absl::OkStatus();
}
}
namespace internal {
using ConverterType = tensorflow::Status (*)(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model);
using ConverterMapType = std::unordered_map<std::string, ConverterType>;
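// Minimal converter map used when all ops are imported as unsupported (Flex
// conversion): only placeholders and, conditionally, constants are converted
// natively.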
ConverterMapType GetTensorFlowNodeConverterMapForFlex() {
return std::unordered_map<std::string, ConverterType>({
{"LegacyFedInput", ConvertPlaceholderOperator},
{"Placeholder", ConvertPlaceholderOperator},
{"Const", ConditionallyConvertConstOperator},
});
}
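// Maps TF op names to converter functions. Ops missing from this table are
// imported as TensorFlowUnsupported operators (see ImportTensorFlowNode).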
ConverterMapType GetTensorFlowNodeConverterMap() {
return std::unordered_map<std::string, ConverterType>({
{"Abs", ConvertSimpleOperator<AbsOperator, kAnyNumInputs, 1>},
{"Add", ConvertSimpleOperator<AddOperator, 2, 1>},
{"AddV2", ConvertSimpleOperator<AddOperator, 2, 1>},
{"AddN", ConvertSimpleOperator<AddNOperator, kAnyNumInputs, 1>},
{"All", ConvertSimpleOperator<TensorFlowAllOperator, kAnyNumInputs, 1>},
{"Any", ConvertReduceOperator<TensorFlowAnyOperator>},
{"ArgMax", ConvertArgMaxOperator},
{"ArgMin", ConvertArgMinOperator},
{"Assert",
ConvertSimpleOperator<TensorFlowAssertOperator, kAnyNumInputs, 1>},
{"AvgPool", ConvertAvgPoolOperator},
{"BatchMatMul", ConvertBatchMatMulOperator},
{"BatchMatMulV2", ConvertBatchMatMulOperator},
{"BatchNormWithGlobalNormalization",
ConvertBatchNormWithGlobalNormalizationOperator},
{"BatchToSpaceND", ConvertBatchToSpaceNDOperator},
{"BiasAdd", ConvertBiasAddOperator},
{"Cast", ConvertCastOperator},
{"Ceil", ConvertCeilOperator},
{"CheckNumerics", ConvertIdentityOperator},
{"Concat", ConvertConcatOperator},
{"ConcatV2", ConvertConcatOperator},
{"Const", ConvertConstOperator},
{"Conv2D", ConvertConvOperator},
{"Conv2DBackpropInput", ConvertTransposeConvOperator},
{"Cos", ConvertSimpleOperator<CosOperator, 1, 1>},
{"CTCBeamSearchDecoder", ConvertCTCBeamSearchDecoderOperator},
{"DepthToSpace", ConvertDepthToSpaceOperator},
{"DepthwiseConv2dNative", ConvertDepthwiseConvOperator},
{"Div", ConvertSimpleOperator<DivOperator, 2, 1>},
{"DynamicPartition", ConvertDynamicPartitionOperator},
{"DynamicStitch", ConvertDynamicStitchOperator},
{"Elu", ConvertSimpleOperator<EluOperator, 1, 1>},
{"EnsureShape", ConvertIdentityOperator},
{"Equal", ConvertSimpleOperator<TensorFlowEqualOperator, 2, 1>},
{"Exp", ConvertSimpleOperator<ExpOperator, 1, 1>},
{"ExpandDims", ConvertSimpleOperator<ExpandDimsOperator, 2, 1>},
{"FakeQuantWithMinMaxArgs", ConvertFakeQuantWithMinMaxArgs},
{"FakeQuantWithMinMaxVars", ConvertFakeQuantWithMinMaxVars},
{"Fill", ConvertSimpleOperator<FillOperator, 2, 1>},
{"Floor", ConvertFloorOperator},
{"FloorDiv", ConvertSimpleOperator<FloorDivOperator, 2, 1>},
{"FloorMod", ConvertSimpleOperator<FloorModOperator, 2, 1>},
{"FusedBatchNorm", ConvertFusedBatchNormOperator},
{"FusedBatchNormV3", ConvertFusedBatchNormOperator},
{"Gather", ConvertGatherOperator},
{"GatherV2", ConvertGatherOperator},
{"GatherNd", ConvertGatherNdOperator},
{"Greater", ConvertSimpleOperator<TensorFlowGreaterOperator, 2, 1>},
{"GreaterEqual",
ConvertSimpleOperator<TensorFlowGreaterEqualOperator, 2, 1>},
{"Identity", ConvertIdentityOperator},
{"IdentityN", ConvertIdentityNOperator},
{"LRN", ConvertLRNOperator},
{"LeakyRelu", ConvertLeakyReluOperator},
{"LegacyFedInput", ConvertPlaceholderOperator},
{"Less", ConvertSimpleOperator<TensorFlowLessOperator, 2, 1>},
{"LessEqual", ConvertSimpleOperator<TensorFlowLessEqualOperator, 2, 1>},
{"Log", ConvertSimpleOperator<LogOperator, 1, 1>},
{"LogicalAnd", ConvertSimpleOperator<LogicalAndOperator, 2, 1>},
{"LogicalOr", ConvertSimpleOperator<LogicalOrOperator, 2, 1>},
{"LogicalNot", ConvertSimpleOperator<LogicalNotOperator, 1, 1>},
{"LogSoftmax", ConvertSimpleOperator<LogSoftmaxOperator, 1, 1>},
{"MatMul", ConvertMatMulOperator},
{"MatrixDiag", ConvertSimpleOperator<MatrixDiagOperator, 1, 1>},
{"MatrixDiagV2", ConvertSimpleOperator<MatrixDiagV2Operator, 5, 1>},
{"MatrixDiagV3", ConvertSimpleOperator<MatrixDiagV3Operator, 5, 1>},
{"MatrixSetDiag", ConvertSimpleOperator<MatrixSetDiagOperator, 2, 1>},
{"MatrixSetDiagV2", ConvertSimpleOperator<MatrixSetDiagV2Operator, 3, 1>},
{"MatrixSetDiagV3", ConvertSimpleOperator<MatrixSetDiagV3Operator, 3, 1>},
{"Max", ConvertReduceOperator<TensorFlowMaxOperator>},
{"MaxPool", ConvertMaxPoolOperator},
{"Maximum", ConvertSimpleOperator<TensorFlowMaximumOperator, 2, 1>},
{"Mean", ConvertReduceOperator<MeanOperator>},
{"Merge",
ConvertSimpleOperator<TensorFlowMergeOperator, kAnyNumInputs, 1>},
{"Min", ConvertReduceOperator<TensorFlowMinOperator>},
{"Minimum", ConvertSimpleOperator<TensorFlowMinimumOperator, 2, 1>},
{"Mul", ConvertSimpleOperator<MulOperator, 2, 1>},
{"Neg", ConvertSimpleOperator<NegOperator, 1, 1>},
{"NextIteration", ConvertOperatorSpecialCasedAsRNNBackEdge},
{"NoOp", ConvertNoOpOperator},
{"NotEqual", ConvertSimpleOperator<TensorFlowNotEqualOperator, 2, 1>},
{"OneHot", ConvertOneHotOperator},
{"Pack", ConvertPackOperator},
{"Pad", ConvertSimpleOperator<PadOperator, 2, 1>},
{"PadV2", ConvertSimpleOperator<PadV2Operator, 3, 1>},
{"ParallelDynamicStitch", ConvertDynamicStitchOperator},
{"Placeholder", ConvertPlaceholderOperator},
{"PlaceholderWithDefault", ConvertIdentityOperator},
{"Pow", ConvertSimpleOperator<PowOperator, 2, 1>},
{"Prod", ConvertReduceOperator<TensorFlowProdOperator>},
{"RandomUniform", ConvertRandomUniform},
{"Range", ConvertRangeOperator},
{"Rank", ConvertSimpleOperator<TensorFlowRankOperator, 1, 1>},
{"RealDiv", ConvertSimpleOperator<DivOperator, 2, 1>},
{"Relu", ConvertSimpleOperator<ReluOperator, 1, 1>},
{"Relu6", ConvertSimpleOperator<Relu6Operator, 1, 1>},
{"Reshape", ConvertSimpleOperator<TensorFlowReshapeOperator, 2, 1>},
{"ResizeBilinear", ConvertResizeBilinearOperator},
{"ResizeNearestNeighbor", ConvertResizeNearestNeighborOperator},
{"ReverseSequence", ConvertReverseSequenceOperator},
{"ReverseV2", ConvertSimpleOperator<ReverseV2Operator, 2, 1>},
{"Round", ConvertRoundOperator},
{"Rsqrt", ConvertSimpleOperator<TensorFlowRsqrtOperator, 1, 1>},
{"ScatterNd", ConvertSimpleOperator<ScatterNdOperator, 3, 1>},
{"SegmentSum", ConvertSimpleOperator<SegmentSumOperator, 2, 1>},
{"Select", ConvertSimpleOperator<SelectOperator, 3, 1>},
{"SelectV2", ConvertSimpleOperator<SelectOperator, 3, 1>},
{"Shape", ConvertShapeOperator},
{"Sigmoid", ConvertSimpleOperator<LogisticOperator, 1, 1>},
{"Sin", ConvertSimpleOperator<SinOperator, 1, 1>},
{"Slice", ConvertSimpleOperator<SliceOperator, 3, 1>},
{"Softmax", ConvertSoftmaxOperator},
{"SpaceToBatchND", ConvertSpaceToBatchNDOperator},
{"SpaceToDepth", ConvertSpaceToDepthOperator},
{"SparseToDense", ConvertSparseToDenseOperator},
{"Split", ConvertSplitOperator},
{"SplitV", ConvertSplitVOperator},
{"Sqrt", ConvertSimpleOperator<TensorFlowSqrtOperator, 1, 1>},
{"Square", ConvertSimpleOperator<TensorFlowSquareOperator, 1, 1>},
{"SquaredDifference",
ConvertSimpleOperator<SquaredDifferenceOperator, 2, 1>},
{"Snapshot", ConvertIdentityOperator},
{"Squeeze", ConvertSqueezeOperator},
{"StopGradient", ConvertIdentityOperator},
{"StridedSlice", ConvertStridedSliceOperator},
{"Sub", ConvertSimpleOperator<SubOperator, 2, 1>},
{"Sum", ConvertReduceOperator<TensorFlowSumOperator>},
{"Svdf", ConvertSvdfOperator},
{"Switch", ConvertSwitchOperator},
{"Tanh", ConvertSimpleOperator<TanhOperator, 1, 1>},
{"Tile", ConvertSimpleOperator<TensorFlowTileOperator, 2, 1>},
{"TopK", ConvertTopKV2Operator},
{"TopKV2", ConvertTopKV2Operator},
{"Transpose", ConvertSimpleOperator<TransposeOperator, 2, 1>},
{"Unpack", ConvertUnpackOperator},
{"ZerosLike", ConvertSimpleOperator<TensorFlowZerosLikeOperator, 1, 1>},
{"UnidirectionalSequenceLstm", ConvertUnidirectionalSequenceLstm},
{"UnidirectionalSequenceRnn", ConvertUnidirectionalSequenceRnn},
{"MirrorPad", ConvertMirrorPadOperator},
{"Unique", ConvertSimpleOperator<UniqueOperator, 1, 2>},
{"Where", ConvertSimpleOperator<WhereOperator, 1, 1>},
});
}
tensorflow::Status ImportTensorFlowNode(
const tensorflow::NodeDef& node,
const TensorFlowImportFlags& tf_import_flags, const ModelFlags& model_flags,
Model* model, const ConverterMapType& converter_map) {
auto converter = converter_map.find(node.op());
if (converter == converter_map.end()) {
return ConvertUnsupportedOperator(node, tf_import_flags, model_flags,
model);
} else {
return converter->second(node, tf_import_flags, model_flags, model);
}
}
}
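// Top-level GraphDef import: inline functions, convert each node via the
// selected converter map, then run the fix-up passes (strip carets, add extra
// outputs, fix missing/orphaned arrays, fix operator ordering) and check
// model invariants.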
std::unique_ptr<Model> ImportTensorFlowGraphDef(
const ModelFlags& model_flags, const TensorFlowImportFlags& tf_import_flags,
const GraphDef& tf_graph) {
LogDumpGraphDef(kLogLevelModelChanged, "AT IMPORT", tf_graph);
GraphDef inlined_graph(tf_graph);
if (InlineAllFunctions(&inlined_graph)) {
LogDumpGraphDef(kLogLevelModelChanged, "AFTER INLINING", inlined_graph);
}
for (const auto& specified_input_array : model_flags.input_arrays()) {
CHECK(!absl::EndsWith(specified_input_array.name(), ":0"))
<< "Unsupported explicit zero output index: "
<< specified_input_array.name();
}
for (const std::string& specified_output_array :
model_flags.output_arrays()) {
CHECK(!absl::EndsWith(specified_output_array, ":0"))
<< "Unsupported explicit zero output index: " << specified_output_array;
}
Model* model = new Model;
internal::ConverterMapType converter_map;
if (!tf_import_flags.import_all_ops_as_unsupported) {
converter_map = internal::GetTensorFlowNodeConverterMap();
} else {
converter_map = internal::GetTensorFlowNodeConverterMapForFlex();
}
for (auto node : inlined_graph.node()) {
StripZeroOutputIndexFromInputs(&node);
auto status = internal::ImportTensorFlowNode(
node, tf_import_flags, model_flags, model, converter_map);
CHECK(status.ok()) << status.message();
}
ResolveModelFlags(model_flags, model);
StripCaretFromArrayNames(model);
AddExtraOutputs(model);
FixNoMissingArray(model);
FixNoOrphanedArray(model);
FixOperatorOrdering(model);
CheckInvariants(*model);
for (const auto& rnn_state : model->flags.rnn_states()) {
model->GetArray(rnn_state.state_array()).buffer = nullptr;
}
return std::unique_ptr<Model>(model);
}
std::unique_ptr<Model> ImportTensorFlowGraphDef(
const ModelFlags& model_flags, const TensorFlowImportFlags& tf_import_flags,
const std::string& input_file_contents) {
std::unique_ptr<GraphDef> tf_graph(new GraphDef);
CHECK(ParseFromStringEitherTextOrBinary(input_file_contents, tf_graph.get()));
std::unique_ptr<GraphDef> pruned_graph =
MaybeReplaceCompositeSubgraph(*tf_graph);
if (pruned_graph) {
tf_graph = std::move(pruned_graph);
}
return ImportTensorFlowGraphDef(model_flags, tf_import_flags, *tf_graph);
}
} | #include "tensorflow/lite/toco/import_tensorflow.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
using tensorflow::AttrValue;
using tensorflow::DT_BOOL;
using tensorflow::DT_COMPLEX64;
using tensorflow::DT_FLOAT;
using tensorflow::DT_INT32;
using tensorflow::DT_INT64;
using tensorflow::DT_INVALID;
using tensorflow::DT_QUINT8;
using tensorflow::DT_STRING;
using tensorflow::DT_UINT16;
using tensorflow::DT_UINT32;
using tensorflow::NodeDef;
using tensorflow::Status;
using ::testing::ElementsAre;
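// Re-declare the internal conversion entry points so the tests can drive
// node import directly with a custom converter map.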
namespace internal {
using ConverterType = tensorflow::Status (*)(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model);
using ConverterMapType = std::unordered_map<std::string, ConverterType>;
ConverterMapType GetTensorFlowNodeConverterMap();
ConverterMapType GetTensorFlowNodeConverterMapForFlex();
Status ImportTensorFlowNode(const NodeDef&, const TensorFlowImportFlags&,
const ModelFlags& model_flags, Model*,
const ConverterMapType&);
}
namespace {
Status ImportNode(const NodeDef& node, Model* model) {
const auto converter = internal::GetTensorFlowNodeConverterMap();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), model, converter);
}
Status ImportFlexNode(const NodeDef& node, Model* model) {
const auto converter = internal::ConverterMapType();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), model, converter);
}
Status ImportNode(const NodeDef& node) {
Model model;
return ImportNode(node, &model);
}
NodeDef BuildNode(
const std::string& op,
const std::vector<std::initializer_list<int>>& output_shapes) {
NodeDef node;
node.set_op(op);
node.set_name("Node1");
node.add_input();
node.set_input(0, "Node0");
AttrValue::ListValue* shapes =
(*node.mutable_attr())["_output_shapes"].mutable_list();
for (const auto& output_shape : output_shapes) {
tensorflow::TensorShapeProto* shape = shapes->add_shape();
for (int64_t output_shape_dim : output_shape) {
auto shape_dim = shape->add_dim();
shape_dim->set_size(output_shape_dim);
}
}
return node;
}
namespace {
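// Builds a "Const" NodeDef with the given shape and dtype, filling its value
// tensor with num_elements deterministic values (strings are left empty).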
void BuildConstNode(std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements,
NodeDef* node) {
node->set_op("Const");
node->set_name("Node1");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["dtype"] = dtype_attr;
tensorflow::TensorProto t;
t.set_dtype(dtype);
auto* s = t.mutable_tensor_shape();
for (auto d : shape) {
s->add_dim()->set_size(d);
}
switch (dtype) {
case DT_FLOAT:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_float_val(i / 10000.0 + 1);
}
break;
case DT_INT32:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<int>::max() + 1);
}
break;
case DT_UINT32:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint32_t>::max() + 1);
}
break;
case DT_QUINT8:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint8_t>::max() + 1);
}
break;
case DT_INT64:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int64_val(i + 1);
}
break;
case DT_UINT16:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint16_t>::max() + 1);
}
break;
case DT_STRING:
break;
case DT_BOOL:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_bool_val((i % 2) == 0);
}
break;
case DT_COMPLEX64:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_scomplex_val(i / 10000.0 + 1);
t.add_scomplex_val(-i / 10000.0 - 1);
}
break;
default:
break;
}
AttrValue value_attr;
SetAttrValue(t, &value_attr);
(*node->mutable_attr())["value"] = value_attr;
}
}
TEST(FlexImportTest, ConditionalConst) {
Model model;
auto build_and_import_node =
[&model](const std::string& name, std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements) {
NodeDef node;
BuildConstNode(shape, dtype, num_elements, &node);
node.set_name(name);
const auto converter = internal::GetTensorFlowNodeConverterMapForFlex();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), &model, converter);
};
EXPECT_TRUE(build_and_import_node("Known", {1, 2, 3}, DT_INT32, 6).ok());
EXPECT_TRUE(build_and_import_node("BadType", {1, 2, 3}, DT_INVALID, 6).ok());
EXPECT_TRUE(build_and_import_node("Unknown", {1, -2, 3}, DT_INT32, 6).ok());
EXPECT_EQ(model.operators.size(), 2);
EXPECT_TRUE(model.HasArray("Known"));
EXPECT_FALSE(model.HasArray("Unknown"));
EXPECT_FALSE(model.HasArray("BadType"));
}
TEST(FlexImportTest, SoftmaxWithBeta) {
NodeDef node;
node.set_op("Softmax");
node.set_name("softmax");
node.add_input();
node.set_input(0, "logits");
AttrValue dtype_attr;
SetAttrValue(0.5, &dtype_attr);
(*node.mutable_attr())["_softmax_beta"] = dtype_attr;
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax);
const SoftmaxOperator* op =
static_cast<const SoftmaxOperator*>(model.operators[0].get());
EXPECT_EQ(op->beta, 0.5);
}
TEST(FlexImportTest, SoftmaxWithoutBeta) {
NodeDef node;
node.set_op("Softmax");
node.set_name("softmax");
node.add_input();
node.set_input(0, "logits");
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax);
const SoftmaxOperator* op =
static_cast<const SoftmaxOperator*>(model.operators[0].get());
EXPECT_EQ(op->beta, 1.0);
}
class ShapeImportTest : public ::testing::TestWithParam<tensorflow::DataType> {
};
TEST_P(ShapeImportTest, ShapeElementIsNegative) {
NodeDef node;
BuildConstNode({1, -2, 10}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(
status.message(),
"Tensor shape should not include negative values\n\t (while processing "
"node 'Node1')");
}
TEST_P(ShapeImportTest, ShapeElementIsZero) {
NodeDef node;
BuildConstNode({1, 0, 10}, GetParam(), 0, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
const auto& array = model.GetArray("Node1");
EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre());
}
TEST_P(ShapeImportTest, ShapeIsOneDimZero) {
NodeDef node;
BuildConstNode({0}, GetParam(), 0, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
const auto& array = model.GetArray("Node1");
EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre());
}
TEST_P(ShapeImportTest, ShapeElementTooLarge) {
NodeDef node;
BuildConstNode({3000000000}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(status.message(),
"Shape element overflows\n\t (while processing node 'Node1')");
}
TEST_P(ShapeImportTest, ShapeTooLarge) {
NodeDef node;
BuildConstNode({1000000, 2000000, 2000000, 2000000}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(status.message(),
"Tensor shape is too large\n\t (while processing node 'Node1')");
}
std::vector<tensorflow::DataType> TestTypes() {
return {DT_FLOAT, DT_INT32, DT_INT64, DT_BOOL, DT_QUINT8, DT_COMPLEX64};
}
INSTANTIATE_TEST_SUITE_P(ShapeImportTest, ShapeImportTest,
::testing::ValuesIn(TestTypes()));
class ContentImportTest : public ::testing::Test {
public:
template <ArrayDataType T>
std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) {
Model model;
auto status = ImportNode(node, &model);
CHECK(status.ok()) << status.message();
const auto& array = model.GetArray("Node1");
return array.GetBuffer<T>().data;
}
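// Drops `num` trailing values from the node's value tensor to exercise the
// importer's handling of under-populated tensors: the last remaining value
// fills the missing trailing elements, and a fully emptied tensor imports as
// zeros. Complex64 stores two floats per element, hence the two scomplex
// removals per iteration.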
void RemoveTrailingElements(NodeDef* node, int num) {
tensorflow::TensorProto* p =
node->mutable_attr()->at("value").mutable_tensor();
for (int i = 0; i < num; ++i) {
if (p->int_val_size() > 0) p->mutable_int_val()->RemoveLast();
if (p->int64_val_size() > 0) p->mutable_int64_val()->RemoveLast();
if (p->float_val_size() > 0) p->mutable_float_val()->RemoveLast();
if (p->bool_val_size() > 0) p->mutable_bool_val()->RemoveLast();
if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast();
if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast();
}
}
};
TEST_F(ContentImportTest, Int32) {
constexpr ArrayDataType kType = ArrayDataType::kInt32;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_INT32, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Int64) {
constexpr ArrayDataType kType = ArrayDataType::kInt64;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_INT64, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Quint8) {
constexpr ArrayDataType kType = ArrayDataType::kUint8;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_QUINT8, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Bool) {
constexpr ArrayDataType kType = ArrayDataType::kBool;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_BOOL, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 1));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Float) {
constexpr ArrayDataType kType = ArrayDataType::kFloat;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_FLOAT, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0004));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000));
}
TEST_F(ContentImportTest, Complex64) {
constexpr ArrayDataType kType = ArrayDataType::kComplex64;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_COMPLEX64, 6, &node);
using cplx = std::complex<float>;
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001),
cplx(1.0002, -1.0002), cplx(1.0003, -1.0003),
cplx(1.0004, -1.0004), cplx(1.0005, -1.0005)));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001),
cplx(1.0002, -1.0002), cplx(1.0003, -1.0003),
cplx(1.0004, -1.0004), cplx(1.0004, -1.0004)));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0000, -1.0000),
cplx(1.0000, -1.0000), cplx(1.0000, -1.0000),
cplx(1.0000, -1.0000), cplx(1.0000, -1.0000)));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(0.0000, 0.0000), cplx(0.0000, 0.0000),
cplx(0.0000, 0.0000), cplx(0.0000, 0.0000),
cplx(0.0000, 0.0000), cplx(0.0000, 0.0000)));
}
std::vector<std::pair<tensorflow::DataType, ArrayDataType>> UnaryTestTypes() {
return {{DT_FLOAT, ArrayDataType::kFloat},
{DT_INT32, ArrayDataType::kInt32},
{DT_INT64, ArrayDataType::kInt64}};
}
class TensorContentTest : public ::testing::Test {
public:
template <ArrayDataType T>
std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) {
Model model;
auto status = ImportNode(node, &model);
CHECK(status.ok()) << status.message();
const auto& nodearray = model.GetArray("Node1");
return nodearray.GetBuffer<T>().data;
}
template <class T>
void NodeWithTensorContent(std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements,
NodeDef* node) {
node->set_op("Const");
node->set_name("Node1");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["dtype"] = dtype_attr;
auto allocated_content = std::make_unique<T[]>(num_elements);
tensorflow::TensorProto t;
t.set_dtype(dtype);
auto* s = t.mutable_tensor_shape();
for (const auto& d : shape) {
s->add_dim()->set_size(d);
}
switch (dtype) {
case DT_FLOAT:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i / 10000.0 + 1;
}
break;
case DT_INT32:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i % std::numeric_limits<int>::max() + 1;
}
break;
case DT_QUINT8:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i % std::numeric_limits<uint8_t>::max() + 1;
}
break;
case DT_INT64:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i + 1;
}
break;
case DT_STRING:
break;
case DT_BOOL:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = ((i % 2) == 0);
}
break;
default:
break;
}
t.set_tensor_content(
std::string(reinterpret_cast<const char*>(allocated_content.get()),
num_elements * sizeof(T)));
AttrValue value_attr;
SetAttrValue(t, &value_attr);
(*node->mutable_attr())["value"] = value_attr;
allocated_content.reset();
}
};
TEST_F(TensorContentTest, Int64) {
constexpr ArrayDataType kType = ArrayDataType::kInt64;
NodeDef node;
NodeWithTensorContent<int64_t>({1, 2, 3}, DT_INT64, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Int32) {
constexpr ArrayDataType kType = ArrayDataType::kInt32;
NodeDef node;
NodeWithTensorContent<int>({1, 2, 3}, DT_INT32, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Float) {
constexpr ArrayDataType kType = ArrayDataType::kFloat;
NodeDef node;
NodeWithTensorContent<float>({1, 2, 3}, DT_FLOAT, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
}
TEST_F(TensorContentTest, Quint8) {
constexpr ArrayDataType kType = ArrayDataType::kUint8;
NodeDef node;
NodeWithTensorContent<uint8_t>({1, 2, 3}, DT_QUINT8, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Bool) {
constexpr ArrayDataType kType = ArrayDataType::kBool;
NodeDef node;
NodeWithTensorContent<bool>({1, 2, 3}, DT_BOOL, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0));
}
class TypeImportTest : public ::testing::TestWithParam<
std::pair<tensorflow::DataType, ArrayDataType>> {
protected:
TypeImportTest() {}
void BuildUnaryNode(const std::string& op_name, tensorflow::DataType dtype,
NodeDef* node) {
node->set_op(op_name);
node->set_name("Node1");
node->add_input();
node->set_input(0, "Node0");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["T"] = dtype_attr;
}
};
TEST_P(TypeImportTest, BasicTypeInference) {
NodeDef node;
BuildUnaryNode("Atan", GetParam().first, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_THAT(op->output_data_types, ::testing::ElementsAre(GetParam().second));
}
INSTANTIATE_TEST_SUITE_P(BasicTypeInference, TypeImportTest,
::testing::ValuesIn(UnaryTestTypes()));
TEST(ImportTest, TypeInferenceWithFixedOutputType) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("IsFinite", {{1, 2}, {2, 3}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_THAT(op->output_data_types,
::testing::ElementsAre(ArrayDataType::kBool));
}
TEST(ImportTest, FailedTypeInference) {
NodeDef node;
node.set_op("Atan");
node.set_name("Node1");
node.add_input();
node.set_input(0, "Node0");
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_TRUE(op->output_data_types.empty());
}
TEST(ImportTest, UnsupportedOpWithOutputShapes) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{1, 2}, {2, 3}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_EQ(op->output_shapes.size(), 2);
ASSERT_THAT(op->output_shapes[0].dims(), ::testing::ElementsAre(1, 2));
ASSERT_THAT(op->output_shapes[1].dims(), ::testing::ElementsAre(2, 3));
}
TEST(ImportTest, UnsupportedOpWithWildcardOutputShapes) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{-1, 2}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_TRUE(op->output_shapes.empty());
}
TEST(ImportTest, UnsupportedOpWithMultipleOutputs) {
NodeDef node = BuildNode("ParseExample", {});
{
AttrValue value_attr;
SetAttrValue(2, &value_attr);
(*node.mutable_attr())["Nsparse"] = value_attr;
}
{
AttrValue value_attr;
std::vector<tensorflow::DataType> types;
types.push_back(tensorflow::DT_FLOAT);
types.push_back(tensorflow::DT_STRING);
SetAttrValue(types, &value_attr);
(*node.mutable_attr())["sparse_types"] = value_attr;
}
{
AttrValue value_attr;
std::vector<tensorflow::DataType> types;
types.push_back(tensorflow::DT_STRING);
types.push_back(tensorflow::DT_FLOAT);
types.push_back(tensorflow::DT_INT64);
SetAttrValue(types, &value_attr);
(*node.mutable_attr())["Tdense"] = value_attr;
}
Model model;
EXPECT_TRUE(ImportFlexNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_EQ(op->outputs.size(), 9);
ASSERT_EQ(op->output_data_types.size(), 9);
ASSERT_EQ(op->outputs[0], "Node1");
ASSERT_EQ(op->outputs[1], "Node1:1");
ASSERT_EQ(op->output_data_types[0], ArrayDataType::kInt64);
ASSERT_EQ(op->output_data_types[1], ArrayDataType::kInt64);
ASSERT_EQ(op->outputs[2], "Node1:2");
ASSERT_EQ(op->outputs[3], "Node1:3");
ASSERT_EQ(op->output_data_types[2], ArrayDataType::kFloat);
ASSERT_EQ(op->output_data_types[3], ArrayDataType::kString);
ASSERT_EQ(op->outputs[4], "Node1:4");
ASSERT_EQ(op->outputs[5], "Node1:5");
ASSERT_EQ(op->output_data_types[4], ArrayDataType::kInt64);
ASSERT_EQ(op->output_data_types[5], ArrayDataType::kInt64);
ASSERT_EQ(op->outputs[6], "Node1:6");
ASSERT_EQ(op->outputs[7], "Node1:7");
ASSERT_EQ(op->outputs[8], "Node1:8");
ASSERT_EQ(op->output_data_types[6], ArrayDataType::kString);
ASSERT_EQ(op->output_data_types[7], ArrayDataType::kFloat);
ASSERT_EQ(op->output_data_types[8], ArrayDataType::kInt64);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/import_tensorflow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/import_tensorflow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0de25d93-901e-4158-b088-a4a0e872cc8c | cpp | tensorflow/tensorflow | toco_convert | tensorflow/lite/toco/toco_convert.cc | tensorflow/lite/toco/toco_convert_test.cc | #include <cstdio>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_cmdline_flags.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/toco_tooling.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
namespace toco {
namespace {
void CheckOutputFilePermissions(const Arg<std::string>& output_file) {
QCHECK(output_file.specified()) << "Missing required flag --output_file.\n";
QCHECK(port::file::Writable(output_file.value()).ok())
<< "Specified output_file is not writable: " << output_file.value()
<< ".\n";
}
void CheckFrozenModelPermissions(const Arg<std::string>& input_file) {
QCHECK(input_file.specified()) << "Missing required flag --input_file.\n";
QCHECK(port::file::Exists(input_file.value(), port::file::Defaults()).ok())
<< "Specified input_file does not exist: " << input_file.value() << ".\n";
QCHECK(port::file::Readable(input_file.value(), port::file::Defaults()).ok())
<< "Specified input_file exists, but is not readable: "
<< input_file.value() << ".\n";
}
void ReadInputData(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags,
std::string* graph_def_contents) {
port::CheckInitGoogleIsDone("InitGoogle is not done yet.\n");
QCHECK(!parsed_toco_flags.savedmodel_directory.specified())
<< "Use `tensorflow/lite/python/tflite_convert` script with "
<< "SavedModel directories.\n";
CheckFrozenModelPermissions(parsed_toco_flags.input_file);
CHECK(port::file::GetContents(parsed_toco_flags.input_file.value(),
graph_def_contents, port::file::Defaults())
.ok());
}
}
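// Runs the import -> transform -> export pipeline on an in-memory graph,
// optionally reporting the converted model's arithmetic op count.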
tensorflow::Status Convert(const std::string& graph_def_contents,
const TocoFlags& toco_flags,
const ModelFlags& model_flags,
std::string* output_file_contents,
int64_t* arithmetic_ops_count = nullptr) {
std::unique_ptr<Model> model =
Import(toco_flags, model_flags, graph_def_contents);
TF_RETURN_IF_ERROR(TransformWithStatus(toco_flags, model.get()));
TF_RETURN_IF_ERROR(Export(toco_flags, *model, toco_flags.allow_custom_ops(),
output_file_contents));
if (arithmetic_ops_count != nullptr) {
*arithmetic_ops_count = model->ArithmeticOpsCount();
}
return absl::OkStatus();
}
tensorflow::Status Convert(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags) {
ModelFlags model_flags;
ReadModelFlagsFromCommandLineFlags(parsed_model_flags, &model_flags);
TocoFlags toco_flags;
ReadTocoFlagsFromCommandLineFlags(parsed_toco_flags, &toco_flags);
std::string graph_def_contents;
ReadInputData(parsed_toco_flags, parsed_model_flags, &graph_def_contents);
CheckOutputFilePermissions(parsed_toco_flags.output_file);
std::string output_file_contents;
TF_RETURN_IF_ERROR(Convert(graph_def_contents, toco_flags, model_flags,
&output_file_contents));
TF_RETURN_IF_ERROR(
port::file::SetContents(parsed_toco_flags.output_file.value(),
output_file_contents, port::file::Defaults()));
return tensorflow::Status();
}
} | #include "tensorflow/lite/toco/toco_convert.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
namespace {
TEST(TocoTest, MissingInputFile) {
ParsedTocoFlags toco_flags;
ParsedModelFlags model_flags;
EXPECT_DEATH(EXPECT_TRUE(Convert(toco_flags, model_flags).ok()),
"Missing required flag --input_file");
}
TEST(TocoTest, BadInputFormat) {
TocoFlags toco_flags;
ModelFlags model_flags;
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Unhandled input_format='FILE_FORMAT_UNKNOWN'");
}
TEST(TocoTest, MissingOutputArrays) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"This model does not define output arrays, so a --output_arrays "
"flag must be given on the command-line");
}
TEST(TocoTest, BadOutputArray) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Specified output array .output1. is not produced by any op "
"in this graph. Is it a typo");
}
TEST(TocoTest, BadOutputFormat) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "output1"
input: "input1"
input: "input2"
op: "Sub"
attr { key: "T" value { type: DT_FLOAT } }
}
)GraphDef";
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Unhandled output_format='FILE_FORMAT_UNKNOWN'");
}
TEST(TocoTest, SimpleFloatModel) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
toco_flags.set_output_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "input1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "input2"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "output1"
input: "input1"
input: "input2"
op: "Sub"
attr { key: "T" value { type: DT_FLOAT } }
}
)GraphDef";
std::string output;
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok());
EXPECT_TRUE(!output.empty());
}
TEST(TocoTest, TransientStringTensors) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
toco_flags.set_output_format(TFLITE);
toco::InputArray* input_1 = model_flags.add_input_arrays();
input_1->set_name("input1");
toco::InputArray* indices_1 = model_flags.add_input_arrays();
indices_1->set_name("indices1");
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "input1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_STRING } }
attr { key: "shape" value { shape { dim { size:1 }}}}
}
node {
name: "indices1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "intermediate1"
op: "Gather"
input: "input1"
input: "indices1"
attr { key: "Tparams" value { type: DT_STRING } }
attr { key: "Tindices" value { type: DT_INT64 } }
}
node {
name: "output1"
op: "Gather"
input: "intermediate1"
input: "indices2"
attr { key: "Tparams" value { type: DT_STRING } }
attr { key: "Tindices" value { type: DT_INT64 } }
}
)GraphDef";
std::string output;
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok());
EXPECT_TRUE(!output.empty());
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_convert.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_convert_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ae6834d-0580-44a1-8bd2-af4b865d5c58 | cpp | tensorflow/tensorflow | toco_port | tensorflow/lite/toco/toco_port.cc | tensorflow/lite/toco/toco_port_test.cc | #include "tensorflow/lite/toco/toco_port.h"
#include <cstring>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/toco_types.h"
#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
namespace std {
double round(double x) { return ::round(x); }
}
#endif
namespace toco {
namespace port {
void CopyToBuffer(const std::string& src, char* dest) {
memcpy(dest, src.data(), src.size());
}
#ifdef PLATFORM_GOOGLE
void CopyToBuffer(const absl::Cord& src, char* dest) { src.CopyToArray(dest); }
#endif
}
}
#if defined(PLATFORM_GOOGLE) && !defined(__APPLE__) && \
!defined(__ANDROID__) && !defined(_WIN32)
#include "base/init_google.h"
#include "file/base/file.h"
#include "file/base/filesystem.h"
#include "file/base/helpers.h"
#include "file/base/options.h"
#include "file/base/path.h"
namespace toco {
namespace port {
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags) {
::InitGoogle(usage, argc, argv, remove_flags);
}
void InitGoogleWasDoneElsewhere() {
}
void CheckInitGoogleIsDone(const char* message) {
::CheckInitGoogleIsDone(message);
}
namespace file {
tensorflow::Status ToStatus(const absl::Status& uts) {
if (!uts.ok()) {
return tensorflow::Status(absl::StatusCode(::util::RetrieveErrorCode(uts)),
uts.message());
}
return absl::OkStatus();
}
toco::port::file::Options ToOptions(const ::file::Options& options) {
CHECK_EQ(&options, &::file::Defaults());
return Options();
}
tensorflow::Status Writable(const std::string& filename) {
File* f = nullptr;
const auto status = ::file::Open(filename, "w", &f, ::file::Defaults());
if (f) {
QCHECK_OK(f->Close(::file::Defaults()));
}
return ToStatus(status);
}
tensorflow::Status Readable(const std::string& filename,
const file::Options& options) {
return ToStatus(::file::Readable(filename, ::file::Defaults()));
}
tensorflow::Status Exists(const std::string& filename,
const file::Options& options) {
auto status = ::file::Exists(filename, ::file::Defaults());
return ToStatus(status);
}
tensorflow::Status GetContents(const std::string& filename,
std::string* contents,
const file::Options& options) {
return ToStatus(::file::GetContents(filename, contents, ::file::Defaults()));
}
tensorflow::Status SetContents(const std::string& filename,
const std::string& contents,
const file::Options& options) {
return ToStatus(::file::SetContents(filename, contents, ::file::Defaults()));
}
std::string JoinPath(const std::string& a, const std::string& b) {
return ::file::JoinPath(a, b);
}
}
}
}
#else
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>
#if defined(_WIN32)
#include <io.h>
#else
#include <unistd.h>
#endif
#if defined(PLATFORM_GOOGLE)
#include "base/commandlineflags.h"
#endif
namespace toco {
namespace port {
#if defined(_WIN32)
#define close _close
#define open _open
#define read _read
constexpr int kFileCreateMode = _S_IREAD | _S_IWRITE;
constexpr int kFileReadFlags = _O_RDONLY | _O_BINARY;
constexpr int kFileWriteFlags = _O_WRONLY | _O_BINARY | _O_CREAT;
#else
constexpr int kFileCreateMode = 0664;
constexpr int kFileReadFlags = O_RDONLY;
constexpr int kFileWriteFlags = O_CREAT | O_WRONLY;
#endif
static bool port_initialized = false;
void InitGoogleWasDoneElsewhere() { port_initialized = true; }
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags) {
if (!port_initialized) {
#if defined(PLATFORM_GOOGLE)
ParseCommandLineFlags(argc, argv, remove_flags);
#endif
port_initialized = true;
}
}
void CheckInitGoogleIsDone(const char* message) {
CHECK(port_initialized) << message;
}
namespace file {
tensorflow::Status Writable(const string& filename) {
FILE* f = fopen(filename.c_str(), "w");
if (f) {
fclose(f);
return tensorflow::OkStatus();
}
return tensorflow::errors::NotFound("not writable");
}
tensorflow::Status Readable(const string& filename,
const file::Options& options) {
FILE* f = fopen(filename.c_str(), "r");
if (f) {
fclose(f);
return tensorflow::OkStatus();
}
return tensorflow::errors::NotFound("not readable");
}
tensorflow::Status Exists(const string& filename,
const file::Options& options) {
struct stat statbuf;
int ret = stat(filename.c_str(), &statbuf);
if (ret == -1) {
return tensorflow::errors::NotFound("file doesn't exist");
}
return tensorflow::OkStatus();
}
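// Reads the whole file at `path` into `output` in 64 KiB chunks.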
tensorflow::Status GetContents(const string& path, string* output,
const file::Options& options) {
output->clear();
int fd = open(path.c_str(), kFileReadFlags);
if (fd == -1) {
return tensorflow::errors::NotFound("can't open() for read");
}
const int kBufSize = 1 << 16;
char buffer[kBufSize];
while (true) {
int size = read(fd, buffer, kBufSize);
if (size == 0) {
close(fd);
return tensorflow::OkStatus();
} else if (size == -1) {
close(fd);
return tensorflow::errors::Internal("error during read()");
} else {
output->append(buffer, size);
}
}
CHECK(0);
return tensorflow::errors::Internal("internal error");
}
tensorflow::Status SetContents(const string& filename, const string& contents,
const file::Options& options) {
int fd = open(filename.c_str(), kFileWriteFlags, kFileCreateMode);
if (fd == -1) {
return tensorflow::errors::Internal("can't open() for write");
}
size_t i = 0;
while (i < contents.size()) {
size_t to_write = contents.size() - i;
ssize_t written = write(fd, &contents[i], to_write);
if (written == -1) {
close(fd);
return tensorflow::errors::Internal("write() error");
}
i += written;
}
close(fd);
return tensorflow::OkStatus();
}
string JoinPath(const string& base, const string& filename) {
if (base.empty()) return filename;
string base_fixed = base;
if (!base_fixed.empty() && base_fixed.back() == '/') base_fixed.pop_back();
string filename_fixed = filename;
if (!filename_fixed.empty() && filename_fixed.front() == '/')
filename_fixed.erase(0, 1);
return base_fixed + "/" + filename_fixed;
}
}
}
}
#endif | #include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_types.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace toco {
namespace port {
namespace {
#ifdef PLATFORM_GOOGLE
#define TFLITE_PREFIX "third_party/tensorflow/lite/"
#else
#define TFLITE_PREFIX "tensorflow/lite/"
#endif
TEST(TocoPortTest, Exists) {
EXPECT_TRUE(
file::Exists(TFLITE_PREFIX "toco/toco_port_test.cc", file::Defaults())
.ok());
EXPECT_FALSE(
file::Exists("non-existent_file_asldjflasdjf", file::Defaults()).ok());
}
TEST(TocoPortTest, Readable) {
EXPECT_TRUE(
file::Readable(TFLITE_PREFIX "toco/toco_port_test.cc", file::Defaults())
.ok());
EXPECT_FALSE(
file::Readable("non-existent_file_asldjflasdjf", file::Defaults()).ok());
}
TEST(TocoPortTest, JoinPath) {
EXPECT_EQ("part1/part2", file::JoinPath("part1", "part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1/", "part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1", "/part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1/", "/part2"));
}
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_port.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_port_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |