ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) |
---|---|---|---|---|---|---|---|
800 | cpp | tensorflow/tensorflow | model_cmdline_flags | tensorflow/lite/toco/model_cmdline_flags.cc | tensorflow/lite/toco/model_cmdline_flags_test.cc | #ifndef TENSORFLOW_LITE_TOCO_MODEL_CMDLINE_FLAGS_H_
#define TENSORFLOW_LITE_TOCO_MODEL_CMDLINE_FLAGS_H_
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"
namespace toco {
bool ParseModelFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedModelFlags* parsed_model_flags_ptr);
void ReadModelFlagsFromCommandLineFlags(
const ParsedModelFlags& parsed_model_flags, ModelFlags* model_flags);
void ParseModelFlagsOrDie(int* argc, char* argv[]);
ParsedModelFlags* GlobalParsedModelFlags();
}
#endif
#include "tensorflow/lite/toco/model_cmdline_flags.h"
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
#include "tensorflow/lite/toco/toco_port.h"
#ifdef PLATFORM_GOOGLE
#include "base/commandlineflags.h"
#endif
namespace toco {
bool ParseModelFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedModelFlags* parsed_model_flags_ptr) {
ParsedModelFlags& parsed_flags = *parsed_model_flags_ptr;
using tensorflow::Flag;
std::vector<tensorflow::Flag> flags = {
Flag("input_array", parsed_flags.input_array.bind(),
parsed_flags.input_array.default_value(),
"Deprecated: use --input_arrays instead. Name of the input array. "
"If not specified, will try to read "
"that information from the input file."),
Flag("input_arrays", parsed_flags.input_arrays.bind(),
parsed_flags.input_arrays.default_value(),
"Names of the input arrays, comma-separated. If not specified, "
"will try to read that information from the input file."),
Flag("output_array", parsed_flags.output_array.bind(),
parsed_flags.output_array.default_value(),
"Deprecated: use --output_arrays instead. Name of the output array, "
"when specifying a unique output array. "
"If not specified, will try to read that information from the "
"input file."),
Flag("output_arrays", parsed_flags.output_arrays.bind(),
parsed_flags.output_arrays.default_value(),
"Names of the output arrays, comma-separated. "
"If not specified, will try to read "
"that information from the input file."),
Flag("input_shape", parsed_flags.input_shape.bind(),
parsed_flags.input_shape.default_value(),
"Deprecated: use --input_shapes instead. Input array shape. For "
"many models the shape takes the form "
"batch size, input array height, input array width, input array "
"depth."),
Flag("input_shapes", parsed_flags.input_shapes.bind(),
parsed_flags.input_shapes.default_value(),
"Shapes corresponding to --input_arrays, colon-separated. For "
"many models each shape takes the form batch size, input array "
"height, input array width, input array depth."),
Flag("batch_size", parsed_flags.batch_size.bind(),
parsed_flags.batch_size.default_value(),
"Deprecated. Batch size for the model. Replaces the first dimension "
"of an input size array if undefined. Use only with SavedModels "
"when --input_shapes flag is not specified. Always use "
"--input_shapes flag with frozen graphs."),
Flag("input_data_type", parsed_flags.input_data_type.bind(),
parsed_flags.input_data_type.default_value(),
"Deprecated: use --input_data_types instead. Input array type, if "
"not already provided in the graph. "
"Typically needs to be specified when passing arbitrary arrays "
"to --input_arrays."),
Flag("input_data_types", parsed_flags.input_data_types.bind(),
parsed_flags.input_data_types.default_value(),
"Input arrays types, comma-separated, if not already provided in "
"the graph. "
"Typically needs to be specified when passing arbitrary arrays "
"to --input_arrays."),
Flag("mean_value", parsed_flags.mean_value.bind(),
parsed_flags.mean_value.default_value(),
"Deprecated: use --mean_values instead. mean_value parameter for "
"image models, used to compute input "
"activations from input pixel data."),
Flag("mean_values", parsed_flags.mean_values.bind(),
parsed_flags.mean_values.default_value(),
"mean_values parameter for image models, comma-separated list of "
"doubles, used to compute input activations from input pixel "
"data. Each entry in the list should match an entry in "
"--input_arrays."),
Flag("std_value", parsed_flags.std_value.bind(),
parsed_flags.std_value.default_value(),
"Deprecated: use --std_values instead. std_value parameter for "
"image models, used to compute input "
"activations from input pixel data."),
Flag("std_values", parsed_flags.std_values.bind(),
parsed_flags.std_values.default_value(),
"std_value parameter for image models, comma-separated list of "
"doubles, used to compute input activations from input pixel "
"data. Each entry in the list should match an entry in "
"--input_arrays."),
Flag("variable_batch", parsed_flags.variable_batch.bind(),
parsed_flags.variable_batch.default_value(),
"If true, the model accepts an arbitrary batch size. Mutually "
"exclusive "
"with the 'batch' field: at most one of these two fields can be "
"set."),
Flag("rnn_states", parsed_flags.rnn_states.bind(),
parsed_flags.rnn_states.default_value(), ""),
Flag("model_checks", parsed_flags.model_checks.bind(),
parsed_flags.model_checks.default_value(),
"A list of model checks to be applied to verify the form of the "
"model. Applied after the graph transformations after import."),
Flag("dump_graphviz", parsed_flags.dump_graphviz.bind(),
parsed_flags.dump_graphviz.default_value(),
"Dump graphviz during LogDump call. If string is non-empty then "
"it defines path to dump, otherwise will skip dumping."),
Flag("dump_graphviz_video", parsed_flags.dump_graphviz_video.bind(),
parsed_flags.dump_graphviz_video.default_value(),
"If true, will dump graphviz at each "
"graph transformation, which may be used to generate a video."),
Flag("conversion_summary_dir", parsed_flags.conversion_summary_dir.bind(),
parsed_flags.conversion_summary_dir.default_value(),
"Local file directory to store the conversion logs."),
Flag("allow_nonexistent_arrays",
parsed_flags.allow_nonexistent_arrays.bind(),
parsed_flags.allow_nonexistent_arrays.default_value(),
"If true, will allow passing inexistent arrays in --input_arrays "
"and --output_arrays. This makes little sense, is only useful to "
"more easily get graph visualizations."),
Flag("allow_nonascii_arrays", parsed_flags.allow_nonascii_arrays.bind(),
parsed_flags.allow_nonascii_arrays.default_value(),
"If true, will allow passing non-ascii-printable characters in "
"--input_arrays and --output_arrays. By default (if false), only "
"ascii printable characters are allowed, i.e. character codes "
"ranging from 32 to 127. This is disallowed by default so as to "
"catch common copy-and-paste issues where invisible unicode "
"characters are unwittingly added to these strings."),
Flag(
"arrays_extra_info_file", parsed_flags.arrays_extra_info_file.bind(),
parsed_flags.arrays_extra_info_file.default_value(),
"Path to an optional file containing a serialized ArraysExtraInfo "
"proto allowing to pass extra information about arrays not specified "
"in the input model file, such as extra MinMax information."),
Flag("model_flags_file", parsed_flags.model_flags_file.bind(),
parsed_flags.model_flags_file.default_value(),
"Path to an optional file containing a serialized ModelFlags proto. "
"Options specified on the command line will override the values in "
"the proto."),
Flag("change_concat_input_ranges",
parsed_flags.change_concat_input_ranges.bind(),
parsed_flags.change_concat_input_ranges.default_value(),
"Boolean to change the behavior of min/max ranges for inputs and"
" output of the concat operators."),
};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
if (!tensorflow::Flags::Parse(argc, argv, flags)) return false;
}
auto& dump_options = *GraphVizDumpOptions::singleton();
dump_options.dump_graphviz_video = parsed_flags.dump_graphviz_video.value();
dump_options.dump_graphviz = parsed_flags.dump_graphviz.value();
return true;
}
void ReadModelFlagsFromCommandLineFlags(
const ParsedModelFlags& parsed_model_flags, ModelFlags* model_flags) {
toco::port::CheckInitGoogleIsDone("InitGoogle is not done yet");
if (parsed_model_flags.model_flags_file.specified()) {
std::string model_flags_file_contents;
QCHECK(port::file::GetContents(parsed_model_flags.model_flags_file.value(),
&model_flags_file_contents,
port::file::Defaults())
.ok())
<< "Specified --model_flags_file="
<< parsed_model_flags.model_flags_file.value()
<< " was not found or could not be read";
QCHECK(ParseFromStringEitherTextOrBinary(model_flags_file_contents,
model_flags))
<< "Specified --model_flags_file="
<< parsed_model_flags.model_flags_file.value()
<< " could not be parsed";
}
#ifdef PLATFORM_GOOGLE
CHECK(!((base::WasPresentOnCommandLine("batch") &&
parsed_model_flags.variable_batch.specified())))
<< "The --batch and --variable_batch flags are mutually exclusive.";
#endif
CHECK(!(parsed_model_flags.output_array.specified() &&
parsed_model_flags.output_arrays.specified()))
<< "The --output_array and --vs flags are mutually exclusive.";
if (parsed_model_flags.output_array.specified()) {
model_flags->add_output_arrays(parsed_model_flags.output_array.value());
}
if (parsed_model_flags.output_arrays.specified()) {
std::vector<std::string> output_arrays =
absl::StrSplit(parsed_model_flags.output_arrays.value(), ',');
for (const std::string& output_array : output_arrays) {
model_flags->add_output_arrays(output_array);
}
}
const bool uses_single_input_flags =
parsed_model_flags.input_array.specified() ||
parsed_model_flags.mean_value.specified() ||
parsed_model_flags.std_value.specified() ||
parsed_model_flags.input_shape.specified();
const bool uses_multi_input_flags =
parsed_model_flags.input_arrays.specified() ||
parsed_model_flags.mean_values.specified() ||
parsed_model_flags.std_values.specified() ||
parsed_model_flags.input_shapes.specified();
QCHECK(!(uses_single_input_flags && uses_multi_input_flags))
<< "Use either the singular-form input flags (--input_array, "
"--input_shape, --mean_value, --std_value) or the plural form input "
"flags (--input_arrays, --input_shapes, --mean_values, --std_values), "
"but not both forms within the same command line.";
if (parsed_model_flags.input_array.specified()) {
QCHECK(uses_single_input_flags);
model_flags->add_input_arrays()->set_name(
parsed_model_flags.input_array.value());
}
if (parsed_model_flags.input_arrays.specified()) {
QCHECK(uses_multi_input_flags);
for (const auto& input_array :
absl::StrSplit(parsed_model_flags.input_arrays.value(), ',')) {
model_flags->add_input_arrays()->set_name(std::string(input_array));
}
}
if (parsed_model_flags.mean_value.specified()) {
QCHECK(uses_single_input_flags);
model_flags->mutable_input_arrays(0)->set_mean_value(
parsed_model_flags.mean_value.value());
}
if (parsed_model_flags.mean_values.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> mean_values =
absl::StrSplit(parsed_model_flags.mean_values.value(), ',');
QCHECK(static_cast<int>(mean_values.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < mean_values.size(); ++i) {
char* last = nullptr;
model_flags->mutable_input_arrays(i)->set_mean_value(
strtod(mean_values[i].data(), &last));
CHECK(last != mean_values[i].data());
}
}
if (parsed_model_flags.std_value.specified()) {
QCHECK(uses_single_input_flags);
model_flags->mutable_input_arrays(0)->set_std_value(
parsed_model_flags.std_value.value());
}
if (parsed_model_flags.std_values.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> std_values =
absl::StrSplit(parsed_model_flags.std_values.value(), ',');
QCHECK(static_cast<int>(std_values.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < std_values.size(); ++i) {
char* last = nullptr;
model_flags->mutable_input_arrays(i)->set_std_value(
strtod(std_values[i].data(), &last));
CHECK(last != std_values[i].data());
}
}
if (parsed_model_flags.input_data_type.specified()) {
QCHECK(uses_single_input_flags);
IODataType type;
QCHECK(IODataType_Parse(parsed_model_flags.input_data_type.value(), &type));
model_flags->mutable_input_arrays(0)->set_data_type(type);
}
if (parsed_model_flags.input_data_types.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> input_data_types =
absl::StrSplit(parsed_model_flags.input_data_types.value(), ',');
QCHECK(static_cast<int>(input_data_types.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < input_data_types.size(); ++i) {
IODataType type;
QCHECK(IODataType_Parse(input_data_types[i], &type));
model_flags->mutable_input_arrays(i)->set_data_type(type);
}
}
if (parsed_model_flags.input_shape.specified()) {
QCHECK(uses_single_input_flags);
if (model_flags->input_arrays().empty()) {
model_flags->add_input_arrays();
}
auto* shape = model_flags->mutable_input_arrays(0)->mutable_shape();
shape->clear_dims();
const IntList& list = parsed_model_flags.input_shape.value();
for (auto& dim : list.elements) {
shape->add_dims(dim);
}
}
if (parsed_model_flags.input_shapes.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> input_shapes =
absl::StrSplit(parsed_model_flags.input_shapes.value(), ':');
QCHECK(static_cast<int>(input_shapes.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < input_shapes.size(); ++i) {
auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape();
shape->clear_dims();
if (input_shapes[i].empty()) {
continue;
}
for (const auto& dim_str : absl::StrSplit(input_shapes[i], ',')) {
int size;
CHECK(absl::SimpleAtoi(dim_str, &size))
<< "Failed to parse input_shape: " << input_shapes[i];
shape->add_dims(size);
}
}
}
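// READ_MODEL_FLAG copies a scalar command-line flag into the matching
// ModelFlags field, but only when the flag was explicitly specified.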
#define READ_MODEL_FLAG(name) \
do { \
if (parsed_model_flags.name.specified()) { \
model_flags->set_##name(parsed_model_flags.name.value()); \
} \
} while (false)
READ_MODEL_FLAG(variable_batch);
#undef READ_MODEL_FLAG
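// Each --rnn_states element is a {key:value,...} map; state_array,
// back_edge_source_array and size are required, num_dims is optional.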
for (const auto& element : parsed_model_flags.rnn_states.value().elements) {
auto* rnn_state_proto = model_flags->add_rnn_states();
for (const auto& kv_pair : element) {
const std::string& key = kv_pair.first;
const std::string& value = kv_pair.second;
if (key == "state_array") {
rnn_state_proto->set_state_array(value);
} else if (key == "back_edge_source_array") {
rnn_state_proto->set_back_edge_source_array(value);
} else if (key == "size") {
int32_t size = 0;
CHECK(absl::SimpleAtoi(value, &size));
CHECK_GT(size, 0);
rnn_state_proto->set_size(size);
} else if (key == "num_dims") {
int32_t size = 0;
CHECK(absl::SimpleAtoi(value, &size));
CHECK_GT(size, 0);
rnn_state_proto->set_num_dims(size);
} else {
LOG(FATAL) << "Unknown key '" << key << "' in --rnn_states";
}
}
CHECK(rnn_state_proto->has_state_array() &&
rnn_state_proto->has_back_edge_source_array() &&
rnn_state_proto->has_size())
<< "--rnn_states must include state_array, back_edge_source_array and "
"size.";
}
for (const auto& element : parsed_model_flags.model_checks.value().elements) {
auto* model_check_proto = model_flags->add_model_checks();
for (const auto& kv_pair : element) {
const std::string& key = kv_pair.first;
const std::string& value = kv_pair.second;
if (key == "count_type") {
model_check_proto->set_count_type(value);
} else if (key == "count_min") {
int32_t count = 0;
CHECK(absl::SimpleAtoi(value, &count));
CHECK_GE(count, -1);
model_check_proto->set_count_min(count);
} else if (key == "count_max") {
int32_t count = 0;
CHECK(absl::SimpleAtoi(value, &count));
CHECK_GE(count, -1);
model_check_proto->set_count_max(count);
} else {
LOG(FATAL) << "Unknown key '" << key << "' in --model_checks";
}
}
}
if (!model_flags->has_allow_nonascii_arrays()) {
model_flags->set_allow_nonascii_arrays(
parsed_model_flags.allow_nonascii_arrays.value());
}
if (!model_flags->has_allow_nonexistent_arrays()) {
model_flags->set_allow_nonexistent_arrays(
parsed_model_flags.allow_nonexistent_arrays.value());
}
if (!model_flags->has_change_concat_input_ranges()) {
model_flags->set_change_concat_input_ranges(
parsed_model_flags.change_concat_input_ranges.value());
}
if (parsed_model_flags.arrays_extra_info_file.specified()) {
std::string arrays_extra_info_file_contents;
CHECK(port::file::GetContents(
parsed_model_flags.arrays_extra_info_file.value(),
&arrays_extra_info_file_contents, port::file::Defaults())
.ok());
ParseFromStringEitherTextOrBinary(arrays_extra_info_file_contents,
model_flags->mutable_arrays_extra_info());
}
}
ParsedModelFlags* UncheckedGlobalParsedModelFlags(bool must_already_exist) {
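// Lazily creates the global ParsedModelFlags singleton on first use. If the
// first caller passes must_already_exist=true (i.e. GlobalParsedModelFlags()
// is reached before ParseModelFlagsOrDie()), the process aborts.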
static auto* flags = [must_already_exist]() {
if (must_already_exist) {
fprintf(stderr, __FILE__
":"
"GlobalParsedModelFlags() used without initialization\n");
fflush(stderr);
abort();
}
return new toco::ParsedModelFlags;
}();
return flags;
}
ParsedModelFlags* GlobalParsedModelFlags() {
return UncheckedGlobalParsedModelFlags(true);
}
void ParseModelFlagsOrDie(int* argc, char* argv[]) {
auto* flags = UncheckedGlobalParsedModelFlags(false);
std::string msg;
bool model_success =
toco::ParseModelFlagsFromCommandLineFlags(argc, argv, &msg, flags);
if (!model_success || !msg.empty()) {
fprintf(stderr, "%s", msg.c_str());
fflush(stderr);
abort();
}
}
} | #include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/model_cmdline_flags.h"
namespace toco {
namespace {
TEST(ModelCmdlineFlagsTest, ParseArgsStringMapList) {
int args_count = 3;
const char* args[] = {
"toco", "--input_arrays=input_1",
"--rnn_states={state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}",
nullptr};
std::string expected_input_arrays = "input_1";
std::vector<std::unordered_map<std::string, std::string>> expected_rnn_states;
expected_rnn_states.push_back(
{{"state_array", "rnn/BasicLSTMCellZeroState/zeros"},
{"back_edge_source_array", "rnn/basic_lstm_cell/Add_1"},
{"size", "4"}});
expected_rnn_states.push_back(
{{"state_array", "rnn/BasicLSTMCellZeroState/zeros_1"},
{"back_edge_source_array", "rnn/basic_lstm_cell/Mul_2"},
{"size", "4"}});
std::string message;
ParsedModelFlags result_flags;
EXPECT_TRUE(ParseModelFlagsFromCommandLineFlags(
&args_count, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.input_arrays.value(), expected_input_arrays);
EXPECT_EQ(result_flags.rnn_states.value().elements, expected_rnn_states);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |
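The unit test in this row only exercises the parsing half. As an illustration (not part of the dataset row), here is a minimal sketch of how the two entry points declared in the header might be chained in a hypothetical driver; it assumes the toco headers and their build dependencies are available and omits the runtime initialization the real converter performs.

```cpp
#include "tensorflow/lite/toco/model_cmdline_flags.h"
#include "tensorflow/lite/toco/model_flags.pb.h"

int main(int argc, char* argv[]) {
  // Parse --input_arrays, --output_arrays, --input_shapes, etc. into the
  // global ParsedModelFlags, printing usage and aborting on error or --help.
  toco::ParseModelFlagsOrDie(&argc, argv);

  // Materialize the ModelFlags proto consumed by the rest of the converter.
  // NOTE: the real tool initializes the toco runtime (toco::port) first so
  // that the CheckInitGoogleIsDone() guard inside this call passes.
  toco::ModelFlags model_flags;
  toco::ReadModelFlagsFromCommandLineFlags(*toco::GlobalParsedModelFlags(),
                                           &model_flags);
  return 0;
}
```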
801 | cpp | tensorflow/tensorflow | import_tensorflow | tensorflow/lite/toco/import_tensorflow.cc | tensorflow/lite/toco/import_tensorflow_test.cc | #ifndef TENSORFLOW_LITE_TOCO_IMPORT_TENSORFLOW_H_
#define TENSORFLOW_LITE_TOCO_IMPORT_TENSORFLOW_H_
#include <memory>
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
namespace toco {
struct TensorFlowImportFlags {
bool drop_control_dependency = false;
bool import_all_ops_as_unsupported = false;
};
std::unique_ptr<Model> ImportTensorFlowGraphDef(
const ModelFlags& model_flags, const TensorFlowImportFlags& tf_import_flags,
const tensorflow::GraphDef& tf_graph);
std::unique_ptr<Model> ImportTensorFlowGraphDef(
const ModelFlags& model_flags, const TensorFlowImportFlags& tf_import_flags,
const std::string& input_file_contents);
}
#endif
#include "tensorflow/lite/toco/import_tensorflow.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/map.h"
#include "google/protobuf/text_format.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_cluster.h"
#include "tensorflow/lite/toco/tensorflow_util.h"
#include "tensorflow/lite/toco/tooling_util.h"
using tensorflow::AttrValue;
using tensorflow::DT_BOOL;
using tensorflow::DT_COMPLEX64;
using tensorflow::DT_FLOAT;
using tensorflow::DT_INT16;
using tensorflow::DT_INT32;
using tensorflow::DT_INT64;
using tensorflow::DT_QUINT8;
using tensorflow::DT_STRING;
using tensorflow::DT_UINT16;
using tensorflow::DT_UINT32;
using tensorflow::DT_UINT8;
using tensorflow::GraphDef;
using tensorflow::NodeDef;
using tensorflow::TensorProto;
using tensorflow::TensorShapeProto;
namespace toco {
namespace {
bool HasAttr(const NodeDef& node, const std::string& attr_name) {
return node.attr().count(attr_name) > 0;
}
bool HasWildcardDimension(const TensorShapeProto& shape) {
for (const auto& dim : shape.dim()) {
if (dim.size() == -1) return true;
}
return false;
}
const std::string& GetStringAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kS);
return attr.s();
}
int64_t GetIntAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name)) << attr_name << " not found in:\n"
<< node.DebugString();
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kI);
return attr.i();
}
float GetFloatAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kF);
return attr.f();
}
bool GetBoolAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kB);
return attr.b();
}
tensorflow::DataType GetDataTypeAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kType);
return attr.type();
}
const TensorShapeProto& GetShapeAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kShape);
return attr.shape();
}
const TensorProto& GetTensorAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name)) << "No attr named '" << attr_name << "'";
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kTensor);
return attr.tensor();
}
const AttrValue::ListValue& GetListAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kList);
return attr.list();
}
tensorflow::Status CheckOptionalAttr(const NodeDef& node,
const std::string& attr_name,
const std::string& expected_value) {
if (HasAttr(node, attr_name)) {
const std::string& value = GetStringAttr(node, attr_name);
if (value != expected_value) {
return tensorflow::errors::InvalidArgument(
"Unexpected value for attribute '" + attr_name + "'. Expected '" +
expected_value + "'");
}
}
return absl::OkStatus();
}
tensorflow::Status CheckOptionalAttr(
const NodeDef& node, const std::string& attr_name,
const tensorflow::DataType& expected_value) {
if (HasAttr(node, attr_name)) {
const tensorflow::DataType& value = GetDataTypeAttr(node, attr_name);
if (value != expected_value) {
return tensorflow::errors::InvalidArgument(
"Unexpected value for attribute '" + attr_name + "'. Expected '" +
tensorflow::DataType_Name(expected_value) + "'");
}
}
return absl::OkStatus();
}
template <typename T1, typename T2>
tensorflow::Status ExpectValue(const T1& v1, const T2& v2,
const std::string& description) {
if (v1 == v2) return absl::OkStatus();
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Unexpected ", description, ": got ", v1, ", expected ", v2));
}
ArrayDataType ConvertDataType(tensorflow::DataType dtype) {
if (dtype == DT_UINT8)
return ArrayDataType::kUint8;
else if (dtype == DT_FLOAT)
return ArrayDataType::kFloat;
else if (dtype == DT_BOOL)
return ArrayDataType::kBool;
else if (dtype == DT_INT16)
return ArrayDataType::kInt16;
else if (dtype == DT_UINT16)
return ArrayDataType::kUint16;
else if (dtype == DT_INT32)
return ArrayDataType::kInt32;
else if (dtype == DT_UINT32)
return ArrayDataType::kUint32;
else if (dtype == DT_INT64)
return ArrayDataType::kInt64;
else if (dtype == DT_STRING)
return ArrayDataType::kString;
else if (dtype == DT_COMPLEX64)
return ArrayDataType::kComplex64;
else
LOG(INFO) << "Unsupported data type in placeholder op: " << dtype;
return ArrayDataType::kNone;
}
tensorflow::Status ImportShape(
const TFLITE_PROTO_NS::RepeatedPtrField<tensorflow::TensorShapeProto_Dim>&
input_dims,
int* input_flat_size, Shape* shape) {
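// A zero-sized dimension collapses the result to an empty shape with a flat
// size of 0; dimensions that overflow int are rejected as invalid arguments.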
std::vector<int> input_dims_only_sizes;
bool zero_sized_shape = false;
for (auto& d : input_dims) {
if (d.size() > std::numeric_limits<int>::max()) {
return tensorflow::errors::InvalidArgument("Shape element overflows");
}
if (d.size() == 0) {
zero_sized_shape = true;
}
input_dims_only_sizes.push_back(d.size());
}
if (zero_sized_shape) {
shape->mutable_dims()->clear();
if (input_flat_size != nullptr) *input_flat_size = 0;
return absl::OkStatus();
}
*shape->mutable_dims() = input_dims_only_sizes;
if (input_flat_size == nullptr) return absl::OkStatus();
return NumElements(input_dims_only_sizes, input_flat_size);
}
template <typename T>
struct TensorTraits;
template <>
struct TensorTraits<float> {
static int size(const TensorProto& p) { return p.float_val_size(); }
static float get(const TensorProto& p, int i) { return p.float_val(i); }
static std::string accessor_name() { return "float_val"; }
static std::string type_name() { return "float"; }
static void CopyFromContent(const TensorProto& p, std::vector<float>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<uint8_t> {
static int size(const TensorProto& p) { return p.int_val_size(); }
static uint8_t get(const TensorProto& p, int i) { return p.int_val(i); }
static std::string accessor_name() { return "int_val"; }
static std::string type_name() { return "uint8"; }
static void CopyFromContent(const TensorProto& p,
std::vector<uint8_t>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<std::complex<float>> {
static int size(const TensorProto& p) { return p.scomplex_val_size() / 2; }
static std::complex<float> get(const TensorProto& p, int i) {
return std::complex<float>(p.scomplex_val(2 * i),
p.scomplex_val(2 * i + 1));
}
static std::string accessor_name() { return "scomplex_val"; }
static std::string type_name() { return "complex64"; }
static void CopyFromContent(const TensorProto& p,
std::vector<std::complex<float>>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<int32> {
static int size(const TensorProto& p) { return p.int_val_size(); }
static int32 get(const TensorProto& p, int i) { return p.int_val(i); }
static std::string accessor_name() { return "int_val"; }
static std::string type_name() { return "int32"; }
static void CopyFromContent(const TensorProto& p, std::vector<int32>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<uint32> {
static int size(const TensorProto& p) { return p.uint32_val_size(); }
static int32 get(const TensorProto& p, int i) { return p.uint32_val(i); }
static std::string accessor_name() { return "uint32_val"; }
static std::string type_name() { return "uint32"; }
static void CopyFromContent(const TensorProto& p, std::vector<uint32>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<int64_t> {
static int size(const TensorProto& p) { return p.int64_val_size(); }
static int64_t get(const TensorProto& p, int i) { return p.int64_val(i); }
static std::string accessor_name() { return "int64_val"; }
static std::string type_name() { return "int64"; }
static void CopyFromContent(const TensorProto& p,
std::vector<int64_t>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<bool> {
static int size(const TensorProto& p) { return p.bool_val_size(); }
static bool get(const TensorProto& p, int i) { return p.bool_val(i); }
static std::string accessor_name() { return "bool_val"; }
static std::string type_name() { return "bool"; }
static void CopyFromContent(const TensorProto& p, std::vector<bool>* data) {
std::vector<char> buf(p.tensor_content().size());
toco::port::CopyToBuffer(p.tensor_content(), buf.data());
for (int i = 0; i < p.tensor_content().size(); i++) {
(*data)[i] = static_cast<bool>(buf[i]);
}
}
};
template <typename T>
tensorflow::Status ImportTensorData(const TensorProto& input_tensor,
int input_flat_size,
std::vector<T>* output_data) {
CHECK_GE(output_data->size(), input_flat_size);
int num_elements_in_tensor = TensorTraits<T>::size(input_tensor);
if (num_elements_in_tensor == input_flat_size) {
for (int i = 0; i < num_elements_in_tensor; i++) {
(*output_data)[i] = TensorTraits<T>::get(input_tensor, i);
}
} else if (input_tensor.tensor_content().size() ==
input_flat_size * sizeof(T)) {
TensorTraits<T>::CopyFromContent(input_tensor, output_data);
} else if (num_elements_in_tensor >= 0 &&
num_elements_in_tensor < input_flat_size) {
int i = 0;
for (; i < num_elements_in_tensor; ++i) {
(*output_data)[i] = TensorTraits<T>::get(input_tensor, i);
}
auto last = i == 0 ? T(0) : (*output_data)[i - 1];
for (; i < input_flat_size; ++i) {
(*output_data)[i] = last;
}
} else {
std::string accessor_name = TensorTraits<T>::accessor_name();
std::string type_name = TensorTraits<T>::type_name();
return tensorflow::errors::InvalidArgument(
absl::StrCat("Neither input_content (",
input_tensor.tensor_content().size() / sizeof(T), ") nor ",
accessor_name, " (", num_elements_in_tensor,
") have the right dimensions (", input_flat_size,
") for this ", type_name, " tensor"));
}
return absl::OkStatus();
}
tensorflow::Status ImportFloatArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_FLOAT);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_float_data =
output_array->GetMutableBuffer<ArrayDataType::kFloat>().data;
output_float_data.resize(RequiredBufferSizeForShape(output_array->shape()),
0.f);
return ImportTensorData<float>(input_tensor, input_flat_size,
&output_float_data);
}
tensorflow::Status ImportComplex64Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_COMPLEX64);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 4);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_complex_data =
output_array->GetMutableBuffer<ArrayDataType::kComplex64>().data;
output_complex_data.resize(RequiredBufferSizeForShape(output_array->shape()),
std::complex<float>(0.f, 0.f));
return ImportTensorData<std::complex<float>>(input_tensor, input_flat_size,
&output_complex_data);
}
tensorflow::Status ImportQuint8Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_QUINT8);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kUint8>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<uint8_t>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportInt32Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_INT32);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kInt32>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<int32>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportUint32Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_UINT32);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kUint32>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<uint32>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportInt64Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_INT64);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kInt64>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<int64_t>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportBoolArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_BOOL);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_bool_data =
output_array->GetMutableBuffer<ArrayDataType::kBool>().data;
output_bool_data.resize(RequiredBufferSizeForShape(output_array->shape()),
false);
status =
ImportTensorData<bool>(input_tensor, input_flat_size, &output_bool_data);
if (!status.ok() && output_bool_data.size() == 1) {
output_bool_data[0] = false;
return absl::OkStatus();
}
return status;
}
tensorflow::Status ImportStringArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_STRING);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
if (input_flat_size != input_tensor.string_val_size()) {
return tensorflow::errors::InvalidArgument(
"Input_content string_val doesn't have the right dimensions "
"for this string tensor");
}
auto& output_string_data =
output_array->GetMutableBuffer<ArrayDataType::kString>().data;
output_string_data.resize(RequiredBufferSizeForShape(output_array->shape()));
CHECK_GE(output_string_data.size(), input_flat_size);
for (int i = 0; i < input_flat_size; ++i) {
output_string_data[i] = input_tensor.string_val(i);
}
return absl::OkStatus();
}
int GetInputsCount(const NodeDef& node,
const TensorFlowImportFlags& tf_import_flags) {
if (tf_import_flags.drop_control_dependency) {
for (size_t i = 0; i < node.input_size(); ++i) {
if (node.input(i)[0] == '^') {
return i;
}
}
}
return node.input_size();
}
tensorflow::Status CheckInputsCount(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
int expected_input_count) {
if (GetInputsCount(node, tf_import_flags) != expected_input_count) {
return tensorflow::errors::FailedPrecondition(
node.op(), " node expects ", expected_input_count,
" input(s) other than control dependencies: ", node.DebugString());
}
return absl::OkStatus();
}
template <ArrayDataType T>
std::string CreateConstArray(
Model* model, std::string const& name,
std::vector<typename toco::DataType<T>> const& data) {
std::string array_name = toco::AvailableArrayName(*model, name);
auto& array = model->GetOrCreateArray(array_name);
array.data_type = T;
array.mutable_shape()->mutable_dims()->emplace_back(
static_cast<int>(data.size()));
array.GetMutableBuffer<T>().data = data;
return array_name;
}
void RetainTensorFlowNodeDef(const NodeDef& node, Operator* op) {
node.SerializeToString(&op->tensorflow_node_def);
}
void GetOutputNamesFromNodeDef(const NodeDef& node,
const tensorflow::OpDef& op_def,
TensorFlowUnsupportedOperator* op) {
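// TensorFlow names a node's first output after the node itself and each
// additional output as "<node_name>:<index>".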
int next_output = 0;
auto add_output = [&node, &next_output, op]() {
if (next_output == 0) {
op->outputs.push_back(node.name());
} else {
op->outputs.push_back(absl::StrCat(node.name(), ":", next_output));
}
++next_output;
};
for (int i = 0; i < op_def.output_arg_size(); ++i) {
std::string multiples = op_def.output_arg(i).number_attr();
if (!multiples.empty()) {
CHECK(HasAttr(node, multiples)) << "No attr named " << multiples;
int num_outputs = GetIntAttr(node, multiples);
for (int j = 0; j < num_outputs; ++j) {
add_output();
}
} else {
std::string list = op_def.output_arg(i).type_list_attr();
if (!list.empty()) {
CHECK(HasAttr(node, list)) << "No attr named " << list;
const AttrValue::ListValue& list_value = GetListAttr(node, list);
for (int j = 0; j < list_value.type_size(); ++j) {
add_output();
}
} else {
add_output();
}
}
}
}
void GetOutputTypesFromNodeDef(const NodeDef& node,
const tensorflow::OpDef& op_def,
TensorFlowUnsupportedOperator* op) {
auto add_type = [&node, op](tensorflow::DataType type) {
if (type == tensorflow::DT_INVALID) {
LOG(WARNING) << "Op node missing output type attribute: " << node.name();
op->output_data_types.clear();
} else {
op->output_data_types.push_back(ConvertDataType(type));
}
};
auto get_type = [&node](const tensorflow::OpDef::ArgDef& a) {
if (a.type() != tensorflow::DT_INVALID) {
return a.type();
} else if (HasAttr(node, a.type_attr())) {
return GetDataTypeAttr(node, a.type_attr());
} else {
return tensorflow::DT_INVALID;
}
};
for (int i = 0; i < op_def.output_arg_size(); ++i) {
std::string multiples = op_def.output_arg(i).number_attr();
if (!multiples.empty()) {
CHECK(HasAttr(node, multiples)) << "No attr named " << multiples;
int num_outputs = GetIntAttr(node, multiples);
auto type = get_type(op_def.output_arg(i));
for (int j = 0; j < num_outputs; ++j) {
add_type(type);
}
} else {
std::string list = op_def.output_arg(i).type_list_attr();
if (!list.empty()) {
CHECK(HasAttr(node, list)) << "No attr named " << list;
const AttrValue::ListValue& list_value = GetListAttr(node, list);
for (int j = 0; j < list_value.type_size(); ++j) {
add_type(list_value.type(j));
}
} else {
add_type(get_type(op_def.output_arg(i)));
}
}
}
}
tensorflow::Status ConvertUnsupportedOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
static constexpr char kAttrOutputQuantized[] = "_output_quantized";
static constexpr char kAttrOutputTypes[] = "_output_types";
static constexpr char kAttrOutputShapes[] = "_output_shapes";
static constexpr char kAttrSupportOutputTypeFloatInQuantizedOp[] =
"_support_output_type_float_in_quantized_op";
LOG(INFO) << "Converting unsupported operation: " << node.op();
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = node.op();
RetainTensorFlowNodeDef(node, op);
model->operators.emplace_back(op);
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
const tensorflow::OpDef* op_def = nullptr;
if (tensorflow::OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok()) {
GetOutputNamesFromNodeDef(node, *op_def, op);
} else {
op->outputs.push_back(node.name());
}
if (HasAttr(node, kAttrOutputQuantized)) {
op->quantized = GetBoolAttr(node, kAttrOutputQuantized);
}
if (HasAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp)) {
op->support_output_type_float_in_quantized_op =
GetBoolAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp);
}
if (HasAttr(node, kAttrOutputTypes)) {
const auto& output_types = GetListAttr(node, kAttrOutputTypes);
for (int i = 0; i < output_types.type_size(); ++i) {
op->output_data_types.push_back(ConvertDataType(output_types.type(i)));
}
} else if (HasAttr(node, "Tout")) {
const auto& output_type = GetDataTypeAttr(node, "Tout");
op->output_data_types.push_back(ConvertDataType(output_type));
} else if (op_def != nullptr) {
GetOutputTypesFromNodeDef(node, *op_def, op);
} else {
LOG(INFO) << "Unable to determine output type for op: " << node.op();
}
if (HasAttr(node, kAttrOutputShapes)) {
const auto& output_shapes = GetListAttr(node, kAttrOutputShapes);
Shape output_shape;
for (int i = 0; i < output_shapes.shape_size(); ++i) {
const auto& shape = output_shapes.shape(i);
if (HasWildcardDimension(shape)) {
LOG(INFO) << "Skipping wildcard output shape(s) for node: "
<< node.name();
op->output_shapes.clear();
break;
}
const auto status =
ImportShape(shape.dim(), nullptr, &output_shape);
if (!status.ok()) {
return status;
}
op->output_shapes.push_back(output_shape);
}
}
return absl::OkStatus();
}
tensorflow::Status ConvertConstOperator(
const N | #include "tensorflow/lite/toco/import_tensorflow.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
using tensorflow::AttrValue;
using tensorflow::DT_BOOL;
using tensorflow::DT_COMPLEX64;
using tensorflow::DT_FLOAT;
using tensorflow::DT_INT32;
using tensorflow::DT_INT64;
using tensorflow::DT_INVALID;
using tensorflow::DT_QUINT8;
using tensorflow::DT_STRING;
using tensorflow::DT_UINT16;
using tensorflow::DT_UINT32;
using tensorflow::NodeDef;
using tensorflow::Status;
using ::testing::ElementsAre;
namespace internal {
using ConverterType = tensorflow::Status (*)(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model);
using ConverterMapType = std::unordered_map<std::string, ConverterType>;
ConverterMapType GetTensorFlowNodeConverterMap();
ConverterMapType GetTensorFlowNodeConverterMapForFlex();
Status ImportTensorFlowNode(const NodeDef&, const TensorFlowImportFlags&,
const ModelFlags& model_flags, Model*,
const ConverterMapType&);
}
namespace {
Status ImportNode(const NodeDef& node, Model* model) {
const auto converter = internal::GetTensorFlowNodeConverterMap();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), model, converter);
}
Status ImportFlexNode(const NodeDef& node, Model* model) {
const auto converter = internal::ConverterMapType();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), model, converter);
}
Status ImportNode(const NodeDef& node) {
Model model;
return ImportNode(node, &model);
}
NodeDef BuildNode(
const std::string& op,
const std::vector<std::initializer_list<int>>& output_shapes) {
NodeDef node;
node.set_op(op);
node.set_name("Node1");
node.add_input();
node.set_input(0, "Node0");
AttrValue::ListValue* shapes =
(*node.mutable_attr())["_output_shapes"].mutable_list();
for (const auto& output_shape : output_shapes) {
tensorflow::TensorShapeProto* shape = shapes->add_shape();
for (int64_t output_shape_dim : output_shape) {
auto shape_dim = shape->add_dim();
shape_dim->set_size(output_shape_dim);
}
}
return node;
}
namespace {
void BuildConstNode(std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements,
NodeDef* node) {
node->set_op("Const");
node->set_name("Node1");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["dtype"] = dtype_attr;
tensorflow::TensorProto t;
t.set_dtype(dtype);
auto* s = t.mutable_tensor_shape();
for (auto d : shape) {
s->add_dim()->set_size(d);
}
switch (dtype) {
case DT_FLOAT:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_float_val(i / 10000.0 + 1);
}
break;
case DT_INT32:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<int>::max() + 1);
}
break;
case DT_UINT32:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint32_t>::max() + 1);
}
break;
case DT_QUINT8:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint8_t>::max() + 1);
}
break;
case DT_INT64:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int64_val(i + 1);
}
break;
case DT_UINT16:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint16_t>::max() + 1);
}
break;
case DT_STRING:
break;
case DT_BOOL:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_bool_val((i % 2) == 0);
}
break;
case DT_COMPLEX64:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_scomplex_val(i / 10000.0 + 1);
t.add_scomplex_val(-i / 10000.0 - 1);
}
break;
default:
break;
}
AttrValue value_attr;
SetAttrValue(t, &value_attr);
(*node->mutable_attr())["value"] = value_attr;
}
}
TEST(FlexImportTest, ConditionalConst) {
Model model;
auto build_and_import_node =
[&model](const std::string& name, std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements) {
NodeDef node;
BuildConstNode(shape, dtype, num_elements, &node);
node.set_name(name);
const auto converter = internal::GetTensorFlowNodeConverterMapForFlex();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), &model, converter);
};
EXPECT_TRUE(build_and_import_node("Known", {1, 2, 3}, DT_INT32, 6).ok());
EXPECT_TRUE(build_and_import_node("BadType", {1, 2, 3}, DT_INVALID, 6).ok());
EXPECT_TRUE(build_and_import_node("Unknown", {1, -2, 3}, DT_INT32, 6).ok());
EXPECT_EQ(model.operators.size(), 2);
EXPECT_TRUE(model.HasArray("Known"));
EXPECT_FALSE(model.HasArray("Unknown"));
EXPECT_FALSE(model.HasArray("BadType"));
}
TEST(FlexImportTest, SoftmaxWithBeta) {
NodeDef node;
node.set_op("Softmax");
node.set_name("softmax");
node.add_input();
node.set_input(0, "logits");
AttrValue dtype_attr;
SetAttrValue(0.5, &dtype_attr);
(*node.mutable_attr())["_softmax_beta"] = dtype_attr;
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax);
const SoftmaxOperator* op =
static_cast<const SoftmaxOperator*>(model.operators[0].get());
EXPECT_EQ(op->beta, 0.5);
}
TEST(FlexImportTest, SoftmaxWithoutBeta) {
NodeDef node;
node.set_op("Softmax");
node.set_name("softmax");
node.add_input();
node.set_input(0, "logits");
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax);
const SoftmaxOperator* op =
static_cast<const SoftmaxOperator*>(model.operators[0].get());
EXPECT_EQ(op->beta, 1.0);
}
class ShapeImportTest : public ::testing::TestWithParam<tensorflow::DataType> {
};
TEST_P(ShapeImportTest, ShapeElementIsNegative) {
NodeDef node;
BuildConstNode({1, -2, 10}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(
status.message(),
"Tensor shape should not include negative values\n\t (while processing "
"node 'Node1')");
}
TEST_P(ShapeImportTest, ShapeElementIsZero) {
NodeDef node;
BuildConstNode({1, 0, 10}, GetParam(), 0, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
const auto& array = model.GetArray("Node1");
EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre());
}
TEST_P(ShapeImportTest, ShapeIsOneDimZero) {
NodeDef node;
BuildConstNode({0}, GetParam(), 0, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
const auto& array = model.GetArray("Node1");
EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre());
}
TEST_P(ShapeImportTest, ShapeElementTooLarge) {
NodeDef node;
BuildConstNode({3000000000}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(status.message(),
"Shape element overflows\n\t (while processing node 'Node1')");
}
TEST_P(ShapeImportTest, ShapeTooLarge) {
NodeDef node;
BuildConstNode({1000000, 2000000, 2000000, 2000000}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(status.message(),
"Tensor shape is too large\n\t (while processing node 'Node1')");
}
std::vector<tensorflow::DataType> TestTypes() {
return {DT_FLOAT, DT_INT32, DT_INT64, DT_BOOL, DT_QUINT8, DT_COMPLEX64};
}
INSTANTIATE_TEST_SUITE_P(ShapeImportTest, ShapeImportTest,
::testing::ValuesIn(TestTypes()));
class ContentImportTest : public ::testing::Test {
public:
template <ArrayDataType T>
std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) {
Model model;
auto status = ImportNode(node, &model);
CHECK(status.ok()) << status.message();
const auto& array = model.GetArray("Node1");
return array.GetBuffer<T>().data;
}
void RemoveTrailingElements(NodeDef* node, int num) {
tensorflow::TensorProto* p =
node->mutable_attr()->at("value").mutable_tensor();
for (int i = 0; i < num; ++i) {
if (p->int_val_size() > 0) p->mutable_int_val()->RemoveLast();
if (p->int64_val_size() > 0) p->mutable_int64_val()->RemoveLast();
if (p->float_val_size() > 0) p->mutable_float_val()->RemoveLast();
if (p->bool_val_size() > 0) p->mutable_bool_val()->RemoveLast();
if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast();
if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast();
}
}
};
TEST_F(ContentImportTest, Int32) {
constexpr ArrayDataType kType = ArrayDataType::kInt32;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_INT32, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Int64) {
constexpr ArrayDataType kType = ArrayDataType::kInt64;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_INT64, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Quint8) {
constexpr ArrayDataType kType = ArrayDataType::kUint8;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_QUINT8, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Bool) {
constexpr ArrayDataType kType = ArrayDataType::kBool;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_BOOL, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 1));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Float) {
constexpr ArrayDataType kType = ArrayDataType::kFloat;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_FLOAT, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0004));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000));
}
TEST_F(ContentImportTest, Complex64) {
constexpr ArrayDataType kType = ArrayDataType::kComplex64;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_COMPLEX64, 6, &node);
using cplx = std::complex<float>;
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001),
cplx(1.0002, -1.0002), cplx(1.0003, -1.0003),
cplx(1.0004, -1.0004), cplx(1.0005, -1.0005)));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001),
cplx(1.0002, -1.0002), cplx(1.0003, -1.0003),
cplx(1.0004, -1.0004), cplx(1.0004, -1.0004)));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0000, -1.0000),
cplx(1.0000, -1.0000), cplx(1.0000, -1.0000),
cplx(1.0000, -1.0000), cplx(1.0000, -1.0000)));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(0.0000, 0.0000), cplx(0.0000, 0.0000),
cplx(0.0000, 0.0000), cplx(0.0000, 0.0000),
cplx(0.0000, 0.0000), cplx(0.0000, 0.0000)));
}
std::vector<std::pair<tensorflow::DataType, ArrayDataType>> UnaryTestTypes() {
return {{DT_FLOAT, ArrayDataType::kFloat},
{DT_INT32, ArrayDataType::kInt32},
{DT_INT64, ArrayDataType::kInt64}};
}
class TensorContentTest : public ::testing::Test {
public:
template <ArrayDataType T>
std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) {
Model model;
auto status = ImportNode(node, &model);
CHECK(status.ok()) << status.message();
const auto& nodearray = model.GetArray("Node1");
return nodearray.GetBuffer<T>().data;
}
template <class T>
void NodeWithTensorContent(std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements,
NodeDef* node) {
node->set_op("Const");
node->set_name("Node1");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["dtype"] = dtype_attr;
auto allocated_content = std::make_unique<T[]>(num_elements);
tensorflow::TensorProto t;
t.set_dtype(dtype);
auto* s = t.mutable_tensor_shape();
for (const auto& d : shape) {
s->add_dim()->set_size(d);
}
switch (dtype) {
case DT_FLOAT:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i / 10000.0 + 1;
}
break;
case DT_INT32:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i % std::numeric_limits<int>::max() + 1;
}
break;
case DT_QUINT8:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i % std::numeric_limits<uint8_t>::max() + 1;
}
break;
case DT_INT64:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i + 1;
}
break;
case DT_STRING:
break;
case DT_BOOL:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = ((i % 2) == 0);
}
break;
default:
break;
}
t.set_tensor_content(
std::string(reinterpret_cast<const char*>(allocated_content.get()),
num_elements * sizeof(T)));
AttrValue value_attr;
SetAttrValue(t, &value_attr);
(*node->mutable_attr())["value"] = value_attr;
allocated_content.reset();
}
};
TEST_F(TensorContentTest, Int64) {
constexpr ArrayDataType kType = ArrayDataType::kInt64;
NodeDef node;
NodeWithTensorContent<int64_t>({1, 2, 3}, DT_INT64, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Int32) {
constexpr ArrayDataType kType = ArrayDataType::kInt32;
NodeDef node;
NodeWithTensorContent<int>({1, 2, 3}, DT_INT32, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Float) {
constexpr ArrayDataType kType = ArrayDataType::kFloat;
NodeDef node;
NodeWithTensorContent<float>({1, 2, 3}, DT_FLOAT, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
}
TEST_F(TensorContentTest, Quint8) {
constexpr ArrayDataType kType = ArrayDataType::kUint8;
NodeDef node;
NodeWithTensorContent<uint8_t>({1, 2, 3}, DT_QUINT8, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Bool) {
constexpr ArrayDataType kType = ArrayDataType::kBool;
NodeDef node;
NodeWithTensorContent<bool>({1, 2, 3}, DT_BOOL, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0));
}
class TypeImportTest : public ::testing::TestWithParam<
std::pair<tensorflow::DataType, ArrayDataType>> {
protected:
TypeImportTest() {}
void BuildUnaryNode(const std::string& op_name, tensorflow::DataType dtype,
NodeDef* node) {
node->set_op(op_name);
node->set_name("Node1");
node->add_input();
node->set_input(0, "Node0");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["T"] = dtype_attr;
}
};
TEST_P(TypeImportTest, BasicTypeInference) {
NodeDef node;
BuildUnaryNode("Atan", GetParam().first, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_THAT(op->output_data_types, ::testing::ElementsAre(GetParam().second));
}
INSTANTIATE_TEST_SUITE_P(BasicTypeInference, TypeImportTest,
::testing::ValuesIn(UnaryTestTypes()));
TEST(ImportTest, TypeInferenceWithFixedOutputType) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("IsFinite", {{1, 2}, {2, 3}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_THAT(op->output_data_types,
::testing::ElementsAre(ArrayDataType::kBool));
}
TEST(ImportTest, FailedTypeInference) {
NodeDef node;
node.set_op("Atan");
node.set_name("Node1");
node.add_input();
node.set_input(0, "Node0");
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_TRUE(op->output_data_types.empty());
}
TEST(ImportTest, UnsupportedOpWithOutputShapes) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{1, 2}, {2, 3}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_EQ(op->output_shapes.size(), 2);
ASSERT_THAT(op->output_shapes[0].dims(), ::testing::ElementsAre(1, 2));
ASSERT_THAT(op->output_shapes[1].dims(), ::testing::ElementsAre(2, 3));
}
TEST(ImportTest, UnsupportedOpWithWildcardOutputShapes) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{-1, 2}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_TRUE(op->output_shapes.empty());
}
TEST(ImportTest, UnsupportedOpWithMultipleOutputs) {
NodeDef node = BuildNode("ParseExample", {});
{
AttrValue value_attr;
SetAttrValue(2, &value_attr);
(*node.mutable_attr())["Nsparse"] = value_attr;
}
{
AttrValue value_attr;
std::vector<tensorflow::DataType> types;
types.push_back(tensorflow::DT_FLOAT);
types.push_back(tensorflow::DT_STRING);
SetAttrValue(types, &value_attr);
(*node.mutable_attr())["sparse_types"] = value_attr;
}
{
AttrValue value_attr;
std::vector<tensorflow::DataType> types;
types.push_back(tensorflow::DT_STRING);
types.push_back(tensorflow::DT_FLOAT);
types.push_back(tensorflow::DT_INT64);
SetAttrValue(types, &value_attr);
(*node.mutable_attr())["Tdense"] = value_attr;
}
Model model;
EXPECT_TRUE(ImportFlexNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_EQ(op->outputs.size(), 9);
ASSERT_EQ(op->output_data_types.size(), 9);
ASSERT_EQ(op->outputs[0], "Node1");
ASSERT_EQ(op->outputs[1], "Node1:1");
ASSERT_EQ(op->output_data_types[0], ArrayDataType::kInt64);
ASSERT_EQ(op->output_data_types[1], ArrayDataType::kInt64);
ASSERT_EQ(op->outputs[2], "Node1:2");
ASSERT_EQ(op->outputs[3], "Node1:3");
ASSERT_EQ(op->output_data_types[2], ArrayDataType::kFloat);
ASSERT_EQ(op->output_data_types[3], ArrayDataType::kString);
ASSERT_EQ(op->outputs[4], "Node1:4");
ASSERT_EQ(op->outputs[5], "Node1:5");
ASSERT_EQ(op->output_data_types[4], ArrayDataType::kInt64);
ASSERT_EQ(op->output_data_types[5], ArrayDataType::kInt64);
ASSERT_EQ(op->outputs[6], "Node1:6");
ASSERT_EQ(op->outputs[7], "Node1:7");
ASSERT_EQ(op->outputs[8], "Node1:8");
ASSERT_EQ(op->output_data_types[6], ArrayDataType::kString);
ASSERT_EQ(op->output_data_types[7], ArrayDataType::kFloat);
ASSERT_EQ(op->output_data_types[8], ArrayDataType::kInt64);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |
802 | cpp | tensorflow/tensorflow | tooling_util | tensorflow/lite/toco/tooling_util.cc | tensorflow/lite/toco/tooling_util_test.cc | #ifndef TENSORFLOW_LITE_TOCO_TOOLING_UTIL_H_
#define TENSORFLOW_LITE_TOCO_TOOLING_UTIL_H_
#include <algorithm>
#include <cmath>
#include <functional>
#include <iostream>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"
namespace std {
template <>
struct hash<toco::OperatorType> {
size_t operator()(const toco::OperatorType& op) const {
return std::hash<size_t>()(static_cast<size_t>(op));
}
};
}
namespace toco {
constexpr int kLogLevelModelChanged = 1;
constexpr int kLogLevelModelUnchanged = 2;
absl::string_view FindLongestCommonPrefix(absl::string_view a,
absl::string_view b);
std::string LogName(const Operator& op);
std::string ArrayDataTypeName(ArrayDataType data_type);
bool IsInputArray(const Model& model, const std::string& array_name);
bool IsOutputArray(const Model& model, const std::string& array_name);
bool IsArrayConsumed(const Model& model, const std::string& name);
int CountTrueOutputs(const Model& model, const Operator& op);
int CountOpsWithInput(const Model& model, const std::string& array_name);
bool DeleteArrayIfUnused(const std::string& array_name, Model* model);
void DeleteOpAndArrays(Model* model, const Operator* op);
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithOutput(
const Model& model, const std::string& array_name);
Operator* GetOpWithOutput(const Model& model, const std::string& array_name);
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithOutput(
Model& model, const std::string& array_name);
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithInput(
const Model& model, const std::string& array_name);
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithInput(
Model& model, const std::string& array_name);
Operator* GetOpWithInput(const Model& model, const std::string& array_name);
Operator* GetFirstOpWithInput(const Model& model,
const std::string& array_name);
void ReplaceArrayUsage(Model* model, const std::string& old_array_name,
const std::string& new_array_name);
std::vector<std::unique_ptr<Operator>>::const_iterator FindOp(
const Model& model, const Operator* op);
std::vector<std::unique_ptr<Operator>>::iterator FindOp(Model& model,
const Operator* op);
const char* OperatorTypeName(OperatorType type);
std::string HelpfulOperatorTypeName(const Operator& op);
bool OperatorSupportsFusedActivation(OperatorType type);
void DumpGraphvizVideoFrame(const Model& model);
void LogDump(int log_level, const std::string& message, const Model& model);
void LogSummary(int log_level, const std::string& message, const Model& model);
void ExtendShape(Shape* shape, int new_shape_size);
void UnextendShape(Shape* shape, int new_shape_size);
bool IsNonEmpty(const Shape& shape);
bool ShapesAgreeUpToBroadcasting(const Shape& shape0, const Shape& shape1);
bool ShapesAgreeUpToExtending(const Shape& shape0, const Shape& shape1);
inline ::tflite::RuntimeShape ToRuntimeShape(const Shape& shape) {
return ::tflite::RuntimeShape(shape.dimensions_count(), shape.dims().data());
}
bool IsArrayFullyConnectedWeights(const Model& model, const std::string& name);
int RequiredBufferSizeForShape(const Shape& shape);
bool IsConstantParameterArray(const Model& model, const std::string& name);
bool CompareConstantArrays(const Array& lhs_array, const Array& rhs_array);
void CheckNoMissingArray(const Model& model);
void CheckInvariants(const Model& model);
void CheckModelCounts(const Model& model);
void FixOperatorOrdering(Model* model);
void FixNoMissingArray(Model* model);
void FixNoOrphanedArray(Model* model);
void FixEdgeArrays(Model* model);
void DedupeConstantArrays(Model* model, size_t min_size);
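// Copies the typed buffer of `source_array` into `target_array`. Both arrays
// must already agree on element count and data type.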
template <ArrayDataType A>
void CopyArrayBuffer(const Array& source_array, Array* target_array) {
int source_buffer_size = RequiredBufferSizeForShape(source_array.shape());
int target_buffer_size = RequiredBufferSizeForShape(target_array->shape());
CHECK_EQ(source_buffer_size, target_buffer_size)
<< "Buffer sizes must match in element count";
CHECK(source_array.data_type == target_array->data_type)
<< "Data types must match";
if (source_array.buffer) {
const auto& source_buffer = source_array.GetBuffer<A>();
auto& target_buffer = target_array->GetMutableBuffer<A>();
target_buffer.data = source_buffer.data;
}
}
void InsertCopyOperator(Model* model, const std::string& source_array_name,
const std::string& target_array_name);
void CloneArray(Model* model, const std::string& source_array_name,
const std::string& target_array_name);
void ResolveModelFlags(const ModelFlags& model_flags, Model* model);
template <typename T>
T ConvertOperator(Operator* o, OperatorType type) {
if (o != nullptr && o->type == type) {
return static_cast<T>(o);
}
return nullptr;
}
void CheckIsReadyForQuantization(const Model& model);
bool ReshapeIsEquivalentToTranspose(const Model& model,
const TensorFlowReshapeOperator* op,
bool allow_extra_unary_dims);
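// Maps multi-dimensional `indices` into a flat row-major offset within
// `shape`. For example, shape {2, 3, 4} with indices {1, 2, 3} yields
// 1 * (3 * 4) + 2 * 4 + 3 = 23.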
inline int Offset(const Shape& shape, const std::vector<int>& indices) {
DCHECK_EQ(shape.dimensions_count(), indices.size());
const int dims_count = shape.dimensions_count();
int offset = 0;
for (int i = 0; i < dims_count; i++) {
const int index = indices[i];
DCHECK(index >= 0 && index < shape.dims(i));
offset *= shape.dims(i);
offset += index;
}
return offset;
}
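// Inverse of Offset(): converts a flat row-major index back into
// per-dimension indices, e.g. ReverseOffset(Shape({2, 3, 4}), 23) == {1, 2, 3}.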
inline std::vector<int> ReverseOffset(const Shape& shape, int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, RequiredBufferSizeForShape(shape));
const int dims_count = shape.dimensions_count();
std::vector<int> indices(dims_count);
int residual = index;
for (int i = dims_count - 1; i >= 0; i--) {
indices[i] = residual % shape.dims(i);
residual /= shape.dims(i);
}
return indices;
}
int ElementSize(ArrayDataType data_type);
void DropMinMax(Model* model, const std::string& array_name);
bool IsAllocatableTransientArray(const Model& model,
const std::string& array_name);
void CreateOrCheckRnnStateArray(const std::string& name, int size,
int state_num_dims, Model* model);
std::string AvailableArrayName(const Model& model, const std::string& name);
std::string ShapeToString(const Shape& shape);
void PrintArrayShape(Model* model, const std::string& name);
void MakeArrayDims(int num_dims, int batch, int height, int width, int depth,
std::vector<int>* out_dims);
std::string CreateInt32Array(Model* model, const std::string& param_name,
const std::vector<int>& value);
bool EstimateArithmeticOpsCount(const Model& model, const Operator& op,
int64_t* result);
bool EstimateArithmeticOpsCount(const Model& model, int64_t* result);
std::string FormattedNumber(int64_t x);
int AxesCount(AxesOrder axes_order);
void GetShuffleShape(AxesOrder input_axes_order, AxesOrder output_axes_order,
std::vector<int>* shuffle);
void ExtendShuffle(const std::vector<int>& input_shuffle, int newdim,
std::vector<int>* extended_shuffle);
void ShuffleDims(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, Shape* output_shape);
void ShuffleArray(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, const Shape& output_shape,
const float* input_data, float* output_data);
void ShuffleArray(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, const Shape& output_shape,
const uint8* input_data, uint8* output_data);
bool IsDiscardableArray(const Model& model, const std::string& array_name);
void CheckFinalDataTypesSatisfied(const Model& model);
ArrayDataType ConvertIODataTypeToArrayDataType(IODataType type);
void FinishBuildingRNNStates(Model* model);
void UseArraysExtraInfo(Model* model, bool quantize_output);
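// Computes the element count implied by `shape` into `*num_elements`,
// rejecting negative dimensions and guarding against overflow of the target
// integer type. For example, NumElements(std::vector<int>{2, 3, 4}, &count)
// succeeds with count == 24.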
template <typename T, typename U>
tensorflow::Status NumElements(const std::vector<T>& shape, U* num_elements) {
static_assert(
std::numeric_limits<T>::max() <= std::numeric_limits<uint64_t>::max(),
"vector type exceed capabilities of NumElements");
*num_elements = 1;
for (const T& dim : shape) {
if (dim < 0) {
return tensorflow::errors::InvalidArgument(
"Tensor shape should not include negative values");
}
if (*num_elements != 0 &&
static_cast<uint64_t>(dim) >
std::numeric_limits<U>::max() / *num_elements) {
*num_elements = 0;
return tensorflow::errors::InvalidArgument("Tensor shape is too large");
}
*num_elements *= dim;
}
return absl::OkStatus();
}
void UndoWeightsShuffling(Model* model);
void CopyMinMaxAndQuantizationRelatedFields(const Array& src, Array* dst);
bool DeleteArrayIfUnusedOutsideOfOp(const std::string& array_name,
const Operator* op, Model* model);
}
#endif
#include "tensorflow/lite/toco/tooling_util.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "re2/re2.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/dump_graphviz.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
namespace toco {
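// Returns the longest common prefix of `a` and `b`, as a view into `a`,
// e.g. FindLongestCommonPrefix("conv/weights", "conv/bias") == "conv/".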
absl::string_view FindLongestCommonPrefix(absl::string_view a,
absl::string_view b) {
if (a.empty() || b.empty()) return absl::string_view();
const char* pa = a.data();
const char* pb = b.data();
size_t count = 0;
const size_t limit = std::min(a.size(), b.size());
while (count < limit && *pa == *pb) {
++pa;
++pb;
++count;
}
return absl::string_view(a.data(), count);
}
std::string LogName(const Operator& op) {
const std::string& opname = HelpfulOperatorTypeName(op);
if (op.outputs.empty()) {
return toco::port::StringF("{%s operator}", opname);
} else {
return toco::port::StringF("{%s operator with output %s}", opname,
op.outputs[0]);
}
}
std::string ArrayDataTypeName(ArrayDataType data_type) {
switch (data_type) {
case ArrayDataType::kFloat:
return "float";
case ArrayDataType::kInt8:
return "int8";
case ArrayDataType::kUint8:
return "uint8";
case ArrayDataType::kInt16:
return "int16";
case ArrayDataType::kUint16:
return "uint16";
case ArrayDataType::kInt32:
return "int32";
case ArrayDataType::kUint32:
return "uint32";
case ArrayDataType::kInt64:
return "int64";
case ArrayDataType::kUint64:
return "uint64";
case ArrayDataType::kString:
return "string";
case ArrayDataType::kBool:
return "bool";
case ArrayDataType::kComplex64:
return "complex64";
case ArrayDataType::kNone:
return "None";
default:
LOG(FATAL) << "Unhandled array data type " << static_cast<int>(data_type);
}
}
bool IsInputArray(const Model& model, const std::string& array_name) {
for (const auto& input_array : model.flags.input_arrays()) {
if (array_name == input_array.name()) {
return true;
}
}
return false;
}
bool IsOutputArray(const Model& model, const std::string& array_name) {
for (const auto& output_array : model.flags.output_arrays()) {
if (array_name == output_array) {
return true;
}
}
return false;
}
bool IsArrayConsumed(const Model& model, const std::string& name) {
if (GetOpWithInput(model, name)) {
return true;
}
if (IsOutputArray(model, name)) {
return true;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (rnn_state.back_edge_source_array() == name) {
return true;
}
}
return false;
}
int CountTrueOutputs(const Model& model, const Operator& op) {
int count = 0;
for (const std::string& output : op.outputs) {
if (IsArrayConsumed(model, output)) {
++count;
}
}
return count;
}
int CountOpsWithInput(const Model& model, const std::string& array_name) {
int count = 0;
for (const auto& op : model.operators) {
for (auto& input : op->inputs) {
if (input == array_name) {
count++;
break;
}
}
}
return count;
}
bool DeleteArrayIfUnused(const std::string& array_name, Model* model) {
if (IsDiscardableArray(*model, array_name) &&
CountOpsWithInput(*model, array_name) == 0 &&
GetOpWithOutput(*model, array_name) == nullptr) {
model->EraseArray(array_name);
return true;
}
return false;
}
bool DeleteArrayIfUnusedOutsideOfOp(const std::string& array_name,
const Operator* op, Model* model) {
if (!IsDiscardableArray(*model, array_name)) {
return false;
}
if (CountOpsWithInput(*model, array_name) > 1) {
return false;
}
const Operator* op_having_this_as_input = GetOpWithInput(*model, array_name);
if (op_having_this_as_input && op_having_this_as_input != op) {
return false;
}
const Operator* op_having_this_as_output =
GetOpWithOutput(*model, array_name);
if (op_having_this_as_output && op_having_this_as_output != op) {
return false;
}
model->EraseArray(array_name);
return true;
}
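// Removes `op` from the model, first erasing any of its input/output arrays
// that are discardable and not used by any other operator.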
void DeleteOpAndArrays(Model* model, const Operator* op) {
for (const std::string& array_name : op->inputs) {
DeleteArrayIfUnusedOutsideOfOp(array_name, op, model);
}
for (const std::string& array_name : op->outputs) {
DeleteArrayIfUnusedOutsideOfOp(array_name, op, model);
}
auto op_it = FindOp(*model, op);
CHECK(op_it != model->operators.end());
model->operators.erase(op_it);
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithOutput(
const Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& output : it->get()->outputs) {
if (output == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithOutput(
Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& output : it->get()->outputs) {
if (output == array_name) {
return it;
}
}
}
return model.operators.end();
}
Operator* GetOpWithOutput(const Model& model, const std::string& array_name) {
auto it = FindOpWithOutput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithInput(
const Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithInput(
Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOp(
const Model& model, const Operator* op) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
if (it->get() == op) {
return it;
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOp(Model& model,
const Operator* op) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
if (it->get() == op) {
return it;
}
}
return model.operators.end();
}
Operator* GetOpWithInput(const Model& model, const std::string& array_name) {
auto it = FindOpWithInput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
Operator* GetFirstOpWithInput(const Model& model,
const std::string& array_name) {
auto it = FindOpWithInput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
void ReplaceArrayUsage(Model* model, const std::string& old_array_name,
const std::string& new_array_name) {
for (auto& op_it : model->operators) {
Operator* op = op_it.get();
for (size_t i = 0; i < op->inputs.size(); ++i) {
if (op->inputs[i] == old_array_name) {
op->inputs[i] = new_array_name;
}
}
for (size_t i = 0; i < op->outputs.size(); ++i) {
if (op->outputs[i] == old_array_name) {
op->outputs[i] = new_array_name;
}
}
}
}
std::string FormatArraysList(const Model& model,
const std::vector<std::string>& list) {
if (list.empty()) {
return "[]";
}
std::string result = "";
if (list.size() > 1) {
result += "[ ";
}
for (std::size_t i = 0; i < list.size(); i++) {
if (i > 0) {
result += ", ";
}
result += list[i];
}
if (list.size() > 1) {
result += " ]";
}
return result;
}
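// Maps an OperatorType enum value to its display name via a local
// case-generating macro; an unhandled value is a fatal error.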
const char* OperatorTypeName(OperatorType type) {
switch (type) {
#define HANDLE_OPERATORTYPENAME_CASE(c) \
case OperatorType::k##c: \
return #c;
HANDLE_OPERATORTYPENAME_CASE(Abs)
HANDLE_OPERATORTYPENAME_CASE(Add)
HANDLE_OPERATORTYPENAME_CASE(AddN)
HANDLE_OPERATORTYPENAME_CASE(AveragePool)
HANDLE_OPERATORTYPENAME_CASE(BatchMatMul)
HANDLE_OPERATORTYPENAME_CASE(BatchNormalization)
HANDLE_OPERATORTYPENAME_CASE(Conv)
HANDLE_OPERATORTYPENAME_CASE(Concatenation)
HANDLE_OPERATORTYPENAME_CASE(DepthwiseConv)
HANDLE_OPERATORTYPENAME_CASE(DepthToSpace)
HANDLE_OPERATORTYPENAME_CASE(SpaceToDepth)
HANDLE_OPERATORTYPENAME_CASE(FullyConnected)
HANDLE_OPERATORTYPENAME_CASE(HardSwish)
HANDLE_OPERATORTYPENAME_CASE(Dequantize)
HANDLE_OPERATORTYPENAME_CASE(L2Normalization)
HANDLE_OPERATORTYPENAME_CASE(LocalResponseNormalization)
HANDLE_OPERATORTYPENAME_CASE(Log)
HANDLE_OPERATORTYPENAME_CASE(Logistic)
HANDLE_OPERATORTYPENAME_CASE(LstmCell)
HANDLE_OPERATORTYPENAME_CASE(MaxPool)
HANDLE_OPERATORTYPENAME_CASE(L2Pool)
HANDLE_OPERATORTYPENAME_CASE(FakeQuant)
HANDLE_OPERATORTYPENAME_CASE(Mul)
HANDLE_OPERATORTYPENAME_CASE(RandomUniform)
HANDLE_OPERATORTYPENAME_CASE(Elu)
HANDLE_OPERATORTYPENAME_CASE(Relu)
HANDLE_OPERATORTYPENAME_CASE(Relu1)
HANDLE_OPERATORTYPENAME_CASE(Relu6)
HANDLE_OPERATORTYPENAME_CASE(PRelu)
HANDLE_OPERATORTYPENAME_CASE(ReorderAxes)
HANDLE_OPERATORTYPENAME_CASE(Softmax)
HANDLE_OPERATORTYPENAME_CASE(LogSoftmax)
HANDLE_OPERATORTYPENAME_CASE(Div)
HANDLE_OPERATORTYPENAME_CASE(Tanh)
HANDLE_OPERATORTYPENAME_CASE(Sin)
HANDLE_OPERATORTYPENAME_CASE(All)
HANDLE_OPERATORTYPENAME_CASE(Assert)
HANDLE_OPERATORTYPENAME_CASE(ExpandDims)
HANDLE_OPERATORTYPENAME_CASE(Fill)
HANDLE_OPERATORTYPENAME_CASE(FloorMod)
HANDLE_OPERATORTYPENAME_CASE(FloorDiv)
HANDLE_OPERATORTYPENAME_CASE(Greater)
HANDLE_OPERATORTYPENAME_CASE(GreaterEqual)
HANDLE_OPERATORTYPENAME_CASE(Identity)
HANDLE_OPERATORTYPENAME_CASE(Less)
HANDLE_OPERATORTYPENAME_CASE(LessEqual)
HANDLE_OPERATORTYPENAME_CASE(MatMul)
HANDLE_OPERATORTYPENAME_CASE(ReduceMax)
HANDLE_OPERATORTYPENAME_CASE(Maximum)
HANDLE_OPERATORTYPENAME_CASE(Merge)
HANDLE_OPERATORTYPENAME_CASE(ReduceMin)
HANDLE_OPERATORTYPENAME_CASE(Minimum)
HANDLE_OPERATORTYPENAME_CASE(Neg)
HANDLE_OPERATORTYPENAME_CASE(OneHot)
HANDLE_OPERATORTYPENAME_CASE(Pack)
HANDLE_OPERATORTYPENAME_CASE(Pad)
HANDLE_OPERATORTYPENAME_CASE(PadV2)
HANDLE_OPERATORTYPENAME_CASE(StridedSlice)
HANDLE_OPERATORTYPENAME_CASE(Range)
HANDLE_OPERATORTYPENAME_CASE(Rank)
HANDLE_OPERATORTYPENAME_CASE(Reshape)
HANDLE_OPERATORTYPENAME_CASE(Squeeze)
HANDLE_OPERATORTYPENAME_CASE(Rsqrt)
HANDLE_OPERATORTYPENAME_CASE(SegmentSum)
HANDLE_OPERATORTYPENAME_CASE(Shape)
HANDLE_OPERATORTYPENAME_CASE(Slice)
HANDLE_OPERATORTYPENAME_CASE(Split)
HANDLE_OPERATORTYPENAME_CASE(SplitV)
HANDLE_OPERATORTYPENAME_CASE(Sqrt)
HANDLE_OPERATORTYPENAME_CASE(Square)
HANDLE_OPERATORTYPENAME_CASE(Switch)
HANDLE_OPERATORTYPENAME_CASE(Sub)
HANDLE_OPERATORTYPENAME_CASE(Sum)
HANDLE_OPERATORTYPENAME_CASE(Tile)
HANDLE_OPERATORTYPENAME_CASE(Transpose)
HANDLE_OPERATORTYPENAME_CASE(TransposeConv)
HANDLE_OPERATORTYPENAME_CASE(Concat)
HANDLE_OPERATORTYPENAME_CASE(ConcatV2)
HANDLE_OPERATORTYPENAME_CASE(Cast)
HANDLE_OPERATORTYPENAME_CASE(Floor)
HANDLE_OPERATORTYPENAME_CASE(Ceil)
HANDLE_OPERATORTYPENAME_CASE(Round)
HANDLE_OPERATORTYPENAME_CASE(Gather)
HANDLE_OPERATORTYPENAME_CASE(GatherNd)
HANDLE_OPERATORTYPENAME_CASE(ResizeBilinear)
HANDLE_OPERATORTYPENAME_CASE(SpaceToBatchND)
HANDLE_OPERATORTYPENAME_CASE(BatchToSpaceND)
HANDLE_OPERATORTYPENAME_CASE(Mean)
HANDLE_OPERATORTYPENAME_CASE(ReduceProd)
HANDLE_OPERATORTYPENAME_CASE(Svdf)
HANDLE_OPERATORTYPENAME_CASE(ArgMax)
HANDLE_OPERATORTYPENAME_CASE(ArgMin)
HANDLE_OPERATORTYPENAME_CASE(TopK_V2)
HANDLE_OPERATORTYPENAME_CASE(Unsupported)
HANDLE_OPERATORTYPENAME_CASE(Exp)
HANDLE_OPERATORTYPENAME_CASE(DynamicPartition)
HANDLE_OPERATORTYPENAME_CASE(DynamicStitch)
HANDLE_OPERATORTYPENAME_CASE(Select)
HANDLE_OPERATORTYPENAME_CASE(SparseToDense)
HANDLE_OPERATORTYPENAME_CASE(Equal)
HANDLE_OPERATORTYPENAME_CASE(NotEqual)
HANDLE_OPERATORTYPENAME_CASE(Pow)
HANDLE_OPERATORTYPENAME_CASE(Any)
HANDLE_OPERATORTYPENAME_CASE(LogicalAnd)
HANDLE_OPERATORTYPENAME_CASE(LogicalNot)
HANDLE_OPERATORTYPENAME_CASE(LogicalOr)
HANDLE_OPERATORTYPENAME_CASE(CTCBeamSearchDecoder)
HANDLE_OPERATORTYPENAME_CASE(Unpack)
HANDLE_OPERATORTYPENAME_CASE(ZerosLike)
HANDLE_OPERATORTYPENAME_CASE(UnidirectionalSequenceLstm)
HANDLE_OPERATORTYPENAME_CASE(BidirectionalSequenceLstm)
HANDLE_OPERATORTYPENAME_CASE(BidirectionalSequenceRnn)
HANDLE_OPERATORTYPENAME_CASE(ResizeNearestNeighbor)
HANDLE_OPERATORTYPENAME_CASE(LeakyRelu)
HANDLE_OPERATORTYPENAME_CASE(SquaredDifference)
HANDLE_OPERATORTYPENAME_CASE(MirrorPad)
HANDLE_OPERATORTYPENAME_CASE(Unique)
HANDLE_OPERATORTYPENAME_CASE(UnidirectionalSequenceRnn)
HANDLE_OPERATORTYPENAME_CASE(ReverseV2)
HANDLE_OPERATORTYPENAME_CASE(Cos)
HANDLE_OPERATORTYPENAME_CASE(Where)
HANDLE_OPERATORTYPENAME_CASE(ReverseSequence)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiag)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiag)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiagV2)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiagV2)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiagV3)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiagV3)
HANDLE_OPERATORTYPENAME_CASE(ScatterNd)
default:
LOG(FATAL) << "Unhandled op type";
#undef HANDLE_OPERATORTYPENAME_CASE
}
}
std::string HelpfulOperatorTypeName(const Operator& op) {
if (op.type == OperatorType::kUnsupported) {
return toco::port::StringF(
"(Unsupported TensorFlow op: %s)",
static_cast<const TensorFlowUnsupportedOperator&>(op).tensorflow_op);
}
return OperatorTypeName(op.type);
}
bool OperatorSupportsFusedActivation(OperatorType type) {
switch (type) {
case OperatorType::kAdd:
case OperatorType::kAveragePool:
case OperatorType::kBatchNormalization:
case OperatorType::kConv:
case OperatorType::kDepthwiseConv:
case OperatorType::kDiv:
case OperatorType::kFullyConnected:
case OperatorType::kL2Pool:
case OperatorType::kMaxPool:
case OperatorType::kMul:
case OperatorType::kSub:
case OperatorType::kSquaredDifference:
return true;
default:
return false;
}
}
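// Logs a per-type operator count for the model, grouping duplicates through
// an unordered_multiset and advancing past each type by its count.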
void LogSummary(int log_level, const Model& model) {
VLOG(log_level) << "Operators summary (" << model.operators.size()
<< " operators):";
std::unordered_multiset<OperatorType> ops_by_type;
for (const auto& op : model.operators) {
ops_by_type.insert(op->type);
}
auto it = ops_by_type.begin();
while (it != ops_by_type.end()) {
int count = ops_by_type.count(*it);
VLOG(log_level) << " " << OperatorTypeName(*it) << ": " << count;
std::advance(it, count);
}
}
void LogArray(int log_level, const Model& model, const std::string& name) {
VLOG(log_level) << "Array: " << name;
if (!model.HasArray(name)) {
VLOG(log_level) << " DOES NOT EXIST";
return;
}
const auto& array = model.GetArray( | #include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
enum class Agreement { kBroadcast, kExtend, kBroadcastNotExtend, kNeither };
struct ShapePair {
Shape left;
Shape right;
Agreement agreement;
};
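// Table of shape pairs and the agreement expected between them; it drives the
// parameterized ShapeTest below.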
std::vector<ShapePair> CreateShapePairs() {
return std::vector<ShapePair>(
{
{Shape({3}), Shape({3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcast},
{Shape({8, 1, 6, 1}), Shape({7, 1, 5}), Agreement::kBroadcast},
{Shape({}), Shape({3}), Agreement::kBroadcast},
{Shape({}), Shape({3, 1}), Agreement::kBroadcast},
{Shape({3}), Shape({3}), Agreement::kExtend},
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({1, 1, 3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({1, 3}), Agreement::kExtend},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcastNotExtend},
{Shape({5, 4}), Shape({1}), Agreement::kBroadcastNotExtend},
{Shape({5, 4}), Shape({4}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({15, 1, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 1}), Agreement::kBroadcastNotExtend},
{Shape({3, 1}), Shape({}), Agreement::kBroadcastNotExtend},
{Shape({3}), Shape({4}), Agreement::kNeither},
{Shape({2, 1}), Shape({8, 4, 3}), Agreement::kNeither}});
}
class ShapeTest : public ::testing::TestWithParam<ShapePair> {};
TEST_P(ShapeTest, Agrees) {
const ShapePair& param = GetParam();
switch (param.agreement) {
case Agreement::kBroadcast: {
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
case Agreement::kExtend: {
EXPECT_TRUE(ShapesAgreeUpToExtending(param.left, param.right));
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
case Agreement::kBroadcastNotExtend: {
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
EXPECT_FALSE(ShapesAgreeUpToExtending(param.left, param.right));
break;
}
case Agreement::kNeither: {
EXPECT_FALSE(ShapesAgreeUpToExtending(param.left, param.right));
EXPECT_FALSE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
}
}
INSTANTIATE_TEST_SUITE_P(AgreeBroadcast, ShapeTest,
::testing::ValuesIn(CreateShapePairs()));
static const char kNegativeValuesMessage[] =
"Tensor shape should not include negative values";
static const char kLargeTensorMessage[] = "Tensor shape is too large";
TEST(NumElementsTest, Int) {
int count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int>{1024, 1024, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 2146435072);
status = NumElements(std::vector<int>{1024, 0, 2048}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 0);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int>{1024, 1024, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Int32) {
int32_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int32_t>{1024, 1024, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 2146435072);
status = NumElements(std::vector<int32_t>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int32_t>{1024, 1024, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Int64) {
int64_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int64_t>{16777216, 16777216, 32767}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 9223090561878065152LL);
status = NumElements(std::vector<int64_t>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int64_t>{16777216, 16777216, 32768}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, UnsignedInt32) {
uint32_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<uint32_t>{1024, 2048, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 4292870144);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<uint32_t>{1024, 2048, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, UnsignedInt64) {
uint64_t count;
tensorflow::Status status = absl::OkStatus();
status =
NumElements(std::vector<uint64_t>{16777216, 16777216, 65535}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 18446462598732840960ULL);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status =
NumElements(std::vector<uint64_t>{16777216, 16777216, 65536}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Scalar) {
tensorflow::Status status = absl::OkStatus();
int32_t count;
status = NumElements(std::vector<int32_t>{}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 1);
uint64_t countu64;
status = NumElements(std::vector<uint64_t>{}, &countu64);
EXPECT_TRUE(status.ok());
EXPECT_EQ(countu64, 1ULL);
}
TEST(FusedActivationTest, DefaultsToUnfused) {
EXPECT_TRUE(OperatorSupportsFusedActivation(OperatorType::kAdd));
EXPECT_FALSE(OperatorSupportsFusedActivation(OperatorType::kNone));
EXPECT_FALSE(OperatorSupportsFusedActivation(static_cast<OperatorType>(255)));
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |
803 | cpp | tensorflow/tensorflow | conversion_log_util | tensorflow/lite/toco/logging/conversion_log_util.cc | tensorflow/lite/toco/logging/conversion_log_util_test.cc | #ifndef TENSORFLOW_LITE_TOCO_LOGGING_CONVERSION_LOG_UTIL_H_
#define TENSORFLOW_LITE_TOCO_LOGGING_CONVERSION_LOG_UTIL_H_
#include <map>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/toco/logging/toco_conversion_log.pb.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
std::string SanitizeErrorMessage(absl::string_view error_message);
void PopulateConversionLog(const Model& model, TocoConversionLog* log);
std::vector<std::string> GetOperatorNames(const Model& model);
void CountOperatorsByType(const Model& model,
std::map<std::string, int>* built_in_ops,
std::map<std::string, int>* custom_ops,
std::map<std::string, int>* select_ops);
void GetInputAndOutputTypes(
const Model& model,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* input_types,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* output_types);
void GetOpSignatures(
const Model& model,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* op_signatures);
std::string GetModelHash(const Model& model);
}
#endif
#include "tensorflow/lite/toco/logging/conversion_log_util.h"
#include <string>
#ifdef __linux__
#include <sys/utsname.h>
#endif
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tflite/export.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tooling_util.h"
#include "tensorflow/lite/version.h"
namespace toco {
namespace {
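// Best-effort operator name: prefer the op recorded in the serialized
// TensorFlow NodeDef, then the tensorflow_op of an unsupported operator,
// and finally fall back to the toco OperatorType name.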
std::string TryGetOperatorName(const Operator& op) {
std::string op_name;
if (!op.tensorflow_node_def.empty()) {
tensorflow::NodeDef node_def;
if (!node_def.ParseFromString(op.tensorflow_node_def)) {
LOG(ERROR) << "Failed to parse Tensorflow NodeDef";
} else {
op_name = node_def.op();
if (!op_name.empty()) return op_name;
}
}
if (op.type == OperatorType::kUnsupported) {
const TensorFlowUnsupportedOperator& unsupported_op =
static_cast<const TensorFlowUnsupportedOperator&>(op);
if (!unsupported_op.tensorflow_op.empty()) {
op_name = unsupported_op.tensorflow_op;
return op_name;
}
}
op_name = OperatorTypeName(op.type);
return op_name;
}
std::string GetOSVersion() {
std::string os_info;
#ifdef __linux__
utsname info;
if (uname(&info)) {
LOG(ERROR) << "Cannot get OS info.";
return "";
}
os_info =
std::string(info.sysname) + ";OSVer=" + std::string(info.release) + ";";
#endif
return os_info;
}
std::string ShapeToStringNoSpace(const Shape& shape) {
if (shape.dimensions_count() == 0) {
return "[]";
}
return absl::StrCat("[", absl::StrJoin(shape.dims(), ","), "]");
}
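// Builds the per-op signature string logged for conversion, of the form
// "INPUT:<shape>::<type>::...OUTPUT:...::NAME:<op>::VERSION:<n>", emitting
// "None" when a shape or op version is unavailable.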
std::string GetOperatorSignature(
const Model& model, const Operator& op,
const std::map<OperatorType, std::unique_ptr<tflite::BaseOperator>>&
op_types_map) {
std::string op_signature;
constexpr char delimiter[] = "::";
op_signature.append("INPUT:");
for (const auto& input : op.inputs) {
const auto& array = model.GetArray(input);
if (array.has_shape()) {
op_signature.append(ShapeToStringNoSpace(array.shape()));
} else {
op_signature.append("None");
}
op_signature.append(delimiter);
op_signature.append(ArrayDataTypeName(array.data_type) + delimiter);
}
op_signature.append("OUTPUT:");
for (const auto& output : op.outputs) {
const auto& array = model.GetArray(output);
if (array.has_shape()) {
op_signature.append(ShapeToStringNoSpace(array.shape()));
} else {
op_signature.append("None");
}
op_signature.append(delimiter);
op_signature.append(ArrayDataTypeName(array.data_type) + delimiter);
}
op_signature.append("NAME:");
op_signature.append(TryGetOperatorName(op) + delimiter);
op_signature.append("VERSION:");
OperatorSignature toco_op_signature;
toco_op_signature.op = &op;
toco_op_signature.model = &model;
if (op_types_map.find(op.type) != op_types_map.end()) {
const int version = op_types_map.at(op.type)->GetVersion(toco_op_signature);
op_signature.append(std::to_string(version));
} else {
op_signature.append("None");
}
return op_signature;
}
}
std::vector<std::string> GetOperatorNames(const Model& model) {
std::vector<std::string> op_names;
op_names.reserve(model.operators.size());
for (const auto& op : model.operators) {
op_names.push_back(TryGetOperatorName(*op));
}
return op_names;
}
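// Buckets every operator into built-in, custom, or select (flex) counts based
// on its tflite::details::OperatorKey classification.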
void CountOperatorsByType(const Model& model,
std::map<std::string, int>* built_in_ops,
std::map<std::string, int>* custom_ops,
std::map<std::string, int>* select_ops) {
for (const auto& op : model.operators) {
OperatorSignature op_signature = {op.get(), &model};
const auto ops_by_type =
tflite::BuildOperatorByTypeMap(true );
tflite::details::OperatorKey op_key(op_signature, ops_by_type,
true );
const std::string op_name = TryGetOperatorName(*op);
if (op_key.is_custom_op()) {
(*custom_ops)[op_name]++;
} else if (op_key.is_flex_op()) {
(*select_ops)[op_name]++;
} else {
(*built_in_ops)[op_name]++;
}
}
}
void GetInputAndOutputTypes(
const Model& model,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* input_types,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* output_types) {
for (const auto& input_array : model.flags.input_arrays()) {
const Array& array = model.GetArray(input_array.name());
input_types->Add(ArrayDataTypeName(array.data_type));
}
for (const auto& output_array : model.flags.output_arrays()) {
const Array& array = model.GetArray(output_array);
output_types->Add(ArrayDataTypeName(array.data_type));
}
}
std::string GetTfLiteVersion() { return TFLITE_VERSION_STRING; }
std::string GetCachedOSVersion() {
static std::string* version = new std::string(GetOSVersion());
return *version;
}
void GetOpSignatures(
const Model& model,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* op_signatures) {
const auto& op_types_map =
tflite::BuildOperatorByTypeMap(true );
for (const auto& op : model.operators) {
op_signatures->Add(GetOperatorSignature(model, *op, op_types_map));
}
}
std::string GetModelHash(const Model& model) {
return "";
}
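// Keeps only the sentences listing flex-supported ops and ops that need a
// custom implementation; the rest of the converter error message is dropped.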
std::string SanitizeErrorMessage(absl::string_view error_message) {
const std::string s1 = "Ops that can be supported by the flex runtime";
const std::string s2 = "Ops that need custom implementation";
std::string pruned_message;
size_t pos = error_message.find(s1);
if (pos != std::string::npos) {
auto end = error_message.find('.', pos);
pruned_message.append(error_message.substr(pos, end - pos + 1));
}
pos = error_message.find(s2);
if (pos != std::string::npos) {
auto end = error_message.find('.', pos);
pruned_message.append(error_message.substr(pos, end - pos + 1));
}
return pruned_message;
}
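// Fills the TocoConversionLog proto with op names, signatures, per-type
// counts, I/O tensor types, model size, and version/OS information.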
void PopulateConversionLog(const Model& model, TocoConversionLog* log) {
const std::vector<std::string> op_names = GetOperatorNames(model);
for (const auto& op_name : op_names) {
log->add_op_list(op_name);
}
TFLITE_PROTO_NS::RepeatedPtrField<std::string> op_signatures;
GetOpSignatures(model, &op_signatures);
log->mutable_op_signatures()->CopyFrom(op_signatures);
std::map<std::string, int> custom_ops, select_ops, built_in_ops;
CountOperatorsByType(model, &built_in_ops, &custom_ops, &select_ops);
log->mutable_custom_ops()->insert(custom_ops.cbegin(), custom_ops.cend());
log->mutable_built_in_ops()->insert(built_in_ops.cbegin(),
built_in_ops.cend());
log->mutable_select_ops()->insert(select_ops.cbegin(), select_ops.cend());
TFLITE_PROTO_NS::RepeatedPtrField<std::string> input_types, output_types;
GetInputAndOutputTypes(model, &input_types, &output_types);
log->mutable_input_tensor_types()->CopyFrom(input_types);
log->mutable_output_tensor_types()->CopyFrom(output_types);
log->set_log_generation_ts(absl::ToUnixMicros(absl::Now()));
log->set_model_size(model.operators.size());
log->set_tf_lite_version(GetTfLiteVersion());
log->set_os_version(GetCachedOSVersion());
log->set_model_hash(GetModelHash(model));
}
} | #include "tensorflow/lite/toco/logging/conversion_log_util.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
namespace toco {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
TEST(ConversionLogUtilTest, TestGetOperatorNames) {
Model model;
model.operators.push_back(std::make_unique<ConvOperator>());
model.operators.push_back(std::make_unique<MeanOperator>());
model.operators.push_back(std::make_unique<NegOperator>());
auto avg_pool_3d = std::make_unique<TensorFlowUnsupportedOperator>();
avg_pool_3d->tensorflow_op = "AvgPool3D";
tensorflow::NodeDef node_def;
node_def.set_op("AvgPool3D");
node_def.SerializeToString(&avg_pool_3d->tensorflow_node_def);
model.operators.push_back(std::move(avg_pool_3d));
auto my_custom_op = std::make_unique<TensorFlowUnsupportedOperator>();
my_custom_op->tensorflow_op = "MyAwesomeCustomOp";
model.operators.push_back(std::move(my_custom_op));
const auto& output = GetOperatorNames(model);
EXPECT_THAT(output, ElementsAre("Conv", "Mean", "Neg", "AvgPool3D",
"MyAwesomeCustomOp"));
}
TEST(ConversionLogUtilTest, TestCountOperatorsByType) {
Model model;
std::unique_ptr<ConvOperator> conv1(new ConvOperator());
const std::string conv1_input_name = "conv_input1";
const std::string conv1_filter_name = "conv_filter1";
const std::string conv1_output_name = "conv_output1";
conv1->inputs.push_back(conv1_input_name);
conv1->inputs.push_back(conv1_filter_name);
conv1->outputs.push_back(conv1_output_name);
auto& array_map = model.GetMutableArrayMap();
array_map[conv1_input_name] = std::make_unique<Array>();
array_map[conv1_filter_name] = std::make_unique<Array>();
array_map[conv1_output_name] = std::make_unique<Array>();
std::unique_ptr<ConvOperator> conv2(new ConvOperator());
const std::string conv2_input_name = "conv_input2";
const std::string conv2_filter_name = "conv_filter2";
const std::string conv2_output_name = "conv_output2";
conv2->inputs.push_back(conv2_input_name);
conv2->inputs.push_back(conv2_filter_name);
conv2->outputs.push_back(conv2_output_name);
array_map[conv2_input_name] = std::make_unique<Array>();
array_map[conv2_filter_name] = std::make_unique<Array>();
array_map[conv2_output_name] = std::make_unique<Array>();
std::unique_ptr<MeanOperator> mean(new MeanOperator());
const std::string mean_input_name = "mean_input";
mean->inputs.push_back(mean_input_name);
array_map[mean_input_name] = std::make_unique<Array>();
auto avg_pool_3d = std::make_unique<TensorFlowUnsupportedOperator>();
avg_pool_3d->tensorflow_op = "AvgPool3D";
tensorflow::NodeDef node_def;
node_def.set_op("AvgPool3D");
node_def.SerializeToString(&avg_pool_3d->tensorflow_node_def);
auto elu_grad = std::make_unique<TensorFlowUnsupportedOperator>();
elu_grad->tensorflow_op = "EluGrad";
node_def.set_op("EluGrad");
node_def.SerializeToString(&elu_grad->tensorflow_node_def);
auto my_custom_op = std::make_unique<TensorFlowUnsupportedOperator>();
my_custom_op->tensorflow_op = "MyAwesomeCustomOp";
model.operators.push_back(std::move(conv1));
model.operators.push_back(std::move(conv2));
model.operators.push_back(std::move(mean));
model.operators.push_back(std::move(avg_pool_3d));
model.operators.push_back(std::move(elu_grad));
model.operators.push_back(std::move(my_custom_op));
std::map<std::string, int> built_in_ops, select_ops, custom_ops;
CountOperatorsByType(model, &built_in_ops, &custom_ops, &select_ops);
EXPECT_THAT(built_in_ops,
UnorderedElementsAre(std::pair<std::string, int>("Conv", 2),
std::pair<std::string, int>("Mean", 1)));
EXPECT_THAT(select_ops,
UnorderedElementsAre(std::pair<std::string, int>("AvgPool3D", 1),
std::pair<std::string, int>("EluGrad", 1)));
EXPECT_THAT(custom_ops, UnorderedElementsAre(std::pair<std::string, int>(
"MyAwesomeCustomOp", 1)));
}
TEST(ConversionLogUtilTest, TestGetInputAndOutputTypes) {
Model model;
auto& array_map = model.GetMutableArrayMap();
const std::string input1 = "conv_input";
const std::string input2 = "conv_filter";
const std::string input3 = "feature";
const std::string output = "softmax";
array_map[input1] = std::make_unique<Array>();
array_map[input1]->data_type = ArrayDataType::kFloat;
array_map[input2] = std::make_unique<Array>();
array_map[input2]->data_type = ArrayDataType::kFloat;
array_map[input3] = std::make_unique<Array>();
array_map[input3]->data_type = ArrayDataType::kInt16;
array_map[output] = std::make_unique<Array>();
array_map[output]->data_type = ArrayDataType::kFloat;
InputArray input_arrays[3];
input_arrays[0].set_name(input1);
input_arrays[1].set_name(input2);
input_arrays[2].set_name(input3);
*model.flags.add_input_arrays() = input_arrays[0];
*model.flags.add_input_arrays() = input_arrays[1];
*model.flags.add_input_arrays() = input_arrays[2];
model.flags.add_output_arrays(output);
TFLITE_PROTO_NS::RepeatedPtrField<std::string> input_types, output_types;
GetInputAndOutputTypes(model, &input_types, &output_types);
EXPECT_THAT(input_types, ElementsAre("float", "float", "int16"));
EXPECT_THAT(output_types, ElementsAre("float"));
}
TEST(ConversionLogUtilTest, TestGetOpSignatures) {
Model model;
auto& array_map = model.GetMutableArrayMap();
std::unique_ptr<ConvOperator> conv(new ConvOperator());
const std::string conv_input_name = "conv_input";
const std::string conv_filter_name = "conv_filter";
const std::string conv_output_name = "conv_output";
conv->inputs.push_back(conv_input_name);
conv->inputs.push_back(conv_filter_name);
conv->outputs.push_back(conv_output_name);
array_map[conv_input_name] = std::make_unique<Array>();
array_map[conv_input_name]->data_type = ArrayDataType::kFloat;
array_map[conv_input_name]->copy_shape({4, 4, 3});
array_map[conv_filter_name] = std::make_unique<Array>();
array_map[conv_filter_name]->data_type = ArrayDataType::kFloat;
array_map[conv_filter_name]->copy_shape({2, 2});
array_map[conv_output_name] = std::make_unique<Array>();
array_map[conv_output_name]->data_type = ArrayDataType::kFloat;
array_map[conv_output_name]->copy_shape({4, 4, 2});
const std::string mean_input_name = "mean_input";
const std::string mean_output_name = "mean_output";
std::unique_ptr<MeanOperator> mean(new MeanOperator());
mean->inputs.push_back(mean_input_name);
mean->outputs.push_back(mean_output_name);
array_map[mean_input_name] = std::make_unique<Array>();
array_map[mean_output_name] = std::make_unique<Array>();
const std::string avg_pool_3d_output_name = "avg_pool_output";
auto avg_pool_3d = std::make_unique<TensorFlowUnsupportedOperator>();
avg_pool_3d->tensorflow_op = "AvgPool3D";
tensorflow::NodeDef node_def;
node_def.set_op("AvgPool3D");
node_def.SerializeToString(&avg_pool_3d->tensorflow_node_def);
avg_pool_3d->inputs.push_back(conv_output_name);
avg_pool_3d->outputs.push_back(avg_pool_3d_output_name);
array_map[avg_pool_3d_output_name] = std::make_unique<Array>();
array_map[avg_pool_3d_output_name]->data_type = ArrayDataType::kInt32;
array_map[avg_pool_3d_output_name]->copy_shape({2, 2});
const std::string custom_op_output_name = "custom_op_output";
auto my_custom_op = std::make_unique<TensorFlowUnsupportedOperator>();
my_custom_op->tensorflow_op = "MyAwesomeCustomOp";
my_custom_op->inputs.push_back(avg_pool_3d_output_name);
my_custom_op->outputs.push_back(custom_op_output_name);
array_map[custom_op_output_name] = std::make_unique<Array>();
array_map[custom_op_output_name]->data_type = ArrayDataType::kFloat;
array_map[custom_op_output_name]->copy_shape({3});
model.operators.push_back(std::move(conv));
model.operators.push_back(std::move(mean));
model.operators.push_back(std::move(avg_pool_3d));
model.operators.push_back(std::move(my_custom_op));
TFLITE_PROTO_NS::RepeatedPtrField<std::string> op_signatures;
GetOpSignatures(model, &op_signatures);
EXPECT_THAT(op_signatures,
UnorderedElementsAre(
"INPUT:[4,4,3]::float::[2,2]::float::OUTPUT:[4,4,2]::float::"
"NAME:Conv::VERSION:1",
"INPUT:None::None::OUTPUT:None::None::NAME:Mean::VERSION:1",
"INPUT:[4,4,2]::float::OUTPUT:[2,2]::int32::NAME:AvgPool3D::"
"VERSION:1",
"INPUT:[2,2]::int32::OUTPUT:[3]::float::NAME:"
"MyAwesomeCustomOp::VERSION:1"));
}
TEST(ConversionLogUtilTest, TestSanitizeErrorMessage) {
const std::string error =
"error: failed while converting: 'main': Ops that can be supported by "
"the flex runtime (enabled via setting the -emit-select-tf-ops flag): "
"ResizeNearestNeighbor,ResizeNearestNeighbor. Ops that need custom "
"implementation (enabled via setting the -emit-custom-ops flag): "
"CombinedNonMaxSuppression.\nTraceback (most recent call last): File "
"/usr/local/bin/toco_from_protos, line 8, in <module>";
const std::string pruned_error =
"Ops that can be supported by "
"the flex runtime (enabled via setting the -emit-select-tf-ops flag): "
"ResizeNearestNeighbor,ResizeNearestNeighbor.Ops that need custom "
"implementation (enabled via setting the -emit-custom-ops flag): "
"CombinedNonMaxSuppression.";
EXPECT_EQ(SanitizeErrorMessage(error), pruned_error);
}
TEST(ConversionLogUtilTest, TestSanitizeErrorMessageNoMatching) {
const std::string error =
"error: failed while converting: 'main': Traceback (most recent call "
"last): File "
"/usr/local/bin/toco_from_protos, line 8, in <module>";
EXPECT_EQ(SanitizeErrorMessage(error), "");
}
}
} |
804 | cpp | tensorflow/tensorflow | resolve_svdf | tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.cc | tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf_test.cc | #ifndef TENSORFLOW_LITE_TOCO_TENSORFLOW_GRAPH_MATCHING_RESOLVE_SVDF_H_
#define TENSORFLOW_LITE_TOCO_TENSORFLOW_GRAPH_MATCHING_RESOLVE_SVDF_H_
#include <string>
#include <vector>
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster_utils.h"
#include "tensorflow/lite/toco/tooling_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace toco {
class SvdfCluster : public Cluster {
public:
void CreateNodes() override;
void AddConstNodePattern(const std::string& const_pattern) {
const_node_patterns_.push_back(const_pattern);
}
~SvdfCluster() override {}
private:
void CreateConstNode(const std::string& const_pattern);
void MaybeMergeConstNodes(
const std::vector<const tensorflow::NodeDef*>& const_node_parts,
bool transpose_tensor_value,
const std::unique_ptr<tensorflow::NodeDef>& merged_node);
int InferFilterRank();
std::vector<std::string> const_node_patterns_;
};
class SvdfClusterFactory : public ClusterFactoryInterface {
public:
std::unique_ptr<Cluster> CreateCluster(
const tensorflow::NodeDef& node,
const tensorflow::GraphDef& graph_def) const override;
};
}
#endif
#include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.h"
#include <ctype.h>
#include <stddef.h>
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/map.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster_utils.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/tooling_util.h"
using tensorflow::GraphDef;
using tensorflow::NodeDef;
namespace toco {
namespace {
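// Collects the "/part_N" Const slices of a partitioned variable whose name
// matches `const_pattern`, verifies that any concat axis constant is 0, and
// attempts to order the collected parts by name.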
void FilterPartitionedConstNodes(
const std::string& const_pattern,
const std::vector<const NodeDef*>& cluster_nodes,
std::vector<const NodeDef*>* const_node_parts) {
for (const NodeDef* node : cluster_nodes) {
std::string node_name_to_upper = node->name();
std::transform(node_name_to_upper.begin(), node_name_to_upper.end(),
node_name_to_upper.begin(), ::toupper);
if (StrContains(node->name(), const_pattern) && node->op() == "Const") {
if (StrContains(node_name_to_upper, "/PART_")) {
const_node_parts->push_back(node);
} else if (StrContains(node->name(), "AXIS") &&
StrContains(node->name(), "CONCAT")) {
const auto& value_attr = node->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
CHECK_EQ(tensor.int_val(0), 0);
}
}
}
std::sort(const_node_parts->begin(), const_node_parts->end(),
[](const NodeDef* a, const NodeDef* b) {
return (a->name().compare(b->name()) < 0 &&
(a->name().size() < b->name().size()));
});
}
}
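// Derives the SVDF filter rank from the "Reshape/shape" Const node, whose
// value is expected to be [num_units, rank, -1]; returns -1 if no such node
// is found.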
int SvdfCluster::InferFilterRank() {
for (const NodeDef* node : nodes_) {
if (StrContains(node->name(), "Reshape/shape")) {
const auto& value_attr = node->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
std::vector<int32> shape_values(
tensor.tensor_content().size() / sizeof(int), 0);
port::CopyToBuffer(tensor.tensor_content(),
reinterpret_cast<char*>(shape_values.data()));
CHECK_EQ(shape_values.size(), 3);
CHECK_EQ(shape_values[2], -1);
return shape_values[1];
}
}
return -1;
}
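// Creates the merged Const inputs, then builds the fused "Svdf" node: wires
// the activation input, weights, and optional bias, and records the
// ActivationFunction and inferred Rank as attributes.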
void SvdfCluster::CreateNodes() {
for (const std::string& const_pattern : const_node_patterns_) {
CreateConstNode(const_pattern);
}
std::unique_ptr<tensorflow::NodeDef> svdf_node(new NodeDef);
svdf_node->set_op("Svdf");
svdf_node->set_name(name_);
svdf_node->set_device(device_);
svdf_node->add_input(inputs_[0]);
CHECK(new_nodes_.size() == 3 || new_nodes_.size() == 2);
std::string* weights_feature_input = svdf_node->add_input();
std::string* weights_time_input = svdf_node->add_input();
std::string* bias_input = nullptr;
if (new_nodes_.size() == 3) {
bias_input = svdf_node->add_input();
}
for (const std::unique_ptr<tensorflow::NodeDef>& node : new_nodes_) {
const std::string node_name = node->name();
if (StrContains(node_name, "SVDF_weights_feature")) {
*weights_feature_input = node_name;
} else if (StrContains(node_name, "SVDF_weights_time")) {
*weights_time_input = node_name;
} else if (StrContains(node_name, "SVDF_bias")) {
CHECK(bias_input) << "Bias input cannot be provided when there are only "
"two Const input nodes!";
*bias_input = node_name;
} else {
LOG(FATAL) << "Unexpected input node for SVDF op! Accepted inputs are: "
"weights_feature, weights_time and bias.";
}
}
const int rank = InferFilterRank();
CHECK_GT(rank, 0);
std::string activation_function =
StrContains(outputs_[0], "Relu") ? "Relu" : "None";
(*svdf_node->mutable_attr())["ActivationFunction"].set_s(activation_function);
(*svdf_node->mutable_attr())["Rank"].set_i(rank);
new_nodes_.push_back(std::move(svdf_node));
}
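// Merges the partitioned Const nodes matching const_pattern into one Const
// node; feature weights are additionally transposed to the layout the fused
// op expects.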
void SvdfCluster::CreateConstNode(const std::string& const_pattern) {
std::vector<const NodeDef*> const_node_parts;
FilterPartitionedConstNodes(const_pattern, nodes_, &const_node_parts);
if (const_node_parts.empty()) return;
bool transpose_tensor_value =
StrContains(const_pattern, "SVDF_weights_feature");
std::unique_ptr<tensorflow::NodeDef> merged_node(new NodeDef);
MaybeMergeConstNodes(const_node_parts, transpose_tensor_value, merged_node);
new_nodes_.push_back(std::move(merged_node));
}
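// Concatenates the tensor contents of the Const parts along dimension 0 into
// merged_node, optionally transposing the resulting 2-D tensor.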
void SvdfCluster::MaybeMergeConstNodes(
const std::vector<const NodeDef*>& const_node_parts,
bool transpose_tensor_value,
const std::unique_ptr<tensorflow::NodeDef>& merged_node) {
merged_node->set_name(const_node_parts[0]->name());
merged_node->set_op("Const");
merged_node->set_device(const_node_parts[0]->device());
(*merged_node->mutable_attr())["dtype"].set_type(
const_node_parts[0]->attr().at("dtype").type());
int dim0_size = 0;
int dim1_size = 1;
tensorflow::TensorProto* allocated_tensor =
(*merged_node->mutable_attr())["value"].mutable_tensor();
tensorflow::TensorShapeProto* allocated_tensor_shape =
allocated_tensor->mutable_tensor_shape();
auto tensor_shape_dim0 = allocated_tensor_shape->add_dim();
int allocated_content_flat_size = 0;
for (size_t i = 0; i < const_node_parts.size(); i++) {
const auto& value_attr = const_node_parts[i]->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
if (i == 0) {
allocated_tensor->set_dtype(tensor.dtype());
} else {
CHECK_EQ(allocated_tensor->dtype(), tensor.dtype());
}
allocated_content_flat_size += tensor.tensor_content().size();
CHECK(tensor.has_tensor_shape());
const tensorflow::TensorShapeProto shape = tensor.tensor_shape();
dim0_size += shape.dim(0).size();
for (int d = 1; d < shape.dim_size(); d++) {
if (i == 0) {
allocated_tensor_shape->add_dim()->set_size(shape.dim(d).size());
allocated_tensor_shape->set_unknown_rank(shape.unknown_rank());
dim1_size *= shape.dim(d).size();
} else {
CHECK_EQ(shape.dim(d).size(), allocated_tensor_shape->dim(d).size());
CHECK_EQ(allocated_tensor_shape->unknown_rank(), shape.unknown_rank());
}
}
}
std::unique_ptr<char[]> allocated_content(
new char[allocated_content_flat_size]);
char* content_ptr = allocated_content.get();
for (size_t i = 0; i < const_node_parts.size(); i++) {
const auto& value_attr = const_node_parts[i]->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
port::CopyToBuffer(tensor.tensor_content(), content_ptr);
content_ptr += tensor.tensor_content().size();
}
if (transpose_tensor_value) {
std::unique_ptr<float[]> transposed_tensor(
new float[dim0_size * dim1_size]);
Transpose2DTensor(reinterpret_cast<float*>(allocated_content.get()),
dim0_size, dim1_size, transposed_tensor.get());
allocated_tensor_shape->clear_dim();
allocated_tensor_shape->add_dim()->set_size(dim1_size);
allocated_tensor_shape->add_dim()->set_size(dim0_size);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(transposed_tensor.get()),
allocated_content_flat_size));
} else {
tensor_shape_dim0->set_size(dim0_size);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(allocated_content.get()),
allocated_content_flat_size));
}
}
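// Detects an SVDF cell via the "SVDF_weights_feature" node name, derives the
// cell name from the enclosing scope, and returns a configured SvdfCluster
// (or nullptr if the node does not belong to an SVDF cell).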
std::unique_ptr<Cluster> SvdfClusterFactory::CreateCluster(
const NodeDef& node, const GraphDef& graph_def) const {
std::vector<std::string> node_patterns = {"SVDF_weights_feature",
"SVDF_weights_time", "SVDF_bias"};
std::string node_name_to_upper = node.name();
std::transform(node_name_to_upper.begin(), node_name_to_upper.end(),
node_name_to_upper.begin(), ::toupper);
std::unique_ptr<SvdfCluster> cluster = nullptr;
if (node_name_to_upper.find("SVDF", 0) != std::string::npos) {
size_t weights_pos = node.name().find(node_patterns[0]);
if (weights_pos != std::string::npos) {
size_t cell_pos = node.name().rfind('/', weights_pos - 2) + 1;
std::string cell_name =
node.name().substr(cell_pos, weights_pos - cell_pos - 1);
cluster = std::make_unique<SvdfCluster>();
cluster->SetName(cell_name);
cluster->SetDevice(node.device());
cluster->SetGraphDefInfo(&graph_def);
CHECK(cluster->FindClusterInputsAndOutputs());
for (const std::string& const_pattern : node_patterns) {
cluster->AddConstNodePattern(const_pattern);
}
}
}
return std::move(cluster);
}
} | #include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.h"
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster_utils.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_cluster.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/logging.h"
using tensorflow::GraphDef;
using tensorflow::NodeDef;
namespace toco {
class ResolveSvdfTest : public ::testing::Test {
public:
ResolveSvdfTest() {
AddNewNode("Input1", "Const", {});
AddNewNode("Svdf1/SVDF_weights_feature/part_0", "Const", {},
{0.1, 0.2, 0.3});
AddNewNode("Svdf1/SVDF_weights_feature/part_0/read", "Identity",
{"Svdf1/SVDF_weights_feature/part_0"});
AddNewNode("Svdf1/SVDF_weights_time/part_0", "Const", {}, {0.1, 0.2, 0.3});
AddNewNode("Svdf1/SVDF_weights_time/part_0/read", "Identity",
{"Svdf1/SVDF_weights_time/part_0"});
AddNewNode("Svdf1/f1", "SVDF_F1",
{"Input1", "Svdf1/SVDF_weights_feature/part_0/read"});
AddNewNode("Svdf1/f2", "SVDF_F2",
{"Svdf1/SVDF_weights_time/part_0/read", "Svdf1/f1"});
AddNewNode("Svdf1/Relu", "Relu", {"Svdf1/f2"});
AddShapeNode("Svdf1/Reshape/shape", {10, 1, -1});
AddNewNode("Output1", "Const", {"Svdf1/Relu"});
AddNewNode("Input2", "Const", {});
AddNewNode("Svdf2/SVDF_weights_feature/part_0", "Const", {},
{0.1, 0.2, 0.3});
AddNewNode("Svdf2/SVDF_weights_feature/part_0/read", "Identity",
{"Svdf2/SVDF_weights_feature/part_0"});
AddNewNode("Svdf2/SVDF_weights_time/part_0", "Const", {}, {0.1, 0.2, 0.3});
AddNewNode("Svdf2/SVDF_weights_time/part_0/read", "Identity",
{"Svdf2/SVDF_weights_time/part_0"});
AddNewNode("Svdf2/f1", "SVDF_F1",
{"Input1", "Svdf2/SVDF_weights_feature/part_0/read"});
AddNewNode("Svdf2/f2", "SVDF_F2",
{"Svdf2/SVDF_weights_time/part_0/read", "Svdf2/f1"});
AddNewNode("Svdf2/Relu", "Relu", {"Svdf2/f2"});
AddShapeNode("Svdf2/Reshape/shape", {10, 2, -1});
AddNewNode("Output2", "Const", {"Svdf2/Relu"});
}
~ResolveSvdfTest() override {}
protected:
void AddNewNode(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs) {
NodeDef* node = graph_.add_node();
node->set_name(name);
node->set_op(op);
node->set_device("");
for (int i = 0; i < inputs.size(); i++) {
node->add_input();
node->set_input(i, inputs[i]);
}
}
void AddNewNode(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs,
const std::vector<float>& values) {
NodeDef* node = graph_.add_node();
node->set_name(name);
node->set_op(op);
node->set_device("");
for (int i = 0; i < inputs.size(); i++) {
node->add_input();
node->set_input(i, inputs[i]);
}
(*node->mutable_attr())["dtype"].set_type(tensorflow::DT_FLOAT);
tensorflow::TensorProto* allocated_tensor = new tensorflow::TensorProto;
tensorflow::TensorShapeProto* allocated_tensor_shape =
new tensorflow::TensorShapeProto;
auto tensor_shape_dim0 = allocated_tensor_shape->add_dim();
tensor_shape_dim0->set_size(values.size());
allocated_tensor->set_allocated_tensor_shape(allocated_tensor_shape);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(values.data()),
values.size() * sizeof(float)));
(*node->mutable_attr())["value"].set_allocated_tensor(allocated_tensor);
}
void AddShapeNode(const std::string& name, const std::vector<int>& values) {
NodeDef* node = graph_.add_node();
node->set_name(name);
node->set_op("Const");
node->set_device("");
(*node->mutable_attr())["dtype"].set_type(tensorflow::DT_INT32);
tensorflow::TensorProto* allocated_tensor = new tensorflow::TensorProto;
tensorflow::TensorShapeProto* allocated_tensor_shape =
new tensorflow::TensorShapeProto;
auto tensor_shape_dim0 = allocated_tensor_shape->add_dim();
tensor_shape_dim0->set_size(values.size());
allocated_tensor->set_allocated_tensor_shape(allocated_tensor_shape);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(values.data()),
values.size() * sizeof(int)));
(*node->mutable_attr())["value"].set_allocated_tensor(allocated_tensor);
}
GraphDef graph_;
SvdfClusterFactory svdf_cluster_factory_;
std::vector<std::unique_ptr<Cluster>> clusters_;
};
TEST_F(ResolveSvdfTest, TestTranspose2DTensor) {
static float matrix[] = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.};
static float expected_transposed_matrix[] = {1., 5., 9., 2., 6., 10.,
3., 7., 11., 4., 8., 12.};
float* transposed_matrix = new float[12];
Transpose2DTensor(matrix, 3, 4, transposed_matrix);
std::vector<float> actual;
actual.insert(
actual.end(), transposed_matrix,
transposed_matrix + sizeof(expected_transposed_matrix) / sizeof(float));
std::vector<float> expected;
expected.insert(expected.end(), expected_transposed_matrix,
expected_transposed_matrix +
sizeof(expected_transposed_matrix) / sizeof(float));
delete[] transposed_matrix;
}
TEST_F(ResolveSvdfTest, TestResolveSvdfFlow) {
std::unordered_map<std::string, bool> is_node_in_cluster;
for (const NodeDef& node : graph_.node()) {
is_node_in_cluster[node.name()] = false;
}
std::vector<std::string> cluster_names;
CHECK(FindCluster(svdf_cluster_factory_, graph_, &is_node_in_cluster,
&clusters_));
for (const std::unique_ptr<Cluster>& cluster : clusters_) {
cluster_names.push_back(cluster->GetName());
cluster->CreateNodes();
}
EXPECT_THAT(cluster_names,
testing::UnorderedElementsAreArray({"Svdf1", "Svdf2"}));
std::vector<std::string> new_node_names;
std::vector<float> content_array(3);
for (const std::unique_ptr<Cluster>& cluster : clusters_) {
CHECK_EQ(cluster->GetNewNodes().size(), 3);
for (const std::unique_ptr<tensorflow::NodeDef>& node :
cluster->GetNewNodes()) {
new_node_names.push_back(node->name());
if (node->op() == "Const") {
CHECK_EQ(node->attr().at("dtype").type(), tensorflow::DT_FLOAT);
toco::port::CopyToBuffer(
node->attr().at("value").tensor().tensor_content(),
reinterpret_cast<char*>(content_array.data()));
EXPECT_THAT(content_array,
testing::UnorderedElementsAreArray({0.1, 0.2, 0.3}));
} else {
if (node->name() == "Svdf1") {
CHECK_EQ(node->attr().at("Rank").i(), 1);
} else if (node->name() == "Svdf2") {
CHECK_EQ(node->attr().at("Rank").i(), 2);
}
CHECK_EQ(node->attr().at("ActivationFunction").s(), "Relu");
}
}
}
EXPECT_THAT(new_node_names, testing::UnorderedElementsAreArray(
{"Svdf2/SVDF_weights_feature/part_0",
"Svdf2/SVDF_weights_time/part_0", "Svdf2",
"Svdf1/SVDF_weights_feature/part_0",
"Svdf1/SVDF_weights_time/part_0", "Svdf1"}));
}
} |
805 | cpp | tensorflow/tensorflow | types | third_party/xla/xla/python/ifrt_proxy/common/types.cc | third_party/xla/xla/python/ifrt_proxy/common/types_test.cc | #ifndef TENSORFLOW_TSL_LIB_MONITORING_TYPES_H_
#define TENSORFLOW_TSL_LIB_MONITORING_TYPES_H_
#include <cmath>
#include <vector>
#include "tsl/platform/types.h"
namespace tsl {
namespace monitoring {
enum class UnitOfMeasure {
kNumber,
kTime,
kBytes,
};
struct PercentilePoint {
double percentile = 0.0;
double value = 0.0;
};
struct Percentiles {
UnitOfMeasure unit_of_measure = UnitOfMeasure::kNumber;
uint64 start_nstime = 0;
uint64 end_nstime = 0;
double min_value = NAN;
double max_value = NAN;
double mean = NAN;
double stddev = NAN;
size_t num_samples = 0;
size_t total_samples = 0;
long double accumulator = NAN;
std::vector<PercentilePoint> points;
};
}
}
#endif
#include "xla/python/ifrt_proxy/common/types.h"
#include <cstdint>
#include <string>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
namespace xla {
namespace ifrt {
namespace proxy {
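// Converts a proto::Variant into the corresponding xla::PjRtValueType
// alternative; unrecognized cases yield an UnimplementedError.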
absl::StatusOr<xla::PjRtValueType> FromVariantProto(
const proto::Variant& variant_proto) {
switch (variant_proto.value_case()) {
case proto::Variant::kStringValue:
return variant_proto.string_value();
case proto::Variant::kInt64Value:
return variant_proto.int64_value();
case proto::Variant::kInt64List: {
const auto& values = variant_proto.int64_list().values();
return std::vector<int64_t>(values.begin(), values.end());
}
case proto::Variant::kFloatValue:
return variant_proto.float_value();
default:
return absl::UnimplementedError(absl::StrCat(
"Unknown xla.ifrt.proto.Variant case: ", variant_proto.value_case()));
}
}
absl::StatusOr<proto::Variant> ToVariantProto(const xla::PjRtValueType& value) {
proto::Variant variant;
if (auto* s = std::get_if<std::string>(&value)) {
variant.set_string_value(*s);
} else if (auto* i = std::get_if<int64_t>(&value)) {
variant.set_int64_value(*i);
} else if (auto* is = std::get_if<std::vector<int64_t>>(&value)) {
for (const int64_t i : *is) {
variant.mutable_int64_list()->add_values(i);
}
} else if (auto* f = std::get_if<float>(&value)) {
variant.set_float_value(*f);
} else {
return absl::UnimplementedError("Unknown xla::PjRtValueType type");
}
return variant;
}
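// Maps ArrayCopySemantics to and from its proto enum; the proto-to-C++
// direction rejects unhandled values with an InvalidArgumentError.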
proto::ArrayCopySemantics ToArrayCopySemanticsProto(ArrayCopySemantics s) {
switch (s) {
case ArrayCopySemantics::kAlwaysCopy:
return proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY;
case ArrayCopySemantics::kDonateInput:
return proto::ARRAY_COPY_SEMANTICS_DONATE_INPUT;
case ArrayCopySemantics::kReuseInput:
return proto::ARRAY_COPY_SEMANTICS_REUSE_INPUT;
}
}
absl::StatusOr<ArrayCopySemantics> FromArrayCopySemanticsProto(
proto::ArrayCopySemantics s) {
switch (s) {
case proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY:
return ArrayCopySemantics::kAlwaysCopy;
case proto::ARRAY_COPY_SEMANTICS_DONATE_INPUT:
return ArrayCopySemantics::kDonateInput;
case proto::ARRAY_COPY_SEMANTICS_REUSE_INPUT:
return ArrayCopySemantics::kReuseInput;
default:
return absl::InvalidArgumentError(
absl::StrCat("Unhandled proto-enum value ", s, ":",
proto::ArrayCopySemantics_Name(s)));
}
}
std::vector<int64_t> FromByteStridesProto(const proto::ByteStrides& strides) {
std::vector<int64_t> result;
result.reserve(strides.strides_size());
for (auto x : strides.strides()) {
result.push_back(x);
}
return result;
}
proto::ByteStrides ToByteStridesProto(const absl::Span<const int64_t> strides) {
proto::ByteStrides result;
for (auto x : strides) {
result.add_strides(x);
}
return result;
}
}
}
} | #include "xla/python/ifrt_proxy/common/types.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOkAndHolds;
class VariantTest : public testing::TestWithParam<xla::PjRtValueType> {};
TEST_P(VariantTest, ToFromVariantProto) {
const auto& variant = GetParam();
TF_ASSERT_OK_AND_ASSIGN(proto::Variant variant_proto,
ToVariantProto(variant));
EXPECT_THAT(FromVariantProto(variant_proto), IsOkAndHolds(variant));
}
INSTANTIATE_TEST_SUITE_P(
Variant, VariantTest,
testing::Values(xla::PjRtValueType(std::string("foo")),
xla::PjRtValueType(static_cast<int64_t>(1234)),
xla::PjRtValueType(std::vector<int64_t>{1, 2}),
xla::PjRtValueType(3.14f)));
class ByteStridesTest : public testing::TestWithParam<std::vector<int64_t>> {};
TEST_P(ByteStridesTest, ToFromProto) {
std::vector<int64_t> strides = GetParam();
EXPECT_EQ(FromByteStridesProto(ToByteStridesProto(strides)), strides);
}
INSTANTIATE_TEST_SUITE_P(
ByteStrides, ByteStridesTest,
testing::ValuesIn(std::vector<std::vector<int64_t>>{
{}, {1}, {0}, {4, 8}, {8, 4}, {1, 2, 3, 4}, {0, 4}, {4, 0}}));
TEST(ArrayCopySemanticsTest, FromToFromProto) {
for (int i = 0; i < proto::ArrayCopySemantics_descriptor()->value_count();
++i) {
const auto proto_enum = static_cast<proto::ArrayCopySemantics>(
proto::ArrayCopySemantics_descriptor()->value(i)->number());
if (proto_enum == proto::ARRAY_COPY_SEMANTICS_UNSPECIFIED) {
continue;
}
TF_ASSERT_OK_AND_ASSIGN(const auto cpp_enum,
FromArrayCopySemanticsProto(proto_enum));
TF_ASSERT_OK_AND_ASSIGN(
const auto cpp_enum_copy,
FromArrayCopySemanticsProto(ToArrayCopySemanticsProto(cpp_enum)));
EXPECT_EQ(cpp_enum_copy, cpp_enum);
}
}
}
}
}
} |
806 | cpp | tensorflow/tensorflow | export | tensorflow/lite/toco/tflite/export.cc | tensorflow/lite/toco/tflite/export_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_UTILS_EXPORT_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_UTILS_EXPORT_H_
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/framework/function.pb.h"
namespace tensorflow {
absl::Status ExportFunctionDefs(
mlir::ModuleOp module,
absl::AnyInvocable<absl::Status(tensorflow::FunctionDef)> callback,
bool export_tf_original_func_name = true);
}
#endif
#include "tensorflow/compiler/mlir/tfrt/utils/export.h"
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/tf_dialect_to_executor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
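// Lowers the module from the TF dialect to tf_executor, breaks up islands,
// and exports every func.func as a FunctionDef handed to |callback|.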
absl::Status ExportFunctionDefs(
mlir::ModuleOp module,
absl::AnyInvocable<absl::Status(tensorflow::FunctionDef)> callback,
bool export_tf_original_func_name) {
tsl::profiler::TraceMe traceme([&]() {
return tsl::profiler::TraceMeEncode(
"ExportFunctionDefs",
{{"module_name", absl::string_view(module.getName().value_or("?"))}});
});
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v1::ExportFromTensorflowDialectToExecutor(module));
{
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
mlir::PassManager pm(module.getContext());
pm.addPass(mlir::CreateBreakUpIslandsPass());
if (mlir::failed(pm.run(module))) {
return diag_handler.ConsumeStatus();
}
}
tensorflow::GraphExportConfig configs;
configs.export_original_tf_func_name = export_tf_original_func_name;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
tensorflow::FunctionDef function_def;
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(
func, configs, &function_def));
TF_RETURN_IF_ERROR(callback(std::move(function_def)));
}
return absl::OkStatus();
}
} | #include "tensorflow/lite/toco/tflite/export.h"
#include <algorithm>
#include <initializer_list>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tflite/builtin_operator.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace toco {
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
class ExportTest : public ::testing::Test {
protected:
void ResetOperators() { input_model_.operators.clear(); }
void AddTensorsByName(std::initializer_list<std::string> names) {
for (const std::string& name : names) {
input_model_.GetOrCreateArray(name);
}
}
void AddOperatorsByName(std::initializer_list<std::string> names) {
for (const std::string& name : names) {
if (name == "Conv") {
auto* op = new ConvOperator;
op->padding.type = PaddingType::kSame;
op->inputs = {"input", "filter"};
op->outputs = {"output"};
Array& input_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& filter_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
} else if (name == "Add") {
auto* op = new AddOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
} else if (name == "Sub") {
auto* op = new SubOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input1_array.copy_shape({1, 2, 2, 2});
input2_array.copy_shape({1, 2, 2, 2});
output_array.copy_shape({1, 2, 2, 2});
input_model_.operators.emplace_back(op);
} else if (name == "Assert") {
auto* op = new TensorFlowAssertOperator;
::tensorflow::NodeDef node_def;
node_def.set_name("Assert");
node_def.set_op("Assert");
node_def.SerializeToString(&op->tensorflow_node_def);
input_model_.operators.emplace_back(op);
} else {
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = name;
input_model_.operators.emplace_back(op);
}
}
}
void BuildQuantizableTestModel() {
input_model_.GetOrCreateArray("inputs");
Array& weight_array = input_model_.GetOrCreateArray("weights");
int buf_size = 1296;
auto weight_buf = std::make_unique<float[]>(buf_size);
for (int i = 0; i < buf_size; i++) {
weight_buf[i] = static_cast<float>(i % 128);
}
weight_array.data_type = ArrayDataType::kFloat;
Shape* weight_array_shape = weight_array.mutable_shape();
std::vector<int>* weight_array_shape_dim =
weight_array_shape->mutable_dims();
weight_array_shape_dim->resize(4, 6);
auto& weight_array_buffer =
weight_array.GetMutableBuffer<ArrayDataType::kFloat>();
weight_array_buffer.data.resize(buf_size);
float* buf_ptr =
weight_array.GetMutableBuffer<ArrayDataType::kFloat>().data.data();
std::copy(weight_buf.get(), weight_buf.get() + buf_size, buf_ptr);
{
auto* op = new ConvOperator;
op->padding.type = PaddingType::kSame;
op->inputs = {"inputs", "weights"};
op->outputs = {"output"};
Array& input_array = input_model_.GetArray(op->inputs[0]);
Array& filter_array = input_model_.GetArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
}
{
auto* op = new AddOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
}
}
tensorflow::Status ExportAndReturnStatus(const ExportParams& params) {
std::string result;
return Export(input_model_, &result, params);
}
std::vector<std::string> ExportAndSummarizeOperators(
const ExportParams& params) {
std::vector<std::string> names;
std::string result;
auto status = Export(input_model_, &result, params);
if (!status.ok()) {
LOG(INFO) << status.message();
return names;
}
auto* model = ::tflite::GetModel(result.data());
for (const ::tflite::OperatorCode* opcode : *model->operator_codes()) {
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
names.push_back(std::string("builtin:") +
::tflite::EnumNameBuiltinOperator(builtin_code));
} else {
names.push_back(std::string("custom:") +
opcode->custom_code()->c_str());
}
}
return names;
}
std::vector<uint32_t> ExportAndGetOperatorIndices(
const ExportParams& params) {
std::vector<uint32_t> indices;
std::string result;
if (!Export(input_model_, &result, params).ok()) return indices;
auto* model = ::tflite::GetModel(result.data());
auto operators = (*model->subgraphs())[0]->operators();
for (const auto* op : *operators) {
indices.push_back(op->opcode_index());
}
return indices;
}
Model input_model_;
};
TEST_F(ExportTest, LoadTensorsMap) {
AddTensorsByName({"tensor_one", "tensor_two"});
details::TensorsMap tensors;
details::LoadTensorsMap(input_model_, &tensors);
EXPECT_EQ(0, tensors["tensor_one"]);
EXPECT_EQ(1, tensors["tensor_two"]);
}
TEST_F(ExportTest, LoadOperatorsMap) {
AddOperatorsByName({"Conv", "Add", "MyCrazyOp", "Sub"});
details::OperatorsMap operators;
const auto ops_by_type = BuildOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(
0, operators[details::OperatorKey(::tflite::BuiltinOperator_ADD, "", 1)]);
EXPECT_EQ(1, operators[details::OperatorKey(::tflite::BuiltinOperator_CONV_2D,
"", 1)]);
EXPECT_EQ(2, operators[details::OperatorKey(::tflite::BuiltinOperator_CUSTOM,
"MyCrazyOp", 1)]);
EXPECT_EQ(
3, operators[details::OperatorKey(::tflite::BuiltinOperator_SUB, "", 1)]);
}
TEST_F(ExportTest, UnsupportedFunctionality) {
AddOperatorsByName({"Conv"});
ExportParams params;
params.allow_dynamic_tensors = false;
auto status = ExportAndReturnStatus(params);
EXPECT_EQ(status.code(), ::tensorflow::error::UNIMPLEMENTED);
EXPECT_THAT(status.message(),
HasSubstr("Unsupported flag: allow_dynamic_tensors."));
}
TEST_F(ExportTest, Export) {
AddOperatorsByName({"Conv", "Add", "MyCrazyOp", "Sub"});
ExportParams params;
params.allow_custom_ops = true;
params.enable_select_tf_ops = false;
params.quantize_weights = QuantizedBufferType::NONE;
EXPECT_THAT(ExportAndSummarizeOperators(params),
ElementsAre("builtin:ADD", "builtin:CONV_2D", "custom:MyCrazyOp",
"builtin:SUB"));
EXPECT_THAT(ExportAndGetOperatorIndices(params), ElementsAre(1, 0, 2, 3));
}
TEST_F(ExportTest, ExportMinRuntime) {
AddOperatorsByName({"Conv", "Add", "Sub"});
ExportParams params;
params.allow_custom_ops = true;
params.enable_select_tf_ops = false;
params.quantize_weights = QuantizedBufferType::NONE;
std::string output;
auto status = Export(input_model_, &output, params);
auto* model = ::tflite::GetModel(output.data());
EXPECT_EQ(model->metadata()->size(), 1);
EXPECT_EQ(model->metadata()->Get(0)->name()->str(), "min_runtime_version");
auto buf = model->metadata()->Get(0)->buffer();
auto* buffer = (*model->buffers())[buf];
auto* array = buffer->data();
EXPECT_EQ(reinterpret_cast<const char*>(array->data()), std::string("1.6.0"));
}
TEST_F(ExportTest, ExportEmptyMinRuntime) {
AddOperatorsByName({"Switch", "MyCustomOp", "Assert"});
ExportParams params;
params.allow_custom_ops = true;
std::string output;
auto status = Export(input_model_, &output, params);
auto* model = ::tflite::GetModel(output.data());
EXPECT_EQ(model->metadata()->size(), 1);
EXPECT_EQ(model->metadata()->Get(0)->name()->str(), "min_runtime_version");
auto buf = model->metadata()->Get(0)->buffer();
auto* buffer = (*model->buffers())[buf];
auto* array = buffer->data();
EXPECT_EQ(reinterpret_cast<const char*>(array->data()), std::string(""));
}
TEST_F(ExportTest, UnsupportedControlFlowErrors) {
AddOperatorsByName({"Conv", "Add", "Switch", "Merge"});
ExportParams params;
params.allow_custom_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(status.message(),
"We are continually in the process of adding support to TensorFlow "
"Lite for more ops. It would be helpful if you could inform us of "
"how this conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nTensorFlow Lite currently doesn't support control "
"flow ops: Merge, Switch. We are working on supporting control "
"flow ops, please see github issue at "
"https:
}
TEST_F(ExportTest, UnsupportedOpsAndNeedEnableFlex) {
AddOperatorsByName({"Conv", "Add", "BatchNormWithGlobalNormalization"});
ExportParams params;
params.allow_custom_ops = false;
params.enable_select_tf_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nSome of the operators in the model are not supported by "
"the standard TensorFlow Lite runtime. If those are native TensorFlow "
"operators, you might be able to use the extended runtime by passing "
"--enable_select_tf_ops, or by setting "
"target_ops=TFLITE_BUILTINS,SELECT_TF_OPS when calling "
"tf.lite.TFLiteConverter(). Otherwise, if you have a custom "
"implementation for them you can disable this error with "
"--allow_custom_ops, or by setting allow_custom_ops=True when calling "
"tf.lite.TFLiteConverter(). Here is a list of builtin operators you are "
"using: ADD, CONV_2D. Here is a list of operators for which you will "
"need custom implementations: BatchNormWithGlobalNormalization.");
}
TEST_F(ExportTest, UnsupportedOpsNeedCustomImplementation) {
AddOperatorsByName({"Conv", "Add", "MyCustomOp1", "MyCustomOp2"});
ExportParams params;
params.allow_custom_ops = false;
params.enable_select_tf_ops = true;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nSome of the operators in the model are not supported by "
"the standard TensorFlow Lite runtime and are not recognized by "
"TensorFlow. If you have a custom implementation for them you can "
"disable this error with --allow_custom_ops, or by setting "
"allow_custom_ops=True when calling tf.lite.TFLiteConverter(). Here is a "
"list of builtin operators you are using: ADD, CONV_2D. Here is a list "
"of operators for which you will need custom implementations: "
"MyCustomOp1, MyCustomOp2.");
}
TEST_F(ExportTest, UnsupportedControlFlowAndCustomOpsErrors) {
AddOperatorsByName(
{"Conv", "Add", "Switch", "Merge", "MyCustomOp1", "MyCustomOp2"});
ExportParams params;
params.allow_custom_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nTensorFlow Lite currently doesn't support control flow "
"ops: Merge, Switch. We are working on supporting control flow ops, "
"please see github issue at "
"https:
"operators in the model are not supported by the standard TensorFlow "
"Lite runtime. If those are native TensorFlow operators, you might be "
"able to use the extended runtime by passing --enable_select_tf_ops, or "
"by setting target_ops=TFLITE_BUILTINS,SELECT_TF_OPS when calling "
"tf.lite.TFLiteConverter(). Otherwise, if you have a custom "
"implementation for them you can disable this error with "
"--allow_custom_ops, or by setting allow_custom_ops=True when calling "
"tf.lite.TFLiteConverter(). Here is a list of builtin operators you are "
"using: ADD, CONV_2D. Here is a list of operators for which you will "
"need custom implementations: MyCustomOp1, MyCustomOp2.");
}
TEST_F(ExportTest, QuantizeWeights) {
BuildQuantizableTestModel();
std::string unquantized_result;
Export(input_model_, true, false, &unquantized_result);
BuildQuantizableTestModel();
std::string quantized_result;
Export(input_model_, true, true, &quantized_result);
EXPECT_LT(quantized_result.size(), unquantized_result.size());
}
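// Exercises the allowed-op-set combinations (builtins, select TF ops, custom
// ops); unless builtins are enabled, every op is imported as an unsupported
// TensorFlow op.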
class OpSetsTest : public ExportTest {
public:
enum OpSet { kTfLiteBuiltins, kSelectTfOps, kCustomOps };
void SetAllowedOpSets(std::initializer_list<OpSet> sets) {
import_all_ops_as_unsupported_ = true;
params_.allow_custom_ops = false;
params_.enable_select_tf_ops = false;
params_.quantize_weights = QuantizedBufferType::NONE;
for (const OpSet& i : sets) {
switch (i) {
case kTfLiteBuiltins:
import_all_ops_as_unsupported_ = false;
break;
case kSelectTfOps:
params_.enable_select_tf_ops = true;
break;
case kCustomOps:
params_.allow_custom_ops = true;
break;
}
}
}
std::vector<std::string> ImportExport(
std::initializer_list<std::string> op_names) {
ResetOperators();
if (!import_all_ops_as_unsupported_) {
AddOperatorsByName(op_names);
} else {
for (const std::string& name : op_names) {
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = name;
input_model_.operators.emplace_back(op);
}
}
return ExportAndSummarizeOperators(params_);
}
private:
bool import_all_ops_as_unsupported_;
ExportParams params_;
};
TEST_F(OpSetsTest, BuiltinsOnly) {
SetAllowedOpSets({kTfLiteBuiltins});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add"}), ElementsAre("builtin:ADD"));
SetAllowedOpSets({kTfLiteBuiltins, kCustomOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre("builtin:ADD", "custom:AdjustHue", "custom:Assert",
"custom:UnrollAndFold"));
}
TEST_F(OpSetsTest, TfSelectOnly) {
SetAllowedOpSets({kSelectTfOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "RandomUniform",
"UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add"}), ElementsAre("custom:FlexAdd"));
SetAllowedOpSets({kSelectTfOps, kCustomOps});
EXPECT_THAT(
ImportExport(
{"Add", "AdjustHue", "RandomUniform", "UnrollAndFold", "Assert"}),
ElementsAre("custom:FlexAdd", "custom:FlexAdjustHue", "custom:FlexAssert",
"custom:FlexRandomUniform", "custom:UnrollAndFold"));
}
TEST_F(OpSetsTest, BuiltinsAndTfSelect) {
SetAllowedOpSets({kTfLiteBuiltins, kSelectTfOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add", "RandomUniform"}),
ElementsAre("builtin:ADD", "custom:FlexRandomUniform"));
SetAllowedOpSets({kTfLiteBuiltins, kSelectTfOps, kCustomOps});
EXPECT_THAT(
ImportExport(
{"Add", "AdjustHue", "RandomUniform", "UnrollAndFold", "Assert"}),
ElementsAre("builtin:ADD", "custom:FlexAdjustHue", "custom:FlexAssert",
"custom:FlexRandomUniform", "custom:UnrollAndFold"));
}
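// A stand-in Conv2D operator whose GetVersion() reports version 2 when
// dilation is used, letting the tests exercise versioned operator export.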
class FakeConvolutionOperator
: public BuiltinOperator<ConvOperator, ::tflite::Conv2DOptions,
::tflite::BuiltinOptions_Conv2DOptions> {
public:
FakeConvolutionOperator()
: BuiltinOperator(::tflite::BuiltinOperator_CONV_2D,
OperatorType::kConv) {}
int GetVersion(const OperatorSignature& op_signature) const override {
const TocoOperator& conv_op =
static_cast<const TocoOperator&>(*op_signature.op);
if (conv_op.dilation_width_factor != 1 ||
conv_op.dilation_height_factor != 1) {
return 2;
}
return 1;
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateConv2DOptions(*builder, padding, op.stride_width,
op.stride_height, activation_function,
op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class VersionedOpExportTest : public ::testing::Test {
protected:
void SetUp() override {
input_model_.GetOrCreateArray("input");
input_model_.GetOrCreateArray("filter");
input_model_.GetOrCreateArray("output");
}
void AddConvOp(bool use_dilation) {
{
auto* op = new ConvOperator;
op->inputs.push_back("input");
op->inputs.push_back("filter");
op->outputs.push_back("output");
op->padding.type = PaddingType::kSame;
op->stride_width = 1;
op->stride_height = 1;
if (use_dilation) {
op->dilation_width_factor = 2;
op->dilation_height_factor = 2;
} else {
op->dilation_width_factor = 1;
op->dilation_height_factor = 1;
}
input_model_.operators.emplace_back(op);
}
}
std::map<OperatorType, std::unique_ptr<BaseOperator>>
BuildFakeOperatorByTypeMap() {
std::map<OperatorType, std::unique_ptr<BaseOperator>> result;
result[OperatorType::kConv] =
std::unique_ptr<BaseOperator>(new FakeConvolutionOperator);
return result;
}
Model input_model_;
};
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithOpV1) {
AddConvOp(false);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(1, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 1)));
}
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithOpV2) {
AddConvOp(true);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(1, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 2)));
}
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithBothVersions) {
AddConvOp(false);
AddConvOp(true);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(2, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 1)));
EXPECT_EQ(1, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 2)));
}
TEST_F(VersionedOpExportTest, Export) {
AddConvOp(false);
AddConvOp(true);
std::string result;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
Export(input_model_, true, false, &result, ops_by_type);
auto* model = ::tflite::GetModel(result.data());
auto operator_codes = model->operator_codes();
EXPECT_EQ(2, operator_codes->size());
EXPECT_EQ(::tflite::BuiltinOperator_CONV_2D,
GetBuiltinCode((*operator_codes)[0]));
EXPECT_EQ(1, (*operator_codes)[0]->version());
EXPECT_EQ(::tflite::BuiltinOperator_CONV_2D,
GetBuiltinCode((*operator_codes)[1]));
EXPECT_EQ(2, (*operator_codes)[1]->version());
auto operators = (*model->subgraphs())[0]->operators();
EXPECT_EQ(2, operators->size());
EXPECT_EQ(0, (*operators)[0]->opcode_index());
EXPECT_EQ(1, (*operators)[1]->opcode_index());
}
TEST(OperatorKeyTest, TestBuiltinOp) {
Model model;
auto op = std::make_unique<ConvOperator>();
op->inputs = {"input", "filter"};
op->outputs = {"output"};
Array& input_array = model.GetOrCreateArray(op->inputs[0]);
Array& filter_array = model.GetOrCreateArray(op->inputs[1]);
Array& output_array = model.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CONV_2D);
EXPECT_EQ(key.custom_code(), "");
EXPECT_EQ(key.version(), 1);
}
TEST(OperatorKeyTest, TestBuiltinOpWithVersionedInputTypes) {
Model model;
auto op = std::make_unique<DequantizeOperator>();
op->inputs = {"input"};
op->outputs = {"output"};
Array& input_array = model.GetOrCreateArray(op->inputs[0]);
Array& output_array = model.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kInt8;
output_array.data_type = ArrayDataType::kFloat;
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_DEQUANTIZE);
EXPECT_EQ(key.custom_code(), "");
EXPECT_EQ(key.version(), 2);
}
TEST(OperatorKeyTest, TestCustomOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "MyCrazyCustomOp";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "MyCrazyCustomOp");
EXPECT_EQ(key.version(), 1);
}
TEST(OperatorKeyTest, TestFlexOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "BatchMatMul";
const auto ops_by_type = BuildOperatorByTypeMap();
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "BatchMatMul");
EXPECT_EQ(key.version(), 1);
EXPECT_TRUE(key.is_custom_op());
EXPECT_FALSE(key.is_flex_op());
}
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexBatchMatMul");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
}
}
TEST(OperatorKeyTest, TestFlexWithControlFlowOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "Merge";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexMerge");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
EXPECT_TRUE(key.is_unsupported_flex_op());
}
TEST(OperatorKeyTest, TestFlexWithUnsupportedOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "UnsupportedOp";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "UnsupportedOp");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_flex_op());
EXPECT_FALSE(key.is_unsupported_flex_op());
}
TEST(OperatorKeyTest, TestFlexWithPartiallySupportedOps) {
Model model;
auto op = std::make_unique<TensorFlowAssertOperator>();
const auto ops_by_type = BuildOperatorByTypeMap();
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "Assert");
EXPECT_EQ(key.version(), 1);
EXPECT_TRUE(key.is_custom_op());
EXPECT_FALSE(key.is_flex_op());
}
::tensorflow::NodeDef node_def;
node_def.set_name("TensorFlowAssert");
node_def.set_op("TensorFlowAssert");
node_def.SerializeToString(&op->tensorflow_node_def);
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexAssert");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
}
}
}
}
} |
807 | cpp | tensorflow/tensorflow | import | tensorflow/lite/toco/tflite/import.cc | tensorflow/lite/toco/tflite/import_test.cc | #ifndef TENSORFLOW_LITE_TOCO_TFLITE_IMPORT_H_
#define TENSORFLOW_LITE_TOCO_TFLITE_IMPORT_H_
#include <string>
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
namespace toco {
namespace tflite {
std::unique_ptr<Model> Import(const ModelFlags &model_flags,
const std::string &input_file_contents);
namespace details {
using TensorsTable = std::vector<std::string>;
using OperatorsTable = std::vector<std::string>;
void LoadTensorsTable(const ::tflite::Model &input_model,
TensorsTable *tensors_table);
void LoadOperatorsTable(const ::tflite::Model &input_model,
OperatorsTable *operators_table);
}
}
}
#endif
#include "tensorflow/lite/toco/tflite/import.h"
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/core/tools/verifier.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace tflite {
namespace details {
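// Records the tensor names of the first subgraph, indexed by tensor position,
// so operator inputs and outputs can later be resolved by index.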
void LoadTensorsTable(const ::tflite::Model& input_model,
TensorsTable* tensors_table) {
auto tensors = (*input_model.subgraphs())[0]->tensors();
if (!tensors) return;
for (const auto* tensor : *tensors) {
tensors_table->push_back(tensor->name()->c_str());
}
}
void LoadOperatorsTable(const ::tflite::Model& input_model,
OperatorsTable* operators_table) {
auto opcodes = input_model.operator_codes();
if (!opcodes) return;
for (const auto* opcode : *opcodes) {
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
operators_table->push_back(EnumNameBuiltinOperator(builtin_code));
} else {
operators_table->push_back(opcode->custom_code()->c_str());
}
}
}
}
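// Creates a toco::Array for every tensor in the first subgraph and copies its
// type, buffer contents, shape, and (if present) min/max and quantization
// parameters.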
void ImportTensors(const ::tflite::Model& input_model, Model* model) {
auto tensors = (*input_model.subgraphs())[0]->tensors();
auto* buffers = input_model.buffers();
if (!tensors) return;
for (const auto* input_tensor : *tensors) {
Array& array = model->GetOrCreateArray(input_tensor->name()->c_str());
array.data_type = DataType::Deserialize(input_tensor->type());
int buffer_index = input_tensor->buffer();
auto* buffer = buffers->Get(buffer_index);
DataBuffer::Deserialize(*input_tensor, *buffer, &array);
auto shape = input_tensor->shape();
if (shape) {
array.mutable_shape()->mutable_dims()->clear();
for (uint32_t i = 0; i < shape->Length(); ++i) {
auto d = shape->Get(i);
array.mutable_shape()->mutable_dims()->push_back(d);
}
}
auto quantization = input_tensor->quantization();
if (quantization) {
if (quantization->min() && quantization->max()) {
CHECK_EQ(1, quantization->min()->Length());
CHECK_EQ(1, quantization->max()->Length());
MinMax& minmax = array.GetOrCreateMinMax();
minmax.min = quantization->min()->Get(0);
minmax.max = quantization->max()->Get(0);
}
if (quantization->scale() && quantization->zero_point()) {
CHECK_EQ(1, quantization->scale()->Length());
CHECK_EQ(1, quantization->zero_point()->Length());
QuantizationParams& q = array.GetOrCreateQuantizationParams();
q.scale = quantization->scale()->Get(0);
q.zero_point = quantization->zero_point()->Get(0);
}
}
}
}
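// Deserializes each operator via the table of known ops; unknown op names fall
// back to TensorFlowUnsupportedOperator. An input index of -1 is replaced by a
// freshly created optional array.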
void ImportOperators(
const ::tflite::Model& input_model,
const std::map<std::string, std::unique_ptr<BaseOperator>>& ops_by_name,
const details::TensorsTable& tensors_table,
const details::OperatorsTable& operators_table, Model* model) {
auto ops = (*input_model.subgraphs())[0]->operators();
if (!ops) return;
for (const auto* input_op : *ops) {
uint32_t index = input_op->opcode_index();
if (index > operators_table.size()) {
LOG(FATAL) << "Index " << index << " must be between zero and "
<< operators_table.size();
}
std::string opname = operators_table.at(index);
std::unique_ptr<Operator> new_op = nullptr;
if (ops_by_name.count(opname) == 0) {
std::string effective_opname = "TENSORFLOW_UNSUPPORTED";
if (ops_by_name.count(effective_opname) == 0) {
LOG(FATAL) << "Internal logic error: TENSORFLOW_UNSUPPORTED not found.";
}
new_op = ops_by_name.at(effective_opname)
->Deserialize(input_op->builtin_options(),
input_op->custom_options());
if (new_op->type == OperatorType::kUnsupported) {
auto* unsupported_op =
static_cast<TensorFlowUnsupportedOperator*>(new_op.get());
unsupported_op->tensorflow_op = opname;
unsupported_op->quantized = true;
} else {
LOG(FATAL) << "Expected a TensorFlowUnsupportedOperator";
}
} else {
new_op = ops_by_name.at(opname)->Deserialize(input_op->builtin_options(),
input_op->custom_options());
}
model->operators.emplace_back(new_op.release());
auto* op = model->operators.back().get();
auto inputs = input_op->inputs();
for (uint32_t i = 0; i < inputs->Length(); i++) {
auto input_index = inputs->Get(i);
if (input_index != -1) {
const std::string& input_name = tensors_table.at(input_index);
op->inputs.push_back(input_name);
} else {
const std::string& tensor_name =
toco::AvailableArrayName(*model, "OptionalTensor");
model->CreateOptionalArray(tensor_name);
op->inputs.push_back(tensor_name);
}
}
auto outputs = input_op->outputs();
for (int i = 0, end = outputs->Length(); i < end; i++) {
auto output_index = outputs->Get(i);
const std::string& output_name = tensors_table.at(output_index);
op->outputs.push_back(output_name);
}
}
}
void ImportIOTensors(const ModelFlags& model_flags,
const ::tflite::Model& input_model,
const details::TensorsTable& tensors_table, Model* model) {
if (model_flags.input_arrays().empty()) {
auto inputs = (*input_model.subgraphs())[0]->inputs();
if (inputs) {
for (int input : *inputs) {
const std::string& input_name = tensors_table.at(input);
model->flags.add_input_arrays()->set_name(input_name);
}
}
}
if (model_flags.output_arrays().empty()) {
auto outputs = (*input_model.subgraphs())[0]->outputs();
if (outputs) {
for (int output : *outputs) {
const std::string& output_name = tensors_table.at(output);
model->flags.add_output_arrays(output_name);
}
}
}
}
namespace {
bool Verify(const void* buf, size_t len) {
::flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
return ::tflite::VerifyModelBuffer(verifier);
}
}
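// Verifies the flatbuffer, requires exactly one subgraph, and converts it into
// a toco::Model (tensors, operators, and, if not given in the flags, the I/O
// arrays), undoing any weight shuffling at the end.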
std::unique_ptr<Model> Import(const ModelFlags& model_flags,
const std::string& input_file_contents) {
::tflite::AlwaysTrueResolver r;
if (!::tflite::Verify(input_file_contents.data(), input_file_contents.size(),
r, ::tflite::DefaultErrorReporter())) {
LOG(FATAL) << "Invalid flatbuffer.";
}
const ::tflite::Model* input_model =
::tflite::GetModel(input_file_contents.data());
const auto ops_by_name = BuildOperatorByNameMap();
if (!input_model->subgraphs() || input_model->subgraphs()->size() != 1) {
LOG(FATAL) << "Number of subgraphs in tflite should be exactly 1.";
}
std::unique_ptr<Model> model;
model = std::make_unique<Model>();
details::TensorsTable tensors_table;
details::LoadTensorsTable(*input_model, &tensors_table);
details::OperatorsTable operators_table;
details::LoadOperatorsTable(*input_model, &operators_table);
ImportTensors(*input_model, model.get());
ImportOperators(*input_model, ops_by_name, tensors_table, operators_table,
model.get());
ImportIOTensors(model_flags, *input_model, tensors_table, model.get());
UndoWeightsShuffling(model.get());
return model;
}
}
} | #include "tensorflow/lite/toco/tflite/import.h"
#include <initializer_list>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/lite/version.h"
namespace toco {
namespace tflite {
namespace {
using ::testing::ElementsAre;
using flatbuffers::Offset;
using flatbuffers::Vector;
class ImportTest : public ::testing::Test {
protected:
template <typename T>
Offset<Vector<unsigned char>> CreateDataVector(const std::vector<T>& data) {
return builder_.CreateVector(reinterpret_cast<const uint8_t*>(data.data()),
sizeof(T) * data.size());
}
Offset<Vector<Offset<::tflite::Buffer>>> BuildBuffers() {
auto buf0 = ::tflite::CreateBuffer(builder_, CreateDataVector<float>({}));
auto buf1 = ::tflite::CreateBuffer(
builder_, CreateDataVector<float>({1.0f, 2.0f, 3.0f, 4.0f}));
auto buf2 =
::tflite::CreateBuffer(builder_, CreateDataVector<float>({3.0f, 4.0f}));
return builder_.CreateVector(
std::vector<Offset<::tflite::Buffer>>({buf0, buf1, buf2}));
}
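  // Two float tensors sharing one set of quantization parameters; tensor_one
  // points at buffer 1 and tensor_two at buffer 0.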
Offset<Vector<Offset<::tflite::Tensor>>> BuildTensors() {
auto q = ::tflite::CreateQuantizationParameters(
builder_,
builder_.CreateVector<float>({0.1f}),
builder_.CreateVector<float>({0.2f}),
builder_.CreateVector<float>({0.3f}),
builder_.CreateVector<int64_t>({100LL}));
auto t1 =
::tflite::CreateTensor(builder_, builder_.CreateVector<int>({1, 2, 2}),
::tflite::TensorType_FLOAT32, 1,
builder_.CreateString("tensor_one"), q);
auto t2 =
::tflite::CreateTensor(builder_, builder_.CreateVector<int>({2, 1}),
::tflite::TensorType_FLOAT32, 0,
builder_.CreateString("tensor_two"), q);
return builder_.CreateVector(
std::vector<Offset<::tflite::Tensor>>({t1, t2}));
}
Offset<Vector<Offset<::tflite::OperatorCode>>> BuildOpCodes(
std::initializer_list<::tflite::BuiltinOperator> op_codes) {
std::vector<Offset<::tflite::OperatorCode>> op_codes_vector;
for (auto op : op_codes) {
op_codes_vector.push_back(::tflite::CreateOperatorCode(builder_, op, 0));
}
return builder_.CreateVector(op_codes_vector);
}
Offset<Vector<Offset<::tflite::OperatorCode>>> BuildOpCodes() {
return BuildOpCodes({::tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::BuiltinOperator_CONV_2D});
}
Offset<Vector<Offset<::tflite::Operator>>> BuildOperators(
std::initializer_list<int> inputs, std::initializer_list<int> outputs) {
auto is = builder_.CreateVector<int>(inputs);
if (inputs.size() == 0) is = 0;
auto os = builder_.CreateVector<int>(outputs);
if (outputs.size() == 0) os = 0;
auto op = ::tflite::CreateOperator(
builder_, 0, is, os, ::tflite::BuiltinOptions_Conv2DOptions,
::tflite::CreateConv2DOptions(builder_, ::tflite::Padding_VALID, 1, 1,
::tflite::ActivationFunctionType_NONE)
.Union(),
0, ::tflite::CustomOptionsFormat_FLEXBUFFERS);
return builder_.CreateVector(std::vector<Offset<::tflite::Operator>>({op}));
}
Offset<Vector<Offset<::tflite::Operator>>> BuildOperators() {
return BuildOperators({0}, {1});
}
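  // Creates num_sub_graphs identical subgraphs, each declaring tensor 0 as its
  // input and tensor 1 as its output.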
Offset<Vector<Offset<::tflite::SubGraph>>> BuildSubGraphs(
Offset<Vector<Offset<::tflite::Tensor>>> tensors,
Offset<Vector<Offset<::tflite::Operator>>> operators,
int num_sub_graphs = 1) {
std::vector<int32_t> inputs = {0};
std::vector<int32_t> outputs = {1};
std::vector<Offset<::tflite::SubGraph>> v;
for (int i = 0; i < num_sub_graphs; ++i) {
v.push_back(::tflite::CreateSubGraph(
builder_, tensors, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), operators,
builder_.CreateString("subgraph")));
}
return builder_.CreateVector(v);
}
void BuildTestModel() {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto s = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION,
opcodes, subgraphs, s, buffers));
input_model_ = ::tflite::GetModel(builder_.GetBufferPointer());
}
std::string InputModelAsString() {
return std::string(reinterpret_cast<char*>(builder_.GetBufferPointer()),
builder_.GetSize());
}
flatbuffers::FlatBufferBuilder builder_;
const ::tflite::Model* input_model_ = nullptr;
};
TEST_F(ImportTest, LoadTensorsTable) {
BuildTestModel();
details::TensorsTable tensors;
details::LoadTensorsTable(*input_model_, &tensors);
EXPECT_THAT(tensors, ElementsAre("tensor_one", "tensor_two"));
}
TEST_F(ImportTest, LoadOperatorsTable) {
BuildTestModel();
details::OperatorsTable operators;
details::LoadOperatorsTable(*input_model_, &operators);
EXPECT_THAT(operators, ElementsAre("MAX_POOL_2D", "CONV_2D"));
}
TEST_F(ImportTest, Tensors) {
BuildTestModel();
auto model = Import(ModelFlags(), InputModelAsString());
ASSERT_TRUE(model->HasArray("tensor_one"));
Array& a1 = model->GetArray("tensor_one");
EXPECT_EQ(ArrayDataType::kFloat, a1.data_type);
EXPECT_THAT(a1.GetBuffer<ArrayDataType::kFloat>().data,
ElementsAre(1.0f, 2.0f, 3.0f, 4.0f));
ASSERT_TRUE(a1.has_shape());
EXPECT_THAT(a1.shape().dims(), ElementsAre(1, 2, 2));
const auto& mm = a1.minmax;
ASSERT_TRUE(mm.get());
EXPECT_FLOAT_EQ(0.1, mm->min);
EXPECT_FLOAT_EQ(0.2, mm->max);
const auto& q = a1.quantization_params;
ASSERT_TRUE(q.get());
EXPECT_FLOAT_EQ(0.3, q->scale);
EXPECT_EQ(100, q->zero_point);
}
TEST_F(ImportTest, NoBuffers) {
auto buffers = 0;
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'buffers' section.");
}
TEST_F(ImportTest, NoInputs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators({}, {1});
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'inputs' for operator.");
}
TEST_F(ImportTest, NoOutputs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators({0}, {});
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'outputs' for operator.");
}
TEST_F(ImportTest, InvalidOpCode) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes({static_cast<::tflite::BuiltinOperator>(-1),
::tflite::BuiltinOperator_CONV_2D});
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Operator id '-1' is out of range.");
}
TEST_F(ImportTest, MultipleSubGraphs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators, 2);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
input_model_ = ::tflite::GetModel(builder_.GetBufferPointer());
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Number of subgraphs in tflite should be exactly 1.");
}
}
}
} |
808 | cpp | tensorflow/tensorflow | operator | tensorflow/lite/toco/tflite/operator.cc | tensorflow/lite/toco/tflite/operator_test.cc | #ifndef TENSORFLOW_LITE_CORE_C_OPERATOR_H_
#define TENSORFLOW_LITE_CORE_C_OPERATOR_H_
#include <stdint.h>
#include <stdlib.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TfLiteOperator TfLiteOperator;
TFL_CAPI_EXPORT extern TfLiteOperator* TfLiteOperatorCreate(
TfLiteBuiltinOperator builtin_code, const char* custom_name, int version);
TFL_CAPI_EXPORT extern TfLiteOperator* TfLiteOperatorCreateWithData(
TfLiteBuiltinOperator builtin_code, const char* custom_name, int version,
void* user_data);
TFL_CAPI_EXPORT extern void TfLiteOperatorDelete(TfLiteOperator* registration);
TFL_CAPI_EXPORT extern TfLiteBuiltinOperator TfLiteOperatorGetBuiltInCode(
const TfLiteOperator* registration);
TFL_CAPI_EXPORT extern const char* TfLiteOperatorGetCustomName(
const TfLiteOperator* registration);
TFL_CAPI_EXPORT extern int TfLiteOperatorGetVersion(
const TfLiteOperator* registration);
TFL_CAPI_EXPORT extern void* TfLiteOperatorGetUserData(
const TfLiteOperator* registration);
TFL_CAPI_EXPORT extern void TfLiteOperatorSetInit(
TfLiteOperator* registration,
void* (*init)(TfLiteOpaqueContext* context, const char* buffer,
size_t length));
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOperatorSetInitWithData(
TfLiteOperator* registration,
void* (*init)(void* user_data, TfLiteOpaqueContext* context,
const char* buffer, size_t length));
TFL_CAPI_EXPORT extern void TfLiteOperatorSetFree(
TfLiteOperator* registration,
void (*free)(TfLiteOpaqueContext* context, void* data));
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOperatorSetFreeWithData(
TfLiteOperator* registration,
void (*free)(void* user_data, TfLiteOpaqueContext* context, void* data));
TFL_CAPI_EXPORT extern void TfLiteOperatorSetPrepare(
TfLiteOperator* registration,
TfLiteStatus (*prepare)(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node));
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOperatorSetPrepareWithData(
TfLiteOperator* registration,
TfLiteStatus (*prepare)(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node));
TFL_CAPI_EXPORT extern void TfLiteOperatorSetInvoke(
TfLiteOperator* registration,
TfLiteStatus (*invoke)(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node));
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOperatorSetInvokeWithData(
TfLiteOperator* registration,
TfLiteStatus (*invoke)(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node));
TFL_CAPI_EXPORT extern void TfLiteOperatorSetAsyncKernel(
TfLiteOperator* registration,
struct TfLiteAsyncKernel* (*async_kernel)(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node));
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOperatorSetAsyncKernelWithData(
TfLiteOperator* registration,
struct TfLiteAsyncKernel* (*async_kernel)(void* user_data,
TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node));
TFL_CAPI_EXPORT extern void TfLiteOperatorSetInplaceOperator(
TfLiteOperator* registration, uint64_t inplace_operator);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/c/operator.h"
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/common_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
TfLiteOperator* TfLiteOperatorCreate(TfLiteBuiltinOperator builtin_code,
const char* custom_name, int version) {
return TfLiteOperatorCreateWithData(builtin_code, custom_name, version,
nullptr);
}
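// Heap-allocates a registration with all callbacks unset; the caller owns it
// and releases it with TfLiteOperatorDelete.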
TfLiteOperator* TfLiteOperatorCreateWithData(TfLiteBuiltinOperator builtin_code,
const char* custom_name,
int version, void* user_data) {
return new TfLiteOperator{.custom_name = custom_name,
.version = version,
.init = nullptr,
.free = nullptr,
.prepare = nullptr,
.invoke = nullptr,
.async_kernel = nullptr,
.builtin_code = builtin_code,
.node_index = -1,
.inplace_operator = kTfLiteInplaceOpNone,
.user_data = user_data};
}
void TfLiteOperatorDelete(TfLiteOperator* reg) { delete reg; }
void TfLiteOperatorSetInit(TfLiteOperator* registration,
void* (*init)(TfLiteOpaqueContext* context,
const char* buffer, size_t length)) {
registration->init = init;
}
TfLiteStatus TfLiteOperatorSetInitWithData(
TfLiteOperator* registration,
void* (*init)(void* user_data, TfLiteOpaqueContext* context,
const char* buffer, size_t length)) {
registration->init_with_data = init;
return kTfLiteOk;
}
void TfLiteOperatorSetFree(TfLiteOperator* registration,
void (*free)(TfLiteOpaqueContext* context,
void* data)) {
registration->free = free;
}
TfLiteStatus TfLiteOperatorSetFreeWithData(
TfLiteOperator* registration,
void (*free)(void* user_data, TfLiteOpaqueContext* context, void* data)) {
registration->free_with_data = free;
return kTfLiteOk;
}
void TfLiteOperatorSetPrepare(
TfLiteOperator* registration,
TfLiteStatus (*prepare)(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node)) {
registration->prepare = prepare;
}
TfLiteStatus TfLiteOperatorSetPrepareWithData(
TfLiteOperator* registration,
TfLiteStatus (*prepare)(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node)) {
registration->prepare_with_data = prepare;
return kTfLiteOk;
}
void TfLiteOperatorSetInvoke(
TfLiteOperator* registration,
TfLiteStatus (*invoke)(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node)) {
registration->invoke = invoke;
}
TfLiteStatus TfLiteOperatorSetInvokeWithData(
TfLiteOperator* registration,
TfLiteStatus (*invoke)(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node)) {
registration->invoke_with_data = invoke;
return kTfLiteOk;
}
void TfLiteOperatorSetAsyncKernel(
TfLiteOperator* registration,
TfLiteAsyncKernel* (*async_kernel)(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node)) {
registration->async_kernel = async_kernel;
}
TfLiteStatus TfLiteOperatorSetAsyncKernelWithData(
TfLiteOperator* registration,
TfLiteAsyncKernel* (*async_kernel)(void* user_data,
TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node)) {
registration->async_kernel_with_data = async_kernel;
return kTfLiteOk;
}
void TfLiteOperatorSetInplaceOperator(TfLiteOperator* registration,
uint64_t inplace_operator) {
registration->inplace_operator = inplace_operator;
}
TfLiteBuiltinOperator TfLiteOperatorGetBuiltInCode(
const TfLiteOperator* registration) {
return static_cast<TfLiteBuiltinOperator>(registration->builtin_code);
}
const char* TfLiteOperatorGetCustomName(const TfLiteOperator* registration) {
return registration->custom_name;
}
int TfLiteOperatorGetVersion(const TfLiteOperator* registration) {
if (!registration) {
return -1;
}
return registration->version;
}
void* TfLiteOperatorGetUserData(const TfLiteOperator* registration) {
if (!registration) {
return nullptr;
}
return registration->user_data;
} | #include "tensorflow/lite/toco/tflite/operator.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace tflite {
namespace {
class OperatorTest : public ::testing::Test {
protected:
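  // Looks the operator up by name and by type and checks that the two
  // registries agree on both.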
const BaseOperator& GetOperator(const std::string& name, OperatorType type) {
using OpsByName = std::map<std::string, std::unique_ptr<BaseOperator>>;
using OpsByType = std::map<OperatorType, std::unique_ptr<BaseOperator>>;
static auto* by_name = new OpsByName(BuildOperatorByNameMap());
static auto* by_type = new OpsByType(BuildOperatorByTypeMap());
CHECK(by_name->count(name)) << "No operator for '" << name << "'.";
BaseOperator* op1 = by_name->at(name).get();
CHECK(op1->type() == type) << "while verifying '" << name << "'.";
CHECK(by_type->count(type))
<< "No operator for '" << OperatorTypeName(type) << "'.";
BaseOperator* op2 = by_type->at(type).get();
CHECK(op2->name() == name)
<< "while verifying '" << OperatorTypeName(type) << "'.";
return *op1;
}
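  // Round-trips a toco operator through its flatbuffer serialization and
  // deserialization, returning the reconstructed copy for comparison.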
template <typename T>
std::unique_ptr<T> SerializeAndDeserialize(const BaseOperator& op,
const T& toco_op,
Options* options = nullptr) {
flatbuffers::FlatBufferBuilder builder;
Options input_options = op.Serialize(toco_op, &builder);
if (options) {
*options = input_options;
}
builder.Finish(CreateOperator(builder, 0, 0, 0, input_options.type,
input_options.builtin, input_options.custom,
::tflite::CustomOptionsFormat_FLEXBUFFERS));
auto* output_options =
flatbuffers::GetRoot<::tflite::Operator>(builder.GetBufferPointer());
auto new_toco_op = op.Deserialize(output_options->builtin_options(),
output_options->custom_options());
CHECK(new_toco_op->type == toco_op.type)
<< "The type of the serialized and deserialized"
<< HelpfulOperatorTypeName(*new_toco_op)
<< " does not match the type of the original "
<< HelpfulOperatorTypeName(toco_op);
return std::unique_ptr<T>(dynamic_cast<T*>(new_toco_op.release()));
}
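  // Simple operators carry no options, so a round trip should leave both the
  // builtin and custom option tables empty.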
template <typename T>
void CheckSimpleOperator(const std::string& name, OperatorType type) {
Options options;
auto output_toco_op =
SerializeAndDeserialize(GetOperator(name, type), T(), &options);
ASSERT_EQ(0, options.builtin.o);
ASSERT_EQ(0, options.custom.o);
ASSERT_EQ(::tflite::BuiltinOptions_NONE, options.type);
ASSERT_NE(nullptr, output_toco_op.get());
}
template <typename T>
void CheckReducerOperator(const std::string& name, OperatorType type) {
T op;
op.keep_dims = false;
auto output_toco_op = SerializeAndDeserialize(GetOperator(name, type), op);
EXPECT_EQ(op.keep_dims, output_toco_op->keep_dims);
}
};
TEST_F(OperatorTest, SimpleOperators) {
CheckSimpleOperator<FloorOperator>("FLOOR", OperatorType::kFloor);
CheckSimpleOperator<CeilOperator>("CEIL", OperatorType::kCeil);
CheckSimpleOperator<EluOperator>("ELU", OperatorType::kElu);
CheckSimpleOperator<RoundOperator>("ROUND", OperatorType::kRound);
CheckSimpleOperator<ReluOperator>("RELU", OperatorType::kRelu);
CheckSimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1);
CheckSimpleOperator<Relu6Operator>("RELU6", OperatorType::kRelu6);
CheckSimpleOperator<LogisticOperator>("LOGISTIC", OperatorType::kLogistic);
CheckSimpleOperator<TanhOperator>("TANH", OperatorType::kTanh);
CheckSimpleOperator<ExpOperator>("EXP", OperatorType::kExp);
CheckSimpleOperator<CosOperator>("COS", OperatorType::kCos);
CheckSimpleOperator<LogSoftmaxOperator>("LOG_SOFTMAX",
OperatorType::kLogSoftmax);
CheckSimpleOperator<TensorFlowMaximumOperator>(
"MAXIMUM", OperatorType::kMaximum);
CheckSimpleOperator<TensorFlowMinimumOperator>(
"MINIMUM", OperatorType::kMinimum);
CheckSimpleOperator<TensorFlowLessOperator>("LESS", OperatorType::kLess);
CheckSimpleOperator<NegOperator>("NEG", OperatorType::kNeg);
CheckSimpleOperator<SelectOperator>("SELECT", OperatorType::kSelect);
CheckSimpleOperator<SliceOperator>("SLICE", OperatorType::kSlice);
CheckSimpleOperator<SinOperator>("SIN", OperatorType::kSin);
CheckSimpleOperator<TensorFlowEqualOperator>("EQUAL", OperatorType::kEqual);
CheckSimpleOperator<TensorFlowNotEqualOperator>("NOT_EQUAL",
OperatorType::kNotEqual);
CheckSimpleOperator<LogOperator>("LOG", OperatorType::kLog);
CheckSimpleOperator<TensorFlowSqrtOperator>("SQRT", OperatorType::kSqrt);
CheckSimpleOperator<TensorFlowRsqrtOperator>("RSQRT", OperatorType::kRsqrt);
CheckSimpleOperator<PowOperator>("POW", OperatorType::kPow);
CheckSimpleOperator<LogicalOrOperator>("LOGICAL_OR",
OperatorType::kLogicalOr);
CheckSimpleOperator<LogicalAndOperator>("LOGICAL_AND",
OperatorType::kLogicalAnd);
CheckSimpleOperator<LogicalNotOperator>("LOGICAL_NOT",
OperatorType::kLogicalNot);
CheckSimpleOperator<FloorDivOperator>("FLOOR_DIV", OperatorType::kFloorDiv);
CheckSimpleOperator<TensorFlowSquareOperator>("SQUARE",
OperatorType::kSquare);
CheckSimpleOperator<TensorFlowZerosLikeOperator>("ZEROS_LIKE",
OperatorType::kZerosLike);
CheckSimpleOperator<FloorModOperator>("FLOOR_MOD", OperatorType::kFloorMod);
CheckSimpleOperator<RangeOperator>("RANGE", OperatorType::kRange);
CheckSimpleOperator<FillOperator>("FILL", OperatorType::kFill);
CheckSimpleOperator<ReverseV2Operator>("REVERSE_V2",
OperatorType::kReverseV2);
CheckSimpleOperator<TensorFlowRankOperator>("RANK", OperatorType::kRank);
}
TEST_F(OperatorTest, BuiltinAdd) {
AddOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("ADD", OperatorType::kAdd), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinAddN) {
AddNOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("ADD_N", OperatorType::kAddN), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinReducerOps) {
CheckReducerOperator<MeanOperator>("MEAN", OperatorType::kMean);
CheckReducerOperator<TensorFlowSumOperator>("SUM", OperatorType::kSum);
CheckReducerOperator<TensorFlowProdOperator>("REDUCE_PROD",
OperatorType::kReduceProd);
CheckReducerOperator<TensorFlowMaxOperator>("REDUCE_MAX",
OperatorType::kReduceMax);
CheckReducerOperator<TensorFlowMinOperator>("REDUCE_MIN",
OperatorType::kReduceMin);
CheckReducerOperator<TensorFlowAnyOperator>("REDUCE_ANY", OperatorType::kAny);
}
TEST_F(OperatorTest, BuiltinCast) {
CastOperator op;
op.src_data_type = ArrayDataType::kFloat;
op.dst_data_type = ArrayDataType::kUint8;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("CAST", OperatorType::kCast), op);
EXPECT_EQ(op.src_data_type, output_toco_op->src_data_type);
EXPECT_EQ(op.dst_data_type, output_toco_op->dst_data_type);
}
TEST_F(OperatorTest, CustomConcatenation) {
ConcatenationOperator op;
op.axis = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("CONCATENATION", OperatorType::kConcatenation), op);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, CustomDepthToSpace) {
DepthToSpaceOperator op;
op.block_size = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEPTH_TO_SPACE", OperatorType::kDepthToSpace), op);
EXPECT_EQ(op.block_size, output_toco_op->block_size);
}
TEST_F(OperatorTest, CustomFakeQuant) {
FakeQuantOperator op;
auto* minmax = new MinMax;
minmax->min = -10;
minmax->max = 200;
op.minmax.reset(minmax);
op.num_bits = 16;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("FAKE_QUANT", OperatorType::kFakeQuant), op);
EXPECT_EQ(op.minmax->min, output_toco_op->minmax->min);
EXPECT_EQ(op.minmax->max, output_toco_op->minmax->max);
EXPECT_EQ(op.num_bits, output_toco_op->num_bits);
}
TEST_F(OperatorTest, CustomFullyConnected) {
FullyConnectedOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("FULLY_CONNECTED", OperatorType::kFullyConnected), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinGather) {
GatherOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("GATHER", OperatorType::kGather), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinGatherNd) {
GatherNdOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("GATHER_ND", OperatorType::kGatherNd), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinWhere) {
WhereOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("WHERE", OperatorType::kWhere), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinL2Pool) {
L2PoolOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("L2_POOL_2D", OperatorType::kL2Pool), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinLocalResponseNormalization) {
LocalResponseNormalizationOperator op;
op.range = 123;
op.bias = 1.23;
op.alpha = 12.3;
op.beta = .123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("LOCAL_RESPONSE_NORMALIZATION",
OperatorType::kLocalResponseNormalization),
op);
EXPECT_EQ(op.range, output_toco_op->range);
EXPECT_EQ(op.bias, output_toco_op->bias);
EXPECT_EQ(op.alpha, output_toco_op->alpha);
EXPECT_EQ(op.beta, output_toco_op->beta);
}
TEST_F(OperatorTest, BuiltinMaxPool) {
MaxPoolOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("MAX_POOL_2D", OperatorType::kMaxPool), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinReshape) {
TensorFlowReshapeOperator op;
op.shape = {1, 2, 4, 5, 8};
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESHAPE", OperatorType::kReshape), op);
EXPECT_EQ(op.shape, output_toco_op->shape);
}
TEST_F(OperatorTest, CustomSoftmax) {
SoftmaxOperator op;
op.beta = 123.1;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SOFTMAX", OperatorType::kSoftmax), op);
EXPECT_EQ(op.beta, output_toco_op->beta);
}
TEST_F(OperatorTest, BuiltinSpaceToDepth) {
SpaceToDepthOperator op;
op.block_size = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SPACE_TO_DEPTH", OperatorType::kSpaceToDepth), op);
EXPECT_EQ(op.block_size, output_toco_op->block_size);
}
TEST_F(OperatorTest, CustomSplit) {
TensorFlowSplitOperator op;
op.num_split = 123;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SPLIT", OperatorType::kSplit), op);
EXPECT_EQ(op.num_split, output_toco_op->num_split);
}
TEST_F(OperatorTest, CustomSplitV) {
TensorFlowSplitVOperator op;
op.num_split = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SPLIT_V", OperatorType::kSplitV), op);
EXPECT_EQ(op.num_split, output_toco_op->num_split);
}
TEST_F(OperatorTest, BuiltinAveragePool) {
AveragePoolOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("AVERAGE_POOL_2D", OperatorType::kAveragePool), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinConvolution) {
ConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("CONV_2D", OperatorType::kConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinDepthwiseConvolution) {
DepthwiseConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.depth_multiplier = 6;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEPTHWISE_CONV_2D", OperatorType::kDepthwiseConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.depth_multiplier, output_toco_op->depth_multiplier);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinL2Norm) {
L2NormalizationOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("L2_NORMALIZATION", OperatorType::kL2Normalization), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinMul) {
MulOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("MUL", OperatorType::kMul), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, ResizeBilinear) {
ResizeBilinearOperator op;
op.align_corners = true;
op.half_pixel_centers = false;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear), op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeBilinear_HalfPixelCenters) {
ResizeBilinearOperator op;
op.align_corners = true;
op.half_pixel_centers = true;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear), op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeNearestNeighbor) {
ResizeNearestNeighborOperator op;
op.align_corners = true;
op.half_pixel_centers = false;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("RESIZE_NEAREST_NEIGHBOR",
OperatorType::kResizeNearestNeighbor),
op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeNearestNeighbor_HalfPixelCenters) {
ResizeNearestNeighborOperator op;
op.align_corners = true;
op.half_pixel_centers = true;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("RESIZE_NEAREST_NEIGHBOR",
OperatorType::kResizeNearestNeighbor),
op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, Svdf) {
SvdfOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu;
op.rank = 1;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SVDF", OperatorType::kSvdf), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
EXPECT_EQ(op.rank, output_toco_op->rank);
}
TEST_F(OperatorTest, Squeeze) {
SqueezeOperator op;
op.squeeze_dims = {-2, -3, 4, 1, 4};
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SQUEEZE", OperatorType::kSqueeze), op);
EXPECT_EQ(op.squeeze_dims, output_toco_op->squeeze_dims);
}
TEST_F(OperatorTest, StridedSlice) {
StridedSliceOperator op;
op.begin_mask = 1;
op.end_mask = 2;
op.ellipsis_mask = 1;
op.new_axis_mask = 1;
op.shrink_axis_mask = 2;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("STRIDED_SLICE", OperatorType::kStridedSlice), op);
EXPECT_EQ(op.start_indices, output_toco_op->start_indices);
EXPECT_EQ(op.stop_indices, output_toco_op->stop_indices);
EXPECT_EQ(op.strides, output_toco_op->strides);
EXPECT_EQ(op.begin_mask, output_toco_op->begin_mask);
EXPECT_EQ(op.end_mask, output_toco_op->end_mask);
EXPECT_EQ(op.ellipsis_mask, output_toco_op->ellipsis_mask);
EXPECT_EQ(op.new_axis_mask, output_toco_op->new_axis_mask);
EXPECT_EQ(op.shrink_axis_mask, output_toco_op->shrink_axis_mask);
}
TEST_F(OperatorTest, BuiltinTopKV2) {
TopKV2Operator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TOPK_V2", OperatorType::kTopK_V2), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinArgMax) {
ArgMaxOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ARG_MAX", OperatorType::kArgMax), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinArgMin) {
ArgMinOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ARG_MIN", OperatorType::kArgMin), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinDequantize) {
DequantizeOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEQUANTIZE", OperatorType::kDequantize), op);
}
TEST_F(OperatorTest, BuiltinTransposeConv) {
TransposeConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TRANSPOSE_CONV", OperatorType::kTransposeConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
}
TEST_F(OperatorTest, BuiltinShape) {
TensorFlowShapeOperator op;
op.output_data_type = ArrayDataType::kInt64;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SHAPE", OperatorType::kShape), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinSparseToDense) {
SparseToDenseOperator op;
op.validate_indices = false;
std::unique_ptr<toco::SparseToDenseOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("SPARSE_TO_DENSE", OperatorType::kSparseToDense), op);
EXPECT_EQ(op.validate_indices, output_toco_op->validate_indices);
}
TEST_F(OperatorTest, VersioningSparseToDense) {
SparseToDenseOperator op;
op.inputs = {"indices", "output_shape", "input_values", "default_value"};
auto operator_by_type_map = BuildOperatorByTypeMap(false );
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model int32_model;
Array& int32_array = int32_model.GetOrCreateArray(op.inputs[2]);
int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &op, .model = &int32_model};
EXPECT_EQ(base_op->GetVersion(int32_signature), 1);
Model int64_model;
Array& int64_array = int64_model.GetOrCreateArray(op.inputs[2]);
int64_array.data_type = ArrayDataType::kInt64;
OperatorSignature int64_signature = {.op = &op, .model = &int64_model};
EXPECT_EQ(base_op->GetVersion(int64_signature), 2);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[2]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 3);
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[2]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 3);
}
TEST_F(OperatorTest, BuiltinPack) {
PackOperator op;
op.values_count = 3;
op.axis = 1;
std::unique_ptr<toco::PackOperator> output_toco_op =
SerializeAndDeserialize(GetOperator("PACK", OperatorType::kPack), op);
EXPECT_EQ(op.values_count, output_toco_op->values_count);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinOneHot) {
OneHotOperator op;
op.axis = 2;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ONE_HOT", OperatorType::kOneHot), op);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinUnpack) {
UnpackOperator op;
op.num = 5;
op.axis = 2;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("UNPACK", OperatorType::kUnpack), op);
EXPECT_EQ(op.num, output_toco_op->num);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinLeakyRelu) {
LeakyReluOperator op;
op.alpha = 3;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("LEAKY_RELU", OperatorType::kLeakyRelu), op);
EXPECT_EQ(op.alpha, output_toco_op->alpha);
}
TEST_F(OperatorTest, BuiltinSquaredDifference) {
SquaredDifferenceOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SQUARED_DIFFERENCE", OperatorType::kSquaredDifference), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinScatterNd) {
ScatterNdOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SCATTER_ND", OperatorType::kScatterNd), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, CustomCTCBeamSearchDecoder) {
CTCBeamSearchDecoderOperator op;
op.beam_width = 3;
op.top_paths = 2;
op.merge_repeated = false;
std::unique_ptr<toco::CTCBeamSearchDecoderOperator> output_toco_op =
SerializeAndDeserialize(GetOperator("CTC_BEAM_SEARCH_DECODER",
OperatorType::kCTCBeamSearchDecoder),
op);
EXPECT_EQ(op.beam_width, output_toco_op->beam_width);
EXPECT_EQ(op.top_paths, output_toco_op->top_paths);
EXPECT_EQ(op.merge_repeated, output_toco_op->merge_repeated);
}
TEST_F(OperatorTest, TensorFlowUnsupported) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
::tensorflow::NodeDef node_def;
auto attr = node_def.mutable_attr();
(*attr)["float_attr"].set_f(2.0);
(*attr)["str_attr"].set_s("Hello World");
(*attr)["int_attr"].set_i(17);
(*attr)["bool_attr"].set_b(true);
{
auto* list = (*attr)["list_string_attr"].mutable_list();
list->add_s("abcde");
list->add_s("1234");
list->add_s("");
list->add_s("zyxwv");
list->add_s("!-.");
}
{
auto* list = (*attr)["list_float_attr"].mutable_list();
list->add_f(std::numeric_limits<float>::min());
list->add_f(2.0);
list->add_f(-std::numeric_limits<float>::max());
}
{
auto* list = (*attr)["list_int_attr"].mutable_list();
list->add_i(1);
list->add_i(20);
list->add_i(1LL << 40);
list->add_i(-(1LL << 40));
}
node_def.SerializeToString(&op.tensorflow_node_def);
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported), op);
::tensorflow::NodeDef output_node_def;
output_node_def.ParseFromString(output_toco_op->tensorflow_node_def);
const auto& output_attr = output_node_def.attr();
EXPECT_EQ(2.0, output_attr.at("float_attr").f());
EXPECT_EQ("Hello World", output_attr.at("str_attr").s());
EXPECT_EQ(17, output_attr.at("int_attr").i());
EXPECT_EQ(true, output_attr.at("bool_attr").b());
{
const auto& list = output_attr.at("list_string_attr").list();
ASSERT_EQ(5, list.s_size());
EXPECT_EQ("abcde", list.s(0));
EXPECT_EQ("1234", list.s(1));
EXPECT_EQ("", list.s(2));
EXPECT_EQ("zyxwv", list.s(3));
EXPECT_EQ("!-.", list.s(4));
}
{
const auto& list = output_attr.at("list_float_attr").list();
ASSERT_EQ(3, list.f_size());
EXPECT_EQ(std::numeric_limits<float>::min(), list.f(0));
EXPECT_EQ(2.0, list.f(1));
EXPECT_EQ(-std::numeric_limits<float>::max(), list.f(2));
}
{
const auto& list = output_attr.at("list_int_attr").list();
ASSERT_EQ(4, list.i_size());
EXPECT_EQ(1, list.i(0));
EXPECT_EQ(20, list.i(1));
EXPECT_EQ(1LL << 40, list.i(2));
EXPECT_EQ(-(1LL << 40), list.i(3));
}
}
TEST_F(OperatorTest, TensorFlowUnsupportedWithoutAttr) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported), op);
::tensorflow::NodeDef output_node_def;
output_node_def.ParseFromString(output_toco_op->tensorflow_node_def);
EXPECT_TRUE(output_node_def.attr().empty());
}
TEST_F(OperatorTest, TestShouldExportAsFlexOp) {
EXPECT_FALSE(ShouldExportAsFlexOp(false, "Conv2D"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "Conv2D"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "EluGrad"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "RFFT"));
EXPECT_FALSE(ShouldExportAsFlexOp(true, "MyAwesomeCustomOp"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "RandomShuffle"));
}
TEST_F(OperatorTest, BuiltinMirrorPad) {
MirrorPadOperator op;
op.mode = MirrorPadMode::kReflect;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("MIRROR_PAD", OperatorType::kMirrorPad), op);
EXPECT_EQ(op.mode, output_toco_op->mode);
}
TEST_F(OperatorTest, BuiltinUnique) {
UniqueOperator op;
op.idx_out_type = ArrayDataType::kInt64;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("UNIQUE", OperatorType::kUnique), op);
ASSERT_NE(nullptr, output_toco_op.get());
EXPECT_EQ(output_toco_op->idx_out_type, op.idx_out_type);
}
TEST_F(OperatorTest, BuiltinSegmentSum) {
SegmentSumOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SEGMENT_SUM", OperatorType::kSegmentSum), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinReverseSequence) {
ReverseSequenceOperator op;
op.seq_dim = 3;
op.batch_dim = 1;
std::unique_ptr<toco::ReverseSequenceOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("REVERSE_SEQUENCE", OperatorType::kReverseSequence), op);
EXPECT_EQ(op.seq_dim, output_toco_op->seq_dim);
EXPECT_EQ(op.batch_dim, output_toco_op->batch_dim);
}
TEST_F(OperatorTest, BuiltinMatrixDiag) {
MatrixDiagOperator op;
std::unique_ptr<toco::MatrixDiagOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("MATRIX_DIAG", OperatorType::kMatrixDiag), op);
}
TEST_F(OperatorTest, BuiltinMatrixSetDiag) {
MatrixSetDiagOperator op;
std::unique_ptr<toco::MatrixSetDiagOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("MATRIX_SET_DIAG", OperatorType::kMatrixSetDiag), op);
}
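// Shared check for ops whose version is 1 with uint8 inputs and 2 with int8
// inputs.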
template <typename Op>
void SimpleVersioningTest() {
Op op;
op.inputs = {"input1"};
auto operator_by_type_map = BuildOperatorByTypeMap(false );
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
template <typename Op>
void SimpleOutputVersioningTest() {
Op op;
op.outputs = {"output1"};
auto operator_by_type_map = BuildOperatorByTypeMap(false );
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.outputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.outputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
TEST_F(OperatorTest, VersioningEqualTest) {
SimpleVersioningTest<TensorFlowEqualOperator>();
}
TEST_F(OperatorTest, VersioningNotEqualTest) {
SimpleVersioningTest<TensorFlowNotEqualOperator>();
}
TEST_F(OperatorTest, VersioningLessTest) {
SimpleVersioningTest<TensorFlowLessOperator>();
}
TEST_F(OperatorTest, VersioningLessEqualTest) {
SimpleVersioningTest<TensorFlowLessEqualOperator>();
}
TEST_F(OperatorTe |
809 | cpp | tensorflow/tensorflow | quantization_util | tensorflow/lite/toco/graph_transformations/quantization_util.cc | tensorflow/lite/kernels/internal/quantization_util_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_QUANTIZATION_UTIL_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_QUANTIZATION_UTIL_H_
#include <stdint.h>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
absl::Status DequantizeInputs(
TfLiteContext* context, const std::vector<uint32_t>& input_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map);
absl::Status DequantizeInputs(
TfLiteContext* context, const std::vector<int64_t>& input_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map);
absl::Status QuantizeOutputs(
TfLiteContext* context, const std::vector<uint32_t>& output_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map);
absl::Status QuantizeOutputs(
TfLiteContext* context, const std::vector<int64_t>& output_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/quantization_util.h"
#include <stdint.h>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace gpu {
namespace {
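// Dequantizes one int8/uint8 tensor into its float companion tensor, skipping
// indices that are absent from the conversion map.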
void DequantizeInput(
TfLiteContext* context, int input_index,
const absl::flat_hash_map<int, int>& quant_conversion_map) {
if (quant_conversion_map.find(input_index) == quant_conversion_map.end()) {
return;
}
int original_tensor_idx = quant_conversion_map.at(input_index);
const TfLiteTensor& dequantized_tflite_tensor = context->tensors[input_index];
const TfLiteTensor& original_tflite_tensor =
context->tensors[original_tensor_idx];
DequantizationParams op_params;
op_params.zero_point = original_tflite_tensor.params.zero_point;
op_params.scale = original_tflite_tensor.params.scale;
if (original_tflite_tensor.type == kTfLiteInt8) {
optimized_ops::Dequantize(op_params,
GetTensorShape(&original_tflite_tensor),
original_tflite_tensor.data.int8,
GetTensorShape(&original_tflite_tensor),
dequantized_tflite_tensor.data.f);
} else if (original_tflite_tensor.type == kTfLiteUInt8) {
optimized_ops::Dequantize(op_params,
GetTensorShape(&original_tflite_tensor),
original_tflite_tensor.data.uint8,
GetTensorShape(&original_tflite_tensor),
dequantized_tflite_tensor.data.f);
}
}
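// Quantizes the float companion tensor back into the original int8/uint8
// output tensor, skipping unmapped indices.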
void QuantizeOutput(TfLiteContext* context, int output_index,
const absl::flat_hash_map<int, int>& quant_conversion_map) {
if (quant_conversion_map.find(output_index) == quant_conversion_map.end()) {
return;
}
int original_tensor_idx = quant_conversion_map.at(output_index);
const TfLiteTensor& dequantized_tflite_tensor =
context->tensors[output_index];
const TfLiteTensor& original_tflite_tensor =
context->tensors[original_tensor_idx];
tflite::QuantizationParams op_params;
op_params.zero_point = original_tflite_tensor.params.zero_point;
op_params.scale = original_tflite_tensor.params.scale;
if (original_tflite_tensor.type == kTfLiteInt8) {
optimized_ops::AffineQuantize(op_params,
GetTensorShape(&original_tflite_tensor),
dequantized_tflite_tensor.data.f,
GetTensorShape(&original_tflite_tensor),
original_tflite_tensor.data.int8);
} else if (original_tflite_tensor.type == kTfLiteUInt8) {
optimized_ops::AffineQuantize(op_params,
GetTensorShape(&original_tflite_tensor),
dequantized_tflite_tensor.data.f,
GetTensorShape(&original_tflite_tensor),
original_tflite_tensor.data.uint8);
}
}
}
absl::Status DequantizeInputs(
TfLiteContext* context, const std::vector<uint32_t>& input_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map) {
for (auto index : input_indices) {
DequantizeInput(context, static_cast<int>(index), quant_conversion_map);
}
return absl::OkStatus();
}
absl::Status DequantizeInputs(
TfLiteContext* context, const std::vector<int64_t>& input_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map) {
for (auto index : input_indices) {
DequantizeInput(context, static_cast<int>(index), quant_conversion_map);
}
return absl::OkStatus();
}
absl::Status QuantizeOutputs(
TfLiteContext* context, const std::vector<uint32_t>& output_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map) {
for (auto index : output_indices) {
QuantizeOutput(context, static_cast<int>(index), quant_conversion_map);
}
return absl::OkStatus();
}
absl::Status QuantizeOutputs(
TfLiteContext* context, const std::vector<int64_t>& output_indices,
const absl::flat_hash_map<int, int>& quant_conversion_map) {
for (auto index : output_indices) {
QuantizeOutput(context, static_cast<int>(index), quant_conversion_map);
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/quantization_util.h"
#include <stdint.h>
#include <algorithm>
#include <limits>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/common.h"
using ::testing::Eq;
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace {
void PopulateContext(std::vector<TfLiteTensor>& tensors,
TfLiteContext& context) {
context.tensors_size = tensors.size();
context.tensors = tensors.data();
context.recommended_num_threads = 1;
}
int ElementCount(const TfLiteIntArray& dims) {
int result = 1;
for (int i = 0; i < dims.size; ++i) {
result *= dims.data[i];
}
return result;
}
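// Helpers that derive the affine quantization scale and zero point for type T
// from a float [min, max] range.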
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
return (max - min) / ((std::numeric_limits<T>::max() * 1.0) -
std::numeric_limits<T>::min());
}
template <typename T>
inline int ZeroPointFromMinMax(const float min, const float max) {
return static_cast<int>(std::numeric_limits<T>::min()) +
static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
}
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
const char* name, float min, float max,
bool is_variable) {
TfLiteTensor result;
result.type = kTfLiteInt8;
result.data.int8 = const_cast<int8_t*>(data);
result.dims = dims;
result.params = {ScaleFromMinMax<int8_t>(min, max),
ZeroPointFromMinMax<int8_t>(min, max)};
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(int8_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
const char* name, float min, float max,
bool is_variable) {
TfLiteTensor result;
result.type = kTfLiteUInt8;
result.data.uint8 = const_cast<uint8_t*>(data);
result.dims = dims;
result.params = {ScaleFromMinMax<uint8_t>(min, max),
ZeroPointFromMinMax<uint8_t>(min, max)};
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(uint8_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = false;
return result;
}
TfLiteTensor CreateTensor(TfLiteIntArray* dims, const char* name,
bool is_variable) {
TfLiteTensor result;
result.dims = dims;
result.name = name;
result.params = {};
result.quantization = {kTfLiteNoQuantization, nullptr};
result.is_variable = is_variable;
result.allocation_type = kTfLiteMemNone;
result.allocation = nullptr;
return result;
}
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
result.type = kTfLiteFloat32;
result.data.f = const_cast<float*>(data);
result.bytes = ElementCount(*dims) * sizeof(float);
return result;
}
TEST(DequantizeInputs, Int8) {
TfLiteContext context;
auto input_dims = BuildTfLiteArray({1, 3, 2, 1});
std::vector<int8_t> data = {-3, -2, -1, 1, 2, 3};
std::vector<float> dequantized_data(data.size());
TfLiteTensor input = CreateQuantizedTensor(
data.data(), input_dims.get(), "input",
-12.8f, 12.7f, false);
TfLiteTensor dequantized_input = CreateFloatTensor(
dequantized_data.data(), input_dims.get(), "input_dequant",
true);
std::vector<TfLiteTensor> tensors{input, dequantized_input};
PopulateContext(tensors, context);
std::vector<uint32_t> input_indices = {1};
absl::flat_hash_map<int, int> quant_conversion_map = {{1, 0}};
auto status = DequantizeInputs(&context, input_indices, quant_conversion_map);
EXPECT_TRUE(status.ok());
EXPECT_THAT(dequantized_data,
Pointwise(FloatNear(1e-6), {-0.3, -0.2, -0.1, 0.1, 0.2, 0.3}));
}
TEST(DequantizeInputs, UInt8) {
TfLiteContext context;
auto input_dims = BuildTfLiteArray({1, 3, 2, 1});
std::vector<uint8_t> data = {0, 1, 2, 3, 4, 5};
std::vector<float> dequantized_data(data.size());
TfLiteTensor input =
CreateQuantizedTensor(data.data(), input_dims.get(), "input",
0.0f, 25.5f, false);
TfLiteTensor dequantized_input = CreateFloatTensor(
dequantized_data.data(), input_dims.get(), "input_dequant",
true);
std::vector<TfLiteTensor> tensors{input, dequantized_input};
PopulateContext(tensors, context);
std::vector<int64_t> input_indices = {1};
absl::flat_hash_map<int, int> quant_conversion_map = {{1, 0}};
auto status = DequantizeInputs(&context, input_indices, quant_conversion_map);
EXPECT_TRUE(status.ok());
EXPECT_THAT(dequantized_data,
Pointwise(FloatNear(1e-6), {0.0, 0.1, 0.2, 0.3, 0.4, 0.5}));
}
TEST(QuantizeOutputs, Int8) {
TfLiteContext context;
auto input_dims = BuildTfLiteArray({1, 3, 2, 1});
std::vector<float> data = {-0.3, -0.2, -0.1, 0.1, 0.2, 0.3};
std::vector<int8_t> quantized_data(data.size());
TfLiteTensor output = CreateFloatTensor(data.data(), input_dims.get(),
"output", false);
TfLiteTensor quantized_output = CreateQuantizedTensor(
quantized_data.data(), input_dims.get(), "output_quant",
-12.8f, 12.7f, true);
std::vector<TfLiteTensor> tensors{output, quantized_output};
PopulateContext(tensors, context);
std::vector<uint32_t> output_indices = {0};
absl::flat_hash_map<int, int> quant_conversion_map = {{0, 1}};
auto status = QuantizeOutputs(&context, output_indices, quant_conversion_map);
EXPECT_TRUE(status.ok());
EXPECT_THAT(quantized_data, Pointwise(Eq(), {-3, -2, -1, 1, 2, 3}));
}
TEST(QuantizeOutputs, UInt8) {
TfLiteContext context;
auto input_dims = BuildTfLiteArray({1, 3, 2, 1});
std::vector<float> data = {0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
std::vector<uint8_t> quantized_data(data.size());
TfLiteTensor output = CreateFloatTensor(data.data(), input_dims.get(),
"output", false);
TfLiteTensor quantized_output = CreateQuantizedTensor(
quantized_data.data(), input_dims.get(), "output_quant",
0.0f, 25.5f, true);
std::vector<TfLiteTensor> tensors{output, quantized_output};
PopulateContext(tensors, context);
std::vector<int64_t> output_indices = {0};
absl::flat_hash_map<int, int> quant_conversion_map = {{0, 1}};
auto status = QuantizeOutputs(&context, output_indices, quant_conversion_map);
EXPECT_TRUE(status.ok());
EXPECT_THAT(quantized_data, Pointwise(Eq(), {0, 1, 2, 3, 4, 5}));
}
}
}
} |
810 | cpp | tensorflow/tensorflow | quantize | tensorflow/lite/toco/graph_transformations/quantize.cc | third_party/xla/xla/client/lib/quantize_test.cc | #ifndef XLA_CLIENT_LIB_QUANTIZE_H_
#define XLA_CLIENT_LIB_QUANTIZE_H_
#include <algorithm>
#include <limits>
#include <numeric>
#include <vector>
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/bfloat16.h"
namespace xla {
struct QuantizedRange {
QuantizedRange() = default;
QuantizedRange(float min_in, float max_in) : min(min_in), max(max_in) {}
bool operator==(const QuantizedRange& rhs) const {
return this->min == rhs.min && this->max == rhs.max;
}
bool operator!=(const QuantizedRange& rhs) const { return !(*this == rhs); }
tsl::bfloat16 min = tsl::bfloat16(0.0f);
tsl::bfloat16 max = tsl::bfloat16(0.0f);
};
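// Packs sizeof(uint32_t)/sizeof(T) consecutive elements into each uint32_t,
// with earlier elements occupying the more significant bits.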
template <typename T>
inline std::vector<uint32_t> PackToUint32(absl::Span<const T> input) {
const int64_t kElementsPerPack = sizeof(uint32_t) / sizeof(T);
const int64_t input_size = input.size();
const int64_t output_size = CeilOfRatio(input_size, kElementsPerPack);
std::vector<uint32_t> output_vec;
constexpr int64_t kShiftBits = sizeof(T) / sizeof(uint8_t) * CHAR_BIT;
for (int64_t i = 0; i < output_size; i++) {
uint32_t result = 0;
for (int64_t p = 0; p < kElementsPerPack; p++) {
int64_t index = i * kElementsPerPack + p;
if (index < input_size) {
int64_t total_shift_bits = kShiftBits * (kElementsPerPack - p - 1);
result |= (input[index] << total_shift_bits);
}
}
output_vec.push_back(result);
}
return output_vec;
}
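// Unpacks each uint32_t back into T-sized values and dequantizes them; only
// U32 inputs and the MIN_COMBINED mode are supported.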
template <typename T>
inline XlaOp Dequantize(XlaOp input, const QuantizedRange& range,
absl::string_view mode_string = "MIN_COMBINED",
bool transpose_output = false) {
XlaBuilder* const builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
float half_range =
!std::is_signed<T>::value
? 0.0f
: (static_cast<float>(std::numeric_limits<T>::max()) -
std::numeric_limits<T>::min() + 1) /
2.0f;
const int64_t unpack_size = sizeof(uint32_t) / sizeof(T);
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(input));
auto element_type = shape.element_type();
if (element_type != U32) {
return InvalidArgument(
"Only U32 is supported for input type of xla::Dequantize Op.");
}
auto broadcast_input = Broadcast(input, {unpack_size});
XlaOp iota_r1 = Iota(builder, U32, unpack_size);
XlaOp shift_bytes =
xla::ConstantR0<uint32_t>(builder, unpack_size - 1) - iota_r1;
const int bytes_of_type = sizeof(T) / sizeof(uint8_t);
std::vector<uint32_t> shift_vec(unpack_size, CHAR_BIT * bytes_of_type);
XlaOp shift_bits =
shift_bytes * xla::ConstantR1<uint32_t>(builder, shift_vec);
uint32_t bit_mask = 0x00000000;
for (int i = 0; i < bytes_of_type; i++) {
bit_mask <<= CHAR_BIT;
bit_mask |= 0x000000ff;
}
std::vector<int64_t> shift_transpose_dimensions(shape.dimensions_size());
std::iota(shift_transpose_dimensions.begin(),
shift_transpose_dimensions.end(), 0);
shift_transpose_dimensions.insert(shift_transpose_dimensions.begin(), 1,
shape.dimensions_size());
XlaOp shifted_input = ShiftRightLogical(
broadcast_input, Transpose(Broadcast(shift_bits, shape.dimensions()),
shift_transpose_dimensions));
XlaOp unpack_input =
And(shifted_input, xla::ConstantR0<uint32_t>(builder, bit_mask));
XlaOp result;
if (mode_string == "MIN_COMBINED") {
const tsl::bfloat16 scale_factor =
(range.max - range.min) /
(static_cast<tsl::bfloat16>(std::numeric_limits<T>::max() -
std::numeric_limits<T>::min()));
XlaOp unpack_input_bf16 = ConvertElementType(unpack_input, BF16);
XlaOp half_range_bf16 = xla::ConstantR0<tsl::bfloat16>(
builder, static_cast<bfloat16>(half_range));
XlaOp sum = unpack_input_bf16 + half_range_bf16;
result = sum * xla::ConstantR0<tsl::bfloat16>(builder, scale_factor) +
xla::ConstantR0<tsl::bfloat16>(builder, range.min);
} else {
return InvalidArgument(
"Only MIN_COMBINED mode is supported in xla::Dequantize Op.");
}
std::vector<int64_t> transpose_dimensions(shape.dimensions_size());
std::iota(transpose_dimensions.begin(), transpose_dimensions.end(), 1);
std::reverse(transpose_dimensions.begin(), transpose_dimensions.end());
transpose_dimensions.insert(transpose_dimensions.begin() + 1, 1, 0);
XlaOp transposed_result = Transpose(result, transpose_dimensions);
XlaOp reshaped_result = Collapse(transposed_result, {0, 1});
if (transpose_output) {
return reshaped_result;
}
std::vector<int64_t> result_dimensions(shape.dimensions_size());
std::iota(result_dimensions.begin(), result_dimensions.end(), 0);
std::reverse(result_dimensions.begin(), result_dimensions.end());
return Transpose(reshaped_result, result_dimensions);
});
}
}
#endif
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir {
namespace quant {
namespace {
using ::tensorflow::quantization::OpSet;
enum QuantizationTrait { kFullQuantization, kDynamicRangeQuantization };
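// Shared base for the TF quantization rewrite patterns, specializing the
// generic QuantizationPattern on quantfork QuantizeCast/DequantizeCast ops.
// Under full quantization, dynamic-range quantized operands/results are only
// allowed for composite gather functions.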
template <QuantizationTrait quantization_trait, typename ConcreteT,
typename RootOpT = quantfork::DequantizeCastOp>
struct TFQuantizationBase
: public QuantizationPattern<ConcreteT, quantfork::QuantizeCastOp,
quantfork::DequantizeCastOp,
void, RootOpT> {
explicit TFQuantizationBase(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: QuantizationPattern<ConcreteT, quantfork::QuantizeCastOp,
quantfork::DequantizeCastOp,
void, RootOpT>(ctx, quant_params) {}
static bool IsQuantizableCustomOp(Operation* op,
const CustomMap& custom_op_map) {
return false;
}
static bool AllowDynamicRangeQuantizedOperand(
Operation* quantized_op, const CustomMap& custom_op_map) {
auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
StringRef function_name =
call_op.getFAttr().cast<FlatSymbolRefAttr>().getValue();
const bool is_gather = function_name.contains("gather");
return quantization_trait != kFullQuantization || is_gather;
}
static bool AllowDynamicRangeQuantizedResult(Operation* quantized_op,
const CustomMap& custom_op_map) {
auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
StringRef function_name =
call_op.getFAttr().cast<FlatSymbolRefAttr>().getValue();
    const bool is_gather = function_name.contains("gather");
    return quantization_trait != kFullQuantization || is_gather;
}
static bool IsWeightOnlyOp(Operation* quantized_op,
absl::flat_hash_set<std::string>& ops_blocklist,
bool weight_only_quantization,
const CustomMap& custom_op_map) {
return weight_only_quantization;
}
};
struct TFFullQuantization
: public TFQuantizationBase<kFullQuantization, TFFullQuantization> {
explicit TFFullQuantization(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: TFQuantizationBase<kFullQuantization, TFFullQuantization>(
ctx, quant_params) {}
};
struct TFFullQuantizationReverse
: public TFQuantizationBase<kFullQuantization, TFFullQuantizationReverse,
quantfork::QuantizeCastOp> {
explicit TFFullQuantizationReverse(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: TFQuantizationBase<kFullQuantization, TFFullQuantizationReverse,
quantfork::QuantizeCastOp>(ctx, quant_params) {}
};
struct TFDynamicRangeQuantization
: public TFQuantizationBase<kDynamicRangeQuantization,
TFDynamicRangeQuantization> {
explicit TFDynamicRangeQuantization(MLIRContext* ctx,
const quant::QuantPassSpec& quant_params)
: TFQuantizationBase<kDynamicRangeQuantization,
TFDynamicRangeQuantization>(ctx, quant_params) {}
};
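// Removes a dequantize op that directly follows a quantize op by forwarding
// the original float value, i.e. drops unused Q-DQ pairs.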
class RemoveUnusedQdqPattern
: public OpRewritePattern<quantfork::DequantizeCastOp> {
public:
explicit RemoveUnusedQdqPattern(MLIRContext* context)
: OpRewritePattern<quantfork::DequantizeCastOp>(context) {}
LogicalResult matchAndRewrite(quantfork::DequantizeCastOp dq_op,
PatternRewriter& rewriter) const override {
auto q_op = dq_op.getArg().getDefiningOp<quantfork::QuantizeCastOp>();
if (!q_op) return failure();
dq_op.replaceAllUsesWith(q_op.getArg());
return success();
}
};
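// Quantizes ops that must share the same scale as their quantized neighbors:
// operands produced by dequantize ops and results consumed by quantize ops
// are rewired through storage casts so the op is recreated directly on the
// quantized storage type.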
class QuantizeSameScaleOpsPattern
: public OpRewritePattern<quantfork::DequantizeCastOp> {
public:
explicit QuantizeSameScaleOpsPattern(
MLIRContext* context, OpQuantScaleSpecGetter op_quant_scale_spec_getter,
OpSet target_opset)
: OpRewritePattern<quantfork::DequantizeCastOp>(context, 200),
op_quant_scale_spec_getter_(op_quant_scale_spec_getter),
target_opset_(target_opset) {}
LogicalResult matchAndRewrite(quantfork::DequantizeCastOp op,
PatternRewriter& rewriter) const override {
SmallVector<Operation*, 4> quantizing_ops;
auto users = op.getResult().getUsers();
quantizing_ops.append(users.begin(), users.end());
bool changed = false;
for (Operation* quantizing_op : quantizing_ops) {
if (llvm::isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(
quantizing_op)) {
return failure();
}
if (quantizing_op->hasTrait<OpTrait::IsTerminator>()) {
return failure();
}
if (!op_quant_scale_spec_getter_(quantizing_op)
->has_same_scale_requirement) {
continue;
}
if (target_opset_ == OpSet::XLA &&
          !IsConnectedWithCompositeFunction(quantizing_op)) {
continue;
}
if (target_opset_ == OpSet::UNIFORM_QUANTIZED) {
continue;
}
SmallVector<Value, 4> inputs;
inputs.reserve(quantizing_op->getNumOperands());
for (const auto& operand : quantizing_op->getOperands()) {
Type operand_type = operand.getType();
if (operand_type.isa<NoneType>()) {
inputs.push_back(operand);
continue;
}
Type elem_type = operand_type.cast<TensorType>().getElementType();
if (auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
operand.getDefiningOp())) {
auto dq_arg_type = dq_op.getArg().getType().cast<TensorType>();
auto qtype = dq_arg_type.getElementType().cast<QuantizedType>();
auto scast_op = rewriter.create<quantfork::StorageCastOp>(
dq_op->getLoc(), dq_arg_type.clone(qtype.getStorageType()),
dq_op.getArg());
inputs.push_back(scast_op.getResult());
} else if (!elem_type.isF32()) {
inputs.push_back(operand);
} else {
return failure();
}
}
llvm::SmallDenseMap<Value, int> outputs_replaced;
SmallVector<Type, 4> output_types;
output_types.reserve(quantizing_op->getNumResults());
for (const auto& enumerated_result :
llvm::enumerate(quantizing_op->getResults())) {
Value result = enumerated_result.value();
Type result_type = result.getType();
if (result_type.isa<NoneType>()) {
outputs_replaced.insert({result, enumerated_result.index()});
output_types.push_back(result_type);
continue;
}
auto result_tensor_type = result_type.cast<TensorType>();
if (result.hasOneUse() &&
llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
auto user =
llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
outputs_replaced.insert(
{user.getResult(), enumerated_result.index()});
auto qtype = user.getType()
.cast<TensorType>()
.getElementType()
.cast<QuantizedType>();
output_types.push_back(
result_tensor_type.clone(qtype.getStorageType()));
} else if (!result_tensor_type.getElementType().isF32()) {
outputs_replaced.insert({result, enumerated_result.index()});
output_types.push_back(result.getType());
} else {
return failure();
}
}
rewriter.setInsertionPointAfter(quantizing_op);
OperationState new_state(quantizing_op->getLoc(),
quantizing_op->getName().getStringRef(), inputs,
output_types, quantizing_op->getAttrs());
for (int i = 0; i < quantizing_op->getNumRegions(); ++i) {
new_state.addRegion();
}
Operation* quantized_op = rewriter.create(new_state);
if (quantizing_op->getNumRegions() != 0) {
for (const auto& indexed_regions :
llvm::enumerate(quantizing_op->getRegions())) {
IRMapping mapping;
indexed_regions.value().cloneInto(
&quantized_op->getRegion(indexed_regions.index()), mapping);
}
}
for (const auto& output_index_pair : outputs_replaced) {
Value output = output_index_pair.getFirst();
int output_index = output_index_pair.getSecond();
auto scast_op = rewriter.create<quantfork::StorageCastOp>(
output.getLoc(), output.getType(),
quantized_op->getResult(output_index));
output.replaceAllUsesWith(scast_op);
}
changed = true;
}
return success(changed);
}
private:
  // Returns true if the same-scale op is connected, through dequantize/quantize
  // casts or storage casts, to a quantized composite function.
  bool IsConnectedWithCompositeFunction(Operation* same_scale_op) const {
for (const auto& operand : same_scale_op->getOperands()) {
auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
operand.getDefiningOp());
if (!dq_op) continue;
Operation* preceding_op = dq_op.getArg().getDefiningOp();
if (!preceding_op) continue;
if (llvm::isa<TF::PartitionedCallOp>(preceding_op)) {
auto call_op = llvm::cast<TF::PartitionedCallOp>(preceding_op);
if (!IsCompositeFunction(call_op)) continue;
return true;
}
if (llvm::isa<quantfork::StorageCastOp>(preceding_op)) {
auto sc_op = llvm::cast<quantfork::StorageCastOp>(preceding_op);
auto sc_arg_type = sc_op.getArg().getType().dyn_cast<TensorType>();
if (sc_arg_type.getElementType().isInteger(8)) {
return true;
}
}
}
for (const auto& result : same_scale_op->getResults()) {
if (!result.hasOneUse() ||
!llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
continue;
}
auto q_op = llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
for (auto following_op : q_op->getUsers()) {
if (llvm::isa<TF::PartitionedCallOp>(following_op)) {
auto call_op = llvm::cast<TF::PartitionedCallOp>(following_op);
if (!IsCompositeFunction(call_op)) continue;
return true;
}
if (llvm::isa<quantfork::StorageCastOp>(following_op)) {
auto sc_op = llvm::cast<quantfork::StorageCastOp>(following_op);
auto sc_arg_type = sc_op.getResult().getType().dyn_cast<TensorType>();
if (sc_arg_type.getElementType().isInteger(8)) {
return true;
}
}
}
}
return false;
}
bool IsCompositeFunction(TF::PartitionedCallOp call_op) const {
if (!call_op->hasAttr(kQuantTraitAttrName)) {
return false;
}
const auto f_attr = call_op.getFAttr().dyn_cast<FlatSymbolRefAttr>();
if (!f_attr || !f_attr.getValue().starts_with("composite_")) {
return false;
}
bool has_quantized_types = false;
for (Value input : call_op.getArgs()) {
if (auto type = input.getType().dyn_cast<TensorType>()) {
if (type.getElementType().isa<FloatType>()) {
return false;
}
if (type.getElementType().isa<QuantizedType>()) {
has_quantized_types = true;
}
}
}
for (Value output : call_op.getOutput()) {
if (auto type = output.getType().dyn_cast<TensorType>()) {
if (type.getElementType().isa<FloatType>()) {
return false;
}
if (type.getElementType().isa<QuantizedType>()) {
has_quantized_types = true;
}
}
}
return has_quantized_types;
}
OpQuantScaleSpecGetter op_quant_scale_spec_getter_;
OpSet target_opset_;
};
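// Rewrites an AvgPool sandwiched between two storage casts of the same
// quantized type into cast-to-float -> float AvgPool -> round -> cast back to
// the integer storage type.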
struct QuantizeAvgPoolOpPattern
: public OpRewritePattern<quantfork::StorageCastOp> {
explicit QuantizeAvgPoolOpPattern(MLIRContext* context)
: OpRewritePattern<quantfork::StorageCastOp>(context, 100) {}
LogicalResult matchAndRewrite(quantfork::StorageCastOp sc_op,
PatternRewriter& rewriter) const override {
auto avg_pool_op = sc_op.getArg().getDefiningOp<TF::AvgPoolOp>();
if (!avg_pool_op) return failure();
auto preceding_sc_op = dyn_cast_or_null<quantfork::StorageCastOp>(
avg_pool_op.getValue().getDefiningOp());
if (!preceding_sc_op) return failure();
auto dq_arg_type = preceding_sc_op.getArg().getType().cast<TensorType>();
auto qtype = dq_arg_type.getElementType().cast<QuantizedType>();
auto q_result_type = sc_op.getType().cast<TensorType>();
auto out_qtype = q_result_type.getElementType().cast<QuantizedType>();
if (qtype != out_qtype) {
avg_pool_op.emitError(
"The preceding StorageCastOp and the following "
"StorageCastOp must have the same quantized type");
return failure();
}
OpBuilder::InsertionGuard g(rewriter);
rewriter.setInsertionPointAfter(preceding_sc_op);
auto fcast_op = rewriter.create<TF::CastOp>(
preceding_sc_op->getLoc(), dq_arg_type.clone(rewriter.getF32Type()),
preceding_sc_op.getResult());
TF::AvgPoolOp float_avg_pool_op = rewriter.create<TF::AvgPoolOp>(
avg_pool_op->getLoc(),
avg_pool_op.getType().clone(rewriter.getF32Type()),
fcast_op.getResult(),
avg_pool_op->getAttrs());
auto round_val = rewriter.create<TF::RoundOp>(
sc_op.getLoc(), float_avg_pool_op.getOutput());
auto icast_op = rewriter.create<TF::CastOp>(
sc_op.getLoc(), q_result_type.clone(qtype.getStorageType()), round_val);
avg_pool_op.getResult().replaceAllUsesWith(icast_op.getResult());
return success();
}
};
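// Pass applying the quantization patterns above to each function; the weight
// (dynamic-range) quantization mode and the target op set are pass options.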
class QuantizePass
: public PassWrapper<QuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)
explicit QuantizePass() {
quant_specs_.inference_type = tensorflow::DT_QINT8;
}
explicit QuantizePass(const QuantizationSpecs& quant_specs,
OpSet target_opset)
: quant_specs_(quant_specs) {
weight_quantization_ = quant_specs.weight_quantization;
target_opset_ = target_opset;
}
QuantizePass(const QuantizePass& other) : quant_specs_(other.quant_specs_) {
weight_quantization_ = other.weight_quantization_;
target_opset_ = other.target_opset_;
}
StringRef getArgument() const final {
return "quant-quantize";
}
StringRef getDescription() const final {
return "Apply quantization on models in TensorFlow dialect";
}
bool shouldKeepUnusedQdqPattern();
void runOnOperation() override;
private:
QuantizationSpecs quant_specs_;
Option<bool> weight_quantization_{
*this, "weight-quantization", llvm::cl::init(false),
llvm::cl::desc("Whether to enable weight quantization.")};
Option<OpSet> target_opset_{
*this, "target-opset", llvm::cl::init(OpSet::TF),
llvm::cl::desc("Choose target opset."),
llvm::cl::values(
clEnumValN(OpSet::TF, "TF",
"Uses TF ops that mimic quantization behavior"),
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
"Uses TF Uniform Quantized ops"))};
};
bool QuantizePass::shouldKeepUnusedQdqPattern() {
return target_opset_ == OpSet::XLA &&
(quant_specs_.weight_only_quantization ||
quant_specs_.weight_quantization);
}
void QuantizePass::runOnOperation() {
RewritePatternSet patterns(&getContext());
auto func = getOperation();
auto* ctx = func.getContext();
quant_specs_.weight_quantization = weight_quantization_;
const QuantPassSpec quant_params = {
{quant_specs_.verify_numeric, 5.0f,
quant_specs_.whole_model_verify, false},
quant_specs_};
if (quant_specs_.weight_quantization) {
patterns.add<TFDynamicRangeQuantization>(ctx, quant_params);
} else {
patterns.add<TFFullQuantization, TFFullQuantizationReverse>(ctx,
quant_params);
patterns.add<QuantizeSameScaleOpsPattern>(ctx, GetTfQuantScaleSpec,
target_opset_);
patterns.add<QuantizeAvgPoolOpPattern>(ctx);
}
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
func.emitWarning("Failed to converge pattern at QuantizePass.");
}
if (!shouldKeepUnusedQdqPattern()) {
RewritePatternSet patterns_2(&getContext());
patterns_2.add<RemoveUnusedQdqPattern>(ctx);
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns_2)))) {
signalPassFailure();
}
}
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass() {
QuantizationSpecs quant_specs;
return std::make_unique<QuantizePass>(quant_specs, OpSet::TF);
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
QuantizationSpecs quant_specs, OpSet target_opset) {
return std::make_unique<QuantizePass>(quant_specs, target_opset);
}
static PassRegistration<QuantizePass> pass;
}
} | #include "xla/client/lib/quantize.h"
#include <limits>
#include <vector>
#include "xla/client/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
namespace {
using bfloat16 = tsl::bfloat16;
template <typename NativeT>
std::vector<NativeT> GenerateInput() {
std::vector<NativeT> input;
const auto n = std::numeric_limits<NativeT>::max();
input.reserve(n);
for (int64_t i = std::numeric_limits<NativeT>::min(); i < n; ++i) {
input.push_back(static_cast<NativeT>(i));
}
return input;
}
template <typename NativeT>
Array2D<NativeT> GenerateLargeSizeInput(int num_columns, int num_rows) {
Array2D<NativeT> input(num_columns, num_rows);
input.FillRandom(6, 128);
return input;
}
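// Packs each row of a 2D input into uint32 words; row width is padded up to a
// multiple of the pack factor.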
template <typename NativeT>
Array2D<uint32_t> PackLargeInput(Array2D<NativeT> &input) {
const int64_t size_per_pack = sizeof(uint32_t) / sizeof(NativeT);
int64_t width = input.width();
int64_t padded_output_width = CeilOfRatio(width, size_per_pack);
Array2D<uint32_t> pack_input(input.height(), padded_output_width);
for (int h = 0; h < input.height(); h++) {
std::vector<NativeT> input_row;
input_row.reserve(width);
for (int w = 0; w < width; w++) {
input_row.push_back(input({h, w}));
}
auto pack_input_vec = PackToUint32<uint8_t>(input_row);
for (int w = 0; w < padded_output_width; w++) {
pack_input(h, w) = pack_input_vec[w];
}
}
return pack_input;
}
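// Computes the expected MIN_COMBINED dequantization of `input`, matching the
// padded width produced by packing, with optionally transposed output.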
template <typename NativeT>
Array2D<bfloat16> GenerateLargeSizeMinCombinedOutput(
Array2D<NativeT> &input, const QuantizedRange &range,
bool transpose_output = false) {
const int64_t size_per_pack = sizeof(uint32_t) / sizeof(NativeT);
int64_t width = input.width();
int64_t padded_output_width =
CeilOfRatio(width, size_per_pack) * size_per_pack;
int64_t output_height;
int64_t output_width;
if (transpose_output) {
output_height = padded_output_width;
output_width = input.height();
} else {
output_height = input.height();
output_width = padded_output_width;
}
Array2D<bfloat16> output(output_height, output_width, bfloat16(0.0));
float half_range =
!std::is_signed<NativeT>::value
? 0.0f
: (static_cast<float>(std::numeric_limits<NativeT>::max() -
std::numeric_limits<NativeT>::min() + 1)) /
2.0f;
const bfloat16 scale_factor =
(range.max - range.min) /
(static_cast<bfloat16>(std::numeric_limits<NativeT>::max() -
std::numeric_limits<NativeT>::min()));
for (int h = 0; h < input.height(); h++) {
std::vector<NativeT> input_row;
input_row.reserve(width);
for (int w = 0; w < width; w++) {
bfloat16 result =
static_cast<bfloat16>(input(h, w) + half_range) * scale_factor +
range.min;
if (transpose_output) {
output(w, h) = result;
} else {
output(h, w) = result;
}
}
}
return output;
}
template <typename NativeT>
std::vector<bfloat16> GenerateMinCombinedOutput(const QuantizedRange &range) {
float half_range =
!std::is_signed<NativeT>::value
? 0.0f
: (static_cast<float>(std::numeric_limits<NativeT>::max() -
std::numeric_limits<NativeT>::min() + 1)) /
2.0f;
const bfloat16 scale_factor =
(range.max - range.min) /
(static_cast<bfloat16>(std::numeric_limits<NativeT>::max() -
std::numeric_limits<NativeT>::min()));
std::vector<bfloat16> output;
const auto n = std::numeric_limits<NativeT>::max();
output.reserve(n);
for (int64_t i = std::numeric_limits<NativeT>::min(); i < n; ++i) {
bfloat16 result =
static_cast<bfloat16>(i + half_range) * scale_factor + range.min;
output.push_back(result);
}
const int64_t pack_size = sizeof(uint32_t) / sizeof(NativeT);
const int64_t output_size = output.size();
int64_t num_tailing_zeros =
CeilOfRatio(output_size, pack_size) * pack_size - output_size;
output.insert(output.end(), num_tailing_zeros, bfloat16(0.0));
return output;
}
using DequantizeTest = ClientLibraryTestBase;
TEST(PackTest, PackUint8ToUint32) {
std::vector<uint8_t> input = {0xAB, 0x0B, 0x00, 0xF0, 0x01};
auto output = PackToUint32<uint8_t>(input);
EXPECT_THAT(output, ::testing::ElementsAre(0xAB0B00F0, 0x01000000));
}
TEST(PackTest, PackInt8ToUint32) {
std::vector<int8_t> input = {static_cast<signed char>(0x81), 0x0B, 0x00, 0x20,
0x01};
auto output = PackToUint32<int8_t>(input);
EXPECT_THAT(output, ::testing::ElementsAre(0x810B0020, 0x01000000));
}
TEST(PackTest, PackUint8ToUint32PerfectSize) {
std::vector<uint8_t> input = {3, 2, 1, 0};
auto output = PackToUint32<uint8_t>(input);
EXPECT_THAT(output, ::testing::ElementsAre(0x03020100));
}
XLA_TEST_F(DequantizeTest, MinCombinedUint16R1) {
XlaBuilder builder(TestName());
auto input = GenerateInput<uint16_t>();
auto x = ConstantR1<uint32_t>(&builder, PackToUint32<uint16_t>(input));
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint16_t>(x, range, "MIN_COMBINED");
auto expected = GenerateMinCombinedOutput<uint16_t>(range);
ComputeAndCompareR1<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8R1) {
XlaBuilder builder(TestName());
auto input = GenerateInput<uint8_t>();
auto x = ConstantR1<uint32_t>(&builder, PackToUint32<uint8_t>(input));
QuantizedRange range(0, 127.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED");
auto expected = GenerateMinCombinedOutput<uint8_t>(range);
ComputeAndCompareR1<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8R2) {
XlaBuilder builder(TestName());
std::vector<std::vector<uint8_t>> input = {
{0, 1, 2, 3},
{4, 5, 6, 7},
{8, 9, 10, 11},
{12, 13, 16, 15},
};
auto x =
ConstantR2<uint32_t>(&builder, {{PackToUint32<uint8_t>(input[0])[0]},
{PackToUint32<uint8_t>(input[1])[0]},
{PackToUint32<uint8_t>(input[2])[0]},
{PackToUint32<uint8_t>(input[3])[0]}});
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED");
const Array2D<bfloat16> expected = {
{bfloat16(0.0), bfloat16(1.0), bfloat16(2.0), bfloat16(3.0)},
{bfloat16(4.0), bfloat16(5.0), bfloat16(6.0), bfloat16(7.0)},
{bfloat16(8.0), bfloat16(9.0), bfloat16(10.0), bfloat16(11.0)},
{bfloat16(12.0), bfloat16(13.0), bfloat16(16.0), bfloat16(15.0)},
};
ComputeAndCompareR2<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8R2TransposeOutput) {
XlaBuilder builder(TestName());
std::vector<std::vector<uint8_t>> input = {
{0, 1, 2, 3},
{4, 5, 6, 7},
{8, 9, 10, 11},
{12, 13, 16, 15},
};
auto x =
ConstantR2<uint32_t>(&builder, {{PackToUint32<uint8_t>(input[0])[0]},
{PackToUint32<uint8_t>(input[1])[0]},
{PackToUint32<uint8_t>(input[2])[0]},
{PackToUint32<uint8_t>(input[3])[0]}});
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED", true);
const Array2D<bfloat16> expected = {
{bfloat16(0.0), bfloat16(4.0), bfloat16(8.0), bfloat16(12.0)},
{bfloat16(1.0), bfloat16(5.0), bfloat16(9.0), bfloat16(13.0)},
{bfloat16(2.0), bfloat16(6.0), bfloat16(10.0), bfloat16(16.0)},
{bfloat16(3.0), bfloat16(7.0), bfloat16(11.0), bfloat16(15.0)},
};
ComputeAndCompareR2<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8R2TailingZero) {
XlaBuilder builder(TestName());
std::vector<std::vector<uint8_t>> input = {
{0, 1, 2, 3, 16},
{4, 5, 6, 7, 17},
{8, 9, 10, 11, 18},
{12, 13, 16, 15, 19},
};
auto x = ConstantR2<uint32_t>(
&builder,
{{PackToUint32<uint8_t>(input[0])[0], PackToUint32<uint8_t>(input[0])[1]},
{PackToUint32<uint8_t>(input[1])[0], PackToUint32<uint8_t>(input[1])[1]},
{PackToUint32<uint8_t>(input[2])[0], PackToUint32<uint8_t>(input[2])[1]},
{PackToUint32<uint8_t>(input[3])[0],
PackToUint32<uint8_t>(input[3])[1]}});
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED");
const Array2D<bfloat16> expected = {
{bfloat16(0.0), bfloat16(1.0), bfloat16(2.0), bfloat16(3.0),
bfloat16(16.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
{bfloat16(4.0), bfloat16(5.0), bfloat16(6.0), bfloat16(7.0),
bfloat16(17.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
{bfloat16(8.0), bfloat16(9.0), bfloat16(10.0), bfloat16(11.0),
bfloat16(18.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
{bfloat16(12.0), bfloat16(13.0), bfloat16(16.0), bfloat16(15.0),
bfloat16(19.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
};
ComputeAndCompareR2<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8R2TailingZeroTransposeOutput) {
XlaBuilder builder(TestName());
std::vector<std::vector<uint8_t>> input = {
{0, 1, 2, 3, 16},
{4, 5, 6, 7, 17},
{8, 9, 10, 11, 18},
{12, 13, 16, 15, 19},
};
auto x = ConstantR2<uint32_t>(
&builder,
{{PackToUint32<uint8_t>(input[0])[0], PackToUint32<uint8_t>(input[0])[1]},
{PackToUint32<uint8_t>(input[1])[0], PackToUint32<uint8_t>(input[1])[1]},
{PackToUint32<uint8_t>(input[2])[0], PackToUint32<uint8_t>(input[2])[1]},
{PackToUint32<uint8_t>(input[3])[0],
PackToUint32<uint8_t>(input[3])[1]}});
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED", true);
const Array2D<bfloat16> expected = {
{bfloat16(0.0), bfloat16(4.0), bfloat16(8.0), bfloat16(12.0)},
{bfloat16(1.0), bfloat16(5.0), bfloat16(9.0), bfloat16(13.0)},
{bfloat16(2.0), bfloat16(6.0), bfloat16(10.0), bfloat16(16.0)},
{bfloat16(3.0), bfloat16(7.0), bfloat16(11.0), bfloat16(15.0)},
{bfloat16(16.0), bfloat16(17.0), bfloat16(18.0), bfloat16(19.0)},
{bfloat16(0.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(0.0), bfloat16(0.0), bfloat16(0.0)},
};
ComputeAndCompareR2<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8LargeSizeTest) {
XlaBuilder builder(TestName());
Array2D<uint8_t> input = GenerateLargeSizeInput<uint8_t>(500, 3547);
Array2D<uint32_t> input_packed = PackLargeInput<uint8_t>(input);
auto x = ConstantR2FromArray2D<uint32_t>(&builder, input_packed);
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED");
const Array2D<bfloat16> expected =
GenerateLargeSizeMinCombinedOutput<uint8_t>(input, range);
ComputeAndCompareR2<bfloat16>(&builder, expected, {});
}
XLA_TEST_F(DequantizeTest, MinCombinedUint8LargeSizeTestTransposeOutput) {
XlaBuilder builder(TestName());
Array2D<uint8_t> input = GenerateLargeSizeInput<uint8_t>(500, 3547);
Array2D<uint32_t> input_packed = PackLargeInput<uint8_t>(input);
auto x = ConstantR2FromArray2D<uint32_t>(&builder, input_packed);
QuantizedRange range(0, 255.0f);
xla::Dequantize<uint8_t>(x, range, "MIN_COMBINED", true);
const Array2D<bfloat16> expected =
GenerateLargeSizeMinCombinedOutput<uint8_t>(input, range,
true);
ComputeAndCompareR2<bfloat16>(&builder, expected, {});
}
}
} |
811 | cpp | tensorflow/tensorflow | lstm_utils | tensorflow/lite/toco/graph_transformations/lstm_utils.cc | tensorflow/lite/toco/graph_transformations/tests/lstm_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_UTILS_LSTM_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_UTILS_LSTM_UTILS_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
namespace mlir {
namespace TFL {
constexpr char kTFImplements[] = "tf._implements";
constexpr char kLstmCellSimple[] = "LSTMCellSimple";
constexpr char kLayerNormalizedLstmCellSimple[] =
"LayerNormalizedLstmCellSimple";
constexpr char kCoupleInputForgetGates[] = "CoupleInputForgetGates";
class ConvertLSTMCellSimpleToFusedLSTM {
public:
explicit ConvertLSTMCellSimpleToFusedLSTM(mlir::func::FuncOp fused_func_op)
: fused_func_op_(fused_func_op),
couple_input_forget_gates_(false),
builder_(fused_func_op.getBody()) {}
ConvertLSTMCellSimpleToFusedLSTM(const ConvertLSTMCellSimpleToFusedLSTM&) =
delete;
ConvertLSTMCellSimpleToFusedLSTM& operator=(
const ConvertLSTMCellSimpleToFusedLSTM&) = delete;
virtual ~ConvertLSTMCellSimpleToFusedLSTM() = default;
virtual llvm::StringRef GetCompositeOpName() { return kLstmCellSimple; }
LogicalResult RewriteFunc();
int GetNumInputs() { return n_input_; }
protected:
virtual LogicalResult InitializeFromFuncAttributes();
virtual LogicalResult Initialize();
void UpdateFuncSignature();
void GenerateFusedOpOperands();
void SetWeightForInputToCellGate();
void SetWeightForInputToInputGate();
void SetWeightForInputToForgetGate();
void SetWeightForInputToOutputGate();
void SetWeightForRecurrentToCellGate();
void SetWeightForRecurrentToInputGate();
void SetWeightForRecurrentToForgetGate();
void SetWeightForRecurrentToOutputGate();
void SetBiasToCellGate();
void SetBiasToInputGate();
void SetBiasToForgetGate();
void SetBiasToOutputGate();
void SetProjection();
void SetProjectionBias();
void SetInputActivationState();
void SetInputCellState();
virtual void SetCellLayerNormCoefficients();
virtual void SetInputLayerNormCoefficients();
virtual void SetForgetLayerNormCoefficients();
virtual void SetOutputLayerNormCoefficients();
func::FuncOp fused_func_op_;
Value input_;
Value weight_;
Value bias_;
Value projection_;
bool couple_input_forget_gates_;
Value weight_transposed_;
Value projection_transposed_;
RankedTensorType weight_type_;
RankedTensorType projection_type_;
int num_gates_;
int n_cell_;
int n_output_;
int n_input_;
int num_cols_weight_transposed_;
int num_cols_projection_transposed_;
Value input2input_;
Value input2forget_;
Value input2cell_;
Value input2output_;
Value rec2input_;
Value rec2forget_;
Value rec2cell_;
Value rec2output_;
Value bias2input_;
Value bias2forget_;
Value bias2cell_;
Value bias2output_;
Value proj_weight_;
Value proj_bias_;
Value input_activation_state_;
Value input_cell_state_;
Value input_layer_norm_coefficients_;
Value forget_layer_norm_coefficients_;
Value cell_layer_norm_coefficients_;
Value output_layer_norm_coefficients_;
mlir::TFL::LSTMOp lstm_;
Value none_;
SmallVector<int64_t, 1> bias_slice_shape_;
SmallVector<int64_t, 1> bias_size_values_;
SmallVector<int64_t, 2> weight_slice_shape_;
SmallVector<int64_t, 2> weight_slice_size_input_values_;
SmallVector<int64_t, 2> weight_slice_size_recurrent_values_;
OpBuilder builder_;
};
class ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM
: public ConvertLSTMCellSimpleToFusedLSTM {
public:
explicit ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM(
mlir::func::FuncOp fused_func_op)
: ConvertLSTMCellSimpleToFusedLSTM(fused_func_op) {}
ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM(
const ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM&) = delete;
ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM& operator=(
const ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM&) = delete;
~ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM() override = default;
llvm::StringRef GetCompositeOpName() override {
return kLayerNormalizedLstmCellSimple;
}
protected:
LogicalResult Initialize() override;
void SetCellLayerNormCoefficients() override;
void SetInputLayerNormCoefficients() override;
void SetForgetLayerNormCoefficients() override;
void SetOutputLayerNormCoefficients() override;
private:
Value layer_norm_scale_;
RankedTensorType layer_norm_scale_type_;
SmallVector<int64_t, 1> layer_norm_slice_shape_;
SmallVector<int64_t, 1> layer_norm_size_values_;
};
LogicalResult ConvertKerasLSTMLayer(mlir::func::FuncOp func_op,
OpBuilder* builder);
LogicalResult ConvertKerasLSTMLayer(mlir::func::FuncOp func_op,
OpBuilder* builder, bool indy);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include <algorithm>
#include <optional>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
namespace mlir {
namespace TFL {
namespace {
Value CreateI32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
int32_t val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, val);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateF32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
float val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto attr = DenseElementsAttr::get(type, val);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreatTfF32ConstOp(OpBuilder* builder, ArrayRef<int64_t> shape, float val,
mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto ele_type = RankedTensorType::get({1}, builder->getF32Type());
auto attr = DenseElementsAttr::get(ele_type, val);
return builder->create<TF::ConstOp>(location, type, attr);
}
Value CreateI64DenseConst(OpBuilder* builder, ArrayRef<int64_t> shape,
ArrayRef<int64_t> values, mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(shape.size()),
builder->getIntegerType(64));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateI32DenseConst(OpBuilder* builder, ArrayRef<int32_t> values,
mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(values.size()),
builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateNoneValue(OpBuilder* builder, mlir::Location location) {
return builder->create<TFL::NoValueOp>(location, builder->getNoneType(),
builder->getUnitAttr());
}
Value Transpose(OpBuilder* builder, Value value_to_transpose,
SmallVector<int32_t, 4> perm, RankedTensorType original_type,
mlir::Location location) {
auto perm_op = CreateI32DenseConst(builder, perm, location);
auto transpose_type = original_type;
auto transpose_shape =
llvm::to_vector<8>(llvm::map_range(perm, [transpose_type](int32_t dim) {
return transpose_type.getDimSize(dim);
}));
auto elem_type = transpose_type.getElementType();
auto result_type = RankedTensorType::get(transpose_shape, elem_type);
return builder->create<TF::TransposeOp>(location, result_type,
value_to_transpose, perm_op);
}
Value Transpose2D(OpBuilder* builder, Value value_to_transpose,
RankedTensorType type, mlir::Location location) {
SmallVector<int32_t, 4> perm = {1, 0};
return Transpose(builder, value_to_transpose, perm, type, location);
}
Value Reverse(OpBuilder* builder, Value value_to_reverse, int axis,
RankedTensorType type, mlir::Location location) {
auto axis_op = CreateI32SplatConst(builder, {1}, axis, location);
return builder->create<TF::ReverseV2Op>(location, type, value_to_reverse,
axis_op);
}
ArrayRef<int64_t> GetRankedTensorShape(Value value) {
return mlir::cast<RankedTensorType>(value.getType()).getShape();
}
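// Slices `input` with constant begin/size values via tf.Slice; if the
// requested slice falls outside the input bounds, a zero-filled constant of
// the slice shape is returned instead.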
Value SliceRankedTensor(OpBuilder* builder, Value input,
ArrayRef<int64_t> begin_shape,
ArrayRef<int64_t> begin_values,
ArrayRef<int64_t> size_shape,
ArrayRef<int64_t> size_values,
mlir::Location location) {
ArrayRef<int64_t> input_shape = GetRankedTensorShape(input);
for (int i = 0, end = input_shape.size(); i < end; i++) {
if (begin_values[i] < 0 ||
(begin_values[i] + size_values[i] > input_shape[i])) {
return CreateF32SplatConst(builder, size_shape, 0, location);
}
}
auto slice_i2c_begin =
CreateI64DenseConst(builder, begin_shape, begin_values, location);
auto slice_i2c_size =
CreateI64DenseConst(builder, size_shape, size_values, location);
return builder->create<TF::SliceOp>(
location,
RankedTensorType::get(
size_values,
mlir::cast<RankedTensorType>(input.getType()).getElementType()),
input, slice_i2c_begin, slice_i2c_size);
}
Value CreateStridedSliceOp(mlir::Location loc, ArrayRef<int64_t> output_shape,
Value input, ArrayRef<int32_t> begin,
ArrayRef<int32_t> end, ArrayRef<int32_t> strides,
int64_t begin_mask, int64_t end_mask,
int64_t ellipsis_mask, int64_t new_axis_mask,
int64_t shrink_axis_mask, OpBuilder* builder) {
auto output_type = RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input.getType()).getElementType());
auto begin_tensor = CreateI32DenseConst(builder, begin, loc);
auto end_tensor = CreateI32DenseConst(builder, end, loc);
auto strides_tensor = CreateI32DenseConst(builder, strides, loc);
return builder->create<TF::StridedSliceOp>(
loc, output_type, input, begin_tensor, end_tensor, strides_tensor,
builder->getI64IntegerAttr(begin_mask),
builder->getI64IntegerAttr(end_mask),
builder->getI64IntegerAttr(ellipsis_mask),
builder->getI64IntegerAttr(new_axis_mask),
builder->getI64IntegerAttr(shrink_axis_mask));
}
}
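// The composite op packs all gate weights into a single
// [n_input + n_output, num_gates * n_cell] matrix. After transposition, the
// helpers below carve out the per-gate input and recurrent sub-matrices in the
// order cell, input, forget, output (the input-gate slices become `none_` when
// the input and forget gates are coupled).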
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToCellGate() {
SmallVector<int64_t, 2> begin_i2c_values = {0, 0};
input2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2c_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToInputGate() {
SmallVector<int64_t, 2> begin_i2i_values = {n_cell_, 0};
input2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_i2i_values,
weight_slice_shape_,
weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToForgetGate() {
int input_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_i2f_values = {input_forget_start, 0};
input2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2f_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToOutputGate() {
int input_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_i2o_values = {input_output_start, 0};
input2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2o_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToCellGate() {
SmallVector<int64_t, 2> begin_rec2c_values = {0, n_input_};
rec2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2c_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToInputGate() {
SmallVector<int64_t, 2> begin_rec2i_values = {n_cell_, n_input_};
rec2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_rec2i_values,
weight_slice_shape_,
weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToForgetGate() {
int rec_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_rec2f_values = {rec_forget_start, n_input_};
rec2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2f_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToOutputGate() {
int rec_output_start = couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_rec2o_values = {rec_output_start, n_input_};
rec2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2o_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToCellGate() {
SmallVector<int64_t, 1> begin_bias2c_values = {0};
bias2cell_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2c_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToInputGate() {
SmallVector<int64_t, 1> begin_bias2i_values = {n_cell_};
bias2input_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2i_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToForgetGate() {
int bias_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 1> begin_bias2f_values = {bias_forget_start};
bias2forget_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2f_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToOutputGate() {
int bias_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 1> begin_bias2o_values = {bias_output_start};
bias2output_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2o_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjection() {
SmallVector<int64_t, 2> projection_slice_shape = {
1, num_cols_projection_transposed_};
SmallVector<int64_t, 2> projection_slice_size_values = {n_output_, n_cell_};
SmallVector<int64_t, 2> projection_slice_begin_values = {0, 0};
proj_weight_ =
!projection_
? none_
: SliceRankedTensor(
&builder_, projection_transposed_, projection_slice_shape,
projection_slice_begin_values, projection_slice_shape,
projection_slice_size_values, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjectionBias() {
proj_bias_ = !projection_type_
? none_
: CreateF32SplatConst(&builder_, {n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputActivationState() {
input_activation_state_ = CreateF32SplatConst(&builder_, {1, n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputCellState() {
input_cell_state_ =
CreateF32SplatConst(&builder_, {1, n_cell_}, 0, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetCellLayerNormCoefficients() {
cell_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputLayerNormCoefficients() {
input_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetForgetLayerNormCoefficients() {
forget_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetOutputLayerNormCoefficients() {
output_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::GenerateFusedOpOperands() {
weight_transposed_ =
Transpose2D(&builder_, weight_, weight_type_, fused_func_op_.getLoc());
projection_transposed_ = Transpose2D(&builder_, projection_, projection_type_,
fused_func_op_.getLoc());
none_ = CreateNoneValue(&builder_, fused_func_op_.getLoc());
SetWeightForInputToCellGate();
SetWeightForInputToInputGate();
SetWeightForInputToForgetGate();
SetWeightForInputToOutputGate();
SetWeightForRecurrentToCellGate();
SetWeightForRecurrentToInputGate();
SetWeightForRecurrentToForgetGate();
SetWeightForRecurrentToOutputGate();
SetBiasToCellGate();
SetBiasToInputGate();
SetBiasToForgetGate();
SetBiasToOutputGate();
SetProjection();
SetProjectionBias();
SetInputActivationState();
SetInputCellState();
SetCellLayerNormCoefficients();
SetInputLayerNormCoefficients();
SetForgetLayerNormCoefficients();
SetOutputLayerNormCoefficients();
}
void ConvertLSTMCellSimpleToFusedLSTM::UpdateFuncSignature() {
SmallVector<int64_t, 2> output_shape{1, tensorflow::kTFDynamicSize};
auto input_types = fused_func_op_.getFunctionType().getInputs();
auto output_type = tensorflow::GetTypeFromTFTensorShape(
output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
fused_func_op_.setType(mlir::FunctionType::get(fused_func_op_.getContext(),
input_types, output_type));
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::RewriteFunc() {
LogicalResult result = Initialize();
if (failed(result)) {
return result;
}
UpdateFuncSignature();
GenerateFusedOpOperands();
SmallVector<int64_t, 2> output_shape = {1, n_output_};
auto result_type = mlir::RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
lstm_ = builder_.create<mlir::TFL::LSTMOp>(
fused_func_op_.getLoc(), result_type, input_, input2input_, input2forget_,
input2cell_, input2output_, rec2input_, rec2forget_, rec2cell_,
      rec2output_, /*cell_to_input_weights=*/none_,
      /*cell_to_forget_weights=*/none_,
      /*cell_to_output_weights=*/none_, bias2input_, bias2forget_, bias2cell_,
bias2output_, proj_weight_, proj_bias_, input_activation_state_,
input_cell_state_, input_layer_norm_coefficients_,
forget_layer_norm_coefficients_, cell_layer_norm_coefficients_,
output_layer_norm_coefficients_, builder_.getStringAttr("TANH"),
builder_.getF32FloatAttr(10.0), builder_.getF32FloatAttr(0.0),
mlir::TFL::LSTMKernelTypeAttr::get(builder_.getContext(),
mlir::TFL::LSTMKernelType::FULL),
mlir::BoolAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr());
SmallVector<int64_t, 2> func_output_shape = {1, tensorflow::kTFDynamicSize};
auto func_result_type = tensorflow::GetTypeFromTFTensorShape(
func_output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
auto tensor_cast = builder_.create<mlir::tensor::CastOp>(
fused_func_op_.getLoc(), func_result_type, lstm_.getResult());
builder_.create<mlir::func::ReturnOp>(fused_func_op_.getLoc(),
tensor_cast.getResult());
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::InitializeFromFuncAttributes() {
auto attr = fused_func_op_->getAttrOfType<StringAttr>(kTFImplements);
if (!attr) {
return fused_func_op_.emitError()
<< "Invalid function attribute, expected " << kTFImplements
<< " attribute "
"not found";
}
llvm::SmallVector<llvm::StringRef, 4> attr_tokens;
attr.getValue().split(attr_tokens, ",");
if (attr_tokens.empty()) {
return fused_func_op_.emitError()
<< kTFImplements << " attribute should be set";
}
if (GetCompositeOpName().str() != attr_tokens[0]) {
return fused_func_op_.emitError()
<< "Unexpected interface for the composite op. Expected: "
<< GetCompositeOpName() << " Actual: " << attr_tokens[0];
}
couple_input_forget_gates_ =
std::find(attr_tokens.begin() + 1, attr_tokens.end(),
kCoupleInputForgetGates) != attr_tokens.end();
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(InitializeFromFuncAttributes())) {
return fused_func_op_.emitError()
<< "Expected function attributes were not set on the function "
"encapsulating the composite op";
}
num_gates_ = couple_input_forget_gates_ ? 3 : 4;
input_ = fused_func_op_.getArgument(0);
bias_ = fused_func_op_.getArgument(2);
weight_ = fused_func_op_.getArgument(1);
weight_type_ = mlir::cast<RankedTensorType>(weight_.getType());
if (weight_type_.getRank() != 2) {
return fused_func_op_.emitError() << "The weight tensor was not of rank 2";
}
if (weight_type_.getDimSize(1) % num_gates_ != 0) {
return fused_func_op_.emitError()
<< "Invalid dimension 1 of weight tensor, "
"should be divisible by the number of gates";
}
n_cell_ = weight_type_.getDimSize(1) / num_gates_;
projection_ = fused_func_op_.getArgument(3);
projection_type_ = mlir::cast<RankedTensorType>(projection_.getType());
if (projection_type_.getRank() != 2) {
n_output_ = n_cell_;
} else {
n_output_ = projection_type_.getDimSize(1);
}
n_input_ = weight_type_.getDimSize(0) - n_output_;
num_cols_weight_transposed_ = weight_type_.getDimSize(0);
num_cols_projection_transposed_ = projection_type_.getDimSize(0);
bias_slice_shape_ = {n_cell_};
bias_size_values_ = {n_cell_};
weight_slice_shape_ = {1, num_cols_weight_transposed_};
weight_slice_size_input_values_ = {n_cell_, n_input_};
weight_slice_size_recurrent_values_ = {n_cell_, n_output_};
return success();
}
LogicalResult ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(ConvertLSTMCellSimpleToFusedLSTM::Initialize())) {
return fused_func_op_.emitError()
<< "Specified LayerNormalizedLSTMCellSimple was not of the expected "
"interface and cannot not be converted to the fused LSTM op";
}
layer_norm_scale_ = fused_func_op_.getArgument(4);
layer_norm_scale_type_ =
mlir::cast<RankedTensorType>(layer_norm_scale_.getType());
if (layer_norm_scale_type_.getRank() != 1) {
return fused_func_op_.emitError()
<< "The layer_norm_scale tensor was not of rank 1";
}
layer_norm_slice_shape_ = {n_cell_};
layer_norm_size_values_ = {n_cell_};
return success();
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetCellLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_cell_layer_norm_values = {0};
cell_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_cell_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetInputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_input_layer_norm_values = {n_cell_};
input_layer_norm_coefficients_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(
&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_input_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetForgetLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_forget_layer_norm_values = {2 * n_cell_};
forget_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_forget_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetOutputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_output_layer_norm_values = {3 * n_cell_};
output_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_output_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
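// Builds a rank-1 int32 tf.Const op from `value`.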
TF::ConstOp Create1DConstantOp(const std::vector<int>& value, Location loc,
OpBuilder* builder) {
auto type =
mlir::RankedTensorType::get(value.size(), builder->getIntegerType(32));
auto dense_values = mlir::DenseIntElementsAttr::get(type, value);
  return builder->create<TF::ConstOp>(loc, dense_values);
} | #include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
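// Builds a stub composite-LSTM function with fixed shapes whose tf._implements
// attribute encodes the layer-normalization and CIFG (coupled input/forget)
// variants under test.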
func::FuncOp createLstmCompositeFunc(mlir::Builder* builder, bool ln,
bool cifg) {
SmallVector<int64_t, 2> input_shape{1, 2};
SmallVector<int64_t, 2> weight_shape{3, 12};
SmallVector<int64_t, 1> bias_shape{2};
SmallVector<int64_t, 2> projection_shape{1, 2};
SmallVector<int64_t, 1> layer_norm_scale{4};
SmallVector<int64_t, 2> output_shape{1, 2};
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto weight_type = RankedTensorType::get(weight_shape, builder->getF32Type());
auto bias_type = RankedTensorType::get(bias_shape, builder->getF32Type());
auto projection_type =
RankedTensorType::get(projection_shape, builder->getF32Type());
auto layer_norm_scale_type =
RankedTensorType::get(layer_norm_scale, builder->getF32Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type,
projection_type,
layer_norm_scale_type};
auto func_type = builder->getFunctionType(input_types, output_type);
auto func = func::FuncOp::create(
mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
func_type, {});
func.addEntryBlock();
std::vector<std::string> attributes;
if (ln) {
attributes.push_back(kLayerNormalizedLstmCellSimple);
} else {
attributes.push_back(kLstmCellSimple);
}
if (cifg) {
attributes.push_back(kCoupleInputForgetGates);
}
mlir::StringAttr attr_values =
builder->getStringAttr(llvm::join(attributes, ","));
func->setAttr(kTFImplements, attr_values);
return func;
}
class LstmUtilsTest : public ::testing::Test {
protected:
LstmUtilsTest() {}
void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
context_->loadDialect<arith::ArithDialect, mlir::func::FuncDialect,
tensor::TensorDialect, mlir::TF::TensorFlowDialect,
TensorFlowLiteDialect>();
builder_ = std::make_unique<mlir::Builder>(context_.get());
fused_lstm_func_ = createLstmCompositeFunc(builder_.get(), false, false);
fused_lstm_func_cifg_ =
createLstmCompositeFunc(builder_.get(), false, true);
fused_ln_lstm_func_ = createLstmCompositeFunc(builder_.get(), true, false);
}
void TearDown() override {
fused_lstm_func_.erase();
fused_lstm_func_cifg_.erase();
fused_ln_lstm_func_.erase();
builder_.reset();
}
func::FuncOp fused_lstm_func_;
func::FuncOp fused_lstm_func_cifg_;
func::FuncOp fused_ln_lstm_func_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
TEST_F(LstmUtilsTest, ConvertLSTMCellSimple) {
mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_lstm_func_.dump();
EXPECT_EQ(
fused_lstm_func_->getAttrOfType<StringAttr>(kTFImplements).getValue(),
convert.GetCompositeOpName());
EXPECT_EQ(fused_lstm_func_.getNumArguments(), 5);
EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1);
auto transpose_op = fused_lstm_func_.getBody().front().begin();
transpose_op++;
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getOperand(0).getType())
.getDimSize(0),
3);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getOperand(0).getType())
.getDimSize(1),
12);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getResult(0).getType())
.getDimSize(0),
12);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getResult(0).getType())
.getDimSize(1),
3);
auto it = fused_lstm_func_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(1).getType()));
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(20).getType()));
EXPECT_TRUE(mlir::cast<RankedTensorType>(it->getOperand(17).getType())
.getElementType()
.isF32());
EXPECT_TRUE(
mlir::cast<ElementsAttr>(mlir::cast<mlir::arith::ConstantOp>(
it->getOpOperand(15).get().getDefiningOp())
.getValue())
.getValues<FloatAttr>()[0]
.getValue()
.isExactlyValue(0.0f));
EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1);
auto output_types = fused_lstm_func_.getFunctionType().getResults();
SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic};
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(),
output_shape.size());
for (int i = 0; i < output_shape.size(); i++) {
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getDimSize(i),
output_shape[i]);
}
}
TEST_F(LstmUtilsTest, ConvertLSTMCellSimpleToFusedLSTMCoupleInputForget) {
mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_cifg_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_lstm_func_cifg_.dump();
llvm::SmallVector<std::string, 2> attributes{kLstmCellSimple,
kCoupleInputForgetGates};
EXPECT_EQ(fused_lstm_func_cifg_->getAttrOfType<StringAttr>(kTFImplements)
.getValue(),
llvm::join(attributes, ","));
auto it = fused_lstm_func_cifg_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(1).getType()));
}
TEST_F(LstmUtilsTest, ConvertLayerNormLSTMCellSimpleToFusedLSTM) {
mlir::TFL::ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM convert(
fused_ln_lstm_func_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_ln_lstm_func_.dump();
EXPECT_EQ(
fused_ln_lstm_func_->getAttrOfType<StringAttr>(kTFImplements).getValue(),
convert.GetCompositeOpName());
EXPECT_EQ(fused_ln_lstm_func_.getNumArguments(), 5);
EXPECT_EQ(fused_ln_lstm_func_.getFunctionType().getNumResults(), 1);
auto it = fused_ln_lstm_func_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(1).getType()));
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(20).getType()));
EXPECT_EQ(mlir::cast<RankedTensorType>(it->getOperand(20).getType())
.getShape()
.size(),
1);
EXPECT_EQ(
mlir::cast<RankedTensorType>(it->getOperand(20).getType()).getDimSize(0),
3);
EXPECT_EQ(fused_ln_lstm_func_.getFunctionType().getNumResults(), 1);
auto output_types = fused_ln_lstm_func_.getFunctionType().getResults();
SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic};
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(),
output_shape.size());
for (int i = 0; i < output_shape.size(); i++) {
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getDimSize(i),
output_shape[i]);
}
}
}
} |
812 | cpp | tensorflow/tensorflow | dequantize | tensorflow/lite/toco/graph_transformations/dequantize.cc | tensorflow/lite/kernels/dequantize_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_integer_ops {
template <typename T>
inline void Dequantize(const tflite::DequantizationParams& op_params,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, float* output_data) {
const int32 zero_point = op_params.zero_point;
const double scale = op_params.scale;
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
const int32 val = static_cast<int32>(input_data[i]);
const float result = static_cast<float>(scale * (val - zero_point));
output_data[i] = result;
}
}
}
}
#endif
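// Illustrative example (not part of the original file): with the quantization
// parameters used by the Int8 test below, scale = 0.5 and zero_point = -1, the
// reference kernel maps input -128 to 0.5 * (-128 - (-1)) = -63.5 and input
// 127 to 0.5 * (127 - (-1)) = 64.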
#include "tensorflow/lite/kernels/dequantize.h"
#include <stddef.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dequantize {
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
bool float_dequantized_weights_initialized;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->float_dequantized_weights_initialized = false;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteInt4 ||
op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
}
op_context.output->type = kTfLiteFloat32;
if (IsConstantTensor(op_context.input)) {
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
}
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
}
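// Eval dequantizes the input on every invocation, except for constant inputs
// (e.g. quantized weights): those are dequantized on the first call into the
// persistent output (allocated kTfLiteArenaRwPersistent in Prepare), and
// float_dequantized_weights_initialized short-circuits later calls.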
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
if (IsConstantTensor(op_context.input) &&
op_data->float_dequantized_weights_initialized) {
return kTfLiteOk;
}
auto status = DequantizeImpl<kernel_type>(context, node, op_context.input,
op_context.output);
if (status != kTfLiteOk) {
return status;
}
if (IsConstantTensor(op_context.input)) {
op_data->float_dequantized_weights_initialized = true;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DEQUANTIZE_OPT() {
static TfLiteRegistration r = {
dequantize::Init, dequantize::Free, dequantize::Prepare,
dequantize::Eval<dequantize::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_DEQUANTIZE_REF() {
static TfLiteRegistration r = {dequantize::Init, dequantize::Free,
dequantize::Prepare,
dequantize::Eval<dequantize::kReference>};
return &r;
}
TfLiteRegistration* Register_DEQUANTIZE() {
#ifdef USE_NEON
return Register_DEQUANTIZE_OPT();
#else
return Register_DEQUANTIZE_REF();
#endif
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "Eigen/Core"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEQUANTIZE();
}
}
namespace {
using ::testing::ElementsAreArray;
class DequantizeOpModel : public SingleOpModel {
public:
explicit DequantizeOpModel() {}
DequantizeOpModel(TensorType type, std::initializer_list<int> shape,
float scale, int32_t zero_point, int version) {
const TensorData input_tensor_data = {type, shape, 0, 0, scale, zero_point};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
template <typename T>
void SetInputInt4(int input, const std::vector<T> data) {
auto non_const = *const_cast<std::vector<T>*>(&data);
std::vector<int8_t> data_int8(non_const.size());
std::copy(non_const.begin(), non_const.end(), data_int8.begin());
PopulateTensor4bit(input, 0, data_int8.data(),
data_int8.data() + data_int8.size());
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
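// The last constructor argument selects the kernel version registered through
// SingleOpResolver; the tests below pair each input type with a matching
// version (uint8: 1, int8: 2, float16: 3, int16: 4, per-channel: 5, int4: 6).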
TEST(DequantizeOpTest, Int4) {
DequantizeOpModel m(TensorType_INT4, {2, 2}, 0.5, -1, 6);
m.SetInputInt4<int8_t>(0, {7, 6, -7, -8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({4, 3.5, -3, -3.5})));
}
TEST(DequantizeOpTest, Uint8) {
DequantizeOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Int8) {
DequantizeOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Float16) {
DequantizeOpModel m(TensorType_FLOAT16, {2, 3}, 1.0f, 0, 3);
std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
Eigen::half{-1.0f}, Eigen::half{0.f},
Eigen::half{1.0f}, Eigen::half{100.32f}};
m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f},
0.1f)));
}
TEST(DequantizeOpTest, Int16) {
DequantizeOpModel m(TensorType_INT16, {2, 5}, 0.5, 0, 4);
m.SetInput<int16_t>({-129, -126, -125, -124, -123, 124, 125, 126, 127, 131});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5})));
}
class DequantizePerChannelOpModel : public DequantizeOpModel {
public:
DequantizePerChannelOpModel(TensorType type, std::initializer_list<int> shape,
std::initializer_list<float> scales,
std::initializer_list<int64_t> zero_points,
int channel_dim, int version) {
std::vector<float> per_channel_scales(scales);
std::vector<int64_t> input_offsets(zero_points);
const TensorData input_tensor_data = {
type, shape, 0, 0, 0.0f, 0, true, per_channel_scales,
input_offsets, channel_dim};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
};
TEST(DequantizePerChannelOpTest, Uint8) {
DequantizePerChannelOpModel m(TensorType_UINT8, {2, 5}, {0.5, 0.5},
{127, 127}, 0, 5);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizePerChannelOpTest, Int8) {
DequantizePerChannelOpModel m(TensorType_INT8, {2, 5}, {0.5, 0.5}, {-1, -1},
0, 5);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
}
} |
813 | cpp | tensorflow/tensorflow | profile_summary_formatter | tensorflow/lite/profiling/profile_summary_formatter.cc | tensorflow/lite/profiling/profile_summary_formatter_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARY_FORMATTER_H_
#define TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARY_FORMATTER_H_
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <functional>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
namespace tflite {
namespace profiling {
class ProfileSummaryFormatter {
public:
ProfileSummaryFormatter() = default;
virtual ~ProfileSummaryFormatter() {}
virtual std::string GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const = 0;
virtual std::string GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const = 0;
virtual tensorflow::StatSummarizerOptions GetStatSummarizerOptions()
const = 0;
virtual void HandleOutput(const std::string& init_output,
const std::string& run_output,
std::string output_file_path) const = 0;
};
class ProfileSummaryDefaultFormatter : public ProfileSummaryFormatter {
public:
ProfileSummaryDefaultFormatter() = default;
~ProfileSummaryDefaultFormatter() override {}
std::string GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const override;
std::string GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const override;
tensorflow::StatSummarizerOptions GetStatSummarizerOptions() const override;
void HandleOutput(const std::string& init_output,
const std::string& run_output,
std::string output_file_path) const override;
private:
std::string GenerateReport(
const std::string& tag, bool include_output_string,
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const;
void WriteOutput(const std::string& header, const std::string& data,
std::ostream* stream) const {
(*stream) << header << std::endl;
(*stream) << data << std::endl;
}
};
class ProfileSummaryCSVFormatter : public ProfileSummaryDefaultFormatter {
public:
ProfileSummaryCSVFormatter() = default;
tensorflow::StatSummarizerOptions GetStatSummarizerOptions() const override;
};
class ProfileSummaryProtoFormatter : public ProfileSummaryFormatter {
public:
std::string GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const override;
std::string GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const override;
tensorflow::StatSummarizerOptions GetStatSummarizerOptions() const override;
void HandleOutput(const std::string& init_output,
const std::string& run_output,
std::string output_file_path) const override;
private:
std::string GenerateReport(
const std::string& tag, bool include_output_string,
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const;
void GenerateSubGraphProfilingData(
const tensorflow::StatsCalculator* stats_calculator, int subgraph_index,
const std::map<uint32_t, std::string>& subgraph_name_map,
SubGraphProfilingData* sub_graph_profiling_data) const;
void GenerateDelegateProfilingData(
const tensorflow::StatsCalculator* stats_calculator,
DelegateProfilingData* delegate_profiling_data) const;
void GenerateOpProfileDataFromDetail(
const tensorflow::StatsCalculator::Detail* detail,
const tensorflow::StatsCalculator* stats_calculator,
OpProfileData* op_profile_data) const;
std::vector<tensorflow::StatsCalculator::Detail> GetDetailsSortedByRunOrder(
const tensorflow::StatsCalculator* stats_calculator) const;
};
}
}
#endif
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include <fstream>
#include <iomanip>
#include <ios>
#include <map>
#include <memory>
#include <ostream>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace profiling {
std::string ProfileSummaryDefaultFormatter::GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
return GenerateReport("profile", true,
stats_calculator_map, delegate_stats_calculator,
subgraph_name_map);
}
std::string ProfileSummaryDefaultFormatter::GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
return GenerateReport("summary", false,
stats_calculator_map, delegate_stats_calculator,
subgraph_name_map);
}
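// GenerateReport emits one section per subgraph — labelled "Primary graph" /
// "Subgraph" only when more than the primary subgraph is present — optionally
// including the full per-op output string, followed by a "Delegate internal"
// section when the delegate stats calculator recorded any runs.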
std::string ProfileSummaryDefaultFormatter::GenerateReport(
const std::string& tag, bool include_output_string,
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
std::stringstream stream;
bool has_non_primary_graph =
(stats_calculator_map.size() - stats_calculator_map.count(0)) > 0;
for (const auto& stats_calc : stats_calculator_map) {
auto subgraph_index = stats_calc.first;
auto subgraph_stats = stats_calc.second.get();
std::string subgraph_name = "";
if (subgraph_name_map.find(subgraph_index) != subgraph_name_map.end()) {
subgraph_name = subgraph_name_map.at(subgraph_index);
}
if (has_non_primary_graph) {
if (subgraph_index == 0) {
stream << "Primary graph (name: " << subgraph_name << ") " << tag << ":"
<< std::endl;
} else {
stream << "Subgraph (index: " << subgraph_index
<< ", name: " << subgraph_name << ") " << tag << ":"
<< std::endl;
}
}
if (include_output_string) {
stream << subgraph_stats->GetOutputString();
}
if (subgraph_index != 0) {
stream << "Subgraph (index: " << subgraph_index
<< ", name: " << subgraph_name << ") ";
}
stream << subgraph_stats->GetShortSummary() << std::endl;
}
if (delegate_stats_calculator.num_runs() > 0) {
stream << "Delegate internal: " << std::endl;
if (include_output_string) {
stream << delegate_stats_calculator.GetOutputString();
}
stream << delegate_stats_calculator.GetShortSummary() << std::endl;
}
return stream.str();
}
void ProfileSummaryDefaultFormatter::HandleOutput(
const std::string& init_output, const std::string& run_output,
std::string output_file_path) const {
std::ofstream output_file(output_file_path);
std::ostream* output_stream = nullptr;
if (output_file.good()) {
output_stream = &output_file;
}
if (!init_output.empty()) {
WriteOutput("Profiling Info for Benchmark Initialization:", init_output,
output_stream == nullptr ? &TFLITE_LOG(INFO) : output_stream);
}
if (!run_output.empty()) {
WriteOutput(
"Operator-wise Profiling Info for Regular Benchmark Runs:", run_output,
output_stream == nullptr ? &TFLITE_LOG(INFO) : output_stream);
}
}
tensorflow::StatSummarizerOptions
ProfileSummaryDefaultFormatter::GetStatSummarizerOptions() const {
auto options = tensorflow::StatSummarizerOptions();
options.show_summary = false;
options.show_memory = false;
return options;
}
tensorflow::StatSummarizerOptions
ProfileSummaryCSVFormatter::GetStatSummarizerOptions() const {
auto options = ProfileSummaryDefaultFormatter::GetStatSummarizerOptions();
options.format_as_csv = true;
return options;
}
std::vector<tensorflow::StatsCalculator::Detail>
ProfileSummaryProtoFormatter::GetDetailsSortedByRunOrder(
const tensorflow::StatsCalculator* stats_calculator) const {
std::vector<tensorflow::StatsCalculator::Detail> details;
std::map<std::string, tensorflow::StatsCalculator::Detail> unsorted_details =
stats_calculator->GetDetails();
std::priority_queue<
std::pair<std::string, const tensorflow::StatsCalculator::Detail*>>
sorted_list;
const int num_nodes = unsorted_details.size();
for (const auto& det : unsorted_details) {
const tensorflow::StatsCalculator::Detail* detail = &(det.second);
std::stringstream stream_for_sort;
stream_for_sort << std::setw(20) << std::right << std::setprecision(10)
<< std::fixed;
stream_for_sort << num_nodes - detail->run_order;
sorted_list.emplace(stream_for_sort.str(), detail);
}
while (!sorted_list.empty()) {
auto entry = sorted_list.top();
sorted_list.pop();
details.push_back(*entry.second);
}
return details;
}
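// Copies a single StatsCalculator::Detail into an OpProfileData proto: elapsed
// times stay in microseconds, memory stats are converted from bytes to KB
// (variance to KB^2), and times_called is normalized by the number of runs.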
void ProfileSummaryProtoFormatter::GenerateOpProfileDataFromDetail(
const tensorflow::StatsCalculator::Detail* detail,
const tensorflow::StatsCalculator* stats_calculator,
OpProfileData* const op_profile_data) const {
if (detail == nullptr) {
return;
}
op_profile_data->set_node_type(detail->type);
OpProfilingStat* inference_stat =
op_profile_data->mutable_inference_microseconds();
inference_stat->set_first(detail->elapsed_time.first());
inference_stat->set_last(detail->elapsed_time.newest());
inference_stat->set_avg(detail->elapsed_time.avg());
inference_stat->set_stddev(detail->elapsed_time.std_deviation());
inference_stat->set_variance(detail->elapsed_time.variance());
inference_stat->set_min(detail->elapsed_time.min());
inference_stat->set_max(detail->elapsed_time.max());
inference_stat->set_sum(detail->elapsed_time.sum());
inference_stat->set_count(detail->elapsed_time.count());
OpProfilingStat* memory_stat = op_profile_data->mutable_mem_kb();
memory_stat->set_first(detail->mem_used.first() / 1000.0);
memory_stat->set_last(detail->mem_used.newest() / 1000.0);
memory_stat->set_avg(detail->mem_used.avg() / 1000.0);
memory_stat->set_stddev(detail->mem_used.std_deviation() / 1000.0);
memory_stat->set_variance(detail->mem_used.variance() / 1000000.0);
memory_stat->set_min(detail->mem_used.min() / 1000.0);
memory_stat->set_max(detail->mem_used.max() / 1000.0);
memory_stat->set_sum(detail->mem_used.sum() / 1000.0);
memory_stat->set_count(detail->mem_used.count());
op_profile_data->set_times_called(detail->times_called /
stats_calculator->num_runs());
op_profile_data->set_name(detail->name);
op_profile_data->set_run_order(detail->run_order);
}
void ProfileSummaryProtoFormatter::GenerateSubGraphProfilingData(
const tensorflow::StatsCalculator* stats_calculator, int subgraph_index,
const std::map<uint32_t, std::string>& subgraph_name_map,
SubGraphProfilingData* const sub_graph_profiling_data) const {
sub_graph_profiling_data->set_subgraph_index(subgraph_index);
std::string subgraph_name = "";
if (subgraph_name_map.find(subgraph_index) != subgraph_name_map.end()) {
subgraph_name = subgraph_name_map.at(subgraph_index);
}
sub_graph_profiling_data->set_subgraph_name(subgraph_name);
for (tensorflow::StatsCalculator::Detail& detail :
GetDetailsSortedByRunOrder(stats_calculator)) {
OpProfileData* const op_profile_data =
sub_graph_profiling_data->add_per_op_profiles();
GenerateOpProfileDataFromDetail(&detail, stats_calculator, op_profile_data);
}
}
void ProfileSummaryProtoFormatter::GenerateDelegateProfilingData(
const tensorflow::StatsCalculator* stats_calculator,
DelegateProfilingData* const delegate_profiling_data) const {
for (const tensorflow::StatsCalculator::Detail& detail :
GetDetailsSortedByRunOrder(stats_calculator)) {
OpProfileData* const op_profile_data =
delegate_profiling_data->add_per_op_profiles();
GenerateOpProfileDataFromDetail(&detail, stats_calculator, op_profile_data);
}
}
std::string ProfileSummaryProtoFormatter::GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
TFLITE_LOG(ERROR) << "GetShortSummary is not supported for proto formatter.";
return "";
}
std::string ProfileSummaryProtoFormatter::GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
ModelProfilingData model_profiling_data;
for (const auto& stats_calc : stats_calculator_map) {
auto subgraph_index = stats_calc.first;
tensorflow::StatsCalculator* subgraph_stats = stats_calc.second.get();
SubGraphProfilingData* const sub_graph_profiling_data =
model_profiling_data.add_subgraph_profiles();
GenerateSubGraphProfilingData(subgraph_stats, subgraph_index,
subgraph_name_map, sub_graph_profiling_data);
}
if (delegate_stats_calculator.num_runs() > 0) {
DelegateProfilingData* const delegate_profiling_data =
model_profiling_data.add_delegate_profiles();
GenerateDelegateProfilingData(&delegate_stats_calculator,
delegate_profiling_data);
}
return model_profiling_data.SerializeAsString();
}
tensorflow::StatSummarizerOptions
ProfileSummaryProtoFormatter::GetStatSummarizerOptions() const {
auto options = tensorflow::StatSummarizerOptions();
options.show_summary = false;
options.show_memory = false;
return options;
}
void ProfileSummaryProtoFormatter::HandleOutput(
const std::string& init_output, const std::string& run_output,
std::string output_file_path) const {
std::ofstream output_file(output_file_path, std::ios_base::binary);
std::ostream* output_stream = nullptr;
if (output_file.good()) {
output_stream = &output_file;
}
BenchmarkProfilingData benchmark_profiling_data;
if (!init_output.empty()) {
benchmark_profiling_data.mutable_init_profile()->ParseFromString(
init_output);
}
if (!run_output.empty()) {
benchmark_profiling_data.mutable_runtime_profile()->ParseFromString(
run_output);
}
if (output_stream == nullptr) {
TFLITE_LOG(INFO) << benchmark_profiling_data.DebugString();
} else {
benchmark_profiling_data.SerializeToOstream(output_stream);
}
}
}
} | #include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include <fstream>
#include <ios>
#include <map>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
namespace tflite {
namespace profiling {
namespace {
TEST(SummaryWriterTest, SummaryOptionStdOut) {
ProfileSummaryDefaultFormatter writer;
tensorflow::StatSummarizerOptions options = writer.GetStatSummarizerOptions();
EXPECT_EQ(options.show_summary, false);
EXPECT_EQ(options.show_memory, false);
EXPECT_EQ(options.format_as_csv, false);
}
TEST(SummaryWriterTest, SummaryOptionCSV) {
ProfileSummaryCSVFormatter writer;
tensorflow::StatSummarizerOptions options = writer.GetStatSummarizerOptions();
EXPECT_EQ(options.show_summary, false);
EXPECT_EQ(options.show_memory, false);
EXPECT_EQ(options.format_as_csv, true);
}
TEST(SummaryWriterTest, EmptyOutputString) {
ProfileSummaryDefaultFormatter writer;
std::string output = writer.GetOutputString(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
EXPECT_EQ(output.size(), 0);
}
TEST(SummaryWriterTest, EmptyShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::string output = writer.GetShortSummary(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
EXPECT_EQ(output.size(), 0);
}
TEST(SummaryWriterTest, SingleSubgraphOutputString) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
ASSERT_TRUE(absl::StrContains(output, "Run Order"));
ASSERT_TRUE(absl::StrContains(output, "Top by Computation Time"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Memory Use"));
ASSERT_TRUE(absl::StrContains(output, "Summary by node type"));
ASSERT_TRUE(absl::StrContains(output, "nodes observed"));
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, SingleSubgraphShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetShortSummary(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}});
ASSERT_TRUE(!absl::StrContains(output, "Run Order"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Computation Time"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Memory Use"));
ASSERT_TRUE(!absl::StrContains(output, "Summary by node type"));
ASSERT_TRUE(absl::StrContains(output, "nodes observed"));
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, MultiSubgraphOutputString) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, MultiSubgraphOutputStringForProto) {
ProfileSummaryProtoFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string kernel_name_1 = "Kernel 1";
std::string kernel_name_2 = "Kernel 2";
std::string kernel_name_3 = "Kernel 3";
std::string op_name_1 = "Convolution";
std::string op_name_2 = "Reshape";
std::string op_name_3 = "Convolution";
stats_calculator_map[0]->AddNodeStats(kernel_name_1, op_name_1, 1, 10, 10000);
stats_calculator_map[0]->AddNodeStats(kernel_name_1, op_name_1, 1, 20, 20000);
stats_calculator_map[0]->AddNodeStats(kernel_name_2, op_name_2, 2, 15, 10000);
stats_calculator_map[0]->UpdateRunTotalUs(25);
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1]->AddNodeStats(kernel_name_3, op_name_3, 3, 10, 10000);
stats_calculator_map[1]->UpdateRunTotalUs(10);
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ModelProfilingData model_profiling_data;
model_profiling_data.ParseFromString(output);
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
ASSERT_EQ(model_profiling_data.subgraph_profiles().size(), 2);
ASSERT_EQ(model_profiling_data.subgraph_profiles(0).subgraph_name(),
"Primary graph");
ASSERT_EQ(model_profiling_data.subgraph_profiles(0).per_op_profiles().size(),
2);
OpProfileData op_profile_data_1;
op_profile_data_1.set_node_type(op_name_1);
OpProfilingStat* inference_microseconds_stat_1 =
op_profile_data_1.mutable_inference_microseconds();
inference_microseconds_stat_1->set_first(10);
inference_microseconds_stat_1->set_last(20);
inference_microseconds_stat_1->set_max(20);
inference_microseconds_stat_1->set_min(10);
inference_microseconds_stat_1->set_avg(15);
inference_microseconds_stat_1->set_stddev(5);
inference_microseconds_stat_1->set_variance(25);
inference_microseconds_stat_1->set_sum(30);
inference_microseconds_stat_1->set_count(2);
OpProfilingStat* memory_stat_1 = op_profile_data_1.mutable_mem_kb();
memory_stat_1->set_first(10);
memory_stat_1->set_last(20);
memory_stat_1->set_max(20);
memory_stat_1->set_min(10);
memory_stat_1->set_avg(15);
memory_stat_1->set_stddev(5);
memory_stat_1->set_variance(25);
memory_stat_1->set_sum(30);
memory_stat_1->set_count(2);
op_profile_data_1.set_name(kernel_name_1);
op_profile_data_1.set_run_order(1);
op_profile_data_1.set_times_called(2);
EXPECT_THAT(model_profiling_data.subgraph_profiles(0).per_op_profiles(0),
testing::EqualsProto(op_profile_data_1));
OpProfileData op_profile_data_2;
op_profile_data_2.set_node_type(op_name_2);
OpProfilingStat* inference_microseconds_stat_2 =
op_profile_data_2.mutable_inference_microseconds();
inference_microseconds_stat_2->set_first(15);
inference_microseconds_stat_2->set_last(15);
inference_microseconds_stat_2->set_max(15);
inference_microseconds_stat_2->set_min(15);
inference_microseconds_stat_2->set_avg(15);
inference_microseconds_stat_2->set_stddev(0);
inference_microseconds_stat_2->set_variance(0);
inference_microseconds_stat_2->set_sum(15);
inference_microseconds_stat_2->set_count(1);
OpProfilingStat* memory_stat_2 = op_profile_data_2.mutable_mem_kb();
memory_stat_2->set_first(10);
memory_stat_2->set_last(10);
memory_stat_2->set_max(10);
memory_stat_2->set_min(10);
memory_stat_2->set_avg(10);
memory_stat_2->set_stddev(0);
memory_stat_2->set_variance(0);
memory_stat_2->set_sum(10);
memory_stat_2->set_count(1);
op_profile_data_2.set_times_called(1);
op_profile_data_2.set_name(kernel_name_2);
op_profile_data_2.set_run_order(2);
EXPECT_THAT(model_profiling_data.subgraph_profiles(0).per_op_profiles(1),
testing::EqualsProto(op_profile_data_2));
ASSERT_EQ(model_profiling_data.subgraph_profiles(1).subgraph_name(),
"Subgraph 1");
ASSERT_EQ(model_profiling_data.subgraph_profiles(1).per_op_profiles().size(),
1);
OpProfileData op_profile_data_3;
op_profile_data_3.set_node_type(op_name_3);
OpProfilingStat* inference_microseconds_stat_3 =
op_profile_data_3.mutable_inference_microseconds();
inference_microseconds_stat_3->set_first(10);
inference_microseconds_stat_3->set_last(10);
inference_microseconds_stat_3->set_max(10);
inference_microseconds_stat_3->set_min(10);
inference_microseconds_stat_3->set_avg(10);
inference_microseconds_stat_3->set_stddev(0);
inference_microseconds_stat_3->set_variance(0);
inference_microseconds_stat_3->set_sum(10);
inference_microseconds_stat_3->set_count(1);
OpProfilingStat* memory_stat_3 = op_profile_data_3.mutable_mem_kb();
memory_stat_3->set_first(10);
memory_stat_3->set_last(10);
memory_stat_3->set_max(10);
memory_stat_3->set_min(10);
memory_stat_3->set_avg(10);
memory_stat_3->set_stddev(0);
memory_stat_3->set_variance(0);
memory_stat_3->set_sum(10);
memory_stat_3->set_count(1);
op_profile_data_3.set_times_called(1);
op_profile_data_3.set_name(kernel_name_3);
op_profile_data_3.set_run_order(3);
EXPECT_THAT(model_profiling_data.subgraph_profiles(1).per_op_profiles(0),
testing::EqualsProto(op_profile_data_3));
}
TEST(SummaryWriterTest, MultiSubgraphHandleOutputForProto) {
ProfileSummaryProtoFormatter writer;
ModelProfilingData model_profiling_data_run;
SubGraphProfilingData* subgraph_profiling_data =
model_profiling_data_run.add_subgraph_profiles();
subgraph_profiling_data->set_subgraph_name("Primary graph");
OpProfileData* op_profile_data_1 =
subgraph_profiling_data->add_per_op_profiles();
op_profile_data_1->set_node_type("Convolution");
OpProfilingStat* inference_stat_1 =
op_profile_data_1->mutable_inference_microseconds();
inference_stat_1->set_first(10);
inference_stat_1->set_avg(10);
OpProfilingStat* mem_stat_1 = op_profile_data_1->mutable_mem_kb();
mem_stat_1->set_first(10);
mem_stat_1->set_avg(10);
op_profile_data_1->set_times_called(1);
op_profile_data_1->set_name("Kernel 1");
op_profile_data_1->set_run_order(1);
OpProfileData* op_profile_data_2 =
subgraph_profiling_data->add_per_op_profiles();
op_profile_data_2->set_node_type("Reshape");
OpProfilingStat* inference_stat_2 =
op_profile_data_2->mutable_inference_microseconds();
inference_stat_2->set_first(15);
inference_stat_2->set_avg(15);
OpProfilingStat* mem_stat_2 = op_profile_data_2->mutable_mem_kb();
mem_stat_2->set_first(10);
mem_stat_2->set_avg(10);
op_profile_data_2->set_times_called(1);
op_profile_data_2->set_name("Kernel 2");
op_profile_data_2->set_run_order(2);
SubGraphProfilingData* subgraph_profiling_data_1 =
model_profiling_data_run.add_subgraph_profiles();
subgraph_profiling_data_1->set_subgraph_name("Subgraph 1");
OpProfileData* op_profile_data_3 =
subgraph_profiling_data_1->add_per_op_profiles();
op_profile_data_3->set_node_type("Convolution");
OpProfilingStat* inference_stat_3 =
op_profile_data_3->mutable_inference_microseconds();
inference_stat_3->set_first(10);
inference_stat_3->set_avg(10);
OpProfilingStat* mem_stat_3 = op_profile_data_3->mutable_mem_kb();
mem_stat_3->set_first(10);
mem_stat_3->set_avg(10);
op_profile_data_3->set_times_called(1);
op_profile_data_3->set_name("Kernel 3");
op_profile_data_3->set_run_order(3);
DelegateProfilingData* delegate_profiling_data =
model_profiling_data_run.add_delegate_profiles();
OpProfileData* op_profile_data_4 =
delegate_profiling_data->add_per_op_profiles();
op_profile_data_4->set_node_type("Convolution");
OpProfilingStat* inference_stat_4 =
op_profile_data_4->mutable_inference_microseconds();
inference_stat_4->set_first(10);
inference_stat_4->set_avg(10);
OpProfilingStat* mem_stat_4 = op_profile_data_4->mutable_mem_kb();
mem_stat_4->set_first(10);
mem_stat_4->set_avg(10);
op_profile_data_4->set_times_called(1);
op_profile_data_4->set_name("Kernel 4");
op_profile_data_4->set_run_order(4);
ModelProfilingData model_profiling_data_init;
SubGraphProfilingData* subgraph_profiling_data_init =
model_profiling_data_init.add_subgraph_profiles();
subgraph_profiling_data_init->set_subgraph_name("Primary graph");
OpProfileData* op_profile_data_init_1 =
subgraph_profiling_data_init->add_per_op_profiles();
op_profile_data_init_1->set_node_type("Convolution");
OpProfilingStat* inference_stat_init_1 =
op_profile_data_init_1->mutable_inference_microseconds();
inference_stat_init_1->set_first(10);
inference_stat_init_1->set_avg(10);
op_profile_data_init_1->set_times_called(1);
OpProfilingStat* mem_stat_init_1 = op_profile_data_init_1->mutable_mem_kb();
mem_stat_init_1->set_first(10);
mem_stat_init_1->set_avg(10);
op_profile_data_init_1->set_name("ModifyGraphWithDelegate");
op_profile_data_init_1->set_run_order(1);
#ifdef __ANDROID__
std::string file_name = "/data/local/tmp/test_file.proto";
#else
std::string file_name = "/tmp/test_file.proto";
#endif
writer.HandleOutput(model_profiling_data_init.SerializeAsString(),
model_profiling_data_run.SerializeAsString(), file_name);
std::ifstream file(file_name, std::ios::binary);
ASSERT_TRUE(file.good());
BenchmarkProfilingData benchmark_profiling_data;
benchmark_profiling_data.ParseFromIstream(&file);
file.close();
ASSERT_TRUE(benchmark_profiling_data.model_name().empty());
EXPECT_THAT(benchmark_profiling_data.init_profile(),
testing::EqualsProto(model_profiling_data_init));
EXPECT_THAT(benchmark_profiling_data.runtime_profile(),
testing::EqualsProto(model_profiling_data_run));
}
TEST(SummaryWriterTest, MultiSubgraphShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetShortSummary(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, DelegationOutputString) {
ProfileSummaryDefaultFormatter writer;
auto delegate_stats_calculator =
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions());
delegate_stats_calculator.UpdateRunTotalUs(1);
std::string output = writer.GetOutputString(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
delegate_stats_calculator, {});
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, DelegationShortSummary) {
ProfileSummaryDefaultFormatter writer;
auto delegate_stats_calculator =
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions());
delegate_stats_calculator.UpdateRunTotalUs(1);
std::string output = writer.GetShortSummary(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
delegate_stats_calculator, {});
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(absl::StrContains(output, "Delegate internal"));
}
}
}
} |
814 | cpp | tensorflow/tensorflow | atrace_profiler | tensorflow/lite/profiling/atrace_profiler.cc | tensorflow/lite/profiling/atrace_profiler_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_ATRACE_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_ATRACE_PROFILER_H_
#include <memory>
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite {
namespace profiling {
std::unique_ptr<tflite::Profiler> MaybeCreateATraceProfiler();
}
}
#endif
#include "tensorflow/lite/profiling/atrace_profiler.h"
#include <dlfcn.h>
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <string>
#include <type_traits>
namespace tflite {
namespace profiling {
class ATraceProfiler : public tflite::Profiler {
public:
using FpIsEnabled = std::add_pointer<bool()>::type;
using FpBeginSection = std::add_pointer<void(const char*)>::type;
using FpEndSection = std::add_pointer<void()>::type;
ATraceProfiler() {
handle_ = dlopen("libandroid.so", RTLD_NOW | RTLD_LOCAL);
if (handle_) {
atrace_is_enabled_ =
reinterpret_cast<FpIsEnabled>(dlsym(handle_, "ATrace_isEnabled"));
atrace_begin_section_ = reinterpret_cast<FpBeginSection>(
dlsym(handle_, "ATrace_beginSection"));
atrace_end_section_ =
reinterpret_cast<FpEndSection>(dlsym(handle_, "ATrace_endSection"));
if (!atrace_is_enabled_ || !atrace_begin_section_ ||
!atrace_end_section_) {
dlclose(handle_);
handle_ = nullptr;
}
}
}
~ATraceProfiler() override {
if (handle_) {
dlclose(handle_);
}
}
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
if (handle_ && atrace_is_enabled_()) {
std::string trace_event_tag = tag;
trace_event_tag += "@";
trace_event_tag += std::to_string(event_metadata1) + "/" +
std::to_string(event_metadata2);
atrace_begin_section_(trace_event_tag.c_str());
}
return 0;
}
void EndEvent(uint32_t event_handle) override {
if (handle_) {
atrace_end_section_();
}
}
private:
void* handle_;
FpIsEnabled atrace_is_enabled_;
FpBeginSection atrace_begin_section_;
FpEndSection atrace_end_section_;
};
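// When built with TFLITE_ENABLE_DEFAULT_PROFILER this always returns an
// ATraceProfiler; otherwise it does so only on Android when the system
// property debug.tflite.trace is "1" (presumably settable with
// `adb shell setprop debug.tflite.trace 1`), and returns nullptr elsewhere.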
std::unique_ptr<tflite::Profiler> MaybeCreateATraceProfiler() {
#if defined(TFLITE_ENABLE_DEFAULT_PROFILER)
return std::unique_ptr<tflite::Profiler>(new ATraceProfiler());
#else
#if defined(__ANDROID__)
constexpr char kTraceProp[] = "debug.tflite.trace";
char trace_enabled[PROP_VALUE_MAX] = "";
int length = __system_property_get(kTraceProp, trace_enabled);
if (length == 1 && trace_enabled[0] == '1') {
return std::unique_ptr<tflite::Profiler>(new ATraceProfiler());
}
#endif
return nullptr;
#endif
}
}
} | #include "tensorflow/lite/profiling/atrace_profiler.h"
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace {
TEST(ATraceProfilerTest, MaybeCreateATraceProfiler) {
auto initial_state_profiler = MaybeCreateATraceProfiler();
#if !defined(TFLITE_ENABLE_DEFAULT_PROFILER)
EXPECT_EQ(nullptr, initial_state_profiler.get());
#else
EXPECT_NE(nullptr, initial_state_profiler.get());
#endif
#if defined(__ANDROID__)
if (__system_property_set("debug.tflite.trace", "1") == 0) {
auto on_state_profiler = MaybeCreateATraceProfiler();
EXPECT_NE(nullptr, on_state_profiler.get());
}
if (__system_property_set("debug.tflite.trace", "0") == 0) {
auto off_state_profiler = MaybeCreateATraceProfiler();
#if !defined(TFLITE_ENABLE_DEFAULT_PROFILER)
EXPECT_EQ(nullptr, off_state_profiler.get());
#else
EXPECT_NE(nullptr, off_state_profiler.get());
#endif
}
#endif
}
}
}
} |
815 | cpp | tensorflow/tensorflow | memory_info | tensorflow/lite/profiling/memory_info.cc | tensorflow/lite/profiling/memory_info_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_MEMORY_INFO_H_
#define TENSORFLOW_LITE_PROFILING_MEMORY_INFO_H_
#include <cstdint>
#include <sstream>
namespace tflite {
namespace profiling {
namespace memory {
struct MemoryUsage {
static const size_t kValueNotSet;
static bool IsSupported();
MemoryUsage()
: mem_footprint_kb(kValueNotSet),
total_allocated_bytes(kValueNotSet),
in_use_allocated_bytes(kValueNotSet) {}
int64_t mem_footprint_kb;
size_t total_allocated_bytes;
size_t in_use_allocated_bytes;
MemoryUsage operator+(MemoryUsage const& obj) const {
MemoryUsage res;
res.mem_footprint_kb = mem_footprint_kb + obj.mem_footprint_kb;
res.total_allocated_bytes =
total_allocated_bytes + obj.total_allocated_bytes;
res.in_use_allocated_bytes =
in_use_allocated_bytes + obj.in_use_allocated_bytes;
return res;
}
MemoryUsage operator-(MemoryUsage const& obj) const {
MemoryUsage res;
res.mem_footprint_kb = mem_footprint_kb - obj.mem_footprint_kb;
res.total_allocated_bytes =
total_allocated_bytes - obj.total_allocated_bytes;
res.in_use_allocated_bytes =
in_use_allocated_bytes - obj.in_use_allocated_bytes;
return res;
}
void AllStatsToStream(std::ostream* stream) const;
friend std::ostream& operator<<(std::ostream& stream,
const MemoryUsage& obj) {
obj.AllStatsToStream(&stream);
return stream;
}
};
MemoryUsage GetMemoryUsage();
}
}
}
#endif
#include "tensorflow/lite/profiling/memory_info.h"
#ifdef __linux__
#include <malloc.h>
#include <sys/resource.h>
#include <sys/time.h>
#elif defined(__APPLE__)
#include <mach/mach.h>
#include <malloc/malloc.h>
#endif
namespace tflite {
namespace profiling {
namespace memory {
const size_t MemoryUsage::kValueNotSet = 0;
bool MemoryUsage::IsSupported() {
#if defined(__linux__) || defined(__APPLE__)
return true;
#endif
return false;
}
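// On Linux the footprint comes from getrusage() max RSS (already in KB) and
// the heap figures from mallinfo()/mallinfo2(); on Apple platforms task_info()
// supplies phys_footprint and mstats() the malloc totals. Fields keep
// kValueNotSet on other platforms.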
MemoryUsage GetMemoryUsage() {
MemoryUsage result;
#ifdef __linux__
rusage res;
if (getrusage(RUSAGE_SELF, &res) == 0) {
result.mem_footprint_kb = res.ru_maxrss;
}
#if defined(__NO_MALLINFO__)
result.total_allocated_bytes = -1;
result.in_use_allocated_bytes = -1;
#elif defined(__GLIBC__) && __GLIBC_MINOR__ >= 33
const auto mem = mallinfo2();
result.total_allocated_bytes = mem.arena;
result.in_use_allocated_bytes = mem.uordblks;
#else
const auto mem = mallinfo();
result.total_allocated_bytes = mem.arena;
result.in_use_allocated_bytes = mem.uordblks;
#endif
#elif defined(__APPLE__)
struct task_vm_info vm_info;
mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
auto status = task_info(mach_task_self(), TASK_VM_INFO,
reinterpret_cast<task_info_t>(&vm_info), &count);
if (status == KERN_SUCCESS) {
result.mem_footprint_kb =
static_cast<int64_t>(vm_info.phys_footprint / 1024.0);
}
struct mstats stats = mstats();
result.total_allocated_bytes = stats.bytes_total;
result.in_use_allocated_bytes = stats.bytes_used;
#endif
return result;
}
void MemoryUsage::AllStatsToStream(std::ostream* stream) const {
*stream << "max resident set size/physical footprint = "
<< mem_footprint_kb / 1024.0 << " MB, total malloc-ed size = "
<< total_allocated_bytes / 1024.0 / 1024.0
<< " MB, in-use allocated/mmapped size = "
<< in_use_allocated_bytes / 1024.0 / 1024.0 << " MB";
}
}
}
} | #include "tensorflow/lite/profiling/memory_info.h"
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace memory {
TEST(MemoryUsage, AddAndSub) {
MemoryUsage mem1, mem2;
mem1.mem_footprint_kb = 5;
mem1.total_allocated_bytes = 7000;
mem1.in_use_allocated_bytes = 2000;
mem2.mem_footprint_kb = 3;
mem2.total_allocated_bytes = 7000;
mem2.in_use_allocated_bytes = 4000;
const auto add_mem = mem1 + mem2;
EXPECT_EQ(8, add_mem.mem_footprint_kb);
EXPECT_EQ(14000, add_mem.total_allocated_bytes);
EXPECT_EQ(6000, add_mem.in_use_allocated_bytes);
const auto sub_mem = mem1 - mem2;
EXPECT_EQ(2, sub_mem.mem_footprint_kb);
EXPECT_EQ(0, sub_mem.total_allocated_bytes);
EXPECT_EQ(-2000, sub_mem.in_use_allocated_bytes);
}
TEST(MemoryUsage, GetMemoryUsage) {
MemoryUsage result;
EXPECT_EQ(MemoryUsage::kValueNotSet, result.mem_footprint_kb);
EXPECT_EQ(MemoryUsage::kValueNotSet, result.total_allocated_bytes);
EXPECT_EQ(MemoryUsage::kValueNotSet, result.in_use_allocated_bytes);
#if defined(__linux__) || defined(__APPLE__)
std::unique_ptr<int[]> int_array(new int[1204]);
for (int i = 0; i < 1024; ++i) int_array[i] = i;
result = GetMemoryUsage();
EXPECT_NE(MemoryUsage::kValueNotSet, result.total_allocated_bytes);
#endif
}
TEST(MemoryUsage, IsSupported) {
#if defined(__linux__) || defined(__APPLE__)
EXPECT_TRUE(MemoryUsage::IsSupported());
#else
EXPECT_FALSE(MemoryUsage::IsSupported());
#endif
}
}
}
} |
816 | cpp | tensorflow/tensorflow | memory_usage_monitor | tensorflow/lite/profiling/memory_usage_monitor.cc | tensorflow/lite/profiling/memory_usage_monitor_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_MEMORY_USAGE_MONITOR_H_
#define TENSORFLOW_LITE_PROFILING_MEMORY_USAGE_MONITOR_H_
#include <memory>
#include <thread>
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
class MemoryUsageMonitor {
public:
class Sampler {
public:
virtual ~Sampler() {}
virtual bool IsSupported() { return MemoryUsage::IsSupported(); }
virtual MemoryUsage GetMemoryUsage() {
return tflite::profiling::memory::GetMemoryUsage();
}
virtual void SleepFor(const absl::Duration& duration) {
absl::SleepFor(duration);
}
};
static constexpr float kInvalidMemUsageMB = -1.0f;
explicit MemoryUsageMonitor(int sampling_interval_ms = 50)
: MemoryUsageMonitor(sampling_interval_ms, std::make_unique<Sampler>()) {}
MemoryUsageMonitor(int sampling_interval_ms,
std::unique_ptr<Sampler> sampler);
~MemoryUsageMonitor() { StopInternal(); }
void Start();
void Stop();
float GetPeakMemUsageInMB() const {
if (!is_supported_ || check_memory_thd_ != nullptr) {
return kInvalidMemUsageMB;
}
return peak_mem_footprint_kb_ / 1024.0;
}
MemoryUsageMonitor(MemoryUsageMonitor&) = delete;
MemoryUsageMonitor& operator=(const MemoryUsageMonitor&) = delete;
MemoryUsageMonitor(MemoryUsageMonitor&&) = delete;
MemoryUsageMonitor& operator=(const MemoryUsageMonitor&&) = delete;
private:
void StopInternal();
std::unique_ptr<Sampler> sampler_ = nullptr;
bool is_supported_ = false;
std::unique_ptr<absl::Notification> stop_signal_ = nullptr;
absl::Duration sampling_interval_;
std::unique_ptr<std::thread> check_memory_thd_ = nullptr;
int64_t peak_mem_footprint_kb_ =
static_cast<int64_t>(kInvalidMemUsageMB * 1024);
};
}
}
}
#endif
#include "tensorflow/lite/profiling/memory_usage_monitor.h"
#include <memory>
#include <utility>
#include "absl/synchronization/notification.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
constexpr float MemoryUsageMonitor::kInvalidMemUsageMB;
MemoryUsageMonitor::MemoryUsageMonitor(int sampling_interval_ms,
std::unique_ptr<Sampler> sampler)
: sampler_(std::move(sampler)),
is_supported_(false),
sampling_interval_(absl::Milliseconds(sampling_interval_ms)) {
is_supported_ = (sampler_ != nullptr && sampler_->IsSupported());
if (!is_supported_) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Getting memory usage isn't supported on this platform!\n");
return;
}
}
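// Start() launches a background thread that samples memory usage every
// sampling_interval_ and keeps the peak footprint until Stop() (or the
// destructor) notifies it; GetPeakMemUsageInMB() only reports a value once
// that thread has been joined.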
void MemoryUsageMonitor::Start() {
if (!is_supported_) return;
if (check_memory_thd_ != nullptr) {
TFLITE_LOG(TFLITE_LOG_INFO, "Memory monitoring has already started!\n");
return;
}
stop_signal_ = std::make_unique<absl::Notification>();
check_memory_thd_ = std::make_unique<std::thread>(([this]() {
while (true) {
const auto mem_info = sampler_->GetMemoryUsage();
if (mem_info.mem_footprint_kb > peak_mem_footprint_kb_) {
peak_mem_footprint_kb_ = mem_info.mem_footprint_kb;
}
if (stop_signal_->HasBeenNotified()) break;
sampler_->SleepFor(sampling_interval_);
}
}));
}
void MemoryUsageMonitor::Stop() {
if (!is_supported_) return;
if (check_memory_thd_ == nullptr) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Memory monitoring hasn't started yet or has stopped!\n");
return;
}
StopInternal();
}
void MemoryUsageMonitor::StopInternal() {
if (check_memory_thd_ == nullptr) return;
stop_signal_->Notify();
if (check_memory_thd_ != nullptr) {
check_memory_thd_->join();
}
stop_signal_.reset(nullptr);
check_memory_thd_.reset(nullptr);
}
}
}
} | #include "tensorflow/lite/profiling/memory_usage_monitor.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
class MemoryUsageNotSupportedSampler : public MemoryUsageMonitor::Sampler {
public:
bool IsSupported() override { return false; }
};
TEST(MemoryUsageMonitor, NotSupported) {
MemoryUsageMonitor monitor1(50, std::unique_ptr<MemoryUsageMonitor::Sampler>(
new MemoryUsageNotSupportedSampler()));
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor1.GetPeakMemUsageInMB());
MemoryUsageMonitor monitor2(50, nullptr);
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor2.GetPeakMemUsageInMB());
}
class MemoryUsageMonitorTest : public ::testing::Test {
protected:
class FakeMemoryUsageSampler : public MemoryUsageMonitor::Sampler {
public:
explicit FakeMemoryUsageSampler(int64_t* num_sleeps)
: sleep_cnt_(num_sleeps) {}
bool IsSupported() override { return true; }
MemoryUsage GetMemoryUsage() override {
MemoryUsage result;
result.mem_footprint_kb = 5 * ((*sleep_cnt_) + 1) * 1024;
return result;
}
void SleepFor(const absl::Duration& duration) override {
(*sleep_cnt_)++;
absl::SleepFor(duration);
}
private:
int64_t* const sleep_cnt_ = nullptr;
};
void SetUp() override {
monitor_ = std::make_unique<MemoryUsageMonitor>(
50, std::unique_ptr<MemoryUsageMonitor::Sampler>(
new FakeMemoryUsageSampler(&num_sleeps_)));
}
int64_t num_sleeps_ = 0;
std::unique_ptr<MemoryUsageMonitor> monitor_ = nullptr;
};
TEST_F(MemoryUsageMonitorTest, StartAndStop) {
monitor_->Start();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, NoStartAndStop) {
monitor_->Stop();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StartAndNoStop) {
monitor_->Start();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StopFirst) {
monitor_->Stop();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
monitor_->Start();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, MultiStartAndStops) {
monitor_->Start();
monitor_->Start();
monitor_->Stop();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StartStopPairs) {
monitor_->Start();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
monitor_->Start();
absl::SleepFor(absl::Milliseconds(100));
monitor_->Stop();
EXPECT_GE(num_sleeps_, 1);
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
}
}
} |
817 | cpp | tensorflow/tensorflow | time | tensorflow/lite/profiling/time.cc | tensorflow/lite/profiling/time_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_TIME_H_
#define TENSORFLOW_LITE_PROFILING_TIME_H_
#include <cstdint>
namespace tflite {
namespace profiling {
namespace time {
uint64_t NowMicros();
void SleepForMicros(uint64_t micros);
}
}
}
#endif
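// Illustrative usage sketch, added for exposition (not part of the original
// TFLite sources); the helper name below is made up.
inline uint64_t MeasureSleepMicros() {
  const uint64_t start = tflite::profiling::time::NowMicros();
  tflite::profiling::time::SleepForMicros(500);  // Sleep for roughly 500 us.
  // On a monotonic clock the difference should come out at >= ~500 us.
  return tflite::profiling::time::NowMicros() - start;
}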
#include "tensorflow/lite/profiling/time.h"
#if defined(_MSC_VER)
#include <chrono>
#include <thread>
#else
#include <sys/time.h>
#include <time.h>
#endif
namespace tflite {
namespace profiling {
namespace time {
#if defined(_MSC_VER)
uint64_t NowMicros() {
return static_cast<uint64_t>(
std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count());
}
void SleepForMicros(uint64_t micros) {
std::this_thread::sleep_for(std::chrono::microseconds(micros));
}
#else
uint64_t NowMicros() {
#if defined(__APPLE__)
return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW) / 1e3;
#else
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<uint64_t>(ts.tv_sec) * 1e6 +
static_cast<uint64_t>(ts.tv_nsec) / 1e3;
#endif
}
void SleepForMicros(uint64_t micros) {
timespec sleep_time;
sleep_time.tv_sec = micros / 1e6;
micros -= sleep_time.tv_sec * 1e6;
sleep_time.tv_nsec = micros * 1e3;
nanosleep(&sleep_time, nullptr);
}
#endif
}
}
} | #include "tensorflow/lite/profiling/time.h"
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace time {
TEST(TimeTest, NowMicros) {
auto now0 = NowMicros();
EXPECT_GT(now0, 0);
auto now1 = NowMicros();
EXPECT_GE(now1, now0);
}
TEST(TimeTest, SleepForMicros) {
SleepForMicros(0);
auto now0 = NowMicros();
SleepForMicros(50);
auto now1 = NowMicros();
EXPECT_GE(now1, now0 + 50);
now0 = NowMicros();
SleepForMicros(1e6 + 50);
now1 = NowMicros();
EXPECT_GE(now1, now0 + 1e6 + 50);
}
}
}
} |
818 | cpp | tensorflow/tensorflow | root_profiler | tensorflow/lite/profiling/root_profiler.cc | tensorflow/lite/profiling/root_profiler_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_ROOT_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_ROOT_PROFILER_H_
#include <cstdint>
#include <map>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite {
namespace profiling {
class RootProfiler : public Profiler {
public:
RootProfiler() = default;
~RootProfiler() override = default;
RootProfiler(const RootProfiler&) = delete;
RootProfiler& operator=(const RootProfiler&) = delete;
RootProfiler(RootProfiler&&) = default;
RootProfiler& operator=(RootProfiler&&) = default;
void AddProfiler(Profiler* profiler);
void AddProfiler(std::unique_ptr<Profiler>&& profiler);
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override;
void EndEvent(uint32_t event_handle, int64_t event_metadata1,
int64_t event_metadata2) override;
void EndEvent(uint32_t event_handle) override;
void AddEvent(const char* tag, EventType event_type, uint64_t metric,
int64_t event_metadata1, int64_t event_metadata2) override;
void AddEventWithData(const char* tag, EventType event_type,
const void* data) override;
void RemoveChildProfilers();
private:
uint32_t next_event_id_ = 1;
std::vector<std::unique_ptr<Profiler>> owned_profilers_;
std::vector<Profiler*> profilers_;
std::map<uint32_t, std::vector<uint32_t>> events_;
};
}
}
#endif
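// Illustrative usage sketch, added for exposition (not part of the original
// TFLite sources): fanning a single event out to two child profilers. The
// parameters stand in for any tflite::Profiler implementations.
inline void ExampleFanOut(tflite::Profiler* child_a, tflite::Profiler* child_b) {
  tflite::profiling::RootProfiler root;
  root.AddProfiler(child_a);  // The root does not take ownership here.
  root.AddProfiler(child_b);
  const uint32_t handle = root.BeginEvent(
      "example_tag", tflite::Profiler::EventType::DEFAULT, 0, 0);
  root.EndEvent(handle);  // Forwarded to every registered child profiler.
}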
#include "tensorflow/lite/profiling/root_profiler.h"
#include <memory>
#include <utility>
#include <vector>
namespace tflite {
namespace profiling {
void RootProfiler::AddProfiler(Profiler* profiler) {
if (profiler == nullptr) return;
profilers_.push_back(profiler);
}
void RootProfiler::AddProfiler(std::unique_ptr<Profiler>&& profiler) {
if (profiler == nullptr) return;
owned_profilers_.emplace_back(std::move(profiler));
profilers_.push_back(owned_profilers_.back().get());
}
uint32_t RootProfiler::BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
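  // With a single child profiler its native event handle is returned
  // directly; with several children a synthetic id is handed out and mapped
  // to the per-child handles in `events_`.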
if (profilers_.size() == 1) {
return profilers_[0]->BeginEvent(tag, event_type, event_metadata1,
event_metadata2);
}
auto id = next_event_id_++;
std::vector<uint32_t> event_ids;
event_ids.reserve(profilers_.size());
for (auto* profiler : profilers_) {
event_ids.push_back(profiler->BeginEvent(tag, event_type, event_metadata1,
event_metadata2));
}
events_.emplace(id, std::move(event_ids));
return id;
}
void RootProfiler::EndEvent(uint32_t event_handle, int64_t event_metadata1,
int64_t event_metadata2) {
if (profilers_.size() == 1) {
return profilers_[0]->EndEvent(event_handle, event_metadata1,
event_metadata2);
}
if (const auto it = events_.find(event_handle); it != events_.end()) {
const auto& event_ids = it->second;
for (auto idx = 0; idx < event_ids.size(); idx++) {
profilers_[idx]->EndEvent(event_ids[idx], event_metadata1,
event_metadata2);
}
events_.erase(it);
}
}
void RootProfiler::EndEvent(uint32_t event_handle) {
if (profilers_.size() == 1) {
return profilers_[0]->EndEvent(event_handle);
}
if (const auto it = events_.find(event_handle); it != events_.end()) {
const auto& event_ids = it->second;
for (auto idx = 0; idx < event_ids.size(); idx++) {
profilers_[idx]->EndEvent(event_ids[idx]);
}
events_.erase(it);
}
}
void RootProfiler::AddEvent(const char* tag, EventType event_type,
uint64_t metric, int64_t event_metadata1,
int64_t event_metadata2) {
for (auto* profiler : profilers_) {
profiler->AddEvent(tag, event_type, metric, event_metadata1,
event_metadata2);
}
}
void RootProfiler::AddEventWithData(const char* tag, EventType event_type,
const void* data) {
for (auto* profiler : profilers_) {
profiler->AddEventWithData(tag, event_type, data);
}
}
void RootProfiler::RemoveChildProfilers() {
owned_profilers_.clear();
profilers_.clear();
events_.clear();
}
}
} | #include "tensorflow/lite/profiling/root_profiler.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/profiler.h"
using ::testing::_;
using ::testing::StrictMock;
namespace tflite {
namespace profiling {
namespace {
constexpr char kTag[] = "tag";
class MockProfiler : public Profiler {
public:
MOCK_METHOD(uint32_t, BeginEvent,
(const char* tag, EventType event_type, int64_t event_metadata1,
int64_t event_metadata2),
(override));
MOCK_METHOD(void, EndEvent, (uint32_t event_handle), (override));
MOCK_METHOD(void, EndEvent,
(uint32_t event_handle, int64_t event_metadata1,
int64_t event_metadata2),
(override));
MOCK_METHOD(void, AddEvent,
(const char* tag, EventType event_type, uint64_t metric,
int64_t event_metadata1, int64_t event_metadata2),
(override));
MOCK_METHOD(void, AddEventWithData,
(const char* tag, EventType event_type, const void* data),
(override));
};
using MockProfilerT = StrictMock<MockProfiler>;
TEST(RootProfilerTest, ChildProfilerTest) {
auto mock_profiler = std::make_unique<MockProfilerT>();
auto* mock = mock_profiler.get();
RootProfiler root;
root.AddProfiler(mock_profiler.get());
ON_CALL(*mock, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(42));
EXPECT_CALL(*mock, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock, EndEvent(42, 3, 4));
EXPECT_CALL(*mock, AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
5, 6, 7));
EXPECT_CALL(*mock, AddEventWithData(kTag, Profiler::EventType::DEFAULT, _));
auto begin = root.BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2);
root.EndEvent(begin, 3, 4);
root.AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT, 5, 6, 7);
root.AddEventWithData(kTag, Profiler::EventType::DEFAULT, nullptr);
}
TEST(RootProfilerTest, OwnedProfilerTest) {
auto mock_profiler = std::make_unique<MockProfilerT>();
auto* mock = mock_profiler.get();
RootProfiler root;
root.AddProfiler(std::move(mock_profiler));
ON_CALL(*mock, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(42));
EXPECT_CALL(*mock, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock, EndEvent(42));
EXPECT_CALL(*mock, AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
3, 4, 5));
auto begin = root.BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2);
root.EndEvent(begin);
root.AddEvent(kTag, Profiler::EventType::OPERATOR_INVOKE_EVENT, 3, 4, 5);
}
TEST(RootProfilerTest, MultipleProfilerTest) {
auto mock_profiler0 = std::make_unique<MockProfilerT>();
auto* mock0 = mock_profiler0.get();
auto mock_profiler1 = std::make_unique<MockProfilerT>();
auto* mock1 = mock_profiler1.get();
RootProfiler root;
root.AddProfiler(std::move(mock_profiler0));
root.AddProfiler(std::move(mock_profiler1));
ON_CALL(*mock0, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(42));
ON_CALL(*mock1, BeginEvent(_, _, _, _)).WillByDefault(testing::Return(24));
EXPECT_CALL(*mock0, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock0, EndEvent(42));
EXPECT_CALL(*mock1, BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2));
EXPECT_CALL(*mock1, EndEvent(24));
auto begin = root.BeginEvent(kTag, Profiler::EventType::DEFAULT, 1, 2);
root.EndEvent(begin);
}
}
}
} |
819 | cpp | tensorflow/tensorflow | profile_summarizer | tensorflow/lite/profiling/profile_summarizer.cc | tensorflow/lite/profiling/profile_summarizer_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARIZER_H_
#define TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARIZER_H_
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
namespace tflite {
namespace profiling {
class ProfileSummarizer {
public:
explicit ProfileSummarizer(
std::shared_ptr<ProfileSummaryFormatter> summary_formatter =
std::make_shared<ProfileSummaryDefaultFormatter>());
virtual ~ProfileSummarizer() {}
void ProcessProfiles(const std::vector<const ProfileEvent*>& profile_stats,
const tflite::Interpreter& interpreter);
std::string GetOutputString() {
return summary_formatter_->GetOutputString(
stats_calculator_map_, *delegate_stats_calculator_, subgraph_name_map_);
}
std::string GetShortSummary() {
return summary_formatter_->GetShortSummary(
stats_calculator_map_, *delegate_stats_calculator_, subgraph_name_map_);
}
tensorflow::StatsCalculator* GetStatsCalculator(uint32_t subgraph_index);
bool HasProfiles() {
for (auto& stats_calc : stats_calculator_map_) {
auto subgraph_stats = stats_calc.second.get();
if (subgraph_stats->num_runs() >= 1) return true;
}
return false;
}
private:
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map_;
std::unique_ptr<tensorflow::StatsCalculator> delegate_stats_calculator_;
std::shared_ptr<ProfileSummaryFormatter> summary_formatter_;
std::map<uint32_t, std::string> subgraph_name_map_;
void SetSubgraphNameMap(const tflite::Interpreter& interpreter) {
subgraph_name_map_.clear();
for (int subgraph_index = 0; subgraph_index < interpreter.subgraphs_size();
++subgraph_index) {
subgraph_name_map_[subgraph_index] =
interpreter.subgraph(subgraph_index)->GetName();
}
}
};
}
}
#endif
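// Illustrative usage sketch, added for exposition (not part of the original
// TFLite sources). The events vector would typically come from a profiler
// attached to the interpreter (e.g. BufferedProfiler::GetProfileEvents()).
inline std::string SummarizeEvents(
    const std::vector<const tflite::profiling::ProfileEvent*>& events,
    const tflite::Interpreter& interpreter) {
  tflite::profiling::ProfileSummarizer summarizer;
  summarizer.ProcessProfiles(events, interpreter);
  // GetOutputString() renders per-subgraph stats with the default formatter.
  return summarizer.HasProfiles() ? summarizer.GetOutputString()
                                  : std::string("no profile events recorded");
}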
#include "tensorflow/lite/profiling/profile_summarizer.h"
#include <memory>
#include <sstream>
#include <string>
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace profiling {
namespace {
struct OperatorDetails {
uint32_t subgraph_index;
uint32_t node_index;
std::string op_description;
std::vector<std::string> inputs;
std::vector<std::string> outputs;
};
std::string GetTensorName(const tflite::Interpreter& interpreter,
int tensor_index) {
const auto tensor = interpreter.tensor(tensor_index);
if (tensor == nullptr || tensor->name == nullptr) {
return "Unknown";
}
return tensor->name;
}
std::vector<std::string> GetTensorNames(const tflite::Interpreter& interpreter,
const TfLiteIntArray* tensor_indices) {
std::vector<std::string> tensors;
tensors.reserve(tensor_indices->size);
for (int i = 0; i < tensor_indices->size; i++) {
tensors.push_back(GetTensorName(interpreter, tensor_indices->data[i]));
}
return tensors;
}
std::string ToString(const std::vector<std::string>& str_vector) {
std::stringstream stream;
stream << "[";
bool first = true;
for (const auto& s : str_vector) {
if (!first) {
stream << ", ";
} else {
first = false;
}
stream << s;
}
stream << "]";
return stream.str();
}
OperatorDetails GetOperatorDetails(const tflite::Interpreter& interpreter,
uint32_t subgraph_index,
uint32_t node_index) {
auto subgraph =
const_cast<tflite::Interpreter&>(interpreter).subgraph(subgraph_index);
auto node_reg = subgraph->node_and_registration(node_index);
auto inputs = node_reg->first.inputs;
auto outputs = node_reg->first.outputs;
const char* profiling_string =
interpreter.OpProfilingString(node_reg->second, &node_reg->first);
OperatorDetails details;
if (profiling_string) {
details.op_description = std::string(profiling_string);
}
details.inputs = GetTensorNames(interpreter, inputs);
details.outputs = GetTensorNames(interpreter, outputs);
return details;
}
}
ProfileSummarizer::ProfileSummarizer(
std::shared_ptr<ProfileSummaryFormatter> summary_formatter)
: summary_formatter_(summary_formatter) {
stats_calculator_map_[0] = std::make_unique<tensorflow::StatsCalculator>(
summary_formatter_->GetStatSummarizerOptions());
delegate_stats_calculator_ = std::make_unique<tensorflow::StatsCalculator>(
summary_formatter_->GetStatSummarizerOptions());
}
void ProfileSummarizer::ProcessProfiles(
const std::vector<const ProfileEvent*>& profile_stats,
const tflite::Interpreter& interpreter) {
if (profile_stats.empty()) return;
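  // Each event is attributed to the stats calculator of the subgraph it ran
  // in (operator and delegate-profiled-operator events) or to the shared
  // delegate calculator (delegate-internal events); per-subgraph run totals
  // are accumulated alongside.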
int node_num = 0;
std::map<uint32_t, int64_t> total_us_per_subgraph_map;
int64_t delegate_internal_total_us = 0;
for (auto event : profile_stats) {
const auto subgraph_index = event->extra_event_metadata;
auto stats_calculator = GetStatsCalculator(subgraph_index);
int64_t node_exec_time = event->elapsed_time;
if (event->event_type == Profiler::EventType::OPERATOR_INVOKE_EVENT) {
const auto node_index = event->event_metadata;
const auto op_details =
GetOperatorDetails(interpreter, subgraph_index, node_index);
std::string type_in_stats(event->tag);
if (!op_details.op_description.empty()) {
type_in_stats += "/" + op_details.op_description;
}
const auto node_name = ToString(op_details.outputs);
const auto node_name_in_stats =
node_name + ":" + std::to_string(node_index);
stats_calculator->AddNodeStats(node_name_in_stats, type_in_stats,
node_num, node_exec_time, 0 );
} else if (event->event_type ==
Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT) {
const std::string node_name(event->tag);
const auto node_name_in_stats =
"Delegate/" + node_name + ":" + std::to_string(event->event_metadata);
delegate_stats_calculator_->AddNodeStats(node_name_in_stats,
"DelegateOpInvoke", node_num,
node_exec_time, 0 );
} else if (event->event_type ==
Profiler::EventType::DELEGATE_PROFILED_OPERATOR_INVOKE_EVENT) {
const std::string node_name(event->tag);
const std::string type_in_stats(node_name);
const auto node_name_in_stats =
"Delegate/" + node_name + ":" + std::to_string(event->event_metadata);
stats_calculator->AddNodeStats(node_name_in_stats, type_in_stats,
node_num, node_exec_time, 0 );
} else {
const memory::MemoryUsage node_mem_usage =
event->end_mem_usage - event->begin_mem_usage;
std::string node_name(event->tag);
if (node_name == "Invoke") {
continue;
}
node_name += "/" + std::to_string(event->extra_event_metadata);
stats_calculator->AddNodeStats(node_name, event->tag, node_num,
node_exec_time,
node_mem_usage.mem_footprint_kb * 1000.0);
}
if (event->event_type !=
Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT) {
total_us_per_subgraph_map[subgraph_index] += node_exec_time;
} else {
delegate_internal_total_us += node_exec_time;
}
++node_num;
}
for (auto& total_us_per_subgraph_pair : total_us_per_subgraph_map) {
auto stats_calculator =
GetStatsCalculator(total_us_per_subgraph_pair.first);
stats_calculator->UpdateRunTotalUs(total_us_per_subgraph_pair.second);
}
if (delegate_internal_total_us > 0) {
delegate_stats_calculator_->UpdateRunTotalUs(delegate_internal_total_us);
}
SetSubgraphNameMap(interpreter);
}
tensorflow::StatsCalculator* ProfileSummarizer::GetStatsCalculator(
uint32_t subgraph_index) {
if (stats_calculator_map_.count(subgraph_index) == 0) {
stats_calculator_map_[subgraph_index] =
std::make_unique<tensorflow::StatsCalculator>(
summary_formatter_->GetStatSummarizerOptions());
}
return stats_calculator_map_[subgraph_index].get();
}
}
} | #include "tensorflow/lite/profiling/profile_summarizer.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/context.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/profiling/buffered_profiler.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace profiling {
namespace {
const char* kOpName = "SimpleOpEval";
TfLiteStatus SimpleOpEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 0, &output));
int32_t* output_data = output->data.i32;
*output_data = *(input1->data.i32) + *(input2->data.i32);
return kTfLiteOk;
}
const char* SimpleOpProfilingString(const TfLiteContext* context,
const TfLiteNode* node) {
return "Profile";
}
TfLiteRegistration* RegisterSimpleOp() {
static TfLiteRegistration registration = {
nullptr, nullptr, nullptr,
SimpleOpEval, nullptr, tflite::BuiltinOperator_CUSTOM,
"SimpleOpEval", 1};
return ®istration;
}
TfLiteRegistration* RegisterSimpleOpWithProfilingDetails() {
static TfLiteRegistration registration = {nullptr,
nullptr,
nullptr,
SimpleOpEval,
SimpleOpProfilingString,
tflite::BuiltinOperator_CUSTOM,
kOpName,
1};
return ®istration;
}
class SimpleOpModel : public SingleOpModel {
public:
void Init(const std::function<TfLiteRegistration*()>& registration);
tflite::Interpreter* GetInterpreter() { return interpreter_.get(); }
void SetInputs(int32_t x, int32_t y) {
PopulateTensor(inputs_[0], {x});
PopulateTensor(inputs_[1], {y});
}
int32_t GetOutput() { return ExtractVector<int32_t>(output_)[0]; }
private:
int inputs_[2];
int output_;
};
void SimpleOpModel::Init(
const std::function<TfLiteRegistration*()>& registration) {
inputs_[0] = AddInput({TensorType_INT32, {1}});
inputs_[1] = AddInput({TensorType_INT32, {1}});
output_ = AddOutput({TensorType_INT32, {}});
SetCustomOp(kOpName, {}, registration);
BuildInterpreter({GetShape(inputs_[0]), GetShape(inputs_[1])});
}
TEST(ProfileSummarizerTest, Empty) {
ProfileSummarizer summarizer;
std::string output = summarizer.GetOutputString();
EXPECT_GT(output.size(), 0);
}
TEST(ProfileSummarizerTest, Interpreter) {
BufferedProfiler profiler(1024);
SimpleOpModel m;
m.Init(RegisterSimpleOp);
auto interpreter = m.GetInterpreter();
interpreter->SetProfiler(&profiler);
profiler.StartProfiling();
m.SetInputs(1, 2);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_EQ(m.GetOutput(), 3);
profiler.StopProfiling();
ProfileSummarizer summarizer;
auto events = profiler.GetProfileEvents();
EXPECT_EQ(2, events.size());
summarizer.ProcessProfiles(profiler.GetProfileEvents(), *interpreter);
auto output = summarizer.GetOutputString();
ASSERT_TRUE(output.find("SimpleOpEval") != std::string::npos) << output;
ASSERT_TRUE(output.find("Invoke") == std::string::npos) << output;
}
TEST(ProfileSummarizerTest, InterpreterPlusProfilingDetails) {
BufferedProfiler profiler(1024);
SimpleOpModel m;
m.Init(RegisterSimpleOpWithProfilingDetails);
auto interpreter = m.GetInterpreter();
interpreter->SetProfiler(&profiler);
profiler.StartProfiling();
m.SetInputs(1, 2);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_EQ(m.GetOutput(), 3);
profiler.StopProfiling();
ProfileSummarizer summarizer;
auto events = profiler.GetProfileEvents();
EXPECT_EQ(2, events.size());
summarizer.ProcessProfiles(profiler.GetProfileEvents(), *interpreter);
auto output = summarizer.GetOutputString();
ASSERT_TRUE(output.find("SimpleOpEval/Profile") != std::string::npos)
<< output;
}
class ProfileSummarizerIfOpTest : public subgraph_test_util::ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
}
};
TEST_F(ProfileSummarizerIfOpTest, TestIfTrue) {
BufferedProfiler profiler(1024);
interpreter_->SetProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
profiler.StartProfiling();
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
profiler.StopProfiling();
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, {1, 2}, {6, 9});
auto events = profiler.GetProfileEvents();
EXPECT_EQ(5, events.size());
int event_count_of_subgraph_zero = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 0; });
int event_count_of_subgraph_one = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 1; });
int event_count_of_subgraph_two = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 2; });
EXPECT_EQ(2, event_count_of_subgraph_zero);
EXPECT_EQ(3, event_count_of_subgraph_one);
EXPECT_EQ(0, event_count_of_subgraph_two);
}
TEST_F(ProfileSummarizerIfOpTest, TestIfFalse) {
BufferedProfiler profiler(1024);
interpreter_->SetProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
profiler.StartProfiling();
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
profiler.StopProfiling();
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, {1, 2}, {5, 14});
auto events = profiler.GetProfileEvents();
EXPECT_EQ(5, events.size());
int event_count_of_subgraph_zero = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 0; });
int event_count_of_subgraph_one = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 1; });
int event_count_of_subgraph_two = std::count_if(
events.begin(), events.end(),
[](auto event) { return event->extra_event_metadata == 2; });
EXPECT_EQ(2, event_count_of_subgraph_zero);
EXPECT_EQ(0, event_count_of_subgraph_one);
EXPECT_EQ(3, event_count_of_subgraph_two);
}
}
}
} |
820 | cpp | tensorflow/tensorflow | subgraph_tensor_profiler | tensorflow/lite/profiling/subgraph_tensor_profiler.cc | tensorflow/lite/profiling/subgraph_tensor_profiler_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_SUBGRAPH_TENSOR_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_SUBGRAPH_TENSOR_PROFILER_H_
#include <functional>
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite::profiling {
class SubgraphTensorProfiler : public tflite::Profiler {
public:
using CallbackT = std::function<void(const TfLiteTensor*)>;
SubgraphTensorProfiler(const Interpreter& interpreter, CallbackT callback);
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override;
void EndEvent(uint32_t event_handle) override;
private:
std::vector<int64_t> events_;
const Interpreter& interpreter_;
CallbackT callback_;
};
}
#endif
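// Illustrative usage sketch, added for exposition (not part of the original
// TFLite sources): count every tensor visited while invoking an interpreter.
// The Invoke() status is ignored here for brevity.
inline int CountVisitedTensors(tflite::Interpreter& interpreter) {
  int num_tensors = 0;
  tflite::profiling::SubgraphTensorProfiler profiler(
      interpreter, [&num_tensors](const TfLiteTensor*) { ++num_tensors; });
  interpreter.AddProfiler(&profiler);
  interpreter.Invoke();  // The callback fires per tensor of each invoked subgraph.
  return num_tensors;
}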
#include "tensorflow/lite/profiling/subgraph_tensor_profiler.h"
#include <cstring>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
namespace tflite::profiling {
SubgraphTensorProfiler::SubgraphTensorProfiler(const Interpreter& interpreter,
CallbackT callback)
: interpreter_(interpreter), callback_(callback) {
events_.reserve(interpreter.subgraphs_size());
}
uint32_t SubgraphTensorProfiler::BeginEvent(const char* tag,
EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
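  // Only subgraph "Invoke" events are tracked. `event_metadata2` carries the
  // index of the subgraph being invoked; it is remembered so that EndEvent()
  // can walk that subgraph's tensors.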
if (strcmp(tag, "Invoke")) {
return 0;
}
events_.push_back(event_metadata2);
return events_.size();
}
void SubgraphTensorProfiler::EndEvent(uint32_t event_handle) {
if (!event_handle || events_.size() < event_handle) {
return;
}
const Subgraph* subgraph = interpreter_.subgraph(events_[event_handle - 1]);
for (int i = 0; i < subgraph->tensors_size(); ++i) {
callback_(subgraph->tensor(i));
}
}
} | #include "tensorflow/lite/profiling/subgraph_tensor_profiler.h"
#include <functional>
#include <string>
#include <unordered_set>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite::profiling {
namespace {
using ::testing::IsSupersetOf;
using ::testing::Not;
constexpr const char* kIfSubgraphTensorNames[] = {
"if_cond",
"if_input2",
"if_input3",
"if_output1",
};
constexpr const char* kAddSubgraphTensorNames[] = {
"add_input1",
"add_input2",
"add_output1",
};
constexpr const char* kMulSubgraphTensorNames[] = {
"mul_input1",
"mul_input2",
"mul_output1",
};
struct TensorGatherer {
void operator()(const TfLiteTensor* tensor) { tensors.insert(tensor->name); }
std::unordered_set<std::string> tensors;
};
class SubgraphTensorProfilerTest
: public subgraph_test_util::ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
NameTensors();
}
private:
void NameTensors() {
auto set_names = [](Subgraph* subgraph, auto names) {
for (int j = 0; j < subgraph->tensors_size(); ++j) {
subgraph->tensor(j)->name = names[j];
}
};
set_names(interpreter_->subgraph(0), kIfSubgraphTensorNames);
set_names(interpreter_->subgraph(1), kAddSubgraphTensorNames);
set_names(interpreter_->subgraph(2), kMulSubgraphTensorNames);
}
};
TEST_F(SubgraphTensorProfilerTest, TestMulSubgraph) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
interpreter_->AddProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kIfSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kMulSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors,
Not(IsSupersetOf(kAddSubgraphTensorNames)));
}
TEST_F(SubgraphTensorProfilerTest, TestAddSubgraph) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
interpreter_->AddProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kIfSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kAddSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors,
Not(IsSupersetOf(kMulSubgraphTensorNames)));
}
TEST_F(SubgraphTensorProfilerTest, TestBeginEvent) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
const int subgraph_id = 1;
uint32_t valid_event = profiler.BeginEvent(
"Invoke", Profiler::EventType::DEFAULT, 0, subgraph_id);
EXPECT_EQ(valid_event, 1);
uint32_t invalid_event = profiler.BeginEvent(
"NotInvoke", Profiler::EventType::DEFAULT, 0, subgraph_id);
EXPECT_EQ(invalid_event, 0);
}
}
} |
821 | cpp | tensorflow/tensorflow | profile_buffer | tensorflow/lite/profiling/profile_buffer.cc | tensorflow/lite/profiling/profile_buffer_test.cc | #ifndef TENSORFLOW_LITE_PROFILING_PROFILE_BUFFER_H_
#define TENSORFLOW_LITE_PROFILING_PROFILE_BUFFER_H_
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace profiling {
constexpr uint32_t kInvalidEventHandle = static_cast<uint32_t>(~0) - 1;
struct ProfileEvent {
using EventType = tflite::Profiler::EventType;
std::string tag;
uint64_t begin_timestamp_us;
uint64_t elapsed_time;
memory::MemoryUsage begin_mem_usage;
memory::MemoryUsage end_mem_usage;
EventType event_type;
int64_t event_metadata;
int64_t extra_event_metadata;
};
class ProfileBuffer {
public:
ProfileBuffer(uint32_t max_num_entries, bool enabled,
bool allow_dynamic_expansion = false)
: enabled_(enabled),
current_index_(0),
event_buffer_(max_num_entries),
allow_dynamic_expansion_(allow_dynamic_expansion) {}
uint32_t BeginEvent(const char* tag, ProfileEvent::EventType event_type,
int64_t event_metadata1, int64_t event_metadata2);
void SetEnabled(bool enabled) { enabled_ = enabled; }
void EndEvent(uint32_t event_handle, const int64_t* event_metadata1 = nullptr,
const int64_t* event_metadata2 = nullptr);
void AddEvent(const char* tag, ProfileEvent::EventType event_type,
uint64_t elapsed_time, int64_t event_metadata1,
int64_t event_metadata2);
size_t Size() const {
return (current_index_ >= event_buffer_.size()) ? event_buffer_.size()
: current_index_;
}
void Reset() {
enabled_ = false;
current_index_ = 0;
}
const struct ProfileEvent* At(size_t index) const;
private:
std::pair<int, bool> GetNextEntryIndex();
bool enabled_;
uint32_t current_index_;
std::vector<ProfileEvent> event_buffer_;
const bool allow_dynamic_expansion_;
};
}
}
#endif
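// Illustrative usage sketch, added for exposition (not part of the original
// TFLite sources): timing a single event with a small fixed-size buffer.
inline uint64_t TimeOneEvent() {
  tflite::profiling::ProfileBuffer buffer(/*max_num_entries=*/8,
                                          /*enabled=*/true);
  const uint32_t handle = buffer.BeginEvent(
      "example_op", tflite::profiling::ProfileEvent::EventType::DEFAULT, 0, 0);
  // ... the work being measured would run here ...
  buffer.EndEvent(handle);
  return buffer.At(0)->elapsed_time;  // Microseconds for this first event.
}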
#include "tensorflow/lite/profiling/profile_buffer.h"
#include <utility>
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace profiling {
uint32_t ProfileBuffer::BeginEvent(const char* tag,
ProfileEvent::EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
if (!enabled_) {
return kInvalidEventHandle;
}
uint64_t timestamp = time::NowMicros();
const auto next_index = GetNextEntryIndex();
if (next_index.second) {
return next_index.first;
}
const int index = next_index.first;
event_buffer_[index].tag = tag;
event_buffer_[index].event_type = event_type;
event_buffer_[index].event_metadata = event_metadata1;
event_buffer_[index].extra_event_metadata = event_metadata2;
event_buffer_[index].begin_timestamp_us = timestamp;
event_buffer_[index].elapsed_time = 0;
if (event_type != Profiler::EventType::OPERATOR_INVOKE_EVENT) {
event_buffer_[index].begin_mem_usage = memory::GetMemoryUsage();
}
current_index_++;
return index;
}
void ProfileBuffer::EndEvent(uint32_t event_handle,
const int64_t* event_metadata1,
const int64_t* event_metadata2) {
if (!enabled_ || event_handle == kInvalidEventHandle ||
event_handle > current_index_) {
return;
}
const uint32_t max_size = event_buffer_.size();
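  // If the ring buffer has wrapped far enough that this handle's slot has
  // already been reused by a newer event, drop the end-of-event update.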
if (current_index_ > (max_size + event_handle)) {
return;
}
int event_index = event_handle % max_size;
event_buffer_[event_index].elapsed_time =
time::NowMicros() - event_buffer_[event_index].begin_timestamp_us;
if (event_buffer_[event_index].event_type !=
Profiler::EventType::OPERATOR_INVOKE_EVENT) {
event_buffer_[event_index].end_mem_usage = memory::GetMemoryUsage();
}
if (event_metadata1) {
event_buffer_[event_index].event_metadata = *event_metadata1;
}
if (event_metadata2) {
event_buffer_[event_index].extra_event_metadata = *event_metadata2;
}
}
const struct ProfileEvent* ProfileBuffer::At(size_t index) const {
size_t size = Size();
if (index >= size) {
return nullptr;
}
const uint32_t max_size = event_buffer_.size();
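  // Map the logical index onto the ring buffer: once the buffer has wrapped,
  // the oldest surviving event lives at current_index_ % max_size; before
  // wrapping, events start at slot 0 (start == max_size is a no-op modulo
  // max_size below).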
uint32_t start =
(current_index_ > max_size) ? current_index_ % max_size : max_size;
index = (index + start) % max_size;
return &event_buffer_[index];
}
void ProfileBuffer::AddEvent(const char* tag,
ProfileEvent::EventType event_type,
uint64_t elapsed_time, int64_t event_metadata1,
int64_t event_metadata2) {
if (!enabled_) {
return;
}
const auto next_index = GetNextEntryIndex();
if (next_index.second) {
return;
}
const int index = next_index.first;
event_buffer_[index].tag = tag;
event_buffer_[index].event_type = event_type;
event_buffer_[index].event_metadata = event_metadata1;
event_buffer_[index].extra_event_metadata = event_metadata2;
event_buffer_[index].begin_timestamp_us = 0;
event_buffer_[index].elapsed_time = elapsed_time;
current_index_++;
}
std::pair<int, bool> ProfileBuffer::GetNextEntryIndex() {
int index = current_index_ % event_buffer_.size();
if (current_index_ == 0 || index != 0) {
return std::make_pair(index, false);
}
if (!allow_dynamic_expansion_) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"Warning: Dropping ProfileBuffer event.");
return std::make_pair(current_index_, true);
} else {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"Warning: Doubling internal profiling buffer.");
event_buffer_.resize(current_index_ * 2);
return std::make_pair(current_index_, false);
}
}
}
} | #include "tensorflow/lite/profiling/profile_buffer.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace {
std::vector<const ProfileEvent*> GetProfileEvents(const ProfileBuffer& buffer) {
std::vector<const ProfileEvent*> events;
for (size_t i = 0; i < buffer.Size(); i++) {
events.push_back(buffer.At(i));
}
return events;
}
TEST(ProfileBufferTest, Empty) {
ProfileBuffer buffer( 0, true);
EXPECT_EQ(0, buffer.Size());
}
TEST(ProfileBufferTest, AddEvent) {
ProfileBuffer buffer( 10, true);
EXPECT_EQ(0, buffer.Size());
auto event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
EXPECT_GE(event_handle, 0);
EXPECT_EQ(1, buffer.Size());
auto event = GetProfileEvents(buffer)[0];
EXPECT_EQ(event->tag, "hello");
EXPECT_GT(event->begin_timestamp_us, 0);
EXPECT_EQ(event->event_type, ProfileEvent::EventType::DEFAULT);
EXPECT_EQ(event->event_metadata, 42);
buffer.EndEvent(event_handle);
EXPECT_EQ(1, buffer.Size());
EXPECT_GE(event->elapsed_time, 0);
}
TEST(ProfileBufferTest, EndEventWithMetadata) {
ProfileBuffer buffer( 10, true);
EXPECT_EQ(0, buffer.Size());
auto event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
const int64_t kEventMetadata1 = 18;
const int64_t kEventMetadata2 = 36;
buffer.EndEvent(event_handle, &kEventMetadata1, &kEventMetadata2);
EXPECT_GE(event_handle, 0);
EXPECT_EQ(1, buffer.Size());
auto event = GetProfileEvents(buffer)[0];
EXPECT_EQ(event->tag, "hello");
EXPECT_GT(event->begin_timestamp_us, 0);
EXPECT_EQ(event->event_type, ProfileEvent::EventType::DEFAULT);
EXPECT_EQ(event->event_metadata, kEventMetadata1);
EXPECT_EQ(event->extra_event_metadata, kEventMetadata2);
EXPECT_EQ(1, buffer.Size());
EXPECT_GE(event->elapsed_time, 0);
}
TEST(ProfileBufferTest, OverFlow) {
const int max_size = 4;
ProfileBuffer buffer{max_size, true};
std::vector<std::string> eventNames = {"first", "second", "third", "fourth"};
for (int i = 0; i < 2 * max_size; i++) {
buffer.BeginEvent(eventNames[i % 4].c_str(),
ProfileEvent::EventType::DEFAULT, i, 0);
size_t expected_size = std::min(i + 1, max_size);
EXPECT_EQ(expected_size, buffer.Size());
}
EXPECT_EQ(max_size, buffer.Size());
for (size_t j = 0; j < buffer.Size(); ++j) {
auto event = buffer.At(j);
EXPECT_EQ(eventNames[j % 4], event->tag);
EXPECT_EQ(ProfileEvent::EventType::DEFAULT, event->event_type);
EXPECT_EQ(j, event->event_metadata);
}
}
TEST(ProfileBufferTest, DynamicIncrease) {
const int max_initial_size = 4;
ProfileBuffer buffer{max_initial_size, true,
true };
std::vector<std::string> eventNames = {"first", "second", "third", "fourth"};
for (int i = 0; i < 2 * max_initial_size; i++) {
buffer.BeginEvent(eventNames[i % 4].c_str(),
ProfileEvent::EventType::DEFAULT, i, 0);
const size_t expected_size = i + 1;
EXPECT_EQ(expected_size, buffer.Size());
}
EXPECT_EQ(2 * max_initial_size, buffer.Size());
for (size_t j = 0; j < buffer.Size(); ++j) {
auto event = buffer.At(j);
EXPECT_EQ(eventNames[j % 4], event->tag);
EXPECT_EQ(ProfileEvent::EventType::DEFAULT, event->event_type);
EXPECT_EQ(j, event->event_metadata);
}
}
TEST(ProfileBufferTest, Enable) {
ProfileBuffer buffer( 10, false);
EXPECT_EQ(0, buffer.Size());
auto event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
EXPECT_EQ(kInvalidEventHandle, event_handle);
EXPECT_EQ(0, buffer.Size());
buffer.SetEnabled(true);
event_handle =
buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
42, 0);
EXPECT_GE(event_handle, 0);
EXPECT_EQ(1, buffer.Size());
}
}
}
} |
822 | cpp | tensorflow/tensorflow | profiler | third_party/xla/xla/python/profiler.cc | tensorflow/lite/profiling/telemetry/profiler_test.cc | #ifndef XLA_PYTHON_PROFILER_H_
#define XLA_PYTHON_PROFILER_H_
#include "third_party/nanobind/include/nanobind/nanobind.h"
namespace xla {
void BuildProfilerSubmodule(nanobind::module_& m);
}
#endif
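// BuildProfilerSubmodule (defined below) registers the nanobind `profiler`
// submodule: a ProfilerServer starter, ProfilerSession and ProfileOptions
// wrappers, the TraceMe context manager, and helpers that convert collected
// XSpace data into profiled-instruction / FDO protos.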
#include "xla/python/profiler.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "third_party/nanobind/include/nanobind/nanobind.h"
#include "third_party/nanobind/include/nanobind/stl/string.h"
#include "third_party/nanobind/include/nanobind/stl/string_view.h"
#include "third_party/nanobind/include/nanobind/stl/unique_ptr.h"
#include "third_party/nanobind/include/nanobind/stl/vector.h"
#include "xla/backends/profiler/plugin/plugin_tracer.h"
#include "xla/backends/profiler/plugin/profiler_c_api.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_profiler_extension.h"
#include "xla/pjrt/exceptions.h"
#include "xla/pjrt/status_casters.h"
#include "xla/python/aggregate_profile.h"
#include "xla/python/profiler_utils.h"
#include "xla/python/xplane_to_profile_instructions.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/protobuf.h"
#include "tsl/profiler/lib/profiler_factory.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/lib/profiler_session.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/rpc/client/capture_profile.h"
#include "tsl/profiler/rpc/profiler_server.h"
namespace xla {
namespace nb = nanobind;
namespace {
class TraceMeWrapper {
public:
TraceMeWrapper(const nb::str& name, const nb::kwargs& kwargs)
: traceme_(
[&]() {
std::string name_and_metadata = nb::cast<std::string>(name);
if (kwargs.size() > 0) {
AppendMetadata(&name_and_metadata, kwargs);
}
return name_and_metadata;
},
1) {}
void SetMetadata(const nb::kwargs& kwargs) {
if (TF_PREDICT_FALSE(kwargs.size() > 0)) {
traceme_.AppendMetadata([&]() {
std::string metadata;
AppendMetadata(&metadata, kwargs);
return metadata;
});
}
}
void Stop() { traceme_.Stop(); }
static bool IsEnabled() { return tsl::profiler::TraceMe::Active(); }
private:
static void AppendMetadata(std::string* name, const nb::kwargs& kwargs) {
name->push_back('#');
for (const auto& kv : kwargs) {
absl::StrAppend(name, nb::cast<std::string_view>(kv.first), "=",
EncodePyObject(kv.second), ",");
}
name->back() = '#';
}
static std::string EncodePyObject(nb::handle handle) {
if (nb::isinstance<nb::bool_>(handle)) {
return nb::cast<bool>(handle) ? "1" : "0";
}
return nb::cast<std::string>(nb::str(handle));
}
tsl::profiler::TraceMe traceme_;
};
tensorflow::ProfileOptions DefaultPythonProfileOptions() {
tensorflow::ProfileOptions options = tsl::ProfilerSession::DefaultOptions();
options.set_python_tracer_level(1);
options.set_enable_hlo_proto(true);
return options;
}
}
struct ProfilerSessionWrapper {
explicit ProfilerSessionWrapper(std::unique_ptr<tsl::ProfilerSession> session)
: session(std::move(session)) {}
std::unique_ptr<tsl::ProfilerSession> session;
};
static std::string GetFdoProfile(const std::string& xspace,
bool as_textproto = false) {
tensorflow::profiler::XSpace xspace_proto;
xspace_proto.ParseFromString(std::string(xspace.c_str(), xspace.size()));
tensorflow::profiler::ProfiledInstructionsProto fdo_profile;
xla::ThrowIfError(xla::ConvertXplaneToProfiledInstructionsProto(
{xspace_proto}, &fdo_profile));
if (as_textproto) {
std::string textproto;
if (tsl::protobuf::TextFormat::PrintToString(fdo_profile, &textproto)) {
return textproto;
}
throw xla::XlaRuntimeError("Unable to serialize format to textproto");
}
return fdo_profile.SerializeAsString();
}
void BuildProfilerSubmodule(nb::module_& m) {
nb::module_ profiler =
m.def_submodule("profiler", "TensorFlow profiler integration");
nb::class_<tsl::profiler::ProfilerServer> profiler_server_class(
profiler, "ProfilerServer");
profiler.def(
"start_server",
[](int port) -> std::unique_ptr<tsl::profiler::ProfilerServer> {
auto server = std::make_unique<tsl::profiler::ProfilerServer>();
server->StartProfilerServer(port);
return server;
},
nb::arg("port"));
profiler.def("register_plugin_profiler", [](nb::capsule c_api) -> void {
if (std::string_view(c_api.name()) != "pjrt_c_api") {
throw xla::XlaRuntimeError(
"Argument to register_plugin_profiler was not a pjrt_c_api capsule.");
}
RegisterProfiler(static_cast<const PJRT_Api*>(c_api.data()));
});
nb::class_<ProfilerSessionWrapper> profiler_session_class(profiler,
"ProfilerSession");
profiler_session_class
.def("__init__",
[](ProfilerSessionWrapper* wrapper) {
new (wrapper) ProfilerSessionWrapper(
tsl::ProfilerSession::Create(DefaultPythonProfileOptions()));
})
.def("__init__",
[](ProfilerSessionWrapper* wrapper,
const tensorflow::ProfileOptions& options) {
new (wrapper)
ProfilerSessionWrapper(tsl::ProfilerSession::Create(options));
})
.def("stop_and_export",
[](ProfilerSessionWrapper* sess,
const std::string& tensorboard_dir) -> void {
tensorflow::profiler::XSpace xspace;
xla::ThrowIfError(sess->session->CollectData(&xspace));
xla::ThrowIfError(tsl::profiler::ExportToTensorBoard(
xspace, tensorboard_dir, true));
})
.def("stop",
[](ProfilerSessionWrapper* sess) -> nb::bytes {
tensorflow::profiler::XSpace xspace;
xla::ThrowIfError(sess->session->CollectData(&xspace));
std::string xspace_str = xspace.SerializeAsString();
return nb::bytes(xspace_str.data(), xspace_str.size());
})
.def("export",
[](ProfilerSessionWrapper* sess, nb::bytes xspace,
const std::string& tensorboard_dir) -> void {
tensorflow::profiler::XSpace xspace_proto;
xspace_proto.ParseFromString(
std::string(xspace.c_str(), xspace.size()));
xla::ThrowIfError(tsl::profiler::ExportToTensorBoard(
xspace_proto, tensorboard_dir,
true));
});
nb::class_<tensorflow::ProfileOptions> profile_options_class(
profiler, "ProfileOptions");
profile_options_class
.def("__init__",
[](tensorflow::ProfileOptions* options) {
new (options)
tensorflow::ProfileOptions(DefaultPythonProfileOptions());
})
.def_prop_rw("include_dataset_ops",
&tensorflow::ProfileOptions::include_dataset_ops,
&tensorflow::ProfileOptions::set_include_dataset_ops)
.def_prop_rw("host_tracer_level",
&tensorflow::ProfileOptions::host_tracer_level,
&tensorflow::ProfileOptions::set_host_tracer_level)
.def_prop_rw("python_tracer_level",
&tensorflow::ProfileOptions::python_tracer_level,
&tensorflow::ProfileOptions::set_python_tracer_level)
.def_prop_rw("enable_hlo_proto",
&tensorflow::ProfileOptions::enable_hlo_proto,
&tensorflow::ProfileOptions::set_enable_hlo_proto)
.def_prop_rw("start_timestamp_ns",
&tensorflow::ProfileOptions::start_timestamp_ns,
&tensorflow::ProfileOptions::set_start_timestamp_ns)
.def_prop_rw("duration_ms", &tensorflow::ProfileOptions::duration_ms,
&tensorflow::ProfileOptions::set_duration_ms)
.def_prop_rw(
"repository_path", &tensorflow::ProfileOptions::repository_path,
[](tensorflow::ProfileOptions* options, const std::string& path) {
options->set_repository_path(path);
});
nb::class_<TraceMeWrapper> traceme_class(profiler, "TraceMe");
traceme_class.def(nb::init<nb::str, nb::kwargs>())
.def("__enter__", [](nb::object self) -> nb::object { return self; })
.def(
"__exit__",
[](nb::object self, const nb::object& ex_type,
const nb::object& ex_value,
const nb::object& traceback) -> nb::object {
nb::cast<TraceMeWrapper*>(self)->Stop();
return nb::none();
},
nb::arg("ex_type").none(), nb::arg("ex_value").none(),
nb::arg("traceback").none())
.def("set_metadata", &TraceMeWrapper::SetMetadata)
.def_static("is_enabled", &TraceMeWrapper::IsEnabled);
profiler.def(
"get_profiled_instructions_proto",
[](std::string tensorboard_dir) -> nb::bytes {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
xla::ThrowIfError(
xla::ConvertXplaneUnderLogdirToProfiledInstructionsProto(
tensorboard_dir, &profile_proto));
std::string profile_proto_str = profile_proto.SerializeAsString();
return nb::bytes(profile_proto_str.data(), profile_proto_str.size());
},
nb::arg("tensorboard_dir"));
profiler.def("get_fdo_profile",
[](nb::bytes xspace, bool as_textproto = false) -> nb::object {
std::string out = GetFdoProfile(
std::string(xspace.c_str(), xspace.size()), as_textproto);
return nb::bytes(out.data(), out.size());
});
profiler.def("get_fdo_profile", [](nb::bytes xspace) -> nb::object {
std::string out = GetFdoProfile(std::string(xspace.c_str(), xspace.size()));
return nb::bytes(out.data(), out.size());
});
profiler.def(
"aggregate_profiled_instructions",
[](const std::vector<nb::bytes>& profiles, int percentile) -> nb::object {
std::vector<tensorflow::profiler::ProfiledInstructionsProto>
fdo_profiles;
for (const nb::bytes& profile : profiles) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
profile_proto.ParseFromString(
std::string(profile.c_str(), profile.size()));
fdo_profiles.push_back(std::move(profile_proto));
}
tensorflow::profiler::ProfiledInstructionsProto result_proto;
xla::AggregateProfiledInstructionsProto(fdo_profiles, percentile,
&result_proto);
auto result = result_proto.SerializeAsString();
return nb::bytes(result.data(), result.size());
},
nb::arg("profiles") = nb::list(), nb::arg("percentile"));
}
} | #include "tensorflow/lite/profiling/telemetry/profiler.h"
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/telemetry/c/telemetry_setting.h"
#include "tensorflow/lite/profiling/telemetry/telemetry_status.h"
namespace tflite::telemetry {
namespace {
constexpr char kEventName[] = "event_name";
constexpr char kSettingName[] = "setting_name";
class MockTelemtryProfiler : public TelemetryProfiler {
public:
MOCK_METHOD(void, ReportTelemetryEvent,
(const char* event_name, TelemetryStatusCode status), (override));
MOCK_METHOD(void, ReportTelemetryOpEvent,
(const char* event_name, int64_t op_idx, int64_t subgraph_idx,
TelemetryStatusCode status),
(override));
MOCK_METHOD(void, ReportSettings,
(const char* setting_name,
const TfLiteTelemetrySettings* settings),
(override));
MOCK_METHOD(uint32_t, ReportBeginOpInvokeEvent,
(const char* op_name, int64_t op_idx, int64_t subgraph_idx),
(override));
MOCK_METHOD(void, ReportEndOpInvokeEvent, (uint32_t event_handle),
(override));
MOCK_METHOD(void, ReportOpInvokeEvent,
(const char* op_name, uint64_t elapsed_time, int64_t op_idx,
int64_t subgraph_idx),
(override));
};
class TelemetryStructTest : public ::testing::Test {
protected:
TelemetryStructTest() {
context_.profiler = &profiler_;
profiler_struct_.data = &mock_profiler_;
profiler_struct_.ReportTelemetryEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* event_name, uint64_t status) {
static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportTelemetryEvent(
event_name, tflite::telemetry::TelemetryStatusCode(status));
};
profiler_struct_.ReportTelemetryOpEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* event_name, int64_t op_idx, int64_t subgraph_idx,
uint64_t status) {
static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportTelemetryOpEvent(
event_name, op_idx, subgraph_idx,
tflite::telemetry::TelemetryStatusCode(status));
};
profiler_struct_.ReportSettings =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* setting_name, const TfLiteTelemetrySettings* settings) {
static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportSettings(setting_name, settings);
};
profiler_struct_.ReportBeginOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name,
int64_t op_idx, int64_t subgraph_idx) -> uint32_t {
return static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportBeginOpInvokeEvent(op_name, op_idx, subgraph_idx);
};
profiler_struct_.ReportEndOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
uint32_t event_handle) {
return static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportEndOpInvokeEvent(event_handle);
};
profiler_struct_.ReportOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name,
uint64_t elapsed_time, int64_t op_idx, int64_t subgraph_idx) {
return static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportOpInvokeEvent(op_name, elapsed_time, op_idx,
subgraph_idx);
};
profiler_.reset(telemetry::MakeTfLiteTelemetryProfiler(&profiler_struct_));
}
MockTelemtryProfiler mock_profiler_;
std::unique_ptr<TelemetryProfiler> profiler_;
TfLiteContext context_;
TfLiteTelemetryProfilerStruct profiler_struct_;
};
TEST_F(TelemetryStructTest, TelemetryReportEvent) {
EXPECT_CALL(mock_profiler_,
ReportTelemetryEvent(kEventName, TelemetryStatusCode(kTfLiteOk)));
profiler_->ReportTelemetryEvent(kEventName, TelemetryStatusCode(kTfLiteOk));
}
TEST_F(TelemetryStructTest, TelemetryReportOpEvent) {
EXPECT_CALL(
mock_profiler_,
ReportTelemetryOpEvent(kEventName, 1, 2, TelemetryStatusCode(kTfLiteOk)));
profiler_->ReportTelemetryOpEvent(kEventName, 1, 2,
TelemetryStatusCode(kTfLiteOk));
}
TEST_F(TelemetryStructTest, TelemetryReportSettings) {
EXPECT_CALL(mock_profiler_, ReportSettings(kSettingName, testing::_));
TfLiteTelemetrySettings settings{};
profiler_->ReportSettings(kSettingName, &settings);
}
TEST_F(TelemetryStructTest, TelemetryReportBeginOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportBeginOpInvokeEvent(kSettingName, 1, 2));
profiler_->ReportBeginOpInvokeEvent(kSettingName, 1, 2);
}
TEST_F(TelemetryStructTest, TelemetryReportEndOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportEndOpInvokeEvent(1));
profiler_->ReportEndOpInvokeEvent(1);
}
TEST_F(TelemetryStructTest, TelemetryReportOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportOpInvokeEvent(kSettingName, 1, 2, 3));
profiler_->ReportOpInvokeEvent(kSettingName, 1, 2, 3);
}
}
} |
823 | cpp | tensorflow/tensorflow | telemetry | tensorflow/lite/delegates/telemetry.cc | tensorflow/lite/delegates/telemetry_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_TELEMETRY_H_
#define TENSORFLOW_LITE_DELEGATES_TELEMETRY_H_
#include <cstdint>
#include <limits>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
constexpr char kDelegateSettingsTag[] = "delegate_settings";
constexpr char kDelegateStatusTag[] = "delegate_status";
enum class DelegateStatusSource {
NONE = 0,
TFLITE_GPU = 1,
TFLITE_NNAPI = 2,
TFLITE_HEXAGON = 3,
TFLITE_XNNPACK = 4,
TFLITE_COREML = 5,
MAX_NUM_SOURCES = std::numeric_limits<int32_t>::max(),
};
class DelegateStatus {
public:
DelegateStatus() : DelegateStatus(DelegateStatusSource::NONE, 0) {}
explicit DelegateStatus(int32_t code)
: DelegateStatus(DelegateStatusSource::NONE, code) {}
explicit DelegateStatus(int64_t full_status)
: DelegateStatus(
static_cast<DelegateStatusSource>(
full_status >> 32 &
static_cast<int32_t>(DelegateStatusSource::MAX_NUM_SOURCES)),
static_cast<int32_t>(full_status &
std::numeric_limits<int32_t>::max())) {}
DelegateStatus(DelegateStatusSource source, int32_t code)
: source_(static_cast<int32_t>(source)), code_(code) {}
int64_t full_status() const {
return static_cast<int64_t>(source_) << 32 | code_;
}
DelegateStatusSource source() const {
return static_cast<DelegateStatusSource>(source_);
}
int32_t code() const { return code_; }
private:
int32_t source_;
int32_t code_;
};
TfLiteStatus ReportDelegateSettings(TfLiteContext* context,
TfLiteDelegate* delegate,
const TFLiteSettings& settings);
TfLiteStatus ReportDelegateStatus(TfLiteContext* context,
TfLiteDelegate* delegate,
const DelegateStatus& status);
}
}
#endif
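// Illustrative usage sketch, added for exposition (not part of the original
// TFLite sources): pack a delegate status and recover it from the int64
// payload that gets attached to the profiler event.
inline bool DelegateStatusRoundTrips() {
  constexpr int32_t kCode = 42;  // Any non-negative delegate-specific code.
  tflite::delegates::DelegateStatus status(
      tflite::delegates::DelegateStatusSource::TFLITE_GPU, kCode);
  const int64_t packed = status.full_status();  // Source in the upper 32 bits.
  tflite::delegates::DelegateStatus decoded(packed);
  return decoded.code() == kCode &&
         decoded.source() == tflite::delegates::DelegateStatusSource::TFLITE_GPU;
}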
#include "tensorflow/lite/delegates/telemetry.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
TfLiteStatus ReportDelegateSettings(TfLiteContext* context,
TfLiteDelegate* delegate,
const TFLiteSettings& settings) {
auto* profiler = reinterpret_cast<Profiler*>(context->profiler);
const int64_t event_metadata1 = reinterpret_cast<int64_t>(delegate);
const int64_t event_metadata2 = reinterpret_cast<int64_t>(&settings);
TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(profiler, kDelegateSettingsTag,
event_metadata1, event_metadata2);
return kTfLiteOk;
}
TfLiteStatus ReportDelegateStatus(TfLiteContext* context,
TfLiteDelegate* delegate,
const DelegateStatus& status) {
auto* profiler = reinterpret_cast<Profiler*>(context->profiler);
TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(profiler, kDelegateStatusTag,
status.full_status(),
static_cast<int64_t>(kTfLiteOk));
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/delegates/telemetry.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace delegates {
namespace {
constexpr int32_t kDummyCode = 2;
constexpr bool kDummyGpuPrecisionLossAllowed = true;
constexpr tflite::Delegate kDummyDelegate = tflite::Delegate_GPU;
constexpr DelegateStatusSource kDummySource =
DelegateStatusSource::TFLITE_NNAPI;
TEST(TelemetryTest, StatusConversion) {
DelegateStatus status(kDummySource, kDummyCode);
int64_t serialized_int = status.full_status();
DelegateStatus deserialized_status(serialized_int);
EXPECT_EQ(kDummyCode, deserialized_status.code());
EXPECT_EQ(kDummySource, deserialized_status.source());
EXPECT_EQ(serialized_int, deserialized_status.full_status());
}
class DelegateProfiler : public Profiler {
public:
DelegateProfiler() {}
~DelegateProfiler() override = default;
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
int event_handle = -1;
if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateSettingsTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
EXPECT_NE(event_metadata1, 0);
auto* delegate = reinterpret_cast<TfLiteDelegate*>(event_metadata1);
EXPECT_EQ(delegate->flags, kTfLiteDelegateFlagsNone);
EXPECT_NE(event_metadata2, 0);
auto* settings = reinterpret_cast<TFLiteSettings*>(event_metadata2);
EXPECT_EQ(settings->delegate(), kDummyDelegate);
EXPECT_EQ(settings->gpu_settings()->is_precision_loss_allowed(),
kDummyGpuPrecisionLossAllowed);
} else if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateStatusTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
EXPECT_EQ(event_metadata2, static_cast<int64_t>(kTfLiteOk));
DelegateStatus reported_status(event_metadata1);
EXPECT_EQ(reported_status.source(), kDummySource);
EXPECT_EQ(reported_status.code(), kDummyCode);
}
EXPECT_NE(-1, event_handle);
return event_handle;
}
void EndEvent(uint32_t event_handle) override {
EXPECT_EQ(event_handle, event_buffer_.size());
}
int NumRecordedEvents() { return event_buffer_.size(); }
private:
std::vector<profiling::ProfileEvent> event_buffer_;
};
TEST(TelemetryTest, DelegateStatusReport) {
DelegateProfiler profiler;
TfLiteDelegate delegate = TfLiteDelegateCreate();
TfLiteContext context;
context.profiler = &profiler;
DelegateStatus status(kDummySource, kDummyCode);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 2);
}
TEST(TelemetryTest, DelegateSettingsReport) {
DelegateProfiler profiler;
TfLiteDelegate delegate = TfLiteDelegateCreate();
TfLiteContext context;
context.profiler = &profiler;
flatbuffers::FlatBufferBuilder flatbuffer_builder;
flatbuffers::Offset<tflite::GPUSettings> gpu_settings =
tflite::CreateGPUSettings(
flatbuffer_builder,
kDummyGpuPrecisionLossAllowed);
auto* tflite_settings_ptr = flatbuffers::GetTemporaryPointer(
flatbuffer_builder,
CreateTFLiteSettings(flatbuffer_builder, kDummyDelegate,
0,
gpu_settings));
EXPECT_EQ(ReportDelegateSettings(&context, &delegate, *tflite_settings_ptr),
kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 1);
DelegateStatus status(kDummySource, kDummyCode);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 3);
}
}
}
} |
824 | cpp | tensorflow/tensorflow | nnapi_implementation | tensorflow/lite/nnapi/nnapi_implementation.cc | tensorflow/lite/nnapi/nnapi_implementation_test.cc | #ifndef TENSORFLOW_LITE_NNAPI_NNAPI_IMPLEMENTATION_H_
#define TENSORFLOW_LITE_NNAPI_NNAPI_IMPLEMENTATION_H_
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory>
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
struct NnApi {
bool nnapi_exists;
int32_t android_sdk_version;
int64_t nnapi_runtime_feature_level;
int (*ANeuralNetworksMemory_createFromFd)(size_t size, int protect, int fd,
size_t offset,
ANeuralNetworksMemory** memory);
void (*ANeuralNetworksMemory_free)(ANeuralNetworksMemory* memory);
int (*ANeuralNetworksModel_create)(ANeuralNetworksModel** model);
void (*ANeuralNetworksModel_free)(ANeuralNetworksModel* model);
int (*ANeuralNetworksModel_finish)(ANeuralNetworksModel* model);
int (*ANeuralNetworksModel_addOperand)(
ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);
int (*ANeuralNetworksModel_setOperandValue)(ANeuralNetworksModel* model,
int32_t index, const void* buffer,
size_t length);
int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams)(
ANeuralNetworksModel* model, int32_t index,
const ANeuralNetworksSymmPerChannelQuantParams* channelQuant);
int (*ANeuralNetworksModel_setOperandValueFromMemory)(
ANeuralNetworksModel* model, int32_t index,
const ANeuralNetworksMemory* memory, size_t offset, size_t length);
int (*ANeuralNetworksModel_addOperation)(ANeuralNetworksModel* model,
ANeuralNetworksOperationType type,
uint32_t inputCount,
const uint32_t* inputs,
uint32_t outputCount,
const uint32_t* outputs);
int (*ANeuralNetworksModel_identifyInputsAndOutputs)(
ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
uint32_t outputCount, const uint32_t* outputs);
int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16)(
ANeuralNetworksModel* model, bool allow);
int (*ANeuralNetworksCompilation_create)(
ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);
void (*ANeuralNetworksCompilation_free)(
ANeuralNetworksCompilation* compilation);
int (*ANeuralNetworksCompilation_setPreference)(
ANeuralNetworksCompilation* compilation, int32_t preference);
int (*ANeuralNetworksCompilation_finish)(
ANeuralNetworksCompilation* compilation);
int (*ANeuralNetworksExecution_create)(
ANeuralNetworksCompilation* compilation,
ANeuralNetworksExecution** execution);
void (*ANeuralNetworksExecution_free)(ANeuralNetworksExecution* execution);
int (*ANeuralNetworksExecution_setInput)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type, const void* buffer,
size_t length);
int (*ANeuralNetworksExecution_setInputFromMemory)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type,
const ANeuralNetworksMemory* memory, size_t offset, size_t length);
int (*ANeuralNetworksExecution_setOutput)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type, void* buffer, size_t length);
int (*ANeuralNetworksExecution_setOutputFromMemory)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type,
const ANeuralNetworksMemory* memory, size_t offset, size_t length);
int (*ANeuralNetworksExecution_startCompute)(
ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);
int (*ANeuralNetworksEvent_wait)(ANeuralNetworksEvent* event);
void (*ANeuralNetworksEvent_free)(ANeuralNetworksEvent* event);
int (*ASharedMemory_create)(const char* name, size_t size);
int (*ANeuralNetworks_getDeviceCount)(uint32_t* numDevices);
int (*ANeuralNetworks_getDevice)(uint32_t devIndex,
ANeuralNetworksDevice** device);
int (*ANeuralNetworksDevice_getName)(const ANeuralNetworksDevice* device,
const char** name);
int (*ANeuralNetworksDevice_getVersion)(const ANeuralNetworksDevice* device,
const char** version);
int (*ANeuralNetworksDevice_getFeatureLevel)(
const ANeuralNetworksDevice* device, int64_t* featureLevel);
int (*ANeuralNetworksDevice_getType)(const ANeuralNetworksDevice* device,
int32_t* type);
int (*ANeuralNetworksModel_getSupportedOperationsForDevices)(
const ANeuralNetworksModel* model,
const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
bool* supportedOps);
int (*ANeuralNetworksCompilation_createForDevices)(
ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
uint32_t numDevices, ANeuralNetworksCompilat | #include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include <gtest/gtest.h>
namespace {
TEST(NnapiLibTest, NnApiImplementation) {
const NnApi* nnapi = NnApiImplementation();
EXPECT_NE(nnapi, nullptr);
#ifdef __ANDROID__
EXPECT_GT(nnapi->android_sdk_version, 0);
  if (nnapi->android_sdk_version < 27) {
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
} else {
EXPECT_TRUE(nnapi->nnapi_exists);
EXPECT_NE(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
if (nnapi->android_sdk_version >= 28) {
EXPECT_NE(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
} else {
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
}
EXPECT_NE(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_NE(nnapi->ASharedMemory_create, nullptr);
}
#else
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->android_sdk_version, 0);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDeviceCount, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDevice, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getName, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getVersion, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getFeatureLevel, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_createForDevices, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setCaching, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_compute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandRank, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_burstCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setMeasureTiming, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getDuration, nullptr);
#endif
}
} |
825 | cpp | tensorflow/tensorflow | nnapi_handler | tensorflow/lite/nnapi/nnapi_handler.cc | tensorflow/lite/nnapi/nnapi_handler_test.cc | #ifndef TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
#define TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
#include <string>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
class NnApiHandler {
public:
static NnApiHandler* Instance();
void Reset();
template <int Value>
void GetDeviceCountReturns() {
nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
*numDevices = 1;
return Value;
};
}
template <int DeviceCount>
void GetDeviceCountReturnsCount() {
nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
*numDevices = DeviceCount;
return ANEURALNETWORKS_NO_ERROR;
};
}
void StubGetDeviceCountWith(int(stub)(uint32_t*)) {
nnapi_->ANeuralNetworks_getDeviceCount = stub;
}
template <int Value>
void GetDeviceReturns() {
nnapi_->ANeuralNetworks_getDevice =
[](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
*device =
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice);
return Value;
};
}
void StubGetDeviceWith(int(stub)(uint32_t, ANeuralNetworksDevice**)) {
nnapi_->ANeuralNetworks_getDevice = stub;
}
template <int Value>
void GetDeviceNameReturns() {
nnapi_->ANeuralNetworksDevice_getName =
[](const ANeuralNetworksDevice* device, const char** name) -> int {
*name = NnApiHandler::nnapi_device_name_;
return Value;
};
}
void GetDeviceNameReturnsName(const std::string& name);
void StubGetDeviceNameWith(int(stub)(const ANeuralNetworksDevice*,
const char**)) {
nnapi_->ANeuralNetworksDevice_getName = stub;
}
void SetNnapiSupportedDevice(const std::string& name, int feature_level = 29);
template <int Value>
void ModelCreateReturns() {
nnapi_->ANeuralNetworksModel_create = [](ANeuralNetworksModel** model) {
*model = reinterpret_cast<ANeuralNetworksModel*>(1);
return Value;
};
}
void StubModelCreateWith(int(stub)(ANeuralNetworksModel** model)) {
nnapi_->ANeuralNetworksModel_create = stub;
}
template <int Value>
void AddOperandReturns() {
nnapi_->ANeuralNetworksModel_addOperand =
[](ANeuralNetworksModel* model,
const ANeuralNetworksOperandType* type) { return Value; };
}
void StubAddOperandWith(int(stub)(ANeuralNetworksModel* model,
const ANeuralNetworksOperandType* type)) {
nnapi_->ANeuralNetworksModel_addOperand = stub;
}
template <int Value>
void SetOperandValueReturns() {
nnapi_->ANeuralNetworksModel_setOperandValue =
[](ANeuralNetworksModel* model, int32_t index, const void* buffer,
size_t length) { return Value; };
}
template <int Value>
void AddOperationReturns() {
nnapi_->ANeuralNetworksModel_addOperation =
[](ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
const uint32_t* outputs) { return Value; };
}
void StubAddOperationWith(
int(stub)(ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
uint32_t inputCount, const uint32_t* inputs,
uint32_t outputCount, const uint32_t* outputs)) {
nnapi_->ANeuralNetworksModel_addOperation = stub;
}
template <int Value>
void IdentifyInputAndOutputsReturns() {
nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs =
[](ANeuralNetworksModel* model, uint32_t inputCount,
const uint32_t* inputs, uint32_t outputCount,
const uint32_t* outputs) { return Value; };
}
template <int Value>
void RelaxComputationFloatReturns() {
nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 =
[](ANeuralNetworksModel* model, bool allow) { return Value; };
}
template <int Value>
void ModelFinishReturns() {
nnapi_->ANeuralNetworksModel_finish = [](ANeuralNetworksModel* model) {
return Value;
};
}
template <int Value>
void MemoryCreateFromFdReturns() {
nnapi_->ANeuralNetworksMemory_createFromFd =
[](size_t size, int protect, int fd, size_t offset,
ANeuralNetworksMemory** memory) {
*memory = reinterpret_cast<ANeuralNetworksMemory*>(2);
return Value;
};
}
template <int Value>
void CompilationCreateReturns() {
nnapi_->ANeuralNetworksCompilation_create =
[](ANeuralNetworksModel* model,
ANeuralNetworksCompilation** compilation) {
*compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
return Value;
};
}
template <int Value>
void CompilationCreateForDevicesReturns() {
nnapi_->ANeuralNetworksCompilation_createForDevices =
[](ANeuralNetworksModel* model,
const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
ANeuralNetworksCompilation** compilation) {
*compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
return Value;
};
}
void StubCompilationCreateForDevicesWith(int(stub)(
ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
uint32_t numDevices, ANeuralNetworksCompilation** compilation)) {
nnapi_->ANeuralNetworksCompilation_createForDevices = stub;
}
template <int Value>
void CompilationFinishReturns() {
nnapi_->ANeuralNetworksCompilation_finish =
[](ANeuralNetworksCompilation* compilation) { return Value; };
}
template <int Value>
void ExecutionCreateReturns() {
nnapi_->ANeuralNetworksExecution_create =
[](ANeuralNetworksCompilation* compilation,
ANeuralNetworksExecution** execution) {
if (compilation == nullptr) return 1;
*execution = reinterpret_cast<ANeuralNetworksExecution*>(4);
return Value;
};
}
template <int Value>
void ExecutionSetInputFromMemoryReturns() {
nnapi_->ANeuralNetworksExecution_setInputFromMemory =
[](ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type,
const ANeuralNetworksMemory* memory, size_t offset,
size_t length) { return Value; };
}
template <int Value>
void ExecutionSetOutputFromMemoryReturns() {
nnapi_->ANeuralNetworksExecution_setOutputFromMemory =
[](ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type,
const ANeuralNetworksMemory* memory, size_t offset,
size_t length) { return Value; };
}
template <int Value>
void ExecutionComputeReturns() {
nnapi_->ANeuralNetworksExecution_compute =
[](ANeuralNetworksExecution* execution) { return Value; };
}
template <int Value>
void GetSupportedOperationsForDevicesReturns() {
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
[](const ANeuralNetworksModel* model,
const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
bool* supportedOps) { return Value; };
}
void StubGetSupportedOperationsForDevicesWith(
int(stub)(const ANeuralNetworksModel* model,
const ANeuralNetworksDevice* const* devices,
uint32_t numDevices, bool* supportedOps)) {
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = stub;
}
template <int Value>
void ExecutionStartComputeReturns() {
nnapi_->ANeuralNetworksExecution_startCompute =
[](ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
*event = reinterpret_cast<ANeuralNetworksEvent*>(1);
return Value;
};
}
template <int Value>
void EventWaitReturns() {
nnapi_->ANeuralNetworksEvent_wait = [](ANeuralNetworksEvent* event) {
return Value;
};
}
template <int Value>
void SetPriorityReturns() {
nnapi_->ANeuralNetworksCompilation_setPriority =
[](ANeuralNetworksCompilation* compilation, int priority) -> int {
return Value;
};
}
template <int Value>
void SetOperandSymmPerChannelQuantParamsReturns() {
nnapi_->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams =
[](ANeuralNetworksModel* model, int32_t index,
const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) {
return Value;
};
}
void SetAndroidSdkVersion(int version,
bool set_unsupported_ops_to_null = false);
const NnApi* GetNnApi() { return nnapi_; }
protected:
explicit NnApiHandler(NnApi* nnapi) : nnapi_(nnapi) { DCHECK(nnapi); }
NnApi* nnapi_;
static const char kNnapiReferenceDeviceName[];
static const int kNnapiReferenceDevice;
static const int kNnapiDevice;
static void SetDeviceName(const std::string& name);
private:
static char* nnapi_device_name_;
static int nnapi_device_feature_level_;
};
const NnApi* NnApiPassthroughInstance();
NnApiHandler* NnApiProxyInstance();
}
}
#endif
#include "tensorflow/lite/nnapi/nnapi_handler.h"
#include <cstdio>
#include <cstring>
#include <string>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
const char NnApiHandler::kNnapiReferenceDeviceName[] = "nnapi-reference";
const int NnApiHandler::kNnapiReferenceDevice = 1;
const int NnApiHandler::kNnapiDevice = 2;
char* NnApiHandler::nnapi_device_name_ = nullptr;
int NnApiHandler::nnapi_device_feature_level_;
const NnApi* NnApiPassthroughInstance() {
static const NnApi orig_nnapi_copy = *NnApiImplementation();
return &orig_nnapi_copy;
}
NnApiHandler* NnApiHandler::Instance() {
NnApiPassthroughInstance();
static NnApiHandler handler{const_cast<NnApi*>(NnApiImplementation())};
return &handler;
}
void NnApiHandler::Reset() {
*nnapi_ = *NnApiPassthroughInstance();
}
void NnApiHandler::SetAndroidSdkVersion(int version,
bool set_unsupported_ops_to_null) {
nnapi_->android_sdk_version = version;
nnapi_->nnapi_runtime_feature_level = version;
if (!set_unsupported_ops_to_null) {
return;
}
if (version < 29) {
nnapi_->ANeuralNetworks_getDeviceCount = nullptr;
nnapi_->ANeuralNetworks_getDevice = nullptr;
nnapi_->ANeuralNetworksDevice_getName = nullptr;
nnapi_->ANeuralNetworksDevice_getVersion = nullptr;
nnapi_->ANeuralNetworksDevice_getFeatureLevel = nullptr;
nnapi_->ANeuralNetworksDevice_getType = nullptr;
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = nullptr;
nnapi_->ANeuralNetworksCompilation_createForDevices = nullptr;
nnapi_->ANeuralNetworksCompilation_setCaching = nullptr;
nnapi_->ANeuralNetworksExecution_compute = nullptr;
nnapi_->ANeuralNetworksExecution_getOutputOperandRank = nullptr;
nnapi_->ANeuralNetworksExecution_getOutputOperandDimensions = nullptr;
nnapi_->ANeuralNetworksBurst_create = nullptr;
nnapi_->ANeuralNetworksBurst_free = nullptr;
nnapi_->ANeuralNetworksExecution_burstCompute = nullptr;
nnapi_->ANeuralNetworksMemory_createFromAHardwareBuffer = nullptr;
nnapi_->ANeuralNetworksExecution_setMeasureTiming = nullptr;
nnapi_->ANeuralNetworksExecution_getDuration = nullptr;
nnapi_->ANeuralNetworksDevice_getExtensionSupport = nullptr;
nnapi_->ANeuralNetworksModel_getExtensionOperandType = nullptr;
nnapi_->ANeuralNetworksModel_getExtensionOperationType = nullptr;
nnapi_->ANeuralNetworksModel_setOperandExtensionData = nullptr;
}
if (version < 28) {
nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 = nullptr;
}
}
void NnApiHandler::SetDeviceName(const std::string& name) {
delete[] nnapi_device_name_;
nnapi_device_name_ = new char[name.size() + 1];
std::strcpy(nnapi_device_name_, name.c_str());
}
void NnApiHandler::GetDeviceNameReturnsName(const std::string& name) {
NnApiHandler::SetDeviceName(name);
GetDeviceNameReturns<0>();
}
void NnApiHandler::SetNnapiSupportedDevice(const std::string& name,
int feature_level) {
NnApiHandler::SetDeviceName(name);
nnapi_device_feature_level_ = feature_level;
GetDeviceCountReturnsCount<2>();
nnapi_->ANeuralNetworks_getDevice =
[](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
if (devIndex > 1) {
return ANEURALNETWORKS_BAD_DATA;
}
if (devIndex == 1) {
*device =
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice);
} else {
*device = reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice);
}
return ANEURALNETWORKS_NO_ERROR;
};
nnapi_->ANeuralNetworksDevice_getName =
[](const ANeuralNetworksDevice* device, const char** name) -> int {
if (device ==
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice)) {
*name = NnApiHandler::nnapi_device_name_;
return ANEURALNETWORKS_NO_ERROR;
}
if (device == reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice)) {
*name = NnApiHandler::kNnapiReferenceDeviceName;
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_DATA;
};
nnapi_->ANeuralNetworksDevice_getFeatureLevel =
[](const ANeuralNetworksDevice* device, int64_t* featureLevel) -> int {
if (device ==
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice)) {
*featureLevel = NnApiHandler::nnapi_device_feature_level_;
return ANEURALNETWORKS_NO_ERROR;
}
if (device == reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice)) {
*featureLevel = 1000;
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_DATA;
};
}
}
} | #include "tensorflow/lite/nnapi/nnapi_handler.h"
#include <cstdint>
#include <cstdio>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
using testing::Eq;
using testing::Ne;
using testing::NotNull;
void ExpectEquals(const NnApi& left, const NnApi& right);
class NnApiHandlerTest : public ::testing::Test {
protected:
~NnApiHandlerTest() override { NnApiHandler::Instance()->Reset(); }
};
TEST_F(NnApiHandlerTest, ShouldAlterNnApiInstanceBehaviour) {
const NnApi* nnapi = NnApiImplementation();
const auto device_count_stub = [](uint32_t* device_count) -> int {
*device_count = 999;
return ANEURALNETWORKS_NO_ERROR;
};
NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_stub);
ASSERT_THAT(nnapi->ANeuralNetworks_getDeviceCount, NotNull());
uint32_t device_count = 0;
nnapi->ANeuralNetworks_getDeviceCount(&device_count);
EXPECT_THAT(device_count, Eq(999));
}
TEST_F(NnApiHandlerTest, ShouldRestoreNnApiToItsOriginalValueWithReset) {
NnApi nnapi_orig_copy = *NnApiImplementation();
auto device_count_override = [](uint32_t* device_count) -> int {
*device_count = 777;
return ANEURALNETWORKS_NO_ERROR;
};
NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_override);
EXPECT_THAT(nnapi_orig_copy.ANeuralNetworks_getDeviceCount,
Ne(NnApiImplementation()->ANeuralNetworks_getDeviceCount));
NnApiHandler::Instance()->Reset();
ExpectEquals(nnapi_orig_copy, *NnApiImplementation());
}
int (*device_count_ptr)(uint32_t*);
TEST_F(NnApiHandlerTest, ShouldSupportPassthroughCalls) {
const NnApi* nnapi = NnApiImplementation();
device_count_ptr = nnapi->ANeuralNetworks_getDeviceCount;
NnApiHandler::Instance()->StubGetDeviceCountWith(
[](uint32_t* device_count) -> int {
return NnApiPassthroughInstance()->ANeuralNetworks_getDeviceCount ==
device_count_ptr;
});
uint32_t device_count = 0;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount(&device_count), Eq(1));
}
TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI11) {
auto* handler = NnApiHandler::Instance();
handler->SetNnapiSupportedDevice("devvice", 1000);
handler->GetSupportedOperationsForDevicesReturns<1>();
handler->CompilationCreateForDevicesReturns<1>();
handler->ExecutionComputeReturns<1>();
handler->MemoryCreateFromFdReturns<1>();
handler->SetAndroidSdkVersion(28, true);
const NnApi* nnapi = NnApiImplementation();
using ::testing::IsNull;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
}
TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI10) {
auto* handler = NnApiHandler::Instance();
handler->SetNnapiSupportedDevice("devvice", 1000);
handler->GetSupportedOperationsForDevicesReturns<1>();
handler->CompilationCreateForDevicesReturns<1>();
handler->ExecutionComputeReturns<1>();
handler->MemoryCreateFromFdReturns<1>();
handler->SetAndroidSdkVersion(27, true);
const NnApi* nnapi = NnApiImplementation();
using ::testing::IsNull;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
IsNull());
}
void ExpectEquals(const NnApi& left, const NnApi& right) {
#define EXPECT_NNAPI_MEMBER_EQ(name) EXPECT_EQ(left.name, right.name)
EXPECT_NNAPI_MEMBER_EQ(nnapi_exists);
EXPECT_NNAPI_MEMBER_EQ(android_sdk_version);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromFd);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_finish);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperand);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValue);
EXPECT_NNAPI_MEMBER_EQ(
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValueFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperation);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_identifyInputsAndOutputs);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setPreference);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_finish);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInput);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInputFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutput);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutputFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_startCompute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_wait);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_free);
EXPECT_NNAPI_MEMBER_EQ(ASharedMemory_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDeviceCount);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDevice);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getName);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getVersion);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getFeatureLevel);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getType);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_createForDevices);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setCaching);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_compute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandRank);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandDimensions);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_burstCompute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setMeasureTiming);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getDuration);
#undef EXPECT_NNAPI_MEMBER_EQ
}
}
} |
826 | cpp | tensorflow/tensorflow | c_api_opaque_internal | tensorflow/lite/c/c_api_opaque_internal.cc | tensorflow/lite/c/c_api_opaque_internal_test.cc | #ifndef TENSORFLOW_LITE_C_C_API_OPAQUE_INTERNAL_H_
#define TENSORFLOW_LITE_C_C_API_OPAQUE_INTERNAL_H_
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace internal {
class CommonOpaqueConversionUtil {
public:
CommonOpaqueConversionUtil() = delete;
static TfLiteOperator* ObtainOperator(TfLiteContext* context,
const TfLiteRegistration* registration,
int node_index);
private:
static TfLiteOperator* CachedObtainOperator(
::tflite::internal::OperatorsCache* registration_externals_cache,
const TfLiteRegistration* registration, int node_index);
};
}
}
#endif
#include "tensorflow/lite/c/c_api_opaque_internal.h"
#include <memory>
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/c/operator.h"
#include "tensorflow/lite/core/subgraph.h"
namespace tflite {
namespace internal {
namespace {
TfLiteOperator* MakeOperator(const TfLiteRegistration* registration,
int node_index) {
auto* registration_external = TfLiteOperatorCreateWithData(
static_cast<TfLiteBuiltinOperator>(registration->builtin_code),
registration->custom_name, registration->version,
nullptr);
registration_external->node_index = node_index;
return registration_external;
}
}
TfLiteOperator* CommonOpaqueConversionUtil::CachedObtainOperator(
OperatorsCache* registration_externals_cache,
const TfLiteRegistration* registration, int node_index) {
OpResolver::OpId op_id{registration->builtin_code, registration->custom_name,
registration->version};
auto it = registration_externals_cache->find(op_id);
if (it != registration_externals_cache->end()) {
return it->second.get();
}
auto* registration_external = MakeOperator(registration, node_index);
registration_externals_cache->insert(
it, std::make_pair(op_id, registration_external));
return registration_external;
}
TfLiteOperator* CommonOpaqueConversionUtil::ObtainOperator(
TfLiteContext* context, const TfLiteRegistration* registration,
int node_index) {
auto* subgraph = static_cast<tflite::Subgraph*>(context->impl_);
if (!subgraph->registration_externals_) {
subgraph->registration_externals_ = std::make_shared<OperatorsCache>();
}
return CachedObtainOperator(subgraph->registration_externals_.get(),
registration, node_index);
}
}
} | #include "tensorflow/lite/c/c_api_opaque_internal.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
using tflite::FlatBufferModel;
using tflite::Interpreter;
using tflite::InterpreterBuilder;
using tflite::internal::CommonOpaqueConversionUtil;
using tflite::ops::builtin::BuiltinOpResolver;
TEST(ObtainRegistrationFromContext, ProducesValidResult) {
BuiltinOpResolver op_resolver;
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<FlatBufferModel> model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
InterpreterBuilder builder(*model, op_resolver);
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
TfLiteContext* context = interpreter->primary_subgraph().context();
const TfLiteRegistration* registration = tflite::ops::builtin::Register_ADD();
TfLiteOperator* registration_external =
CommonOpaqueConversionUtil::ObtainOperator(context, registration, 42);
ASSERT_EQ(registration_external->builtin_code, kTfLiteBuiltinAdd);
ASSERT_EQ(registration_external->version, registration->version);
ASSERT_EQ(registration_external->custom_name, registration->custom_name);
ASSERT_EQ(registration_external->node_index, 42);
}
TEST(ObtainRegistrationFromContext, CachingWorks) {
BuiltinOpResolver op_resolver;
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<FlatBufferModel> model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
InterpreterBuilder builder(*model, op_resolver);
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
TfLiteContext* context = interpreter->primary_subgraph().context();
const TfLiteRegistration* registration = tflite::ops::builtin::Register_ADD();
TfLiteOperator* registration_external1 =
CommonOpaqueConversionUtil::ObtainOperator(context, registration, 0);
TfLiteOperator* registration_external2 =
CommonOpaqueConversionUtil::ObtainOperator(context, registration, 1);
ASSERT_EQ(registration_external1, registration_external2);
} |
827 | cpp | tensorflow/tensorflow | test_util | third_party/xla/xla/python/ifrt/test_util.cc | tensorflow/lite/kernels/shim/test_util_test.cc | #include "tsl/lib/core/status_test_util.h"
#ifndef XLA_PYTHON_IFRT_TEST_UTIL_H_
#define XLA_PYTHON_IFRT_TEST_UTIL_H_
#include <functional>
#include <memory>
#include <optional>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace test_util {
void RegisterClientFactory(
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory);
bool IsClientFactoryRegistered();
absl::StatusOr<std::shared_ptr<Client>> GetClient();
void SetTestFilterIfNotUserSpecified(absl::string_view custom_filter);
template <typename ElementT>
void AssertPerShardData(
tsl::RCReference<Array> actual, DType expected_dtype,
Shape expected_per_shard_shape,
absl::Span<const absl::Span<const ElementT>> expected_per_shard_data,
DeviceList expected_device_list) {
ASSERT_EQ(actual->dtype(), expected_dtype);
EXPECT_THAT(GetDeviceIds(actual->sharding().devices()),
testing::ElementsAreArray(GetDeviceIds(expected_device_list)));
TF_ASSERT_OK_AND_ASSIGN(auto actual_per_shard_arrays,
actual->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(actual_per_shard_arrays.size(), expected_per_shard_data.size());
for (int i = 0; i < actual_per_shard_arrays.size(); ++i) {
SCOPED_TRACE(absl::StrCat("Shard ", i));
tsl::RCReference<Array> array = actual_per_shard_arrays[i];
ASSERT_EQ(array->shape(), expected_per_shard_shape);
std::vector<ElementT> actual_data(expected_per_shard_shape.num_elements());
TF_ASSERT_OK(array
->CopyToHostBuffer(actual_data.data(),
std::nullopt,
ArrayCopySemantics::kAlwaysCopy)
.Await());
EXPECT_THAT(actual_data,
testing::ElementsAreArray(expected_per_shard_data[i]));
}
}
absl::StatusOr<DeviceList> GetDevices(Client* client,
absl::Span<const int> device_indices);
}
}
}
#endif
#include "xla/python/ifrt/test_util.h"
#include <functional>
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
namespace xla {
namespace ifrt {
namespace test_util {
namespace {
class ClientFactory {
public:
void Register(
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory) {
absl::MutexLock lock(&mu_);
    CHECK(!factory_) << "Client factory has already been registered.";
factory_ = std::move(factory);
}
std::function<absl::StatusOr<std::shared_ptr<Client>>()> Get() const {
absl::MutexLock lock(&mu_);
return factory_;
}
private:
mutable absl::Mutex mu_;
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory_
ABSL_GUARDED_BY(mu_);
};
ClientFactory& GetGlobalClientFactory() {
static auto* const factory = new ClientFactory;
return *factory;
}
}
void RegisterClientFactory(
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory) {
GetGlobalClientFactory().Register(std::move(factory));
}
absl::StatusOr<std::shared_ptr<Client>> GetClient() {
auto factory = GetGlobalClientFactory().Get();
CHECK(factory) << "Client factory has not been registered.";
return factory();
}
void SetTestFilterIfNotUserSpecified(absl::string_view custom_filter) {
static constexpr absl::string_view kDefaultTestFilter = "*";
#ifdef GTEST_FLAG_SET
if (GTEST_FLAG_GET(filter) == kDefaultTestFilter) {
GTEST_FLAG_SET(filter, custom_filter);
}
#else
if (testing::GTEST_FLAG(filter) == kDefaultTestFilter) {
testing::GTEST_FLAG(filter) = custom_filter;
}
#endif
}
absl::StatusOr<DeviceList> GetDevices(Client* client,
absl::Span<const int> device_indices) {
DeviceList::Devices devices;
devices.reserve(device_indices.size());
for (int device_index : device_indices) {
if (device_index < 0 || device_index >= client->devices().size()) {
return absl::InvalidArgumentError(
absl::StrCat("Out of range device index: ", device_index));
}
devices.push_back(client->devices()[device_index]);
}
return DeviceList(std::move(devices));
}
}
}
} | #include "tensorflow/core/data/service/test_util.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace testing {
namespace {
using ::tensorflow::testing::IsOkAndHolds;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
template <class T>
StatusOr<std::vector<T>> GetIteratorOutput(standalone::Iterator& iterator) {
std::vector<T> result;
for (bool end_of_sequence = false; !end_of_sequence;) {
std::vector<tensorflow::Tensor> tensors;
TF_RETURN_IF_ERROR(iterator.GetNext(&tensors, &end_of_sequence));
if (end_of_sequence) {
break;
}
if (tensors.size() != 1) {
return errors::Internal("GetNext Tensor size is not 1.");
}
result.push_back(tensors[0].unaligned_flat<T>().data()[0]);
}
return result;
}
TEST(TestUtilTest, RangeDataset) {
const auto dataset_def = RangeDataset(10);
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<int64_t>(*iterator),
IsOkAndHolds(ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
TEST(TestUtilTest, RangeSquareDataset) {
const auto dataset_def = RangeSquareDataset(10);
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<int64_t>(*iterator),
IsOkAndHolds(ElementsAre(0, 1, 4, 9, 16, 25, 36, 49, 64, 81)));
}
TEST(TestUtilTest, InfiniteDataset) {
const auto dataset_def = InfiniteDataset();
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
for (int64_t i = 0; i < 10; ++i) {
std::vector<tensorflow::Tensor> outputs;
bool end_of_sequence;
TF_ASSERT_OK(iterator->GetNext(&outputs, &end_of_sequence));
test::ExpectEqual(outputs[0], Tensor(i));
}
}
TEST(TestUtilTest, EmptyDataset) {
const auto dataset_def = RangeSquareDataset(0);
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<int64_t>(*iterator), IsOkAndHolds(IsEmpty()));
}
TEST(TestUtilTest, InterleaveTextline) {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
TF_ASSERT_OK_AND_ASSIGN(const DatasetDef dataset_def,
InterleaveTextlineDataset(filenames, {"0", "1"}));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator),
IsOkAndHolds(ElementsAre("0", "1")));
}
TEST(TestUtilTest, InterleaveTextlineWithNewLines) {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
TF_ASSERT_OK_AND_ASSIGN(
const DatasetDef dataset_def,
InterleaveTextlineDataset(filenames, {"0\n2\n4\n6\n8", "1\n3\n5\n7\n9"}));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator),
IsOkAndHolds(ElementsAre("0", "1", "2", "3", "4", "5", "6", "7",
"8", "9")));
}
TEST(TestUtilTest, InterleaveTextlineEmptyFiles) {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
TF_ASSERT_OK_AND_ASSIGN(const DatasetDef dataset_def,
InterleaveTextlineDataset(filenames, {"", ""}));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator), IsOkAndHolds(IsEmpty()));
}
TEST(TestUtilTest, GetTestDataset) {
TF_ASSERT_OK_AND_ASSIGN(const DatasetDef dataset_def,
GetTestDataset("choose_from_datasets"));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator),
IsOkAndHolds(ElementsAre("a", "b", "c", "a", "b", "c", "a", "b",
"c", "a", "b", "c", "a", "b", "c")));
}
}
}
}
} |
828 | cpp | tensorflow/tensorflow | generator | tensorflow/lite/schema/builtin_ops_list/generator.cc | tensorflow/lite/schema/builtin_ops_list/generator_test.cc | #ifndef TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_
#define TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_
#include <iostream>
#include <string>
namespace tflite {
namespace builtin_ops_header {
bool IsValidInputEnumName(const std::string& name);
std::string ConstantizeVariableName(const std::string& name);
bool GenerateHeader(std::ostream& os);
}
}
#endif
#include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <cctype>
#include <string>
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace builtin_ops_header {
namespace {
const char* kFileHeader =
R"(
#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_
#define TENSORFLOW_LITE_BUILTIN_OPS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
)";
const char* kFileFooter =
R"(} TfLiteBuiltinOperator;
#ifdef __cplusplus
}
#endif
#endif
)";
}
bool IsValidInputEnumName(const std::string& name) {
const char* begin = name.c_str();
const char* ch = begin;
while (*ch != '\0') {
if (ch != begin) {
if (*ch != '_') {
return false;
}
++ch;
}
bool empty = true;
while (isupper(*ch) || isdigit(*ch)) {
empty = false;
++ch;
}
if (empty) {
return false;
}
}
return true;
}
std::string ConstantizeVariableName(const std::string& name) {
std::string result = "kTfLiteBuiltin";
bool uppercase = true;
for (char input_char : name) {
if (input_char == '_') {
uppercase = true;
} else if (uppercase) {
result += toupper(input_char);
uppercase = false;
} else {
result += tolower(input_char);
}
}
return result;
}
bool GenerateHeader(std::ostream& os) {
auto enum_names = tflite::EnumNamesBuiltinOperator();
for (auto enum_value : EnumValuesBuiltinOperator()) {
auto enum_name = enum_names[enum_value];
if (!IsValidInputEnumName(enum_name)) {
std::cerr << "Invalid input enum name: " << enum_name << std::endl;
return false;
}
}
os << kFileHeader;
for (auto enum_value : EnumValuesBuiltinOperator()) {
auto enum_name = enum_names[enum_value];
os << " ";
os << ConstantizeVariableName(enum_name);
os << " = ";
os << enum_value;
os << ",\n";
}
os << kFileFooter;
return true;
}
}
} | #include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <fstream>
#include <gtest/gtest.h>
namespace {
using tflite::builtin_ops_header::ConstantizeVariableName;
using tflite::builtin_ops_header::IsValidInputEnumName;
TEST(TestIsValidInputEnumName, TestWithValidInputNames) {
EXPECT_TRUE(IsValidInputEnumName("ADD"));
EXPECT_TRUE(IsValidInputEnumName("CONV_2D"));
EXPECT_TRUE(IsValidInputEnumName("L2_POOL_2D"));
}
TEST(TestIsValidInputEnumName, TestWithLeadingUnderscore) {
EXPECT_FALSE(IsValidInputEnumName("_ADD"));
EXPECT_FALSE(IsValidInputEnumName("_CONV_2D"));
}
TEST(TestIsValidInputEnumName, TestWithLowerCase) {
EXPECT_FALSE(IsValidInputEnumName("_AdD"));
EXPECT_FALSE(IsValidInputEnumName("_COnV_2D"));
}
TEST(TestIsValidInputEnumName, TestWithOtherCharacters) {
EXPECT_FALSE(IsValidInputEnumName("_AdD!2D"));
EXPECT_FALSE(IsValidInputEnumName("_COnV?2D"));
}
TEST(TestIsValidInputEnumName, TestWithDoubleUnderscores) {
EXPECT_FALSE(IsValidInputEnumName("ADD__2D"));
EXPECT_FALSE(IsValidInputEnumName("CONV__2D"));
}
TEST(TestConstantizeVariableName, TestWithValidInputNames) {
EXPECT_EQ(ConstantizeVariableName("ADD"), "kTfLiteBuiltinAdd");
EXPECT_EQ(ConstantizeVariableName("CONV_2D"), "kTfLiteBuiltinConv2d");
EXPECT_EQ(ConstantizeVariableName("L2_POOL_2D"), "kTfLiteBuiltinL2Pool2d");
}
} |
829 | cpp | tensorflow/tensorflow | backend_async_kernel_interface | tensorflow/lite/async/backend_async_kernel_interface.cc | tensorflow/lite/async/backend_async_kernel_interface_test.cc | #ifndef TENSORFLOW_LITE_CORE_ASYNC_BACKEND_ASYNC_KERNEL_INTERFACE_H_
#define TENSORFLOW_LITE_CORE_ASYNC_BACKEND_ASYNC_KERNEL_INTERFACE_H_
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#endif
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <vector>
#include "tensorflow/lite/async/c/async_kernel.h"
#include "tensorflow/lite/async/c/types.h"
namespace tflite {
namespace delegates {
namespace internal {
TfLiteStatus RegisterBuffer(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->RegisterBuffer(context, io_type, buffer, attrs, handle);
}
TfLiteStatus RegisterBufferSlice(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->RegisterBufferSlice(context, buffer, attrs, handle);
}
TfLiteStatus UnregisterBuffer(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context,
const TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->UnregisterBuffer(context, handle);
}
void SupportedBufferTypes(const TfLiteAsyncKernel* async_kernel,
TfLiteIoType io_type, const char* const** types,
size_t* n_types) {
if (types == nullptr || n_types == nullptr) return;
const auto& buf_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SupportedBufferTypes(io_type);
*types = buf_types.data();
*n_types = buf_types.size();
}
void SupportedSynchronizations(const TfLiteAsyncKernel* async_kernel,
TfLiteIoType io_type, const char* const** types,
size_t* n_types) {
if (types == nullptr || n_types == nullptr) return;
const auto& sync_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SupportedSynchronizations(io_type);
*types = sync_types.data();
*n_types = sync_types.size();
}
bool ReconcileRestrictions(const TfLiteAsyncKernel* async_kernel,
const TfLiteOpaqueContext* context,
const TfLiteOpaqueNode* node, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged,
TfLiteAttributeMap* conflict) {
return reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->ReconcileRestrictions(context, node, tensor_index,
user_provided_attributes, merged, conflict);
}
TfLiteStatus SetAttributes(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteOpaqueNode* node,
int tensor_index, const TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SetAttributes(context, node, tensor_index, attrs);
}
TfLiteStatus Prepare(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Prepare(context, node);
}
TfLiteStatus Eval(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node, TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Eval(context, node, task);
}
TfLiteStatus Wait(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Wait(context, task);
}
TfLiteStatus Finish(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Finish(context, task);
}
TfLiteStatus SetBufferAttributes(TfLiteAsyncKernel* async_kernel,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SetBufferAttributes(buffer, attrs);
}
TfLiteStatus GetBufferAttributes(TfLiteAsyncKernel* async_kernel,
const TfLiteBackendBuffer* buffer,
TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->GetBufferAttributes(buffer, attrs);
}
}
BackendAsyncKernelInterface::BackendAsyncKernelInterface() {
kernel_ = TfLiteAsyncKernelCreate(this);
TfLiteAsyncKernelSetRegisterBuffer(kernel_, internal::RegisterBuffer);
TfLiteAsyncKernelSetRegisterBufferSlice(kernel_,
internal::RegisterBufferSlice);
TfLiteAsyncKernelSetUnregisterBuffer(kernel_, internal::UnregisterBuffer);
TfLiteAsyncKernelSetSupportedBufferTypes(kernel_,
internal::SupportedBufferTypes);
TfLiteAsyncKernelSetSupportedSynchronizations(
kernel_, internal::SupportedSynchronizations);
TfLiteAsyncKernelSetReconcileRestrictions(kernel_,
internal::ReconcileRestrictions);
TfLiteAsyncKernelSetSetAttributes(kernel_, internal::SetAttributes);
TfLiteAsyncKernelSetSetBufferAttributes(kernel_,
internal::SetBufferAttributes);
TfLiteAsyncKernelSetGetBufferAttributes(kernel_,
internal::GetBufferAttributes);
TfLiteAsyncKernelSetPrepare(kernel_, internal::Prepare);
TfLiteAsyncKernelSetEval(kernel_, internal::Eval);
TfLiteAsyncKernelSetWait(kernel_, internal::Wait);
TfLiteAsyncKernelSetFinish(kernel_, internal::Finish);
}
}
} | #include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/async/c/types.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
using ::testing::_;
namespace tflite::delegates {
namespace {
TEST(BackendAsyncKernelInterfaceTest, BasicTest) {
testing::StrictMock<async::testing::MockAsyncKernel> kernel;
EXPECT_CALL(kernel, RegisterBuffer(_, _, _, _, _));
EXPECT_CALL(kernel, RegisterBufferSlice(_, _, _, _));
EXPECT_CALL(kernel, UnregisterBuffer(_, _));
EXPECT_CALL(kernel, ReconcileRestrictions(_, _, _, _, _, _));
EXPECT_CALL(kernel, SetAttributes(_, _, _, _));
EXPECT_CALL(kernel, SetBufferAttributes(_, _));
EXPECT_CALL(kernel, GetBufferAttributes(_, _));
EXPECT_CALL(kernel, Prepare(_, _));
EXPECT_CALL(kernel, Eval(_, _, _));
EXPECT_CALL(kernel, Wait(_, _));
EXPECT_CALL(kernel, Finish(_, _));
auto* tflite_kernel = kernel.kernel();
tflite_kernel->register_buffer(tflite_kernel, nullptr, kTfLiteIoTypeInput,
nullptr, nullptr, 0);
tflite_kernel->register_buffer_slice(tflite_kernel, nullptr, 0, nullptr, 0);
tflite_kernel->unregister_buffer(tflite_kernel, nullptr, 0);
tflite_kernel->reconcile_restrictions(tflite_kernel, nullptr, nullptr, 0,
nullptr, nullptr, nullptr);
tflite_kernel->set_attributes(tflite_kernel, nullptr, nullptr, 0, nullptr);
tflite_kernel->set_buffer_attributes(tflite_kernel, nullptr, nullptr);
tflite_kernel->get_buffer_attributes(tflite_kernel, nullptr, nullptr);
tflite_kernel->prepare(tflite_kernel, nullptr, nullptr);
tflite_kernel->eval(tflite_kernel, nullptr, nullptr, nullptr);
tflite_kernel->wait(tflite_kernel, nullptr, nullptr);
tflite_kernel->finish(tflite_kernel, nullptr, nullptr);
}
}
} |
830 | cpp | tensorflow/tensorflow | command_line_flags | third_party/xla/xla/tsl/util/command_line_flags.cc | tensorflow/lite/tools/command_line_flags_test.cc | #ifndef XLA_TSL_UTIL_COMMAND_LINE_FLAGS_H_
#define XLA_TSL_UTIL_COMMAND_LINE_FLAGS_H_
#include <functional>
#include <string>
#include <vector>
#include "tsl/platform/types.h"
namespace tsl {
class Flag {
public:
Flag(const char* name, int32* dst, const string& usage_text,
bool* dst_updated = nullptr);
Flag(const char* name, int64_t* dst, const string& usage_text,
bool* dst_updated = nullptr);
Flag(const char* name, bool* dst, const string& usage_text,
bool* dst_updated = nullptr);
Flag(const char* name, string* dst, const string& usage_text,
bool* dst_updated = nullptr);
Flag(const char* name, float* dst, const string& usage_text,
bool* dst_updated = nullptr);
Flag(const char* name, std::function<bool(int32_t)> int32_hook,
int32_t default_value_for_display, const string& usage_text);
Flag(const char* name, std::function<bool(int64_t)> int64_hook,
int64_t default_value_for_display, const string& usage_text);
Flag(const char* name, std::function<bool(float)> float_hook,
float default_value_for_display, const string& usage_text);
Flag(const char* name, std::function<bool(bool)> bool_hook,
bool default_value_for_display, const string& usage_text);
Flag(const char* name, std::function<bool(string)> string_hook,
string default_value_for_display, const string& usage_text);
private:
friend class Flags;
bool Parse(string arg, bool* value_parsing_ok) const;
string name_;
enum {
TYPE_INT32,
TYPE_INT64,
TYPE_BOOL,
TYPE_STRING,
TYPE_FLOAT,
} type_;
std::function<bool(int32_t)> int32_hook_;
int32 int32_default_for_display_;
std::function<bool(int64_t)> int64_hook_;
int64_t int64_default_for_display_;
std::function<bool(float)> float_hook_;
float float_default_for_display_;
std::function<bool(bool)> bool_hook_;
bool bool_default_for_display_;
std::function<bool(string)> string_hook_;
string string_default_for_display_;
string usage_text_;
};
class Flags {
public:
static bool Parse(int* argc, char** argv, const std::vector<Flag>& flag_list);
static bool Parse(std::vector<std::string>& flags,
const std::vector<Flag>& flag_list);
static string Usage(const string& cmdline,
const std::vector<Flag>& flag_list);
};
}
#endif
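// Illustrative usage sketch for the Flag/Flags API declared above (not part of
// the original file; flag names and defaults are made up for demonstration):
//   int32_t batch_size = 1;
//   bool verbose = false;
//   std::vector<tsl::Flag> flag_list = {
//       tsl::Flag("batch_size", &batch_size, "number of inputs per step"),
//       tsl::Flag("verbose", &verbose, "enable verbose logging")};
//   if (!tsl::Flags::Parse(&argc, argv, flag_list)) {
//     std::cerr << tsl::Flags::Usage(argv[0], flag_list);
//   }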
#include "xla/tsl/util/command_line_flags.h"
#include <algorithm>
#include <cinttypes>
#include <cstring>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/stringprintf.h"
namespace tsl {
namespace {
bool ParseStringFlag(StringPiece arg, StringPiece flag,
const std::function<bool(string)>& hook,
bool* value_parsing_ok) {
*value_parsing_ok = true;
if (absl::ConsumePrefix(&arg, "--") && absl::ConsumePrefix(&arg, flag) &&
absl::ConsumePrefix(&arg, "=")) {
*value_parsing_ok = hook(string(arg));
return true;
}
return false;
}
bool ParseInt32Flag(StringPiece arg, StringPiece flag,
const std::function<bool(int32_t)>& hook,
bool* value_parsing_ok) {
*value_parsing_ok = true;
if (absl::ConsumePrefix(&arg, "--") && absl::ConsumePrefix(&arg, flag) &&
absl::ConsumePrefix(&arg, "=")) {
char extra;
int32_t parsed_int32;
if (sscanf(arg.data(), "%d%c", &parsed_int32, &extra) != 1) {
LOG(ERROR) << "Couldn't interpret value " << arg << " for flag " << flag
<< ".";
*value_parsing_ok = false;
} else {
*value_parsing_ok = hook(parsed_int32);
}
return true;
}
return false;
}
bool ParseInt64Flag(StringPiece arg, StringPiece flag,
const std::function<bool(int64_t)>& hook,
bool* value_parsing_ok) {
*value_parsing_ok = true;
if (absl::ConsumePrefix(&arg, "--") && absl::ConsumePrefix(&arg, flag) &&
absl::ConsumePrefix(&arg, "=")) {
char extra;
int64_t parsed_int64;
if (sscanf(arg.data(), "%" SCNd64 "%c", &parsed_int64, &extra) != 1) {
LOG(ERROR) << "Couldn't interpret value " << arg << " for flag " << flag
<< ".";
*value_parsing_ok = false;
} else {
*value_parsing_ok = hook(parsed_int64);
}
return true;
}
return false;
}
bool ParseBoolFlag(StringPiece arg, StringPiece flag,
const std::function<bool(bool)>& hook,
bool* value_parsing_ok) {
*value_parsing_ok = true;
if (absl::ConsumePrefix(&arg, "--") && absl::ConsumePrefix(&arg, flag)) {
if (arg.empty()) {
*value_parsing_ok = hook(true);
return true;
}
if (!absl::ConsumePrefix(&arg, "=")) {
return false;
}
if (absl::EqualsIgnoreCase(arg, "true") || arg == "1") {
*value_parsing_ok = hook(true);
return true;
} else if (absl::EqualsIgnoreCase(arg, "false") || arg == "0") {
*value_parsing_ok = hook(false);
return true;
} else {
LOG(ERROR) << "Couldn't interpret value " << arg << " for flag " << flag
<< ".";
*value_parsing_ok = false;
return true;
}
}
return false;
}
bool ParseFloatFlag(StringPiece arg, StringPiece flag,
const std::function<bool(float)>& hook,
bool* value_parsing_ok) {
*value_parsing_ok = true;
if (absl::ConsumePrefix(&arg, "--") && absl::ConsumePrefix(&arg, flag) &&
absl::ConsumePrefix(&arg, "=")) {
char extra;
float parsed_float;
if (sscanf(arg.data(), "%f%c", &parsed_float, &extra) != 1) {
LOG(ERROR) << "Couldn't interpret value " << arg << " for flag " << flag
<< ".";
*value_parsing_ok = false;
} else {
*value_parsing_ok = hook(parsed_float);
}
return true;
}
return false;
}
}
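// The pointer-taking Flag constructors below wrap the destination in a hook
// that stores the parsed value and, when a dst_updated pointer is supplied,
// marks it true; the current value of *dst is captured as the default shown
// by Flags::Usage().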
Flag::Flag(const char* name, int32_t* dst, const string& usage_text,
bool* dst_updated)
: name_(name),
type_(TYPE_INT32),
int32_hook_([dst, dst_updated](int32_t value) {
*dst = value;
if (dst_updated) *dst_updated = true;
return true;
}),
int32_default_for_display_(*dst),
usage_text_(usage_text) {}
Flag::Flag(const char* name, int64_t* dst, const string& usage_text,
bool* dst_updated)
: name_(name),
type_(TYPE_INT64),
int64_hook_([dst, dst_updated](int64_t value) {
*dst = value;
if (dst_updated) *dst_updated = true;
return true;
}),
int64_default_for_display_(*dst),
usage_text_(usage_text) {}
Flag::Flag(const char* name, float* dst, const string& usage_text,
bool* dst_updated)
: name_(name),
type_(TYPE_FLOAT),
float_hook_([dst, dst_updated](float value) {
*dst = value;
if (dst_updated) *dst_updated = true;
return true;
}),
float_default_for_display_(*dst),
usage_text_(usage_text) {}
Flag::Flag(const char* name, bool* dst, const string& usage_text,
bool* dst_updated)
: name_(name),
type_(TYPE_BOOL),
bool_hook_([dst, dst_updated](bool value) {
*dst = value;
if (dst_updated) *dst_updated = true;
return true;
}),
bool_default_for_display_(*dst),
usage_text_(usage_text) {}
Flag::Flag(const char* name, string* dst, const string& usage_text,
bool* dst_updated)
: name_(name),
type_(TYPE_STRING),
string_hook_([dst, dst_updated](string value) {
*dst = std::move(value);
if (dst_updated) *dst_updated = true;
return true;
}),
string_default_for_display_(*dst),
usage_text_(usage_text) {}
Flag::Flag(const char* name, std::function<bool(int32_t)> int32_hook,
int32_t default_value_for_display, const string& usage_text)
: name_(name),
type_(TYPE_INT32),
int32_hook_(std::move(int32_hook)),
int32_default_for_display_(default_value_for_display),
usage_text_(usage_text) {}
Flag::Flag(const char* name, std::function<bool(int64_t)> int64_hook,
int64_t default_value_for_display, const string& usage_text)
: name_(name),
type_(TYPE_INT64),
int64_hook_(std::move(int64_hook)),
int64_default_for_display_(default_value_for_display),
usage_text_(usage_text) {}
Flag::Flag(const char* name, std::function<bool(float)> float_hook,
float default_value_for_display, const string& usage_text)
: name_(name),
type_(TYPE_FLOAT),
float_hook_(std::move(float_hook)),
float_default_for_display_(default_value_for_display),
usage_text_(usage_text) {}
Flag::Flag(const char* name, std::function<bool(bool)> bool_hook,
bool default_value_for_display, const string& usage_text)
: name_(name),
type_(TYPE_BOOL),
bool_hook_(std::move(bool_hook)),
bool_default_for_display_(default_value_for_display),
usage_text_(usage_text) {}
Flag::Flag(const char* name, std::function<bool(string)> string_hook,
string default_value_for_display, const string& usage_text)
: name_(name),
type_(TYPE_STRING),
string_hook_(std::move(string_hook)),
string_default_for_display_(std::move(default_value_for_display)),
usage_text_(usage_text) {}
bool Flag::Parse(string arg, bool* value_parsing_ok) const {
bool result = false;
if (type_ == TYPE_INT32) {
result = ParseInt32Flag(arg, name_, int32_hook_, value_parsing_ok);
} else if (type_ == TYPE_INT64) {
result = ParseInt64Flag(arg, name_, int64_hook_, value_parsing_ok);
} else if (type_ == TYPE_BOOL) {
result = ParseBoolFlag(arg, name_, bool_hook_, value_parsing_ok);
} else if (type_ == TYPE_STRING) {
result = ParseStringFlag(arg, name_, string_hook_, value_parsing_ok);
} else if (type_ == TYPE_FLOAT) {
result = ParseFloatFlag(arg, name_, float_hook_, value_parsing_ok);
}
return result;
}
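// Parses argv in place: arguments following a bare "--" and any unrecognized
// flags are compacted to the front of argv, and *argc is updated to count them
// plus argv[0]. Returns false if any recognized flag failed to parse or if the
// first remaining argument is --help.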
bool Flags::Parse(int* argc, char** argv,
const std::vector<Flag>& flag_list) {
bool result = true;
std::vector<char*> unknown_flags;
for (int i = 1; i < *argc; ++i) {
if (string(argv[i]) == "--") {
while (i < *argc) {
unknown_flags.push_back(argv[i]);
++i;
}
break;
}
bool was_found = false;
for (const Flag& flag : flag_list) {
bool value_parsing_ok;
was_found = flag.Parse(argv[i], &value_parsing_ok);
if (!value_parsing_ok) {
result = false;
}
if (was_found) {
break;
}
}
if (!was_found) {
unknown_flags.push_back(argv[i]);
}
}
int dst = 1;
for (char* f : unknown_flags) {
argv[dst++] = f;
}
argv[dst++] = nullptr;
*argc = unknown_flags.size() + 1;
return result && (*argc < 2 || strcmp(argv[1], "--help") != 0);
}
bool Flags::Parse(std::vector<std::string>& flags,
const std::vector<Flag>& flag_list) {
bool result = true;
std::vector<std::string> unknown_flags;
for (auto& flag : flags) {
for (const Flag& flag_object : flag_list) {
bool value_parsing_ok;
bool was_found = flag_object.Parse(flag, &value_parsing_ok);
if (!value_parsing_ok) {
result = false;
}
if (was_found) {
flag.clear();
break;
}
}
}
auto IsEmpty = [](const std::string& flag) { return flag.empty(); };
flags.erase(std::remove_if(flags.begin(), flags.end(), IsEmpty), flags.end());
return result;
}
string Flags::Usage(const string& cmdline,
const std::vector<Flag>& flag_list) {
string usage_text;
if (!flag_list.empty()) {
strings::Appendf(&usage_text, "usage: %s\nFlags:\n", cmdline.c_str());
} else {
strings::Appendf(&usage_text, "usage: %s\n", cmdline.c_str());
}
for (const Flag& flag : flag_list) {
const char* type_name = "";
string flag_string;
if (flag.type_ == Flag::TYPE_INT32) {
type_name = "int32";
flag_string = strings::Printf("--%s=%d", flag.name_.c_str(),
flag.int32_default_for_display_);
} else if (flag.type_ == Flag::TYPE_INT64) {
type_name = "int64";
flag_string = strings::Printf(
"--%s=%lld", flag.name_.c_str(),
static_cast<long long>(flag.int64_default_for_display_));
} else if (flag.type_ == Flag::TYPE_BOOL) {
type_name = "bool";
flag_string =
strings::Printf("--%s=%s", flag.name_.c_str(),
flag.bool_default_for_display_ ? "true" : "false");
} else if (flag.type_ == Flag::TYPE_STRING) {
type_name = "string";
flag_string = strings::Printf("--%s=\"%s\"", flag.name_.c_str(),
flag.string_default_for_display_.c_str());
} else if (flag.type_ == Flag::TYPE_FLOAT) {
type_name = "float";
flag_string = strings::Printf("--%s=%f", flag.name_.c_str(),
flag.float_default_for_display_);
}
strings::Appendf(&usage_text, "\t%-33s\t%s\t%s\n", flag_string.c_str(),
type_name, flag.usage_text_.c_str());
}
return usage_text;
}
} | #include <ctype.h>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/command_line_flags.h"
namespace tensorflow {
namespace {
std::vector<char *> CharPointerVectorFromStrings(
const std::vector<string> &strings) {
std::vector<char *> result;
result.reserve(strings.size());
for (const string &string : strings) {
result.push_back(const_cast<char *>(string.c_str()));
}
return result;
}
}
TEST(CommandLineFlagsTest, BasicUsage) {
int some_int32_set_directly = 10;
int some_int32_set_via_hook = 20;
int64_t some_int64_set_directly = 21474836470;
int64_t some_int64_set_via_hook = 21474836479;
bool some_switch_set_directly = false;
bool some_switch_set_via_hook = true;
bool some_switch_set_capitalized = false;
bool some_switch_set_by_number = false;
string some_name_set_directly = "something_a";
string some_name_set_via_hook = "something_b";
float some_float_set_directly = -23.23f;
float some_float_set_via_hook = -25.23f;
std::vector<string> argv_strings = {"program_name",
"--some_int32_set_directly=20",
"--some_int32_set_via_hook=50",
"--some_int64_set_directly=214748364700",
"--some_int64_set_via_hook=214748364710",
"--some_switch_set_directly",
"--some_switch_set_via_hook=false",
"--some_switch_set_capitalized=True",
"--some_switch_set_by_number=1",
"--some_name_set_directly=somethingelse",
"--some_name_set_via_hook=anythingelse",
"--some_float_set_directly=42.0",
"--some_float_set_via_hook=43.0"};
int argc = argv_strings.size();
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok = Flags::Parse(
&argc, argv_array.data(),
{
Flag("some_int32_set_directly", &some_int32_set_directly,
"some int32 set directly"),
Flag(
"some_int32_set_via_hook",
[&](int32_t value) {
some_int32_set_via_hook = value;
return true;
},
some_int32_set_via_hook, "some int32 set via hook"),
Flag("some_int64_set_directly", &some_int64_set_directly,
"some int64 set directly"),
Flag(
"some_int64_set_via_hook",
[&](int64_t value) {
some_int64_set_via_hook = value;
return true;
},
some_int64_set_via_hook, "some int64 set via hook"),
Flag("some_switch_set_directly", &some_switch_set_directly,
"some switch set directly"),
Flag(
"some_switch_set_via_hook",
[&](bool value) {
some_switch_set_via_hook = value;
return true;
},
some_switch_set_via_hook, "some switch set via hook"),
Flag("some_switch_set_capitalized", &some_switch_set_capitalized,
"some switch set capitalized"),
Flag("some_switch_set_by_number", &some_switch_set_by_number,
"some switch set by number"),
Flag("some_name_set_directly", &some_name_set_directly,
"some name set directly"),
Flag(
"some_name_set_via_hook",
[&](string value) {
some_name_set_via_hook = std::move(value);
return true;
},
some_name_set_via_hook, "some name set via hook"),
Flag("some_float_set_directly", &some_float_set_directly,
"some float set directly"),
Flag(
"some_float_set_via_hook",
[&](float value) {
some_float_set_via_hook = value;
return true;
},
some_float_set_via_hook, "some float set via hook"),
});
EXPECT_EQ(true, parsed_ok);
EXPECT_EQ(20, some_int32_set_directly);
EXPECT_EQ(50, some_int32_set_via_hook);
EXPECT_EQ(214748364700, some_int64_set_directly);
EXPECT_EQ(214748364710, some_int64_set_via_hook);
EXPECT_EQ(true, some_switch_set_directly);
EXPECT_EQ(false, some_switch_set_via_hook);
EXPECT_EQ(true, some_switch_set_capitalized);
EXPECT_EQ(true, some_switch_set_by_number);
EXPECT_EQ("somethingelse", some_name_set_directly);
EXPECT_EQ("anythingelse", some_name_set_via_hook);
EXPECT_NEAR(42.0f, some_float_set_directly, 1e-5f);
EXPECT_NEAR(43.0f, some_float_set_via_hook, 1e-5f);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, BadIntValue) {
int some_int = 10;
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_int=notanumber"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok = Flags::Parse(&argc, argv_array.data(),
{Flag("some_int", &some_int, "some int")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(10, some_int);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, BadBoolValue) {
bool some_switch = false;
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_switch=notabool"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok =
Flags::Parse(&argc, argv_array.data(),
{Flag("some_switch", &some_switch, "some switch")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(false, some_switch);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, BadFloatValue) {
float some_float = -23.23f;
int argc = 2;
std::vector<string> argv_strings = {"program_name",
"--some_float=notanumber"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok =
Flags::Parse(&argc, argv_array.data(),
{Flag("some_float", &some_float, "some float")});
EXPECT_EQ(false, parsed_ok);
EXPECT_NEAR(-23.23f, some_float, 1e-5f);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, FailedInt32Hook) {
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_int32=200"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok =
Flags::Parse(&argc, argv_array.data(),
{Flag(
"some_int32", [](int32_t value) { return false; }, 30,
"some int32")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, FailedInt64Hook) {
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_int64=200"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok =
Flags::Parse(&argc, argv_array.data(),
{Flag(
"some_int64", [](int64_t value) { return false; }, 30,
"some int64")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, FailedFloatHook) {
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_float=200.0"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok =
Flags::Parse(&argc, argv_array.data(),
{Flag("some_float", [](float value) { return false; }, 30.0f,
"some float")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, FailedBoolHook) {
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_switch=true"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok =
Flags::Parse(&argc, argv_array.data(),
{Flag("some_switch", [](bool value) { return false; }, false,
"some switch")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, FailedStringHook) {
int argc = 2;
std::vector<string> argv_strings = {"program_name", "--some_name=true"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
bool parsed_ok = Flags::Parse(
&argc, argv_array.data(),
{Flag("some_name", [](string value) { return false; }, "", "some name")});
EXPECT_EQ(false, parsed_ok);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, RepeatedStringHook) {
int argc = 3;
std::vector<string> argv_strings = {"program_name", "--some_name=this",
"--some_name=that"};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
int call_count = 0;
bool parsed_ok = Flags::Parse(&argc, argv_array.data(),
{Flag("some_name",
[&call_count](string value) {
call_count++;
return true;
},
"", "some name")});
EXPECT_EQ(true, parsed_ok);
EXPECT_EQ(argc, 1);
EXPECT_EQ(call_count, 2);
}
static bool MatchWithAnyWhitespace(const string &str, const string &pat) {
bool matching = true;
int pat_i = 0;
for (int str_i = 0; str_i != str.size() && matching; str_i++) {
if (isspace(str[str_i])) {
matching = (pat_i != pat.size() && isspace(pat[pat_i]));
} else {
while (pat_i != pat.size() && isspace(pat[pat_i])) {
pat_i++;
}
matching = (pat_i != pat.size() && str[str_i] == pat[pat_i++]);
}
}
while (pat_i != pat.size() && isspace(pat[pat_i])) {
pat_i++;
}
return (matching && pat_i == pat.size());
}
TEST(CommandLineFlagsTest, UsageString) {
int some_int = 10;
int64_t some_int64 = 21474836470;
bool some_switch = false;
string some_name = "something";
const string tool_name = "some_tool_name";
string usage = Flags::Usage(tool_name + "<flags>",
{Flag("some_int", &some_int, "some int"),
Flag("some_int64", &some_int64, "some int64"),
Flag("some_switch", &some_switch, "some switch"),
Flag("some_name", &some_name, "some name")});
const char *expected_usage =
" usage: some_tool_name <flags>\n"
"Flags:\n"
"--some_int=10 int32 some int\n"
"--some_int64=21474836470 int64 some int64\n"
"--some_switch=false bool some switch\n"
"--some_name=\"something\" string some name\n";
ASSERT_EQ(MatchWithAnyWhitespace(usage, expected_usage), true);
usage = Flags::Usage(tool_name, {});
ASSERT_EQ(MatchWithAnyWhitespace(usage, " usage: some_tool_name\n"), true);
}
namespace {
template <typename T, typename ExpectationFun>
void PrefixTestTempl(ExpectationFun expectation_fun, const T &value0,
const T &value1, string str0, string str1) {
int argc = 3;
std::vector<string> argv_strings = {
"program_name",
"--hello" + str0,
"--hello_world" + str1,
};
std::vector<char *> argv_array = CharPointerVectorFromStrings(argv_strings);
T hello{};
T hello_world{};
bool parsed_ok = Flags::Parse(
&argc, argv_array.data(),
{
Flag("hello", &hello, "usage of hello"),
Flag("hello_world", &hello_world, "usage of hello world"),
});
EXPECT_EQ(true, parsed_ok);
expectation_fun(value0, hello);
expectation_fun(value1, hello_world);
EXPECT_EQ(argc, 1);
}
}
TEST(CommandLineFlagsTest, OneArgumentIsAPrefixOfAnother) {
auto expect_eq = [](auto a, auto b) { EXPECT_EQ(a, b); };
auto expect_near = [](auto a, auto b) { EXPECT_NEAR(a, b, 1e-5f); };
PrefixTestTempl<int32_t>(expect_eq, 1, 2, "=1", "=2");
PrefixTestTempl<int64_t>(expect_eq, 1, 2, "=1", "=2");
PrefixTestTempl<bool>(expect_eq, false, true, "=false", "=true");
PrefixTestTempl<bool>(expect_eq, false, true, "=false", "");
PrefixTestTempl<bool>(expect_eq, true, false, "=true", "=false");
PrefixTestTempl<bool>(expect_eq, true, false, "", "=false");
PrefixTestTempl<string>(expect_eq, "a", "b", "=a", "=b");
PrefixTestTempl<float>(expect_near, 0.1f, 0.2f, "=0.1", "=0.2");
}
} |
831 | cpp | tensorflow/tensorflow | list_flex_ops | tensorflow/lite/tools/list_flex_ops.cc | tensorflow/lite/tools/list_flex_ops_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_LIST_FLEX_OPS_H_
#define TENSORFLOW_LITE_TOOLS_LIST_FLEX_OPS_H_
#include <set>
#include <string>
#include "tensorflow/lite/core/model.h"
namespace tflite {
namespace flex {
struct OpKernel {
std::string op_name;
std::string kernel_name;
};
struct OpKernelCompare {
bool operator()(const OpKernel& lhs, const OpKernel& rhs) const {
if (lhs.op_name == rhs.op_name) {
return lhs.kernel_name < rhs.kernel_name;
}
return lhs.op_name < rhs.op_name;
}
};
using OpKernelSet = std::set<OpKernel, OpKernelCompare>;
void AddFlexOpsFromModel(const tflite::Model* model, OpKernelSet* flex_ops);
std::string OpListToJSONString(const OpKernelSet& flex_ops);
}
}
#endif
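// Illustrative usage sketch for the API declared above (not part of the
// original file; the model path is a placeholder):
//   auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
//   tflite::flex::OpKernelSet flex_ops;
//   tflite::flex::AddFlexOpsFromModel(model->GetModel(), &flex_ops);
//   std::cout << tflite::flex::OpListToJSONString(flex_ops);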
#include "tensorflow/lite/tools/list_flex_ops.h"
#include <cstring>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "json/json.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
std::string OpListToJSONString(const OpKernelSet& flex_ops) {
Json::Value result(Json::arrayValue);
for (const OpKernel& op : flex_ops) {
Json::Value op_kernel(Json::arrayValue);
op_kernel.append(Json::Value(op.op_name));
op_kernel.append(Json::Value(op.kernel_name));
result.append(op_kernel);
}
return Json::FastWriter().write(result);
}
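// Resolves the TensorFlow kernel class that implements `node_def`: looks the
// op up in the global registry, fills in default attrs, parses the node's
// device, and queries FindKernelDef. Any failure is fatal.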
string FindTensorflowKernelClass(tensorflow::NodeDef* node_def) {
if (!node_def || node_def->op().empty()) {
LOG(FATAL) << "Invalid NodeDef";
}
const tensorflow::OpRegistrationData* op_reg_data;
auto status =
tensorflow::OpRegistry::Global()->LookUp(node_def->op(), &op_reg_data);
if (!status.ok()) {
LOG(FATAL) << "Op " << node_def->op() << " not found: " << status;
}
AddDefaultsToNodeDef(op_reg_data->op_def, node_def);
tensorflow::DeviceNameUtils::ParsedName parsed_name;
if (!tensorflow::DeviceNameUtils::ParseFullName(node_def->device(),
&parsed_name)) {
LOG(FATAL) << "Failed to parse device from node_def: "
<< node_def->ShortDebugString();
}
string class_name;
if (!tensorflow::FindKernelDef(
tensorflow::DeviceType(parsed_name.type.c_str()), *node_def,
          nullptr, &class_name)
.ok()) {
LOG(FATAL) << "Failed to find kernel class for op: " << node_def->op();
}
return class_name;
}
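// Walks every subgraph of the model, selects custom ops whose custom code
// carries the Flex prefix, parses the serialized NodeDef out of the flexbuffer
// custom options (pinning it to /CPU:0), and records the TF op name together
// with its resolved kernel class.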
void AddFlexOpsFromModel(const tflite::Model* model, OpKernelSet* flex_ops) {
auto* subgraphs = model->subgraphs();
if (!subgraphs) return;
for (int subgraph_index = 0; subgraph_index < subgraphs->size();
++subgraph_index) {
const tflite::SubGraph* subgraph = subgraphs->Get(subgraph_index);
auto* operators = subgraph->operators();
auto* opcodes = model->operator_codes();
if (!operators || !opcodes) continue;
for (int i = 0; i < operators->size(); ++i) {
const tflite::Operator* op = operators->Get(i);
const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
if (tflite::GetBuiltinCode(opcode) != tflite::BuiltinOperator_CUSTOM ||
!tflite::IsFlexOp(opcode->custom_code()->c_str())) {
continue;
}
std::string flex_op_name(opcode->custom_code()->c_str());
std::string tf_op_name =
flex_op_name.substr(strlen(tflite::kFlexCustomCodePrefix));
if (op->custom_options_format() !=
tflite::CustomOptionsFormat_FLEXBUFFERS) {
LOG(FATAL) << "Invalid CustomOptionsFormat";
}
const flatbuffers::Vector<uint8_t>* custom_opt_bytes =
op->custom_options();
if (custom_opt_bytes && custom_opt_bytes->size()) {
const flexbuffers::Vector& v =
flexbuffers::GetRoot(custom_opt_bytes->data(),
custom_opt_bytes->size())
.AsVector();
std::string nodedef_str = v[1].AsString().str();
tensorflow::NodeDef nodedef;
if (nodedef_str.empty() || !nodedef.ParseFromString(nodedef_str)) {
LOG(FATAL) << "Failed to parse data into a valid NodeDef";
}
*nodedef.mutable_device() = "/CPU:0";
std::string kernel_class = FindTensorflowKernelClass(&nodedef);
flex_ops->insert({tf_op_name, kernel_class});
}
}
}
}
}
} | #include "tensorflow/lite/tools/list_flex_ops.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace flex {
class FlexOpsListTest : public ::testing::Test {
protected:
FlexOpsListTest() {}
void ReadOps(const string& path) {
std::string full_path = tensorflow::GetDataDependencyFilepath(path);
auto model = FlatBufferModel::BuildFromFile(full_path.data());
AddFlexOpsFromModel(model->GetModel(), &flex_ops_);
output_text_ = OpListToJSONString(flex_ops_);
}
void ReadOps(const tflite::Model* model) {
AddFlexOpsFromModel(model, &flex_ops_);
output_text_ = OpListToJSONString(flex_ops_);
}
std::string output_text_;
OpKernelSet flex_ops_;
};
TfLiteRegistration* Register_TEST() {
static TfLiteRegistration r = {nullptr, nullptr, nullptr, nullptr};
return &r;
}
std::vector<uint8_t> CreateFlexCustomOptions(std::string nodedef_raw_string) {
tensorflow::NodeDef node_def;
tensorflow::protobuf::TextFormat::ParseFromString(nodedef_raw_string,
&node_def);
std::string node_def_str = node_def.SerializeAsString();
auto flex_builder = std::make_unique<flexbuffers::Builder>();
flex_builder->Vector([&]() {
flex_builder->String(node_def.op());
flex_builder->String(node_def_str);
});
flex_builder->Finish();
return flex_builder->GetBuffer();
}
class FlexOpModel : public SingleOpModel {
public:
FlexOpModel(const std::string& op_name, const TensorData& input1,
const TensorData& input2, const TensorType& output,
const std::vector<uint8_t>& custom_options) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetCustomOp(op_name, custom_options, Register_TEST);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
TEST_F(FlexOpsListTest, TestModelsNoFlex) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
EXPECT_EQ(output_text_, "[]\n");
}
TEST_F(FlexOpsListTest, TestBrokenModel) {
EXPECT_DEATH_IF_SUPPORTED(
ReadOps("tensorflow/lite/testdata/test_model_broken.bin"), "");
}
TEST_F(FlexOpsListTest, TestZeroSubgraphs) {
ReadOps("tensorflow/lite/testdata/0_subgraphs.bin");
EXPECT_EQ(output_text_, "[]\n");
}
TEST_F(FlexOpsListTest, TestFlexAdd) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
TEST_F(FlexOpsListTest, TestTwoModel) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
ReadOps("tensorflow/lite/testdata/softplus_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, "
"functor::add<float>>\"],[\"Softplus\",\"SoftplusOp<CPUDevice, "
"float>\"]]\n");
}
TEST_F(FlexOpsListTest, TestDuplicatedOp) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
TEST_F(FlexOpsListTest, TestInvalidCustomOptions) {
std::vector<uint8_t> random_custom_options(20);
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
random_custom_options);
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Failed to parse data into a valid NodeDef");
}
TEST_F(FlexOpsListTest, TestOpNameEmpty) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
std::string random_fieldname = "random string";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())), "Invalid NodeDef");
}
TEST_F(FlexOpsListTest, TestOpNotFound) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"FlexInvalidOp\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Op FlexInvalidOp not found");
}
TEST_F(FlexOpsListTest, TestKernelNotFound) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"Add\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_BOOL } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Failed to find kernel class for op: Add");
}
TEST_F(FlexOpsListTest, TestFlexAddWithSingleOpModel) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"Add\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
ReadOps(tflite::GetModel(max_model.GetModelBuffer()));
EXPECT_EQ(output_text_,
"[[\"Add\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
}
} |
832 | cpp | tensorflow/tensorflow | model_loader | tensorflow/lite/tools/model_loader.cc | tensorflow/lite/tools/model_loader_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_MODEL_LOADER_H_
#define TENSORFLOW_LITE_TOOLS_MODEL_LOADER_H_
#ifndef _WIN32
#include <unistd.h>
#endif
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/core/model_builder.h"
namespace tflite {
namespace tools {
class ModelLoader {
public:
enum class Type : int {
kPathModelLoader = 0,
kBufferModelLoader = 1,
kMmapModelLoader = 2,
kPipeModelLoader = 3,
};
virtual ~ModelLoader() = default;
virtual bool Init();
virtual Type type() const = 0;
const FlatBufferModel* GetModel() const { return model_.get(); }
protected:
virtual bool InitInternal() = 0;
std::unique_ptr<FlatBufferModel> model_;
};
class PathModelLoader : public ModelLoader {
public:
explicit PathModelLoader(absl::string_view model_path)
: ModelLoader(), model_path_(model_path) {}
Type type() const override { return Type::kPathModelLoader; }
protected:
bool InitInternal() override;
private:
const std::string model_path_;
};
class BufferModelLoader : public ModelLoader {
public:
BufferModelLoader(const char* caller_owned_buffer, size_t model_size)
: caller_owned_buffer_(caller_owned_buffer), model_size_(model_size) {}
BufferModelLoader(BufferModelLoader&&) = default;
BufferModelLoader& operator=(BufferModelLoader&&) = default;
~BufferModelLoader() override = default;
Type type() const override { return Type::kBufferModelLoader; }
protected:
bool InitInternal() override;
private:
const char* caller_owned_buffer_ = nullptr;
size_t model_size_ = 0;
};
#ifndef _WIN32
class MmapModelLoader : public ModelLoader {
public:
MmapModelLoader(int model_fd, size_t model_offset, size_t model_size)
: ModelLoader(),
model_fd_(dup(model_fd)),
model_offset_(model_offset),
model_size_(model_size) {}
~MmapModelLoader() override {
if (model_fd_ >= 0) {
close(model_fd_);
}
}
Type type() const override { return Type::kMmapModelLoader; }
protected:
bool InitInternal() override;
private:
const int model_fd_ = -1;
const size_t model_offset_ = 0;
const size_t model_size_ = 0;
};
class PipeModelLoader : public ModelLoader {
public:
PipeModelLoader(int pipe_fd, size_t model_size)
: ModelLoader(), pipe_fd_(pipe_fd), model_size_(model_size) {}
PipeModelLoader(PipeModelLoader&&) = default;
PipeModelLoader& operator=(PipeModelLoader&&) = default;
~PipeModelLoader() override { std::free(model_buffer_); }
Type type() const override { return Type::kPipeModelLoader; }
protected:
bool InitInternal() override;
private:
const int pipe_fd_ = -1;
const size_t model_size_ = 0;
uint8_t* model_buffer_ = nullptr;
};
#endif
std::unique_ptr<ModelLoader> CreateModelLoaderFromPath(const std::string& path);
}
}
#endif
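// Illustrative path formats accepted by CreateModelLoaderFromPath (see the
// implementation below); the numeric values are placeholders:
//   "fd:<fd>:<offset>:<size>"            -> MmapModelLoader
//   "pipe:<read_fd>:<write_fd>:<size>"   -> PipeModelLoader (closes write_fd)
//   "buffer:<pointer>:<size>"            -> BufferModelLoader
//   anything else                        -> PathModelLoader (treated as a file path)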
#include "tensorflow/lite/tools/model_loader.h"
#include <cerrno>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace tools {
bool ModelLoader::Init() {
if (model_ && model_->initialized()) {
return true;
}
if (!InitInternal()) {
return false;
}
if (!model_ || !model_->initialized()) {
return false;
}
return true;
}
bool PathModelLoader::InitInternal() {
if (model_path_.empty()) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "model_path is empty.");
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromFile(model_path_.c_str());
return true;
}
bool BufferModelLoader::InitInternal() {
if (!caller_owned_buffer_ || model_size_ <= 0) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to create BufferModelLoader: caller_owned_buffer "
"is %s; model_size: %zu",
caller_owned_buffer_ ? "not null" : "null", model_size_);
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromBuffer(caller_owned_buffer_,
model_size_);
return true;
}
#ifndef _WIN32
bool MmapModelLoader::InitInternal() {
if (model_fd_ < 0 || model_offset_ < 0 || model_size_ < 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Invalid model file descriptor. file descriptor: %d model_offset: "
"%zu model_size: %zu",
model_fd_, model_offset_, model_size_);
return false;
}
if (!MMAPAllocation::IsSupported()) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "MMAPAllocation is not supported.");
return false;
}
auto allocation = std::make_unique<MMAPAllocation>(
model_fd_, model_offset_, model_size_, tflite::DefaultErrorReporter());
if (!allocation->valid()) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "MMAPAllocation is not valid.");
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromAllocation(std::move(allocation));
#if FLATBUFFERS_LITTLEENDIAN == 0
model_ = FlatBufferModel::ByteConvertModel(std::move(model_));
#endif
return true;
}
bool PipeModelLoader::InitInternal() {
if (pipe_fd_ < 0) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid pipe file descriptor %d",
pipe_fd_);
return false;
}
std::free(model_buffer_);
model_buffer_ = reinterpret_cast<uint8_t*>(std::malloc(model_size_));
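  // read() may deliver fewer bytes than requested, so keep reading until the
  // full model_size_ bytes have arrived or the writer closes the pipe.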
int read_bytes = 0;
int remaining_bytes = model_size_;
uint8_t* buffer = model_buffer_;
while (remaining_bytes > 0 &&
(read_bytes = read(pipe_fd_, buffer, remaining_bytes)) > 0) {
remaining_bytes -= read_bytes;
buffer += read_bytes;
}
close(pipe_fd_);
if (read_bytes < 0 || remaining_bytes != 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Read Model from pipe failed: %s. Expect to read %zu bytes, "
"%d bytes missing.",
std::strerror(errno), model_size_, remaining_bytes);
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromBuffer(
reinterpret_cast<const char*>(model_buffer_), model_size_);
return true;
}
#endif
std::unique_ptr<ModelLoader> CreateModelLoaderFromPath(
const std::string& path) {
std::vector<absl::string_view> parts = absl::StrSplit(path, ':');
if (parts.empty()) {
return nullptr;
}
#ifndef _WIN32
if (parts[0] == "fd") {
int model_fd;
size_t model_offset, model_size;
if (parts.size() != 4 || !absl::SimpleAtoi(parts[1], &model_fd) ||
!absl::SimpleAtoi(parts[2], &model_offset) ||
!absl::SimpleAtoi(parts[3], &model_size)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
path.c_str());
return nullptr;
}
return std::make_unique<MmapModelLoader>(model_fd, model_offset,
model_size);
}
if (parts[0] == "pipe") {
int read_fd, write_fd;
size_t model_size;
if (parts.size() != 4 || !absl::SimpleAtoi(parts[1], &read_fd) ||
!absl::SimpleAtoi(parts[2], &write_fd) ||
!absl::SimpleAtoi(parts[3], &model_size)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
path.c_str());
return nullptr;
}
if (write_fd >= 0) {
close(write_fd);
}
return std::make_unique<PipeModelLoader>(read_fd, model_size);
}
#endif
if (parts[0] == "buffer") {
int64_t buffer_handle;
size_t model_size;
if (parts.size() != 3 || !absl::SimpleAtoi(parts[1], &buffer_handle) ||
!absl::SimpleAtoi(parts[2], &model_size)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
path.c_str());
return nullptr;
}
return std::make_unique<BufferModelLoader>(
reinterpret_cast<const char*>(buffer_handle), model_size);
}
return std::make_unique<PathModelLoader>(path);
}
}
} | #include "tensorflow/lite/tools/model_loader.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace tools {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
using ::testing::IsNull;
using ::testing::Not;
using ::testing::WhenDynamicCastTo;
class ModelLoaderTest : public ::testing::Test {};
TEST_F(ModelLoaderTest, CreateFromModelPath) {
auto model_loader = std::make_unique<PathModelLoader>(kModelPath);
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateFromFdPath) {
int fd = open(kModelPath, O_RDONLY);
ASSERT_GE(fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(fd, &stat_buf), 0);
auto model_loader =
std::make_unique<MmapModelLoader>(fd, 0, stat_buf.st_size);
close(fd);
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateFromPipePath) {
auto model = FlatBufferModel::BuildFromFile(kModelPath);
flatbuffers::FlatBufferBuilder fbb;
ModelT model_obj;
model->GetModel()->UnPackTo(&model_obj);
std::string model_description = model_obj.description;
FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
int pipe_fds[2];
ASSERT_EQ(pipe(pipe_fds), 0);
pid_t r = fork();
if (r == 0) {
close(pipe_fds[0]);
int written_bytes = 0;
int remaining_bytes = fbb.GetSize();
uint8_t* buffer = fbb.GetBufferPointer();
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], buffer, remaining_bytes)) > 0) {
remaining_bytes -= written_bytes;
buffer += written_bytes;
}
close(pipe_fds[1]);
ASSERT_TRUE(written_bytes > 0 && remaining_bytes == 0);
_exit(0);
}
close(pipe_fds[1]);
auto model_loader =
std::make_unique<PipeModelLoader>(pipe_fds[0], fbb.GetSize());
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
model_description);
}
TEST_F(ModelLoaderTest, CreateBufferModelLoader) {
auto model = FlatBufferModel::BuildFromFile(kModelPath);
flatbuffers::FlatBufferBuilder fbb;
ModelT model_obj;
model->GetModel()->UnPackTo(&model_obj);
std::string model_description = model_obj.description;
FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
ASSERT_NE(model->allocation(), nullptr);
auto model_loader = std::make_unique<BufferModelLoader>(
reinterpret_cast<const char*>(fbb.GetBufferPointer()), fbb.GetSize());
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
model_description);
}
TEST_F(ModelLoaderTest, InvalidModelPath) {
auto model_loader = std::make_unique<PathModelLoader>("invalid/path");
ASSERT_NE(model_loader, nullptr);
EXPECT_FALSE(model_loader->Init());
}
TEST_F(ModelLoaderTest, InvalidFd) {
auto model_loader = std::make_unique<MmapModelLoader>(0, 5, 10);
ASSERT_NE(model_loader, nullptr);
EXPECT_FALSE(model_loader->Init());
}
TEST_F(ModelLoaderTest, InvalidPipe) {
auto model_loader = std::make_unique<PipeModelLoader>(-1, 10);
ASSERT_NE(model_loader, nullptr);
EXPECT_FALSE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateModelLoaderFromValidPath) {
EXPECT_THAT(CreateModelLoaderFromPath("a/b/c").get(),
WhenDynamicCastTo<PathModelLoader*>(Not(IsNull())));
EXPECT_THAT(CreateModelLoaderFromPath("fd:1:2:3").get(),
WhenDynamicCastTo<MmapModelLoader*>(Not(IsNull())));
EXPECT_THAT(CreateModelLoaderFromPath("pipe:1:2:3").get(),
WhenDynamicCastTo<PipeModelLoader*>(Not(IsNull())));
EXPECT_THAT(CreateModelLoaderFromPath("buffer:1:2").get(),
WhenDynamicCastTo<BufferModelLoader*>(Not(IsNull())));
}
TEST_F(ModelLoaderTest, CreateModelLoaderFromInvalidPath) {
EXPECT_EQ(CreateModelLoaderFromPath("fd:1"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("fd:1:2:3:4"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("pipe:1"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("pipe:1:2:3:4"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("buffer:1:2:3"), nullptr);
}
}
}
} |
833 | cpp | tensorflow/tensorflow | utils | third_party/xla/xla/pjrt/utils.cc | third_party/xla/xla/service/gpu/llvm_gpu_backend/utils_test.cc | #ifndef XLA_PJRT_UTILS_H_
#define XLA_PJRT_UTILS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/pjrt/layout_mode.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
using MemorySpaceColor = int;
absl::Status ParseDeviceAssignmentCompileOptions(
bool compile_portable_executable, ExecutableBuildOptions* build_options,
std::function<absl::StatusOr<DeviceAssignment>(int, int)>
GetDefaultDeviceAssignmentFunction,
int* num_replicas, int* num_partitions,
std::shared_ptr<DeviceAssignment>* device_assignment);
absl::StatusOr<std::vector<LayoutMode>> GetArgLayoutModes(
mlir::ModuleOp module);
absl::StatusOr<std::vector<LayoutMode>> GetOutputLayoutModes(
mlir::ModuleOp module);
absl::StatusOr<std::vector<MemorySpaceColor>> GetArgMemoryKinds(
mlir::ModuleOp module);
absl::StatusOr<std::vector<MemorySpaceColor>> GetOutputMemoryKinds(
mlir::ModuleOp module);
absl::Status AddLayoutModesToFrontendAttrs(mlir::ModuleOp module,
XlaComputation& xla_computation);
absl::Status AddMemoryKindsToFrontendAttrs(mlir::ModuleOp module,
XlaComputation& xla_computation);
absl::StatusOr<std::vector<LayoutMode>> GetArgLayoutModes(
const XlaComputation& computation);
absl::StatusOr<std::vector<LayoutMode>> GetOutputLayoutModes(
const XlaComputation& computation);
absl::StatusOr<std::vector<MemorySpaceColor>> GetArgMemoryKinds(
const XlaComputation& computation);
absl::StatusOr<std::vector<MemorySpaceColor>> GetOutputMemoryKinds(
const XlaComputation& computation);
absl::StatusOr<std::pair<std::vector<Shape>, Shape>> LayoutModesToXlaShapes(
const XlaComputation& computation, std::vector<LayoutMode> arg_layout_modes,
std::vector<LayoutMode> out_layout_modes,
const std::vector<MemorySpaceColor>& arg_memory_spaces,
const std::vector<MemorySpaceColor>& out_memory_spaces,
std::function<absl::StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function);
absl::StatusOr<std::pair<std::vector<Shape>, std::vector<const Shape*>>>
LayoutModesToXla(const XlaComputation& computation,
std::vector<LayoutMode> arg_layout_modes,
std::vector<LayoutMode> out_layout_modes,
const std::vector<MemorySpaceColor>& arg_memory_spaces,
const std::vector<MemorySpaceColor>& out_memory_spaces,
std::function<absl::StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function,
ExecutableBuildOptions& build_options);
absl::Status DetermineArgumentLayoutsFromCompileOptions(
const XlaComputation& computation,
std::function<absl::StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function,
std::optional<std::vector<Shape>>& argument_layouts,
ExecutableBuildOptions* build_options,
std::vector<const Shape*>* argument_layout_pointers);
absl::StatusOr<std::vector<int>> ComputeParametersThatMustBeDonated(
const HloModule& hlo_module, bool tuple_inputs);
int DefaultThreadPoolSize();
bool HasMajorToMinorLayout(PrimitiveType type, absl::Span<int64_t const> dims,
absl::Span<int64_t const> byte_strides);
absl::StatusOr<Shape> MakeShapeWithTrivialByteStrides(
PrimitiveType element_type, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> byte_strides);
absl::Status TestBufferDonationClashes(
void* opaque_key,
absl::flat_hash_map<const void*, std::pair<bool, int>>& donation_clashes,
bool is_donated, int arg_idx, int replica, int partition);
}
#endif
#include "xla/pjrt/utils.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/types/span.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/pjrt/layout_mode.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
absl::StatusOr<Shape> GetShardedShape(const Shape& shape,
const OpSharding& sharding) {
TF_ASSIGN_OR_RETURN(HloSharding hlo_sharding,
HloSharding::FromProto(sharding));
if (shape.IsTuple()) {
Shape sharded_shape = shape;
ShapeUtil::ForEachMutableSubshape(
&sharded_shape, [&](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsTuple()) {
HloSharding subsharding = hlo_sharding.GetSubSharding(shape, index);
*subshape = subsharding.TileShape(*subshape);
}
});
return sharded_shape;
} else {
return hlo_sharding.TileShape(shape);
}
}
absl::StatusOr<Shape> GetShardedShape(const HloInstructionProto& instr) {
const Shape unsharded_shape(instr.shape());
Shape sharded_shape;
if (instr.has_sharding()) {
TF_ASSIGN_OR_RETURN(sharded_shape,
GetShardedShape(unsharded_shape, instr.sharding()));
} else {
sharded_shape = unsharded_shape;
}
LayoutUtil::ClearLayout(&sharded_shape);
return sharded_shape;
}
absl::StatusOr<std::pair<std::vector<Shape>, Shape>> GetShardedProgramShapes(
const XlaComputation& computation, const ProgramShape& program_shape) {
std::vector<Shape> arg_shapes;
arg_shapes.resize(program_shape.parameters_size());
Shape result_shape;
for (const HloComputationProto& comp : computation.proto().computations()) {
if (comp.id() != computation.proto().entry_computation_id()) {
continue;
}
for (const HloInstructionProto& instr : comp.instructions()) {
if (instr.opcode() == HloOpcodeString(HloOpcode::kParameter)) {
if (instr.parameter_number() >= program_shape.parameters_size()) {
return InvalidArgument(
"Got invalid parameter number %d, expected %d parameters",
instr.parameter_number(), program_shape.parameters_size());
}
TF_ASSIGN_OR_RETURN(arg_shapes[instr.parameter_number()],
GetShardedShape(instr));
}
if (instr.id() == comp.root_id()) {
if (result_shape.element_type() != PRIMITIVE_TYPE_INVALID) {
return InvalidArgument("Found multiple root instructions");
}
TF_ASSIGN_OR_RETURN(result_shape, GetShardedShape(instr));
}
}
}
for (int i = 0; i < arg_shapes.size(); ++i) {
if (arg_shapes[i].element_type() == PRIMITIVE_TYPE_INVALID) {
return InvalidArgument("Couldn't find parameter %d", i);
}
}
if (result_shape.element_type() == PRIMITIVE_TYPE_INVALID) {
return InvalidArgument("Couldn't find root instruction");
}
return std::make_pair(arg_shapes, result_shape);
}
}
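// Validates the replica/partition configuration: portable executables must not
// carry a device assignment and are pinned to a single replica and partition;
// otherwise a default device assignment is synthesized whenever the build
// options do not already provide one.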
absl::Status ParseDeviceAssignmentCompileOptions(
bool compile_portable_executable, ExecutableBuildOptions* build_options,
std::function<absl::StatusOr<DeviceAssignment>(int, int)>
GetDefaultDeviceAssignmentFunction,
int* num_replicas, int* num_partitions,
std::shared_ptr<DeviceAssignment>* device_assignment) {
if (compile_portable_executable) {
if (build_options->has_device_assignment()) {
return InvalidArgument(
"CompileOptions requests portable executable but "
"ExecutableBuildOptions includes a device assignment");
}
if (build_options->num_replicas() != 1 ||
build_options->num_partitions() != 1) {
return InvalidArgument(
"CompileOptions requests portable executable but "
"ExecutableBuildOptions includes num_replicas %d and num_partitions "
"%d.",
build_options->num_replicas(), build_options->num_partitions());
}
*num_replicas = 1;
*num_partitions = 1;
} else {
if (!build_options->has_device_assignment()) {
VLOG(2) << "Compile using default device_assignment.";
TF_ASSIGN_OR_RETURN(
DeviceAssignment device_assignment,
GetDefaultDeviceAssignmentFunction(build_options->num_replicas(),
build_options->num_partitions()));
build_options->set_device_assignment(device_assignment);
}
VLOG(2) << "Compile device_assignment:\n"
<< build_options->device_assignment().ToString();
*num_replicas = build_options->device_assignment().replica_count();
*num_partitions = build_options->device_assignment().computation_count();
*device_assignment =
std::make_shared<DeviceAssignment>(build_options->device_assignment());
}
return absl::OkStatus();
}
static absl::StatusOr<std::vector<LayoutMode>> MlirAttrsToLayoutModes(
mlir::ArrayAttr all_attrs, size_t num_values) {
if (all_attrs == nullptr) {
return std::vector<LayoutMode>(num_values);
}
if (all_attrs.size() != num_values) {
return InvalidArgument(
"MlirAttrsToLayoutModes got unexpected number of attributes: %d, "
"expected: %d",
all_attrs.size(), num_values);
}
std::vector<LayoutMode> result;
result.reserve(all_attrs.size());
for (const mlir::Attribute& dict_attr : all_attrs) {
mlir::StringAttr attr =
mlir::cast<mlir::DictionaryAttr>(dict_attr).getAs<mlir::StringAttr>(
"mhlo.layout_mode");
if (attr != nullptr) {
TF_ASSIGN_OR_RETURN(LayoutMode mode,
LayoutMode::FromString(attr.getValue().str()));
result.emplace_back(std::move(mode));
} else {
result.emplace_back();
}
}
return result;
}
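// Maps an mhlo.memory_kind string to the XLA memory space color used in
// layouts: "pinned_host"/"unpinned_host" map to kHostMemorySpace and "device"
// maps to kDefaultMemorySpace.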
absl::StatusOr<MemorySpaceColor> GetMemorySpaceColor(
const std::string& memory_kind) {
if (memory_kind == "unpinned_host" || memory_kind == "pinned_host") {
return xla::Layout::kHostMemorySpace;
} else if (memory_kind == "device") {
return xla::Layout::kDefaultMemorySpace;
} else {
return InvalidArgument("Unknown memory kind %s", memory_kind);
}
}
static absl::StatusOr<std::vector<MemorySpaceColor>> MlirAttrsToMemoryKinds(
mlir::ArrayAttr all_attrs, size_t num_values) {
if (all_attrs == nullptr) {
return std::vector<MemorySpaceColor>(num_values,
xla::Layout::kDefaultMemorySpace);
}
if (all_attrs.size() != num_values) {
return InvalidArgument(
"MlirAttrsToMemoryKinds got unexpected number of attributes: %d, "
"expected: %d",
all_attrs.size(), num_values);
}
std::vector<MemorySpaceColor> result;
result.reserve(all_attrs.size());
for (const mlir::Attribute& dict_attr : all_attrs) {
mlir::StringAttr attr =
mlir::cast<mlir::DictionaryAttr>(dict_attr).getAs<mlir::StringAttr>(
"mhlo.memory_kind");
if (attr != nullptr) {
TF_ASSIGN_OR_RETURN(MemorySpaceColor memory_space,
GetMemorySpaceColor(attr.getValue().str()));
result.emplace_back(memory_space);
} else {
result.emplace_back(xla::Layout::kDefaultMemorySpace);
}
}
return result;
}
static absl::StatusOr<std::optional<std::vector<LayoutMode>>>
GetTupleLayoutModes(mlir::ArrayRef<mlir::Type> types,
mlir::ArrayAttr all_attrs) {
if (types.size() != 1 || !llvm::isa<mlir::TupleType>(types[0])) {
return std::nullopt;
}
if (all_attrs != nullptr) {
if (all_attrs.size() != 1) {
return InvalidArgument(
"GetTupleLayoutModes expected single tuple attr, got %d attrs",
all_attrs.size());
}
mlir::StringAttr attr = mlir::cast<mlir::DictionaryAttr>(*all_attrs.begin())
.getAs<mlir::StringAttr>("mhlo.layout_mode");
if (attr != nullptr) {
return Unimplemented("mhlo.layout_mode not supported with tupled values");
}
}
return std::vector<LayoutMode>(mlir::cast<mlir::TupleType>(types[0]).size());
}
static absl::StatusOr<std::optional<std::vector<MemorySpaceColor>>>
GetTupleMemoryKinds(mlir::ArrayRef<mlir::Type> types,
mlir::ArrayAttr all_attrs) {
if (types.size() != 1 || !llvm::isa<mlir::TupleType>(types[0])) {
return std::nullopt;
}
if (all_attrs != nullptr) {
if (all_attrs.size() != 1) {
return InvalidArgument(
"GetTupleMemoryKinds expected single tuple attr, got %d attrs",
all_attrs.size());
}
mlir::StringAttr attr = mlir::cast<mlir::DictionaryAttr>(*all_attrs.begin())
.getAs<mlir::StringAttr>("mhlo.memory_kind");
if (attr != nullptr) {
return Unimplemented("mhlo.memory_kind not supported with tupled values");
}
}
return std::vector<MemorySpaceColor>(
mlir::cast<mlir::TupleType>(types[0]).size(),
xla::Layout::kDefaultMemorySpace);
}
absl::StatusOr<std::vector<LayoutMode>> GetArgLayoutModes(
mlir::ModuleOp module) {
mlir::func::FuncOp main = module.lookupSymbol<mlir::func::FuncOp>("main");
if (main == nullptr) {
return InvalidArgument(
"GetArgLayoutModes passed module without main function");
}
TF_ASSIGN_OR_RETURN(std::optional<std::vector<LayoutMode>> maybe_result,
GetTupleLayoutModes(main.getFunctionType().getInputs(),
main.getAllArgAttrs()));
if (maybe_result) return *maybe_result;
return MlirAttrsToLayoutModes(main.getAllArgAttrs(), main.getNumArguments());
}
absl::StatusOr<std::vector<LayoutMode>> GetOutputLayoutModes(
mlir::ModuleOp module) {
mlir::func::FuncOp main = module.lookupSymbol<mlir::func::FuncOp>("main");
if (main == nullptr) {
return InvalidArgument(
"GetOutputLayoutModes passed module without main function");
}
TF_ASSIGN_OR_RETURN(std::optional<std::vector<LayoutMode>> maybe_tuple_result,
GetTupleLayoutModes(main.getFunctionType().getResults(),
main.getAllResultAttrs()));
if (maybe_tuple_result) return *maybe_tuple_result;
return MlirAttrsToLayoutModes(main.getAllResultAttrs(), main.getNumResults());
}
absl::StatusOr<std::vector<MemorySpaceColor>> GetArgMemoryKinds(
mlir::ModuleOp module) {
mlir::func::FuncOp main = module.lookupSymbol<mlir::func::FuncOp>("main");
if (main == nullptr) {
return InvalidArgument(
"GetArgMemoryKinds passed module without main function");
}
TF_ASSIGN_OR_RETURN(
std::optional<std::vector<MemorySpaceColor>> maybe_tuple_result,
GetTupleMemoryKinds(main.getFunctionType().getInputs(),
main.getAllArgAttrs()));
if (maybe_tuple_result) return *maybe_tuple_result;
return MlirAttrsToMemoryKinds(main.getAllArgAttrs(), main.getNumArguments());
}
absl::StatusOr<std::vector<MemorySpaceColor>> GetOutputMemoryKinds(
mlir::ModuleOp module) {
mlir::func::FuncOp main = module.lookupSymbol<mlir::func::FuncOp>("main");
if (main == nullptr) {
return InvalidArgument(
"GetOutputMemoryKinds passed module without main function");
}
TF_ASSIGN_OR_RETURN(
std::optional<std::vector<MemorySpaceColor>> maybe_tuple_result,
GetTupleMemoryKinds(main.getFunctionType().getResults(),
main.getAllResultAttrs()));
if (maybe_tuple_result) return *maybe_tuple_result;
return MlirAttrsToMemoryKinds(main.getAllResultAttrs(), main.getNumResults());
}
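// Layout modes and memory kinds are serialized into the computation's
// frontend attributes as ';'-joined strings below, and parsed back out by the
// GetLayoutModes/GetMemoryKinds helpers further down.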
static const char* kDelimiter = ";";
static std::string GetFrontendAttr(absl::Span<const LayoutMode> layout_modes) {
return absl::StrJoin(layout_modes, kDelimiter,
[](std::string* out, const LayoutMode& mode) {
absl::StrAppend(out, mode.ToString());
});
}
absl::Status AddLayoutModesToFrontendAttrs(mlir::ModuleOp module,
XlaComputation& xla_computation) {
TF_ASSIGN_OR_RETURN(std::vector<LayoutMode> arg_layout_modes,
GetArgLayoutModes(module));
TF_ASSIGN_OR_RETURN(std::vector<LayoutMode> out_layout_modes,
GetOutputLayoutModes(module));
auto& frontend_attrs = *xla_computation.mutable_proto()
->mutable_frontend_attributes()
->mutable_map();
frontend_attrs["arg_layout_modes"] = GetFrontendAttr(arg_layout_modes);
frontend_attrs["out_layout_modes"] = GetFrontendAttr(out_layout_modes);
return absl::OkStatus();
}
static std::string GetFrontendAttrForMemorySpace(
const std::vector<MemorySpaceColor>& memory_spaces) {
return absl::StrJoin(
memory_spaces, kDelimiter,
[](std::string* out, const MemorySpaceColor memory_kind) {
absl::StrAppend(out, memory_kind);
});
}
absl::Status AddMemoryKindsToFrontendAttrs(mlir::ModuleOp module,
XlaComputation& xla_computation) {
TF_ASSIGN_OR_RETURN(std::vector<MemorySpaceColor> arg_memory_spaces,
GetArgMemoryKinds(module));
TF_ASSIGN_OR_RETURN(std::vector<MemorySpaceColor> out_memory_spaces,
GetOutputMemoryKinds(module));
auto& frontend_attrs = *xla_computation.mutable_proto()
->mutable_frontend_attributes()
->mutable_map();
frontend_attrs["arg_memory_spaces"] =
GetFrontendAttrForMemorySpace(arg_memory_spaces);
frontend_attrs["out_memory_spaces"] =
GetFrontendAttrForMemorySpace(out_memory_spaces);
return absl::OkStatus();
}
static absl::StatusOr<std::vector<LayoutMode>> GetLayoutModesFromFrontendAttr(
absl::string_view attr) {
std::vector<std::string> str_modes =
absl::StrSplit(attr, kDelimiter, absl::SkipEmpty());
std::vector<LayoutMode> result;
for (const std::string& str_mode : str_modes) {
TF_ASSIGN_OR_RETURN(LayoutMode mode, LayoutMode::FromString(str_mode));
result.emplace_back(std::move(mode));
}
return result;
}
static absl::StatusOr<std::vector<LayoutMode>> GetLayoutModes(
const XlaComputation& computation, absl::string_view frontend_attr_name,
size_t num_values) {
const auto& frontend_attrs = computation.proto().frontend_attributes().map();
auto iter = frontend_attrs.find(frontend_attr_name);
if (iter == frontend_attrs.end()) {
return std::vector<LayoutMode>(num_values);
}
return GetLayoutModesFromFrontendAttr(iter->second);
}
static absl::StatusOr<std::vector<MemorySpaceColor>>
GetMemoryKindsFromFrontendAttr(absl::string_view attr) {
std::vector<std::string> str_memory_spaces =
absl::StrSplit(attr, kDelimiter, absl::SkipEmpty());
std::vector<MemorySpaceColor> result;
result.reserve(str_memory_spaces.size());
for (const std::string& str_mem_space : str_memory_spaces) {
MemorySpaceColor memory_space;
CHECK(absl::SimpleAtoi(str_mem_space, &memory_space));
result.emplace_back(memory_space);
}
return result;
}
static absl::StatusOr<std::vector<MemorySpaceColor>> GetMemoryKinds(
const XlaComputation& computation, absl::string_view frontend_attr_name,
size_t num_values) {
const auto& frontend_attrs = computation.proto().frontend_attributes().map();
auto iter = frontend_attrs.find(frontend_attr_name);
if (iter == frontend_attrs.end()) {
return std::vector<MemorySpaceColor>(num_values,
xla::Layout::kDefaultMemorySpace);
}
return GetMemoryKindsFromFrontendAttr(iter->second);
}
absl::StatusOr<std::vector<LayoutMode>> GetArgLayoutModes(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
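// A single tupled parameter is flattened, so each tuple element gets its own
// layout-mode entry.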
size_t num_args = program_shape.parameters_size() == 1 &&
program_shape.parameters(0).IsTuple()
? program_shape.parameters(0).tuple_shapes_size()
: program_shape.parameters_size();
return GetLayoutModes(computation, "arg_layout_modes", num_args);
}
absl::StatusOr<std::vector<MemorySpaceColor>> GetArgMemoryKinds(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
size_t num_args = program_shape.parameters_size() == 1 &&
program_shape.parameters(0).IsTuple()
? program_shape.parameters(0).tuple_shapes_size()
: program_shape.parameters_size();
return GetMemoryKinds(computation, "arg_memory_spaces", num_args);
}
absl::StatusOr<std::vector<LayoutMode>> GetOutputLayoutModes(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
size_t num_outputs = program_shape.result().IsTuple()
? program_shape.result().tuple_shapes_size()
: 1;
return GetLayoutModes(computation, "out_layout_modes", num_outputs);
}
absl::StatusOr<std::vector<MemorySpaceColor>> GetOutputMemoryKinds(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
size_t num_outputs = program_shape.result().IsTuple()
? program_shape.result().tuple_shapes_size()
: 1;
return GetMemoryKinds(computation, "out_memory_spaces", num_outputs);
}
static absl::StatusOr<Shape> LayoutModeToXlaShape(
const LayoutMode& layout_mode, const Shape& unsharded_shape,
const Shape& sharded_shape,
std::function<absl::StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function) {
if (unsharded_shape.IsToken() || unsharded_shape.IsOpaque()) {
return unsharded_shape;
}
if (!unsharded_shape.IsArray() || !sharded_shape.IsArray()) {
return InvalidArgument(
"LayoutModeToXlaShape must be passed array shapes, got "
"unsharded_shape: %s, sharded_shape: %s",
unsharded_shape.ToString(), sharded_shape.ToString());
}
Shape result = unsharded_shape;
LayoutUtil::ClearLayout(&result);
switch (layout_mode.mode) {
case LayoutMode::Mode::kDefault: {
TF_ASSIGN_OR_RETURN(
Shape layout,
choose_compact_layout_for_shape_function(sharded_shape));
*result.mutable_layout() = layout.layout();
break;
}
case LayoutMode::Mode::kUserSpecified: {
CHECK(layout_mode.user_layout);
*result.mutable_layout() = *layout_mode.user_layout;
break;
}
case LayoutMode::Mode::kAuto: {
break;
}
}
return result;
}
absl::StatusOr<std::pair<std::vector<Shape>, Shape>> LayoutModesToXlaShapes(
const XlaComputation& computation, std::vector<LayoutMode> arg_layout_modes,
std::vector<LayoutMode> out_layout_modes,
const std::vector<MemorySpaceColor>& arg_memory_spaces,
const std::vector<MemorySpaceColor>& out_memory_spaces,
std::function<absl::StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(auto sharded_shapes,
GetShardedProgramShapes(computation, program_shape));
bool args_tupled = program_shape.parameters_size() == 1 &&
program_shape.parameters(0).IsTuple();
const std::vector<Shape>& unsharded_arg_shapes =
args_tupled ? program_shape.parameters(0).tuple_shapes()
: program_shape.parameters();
const std::vector<Shape>& sharded_arg_shapes =
args_tupled ? sharded_shapes.first[0].tuple_shapes()
: sharded_shapes.first;
bool out_tupled = program_shape.result().IsTuple();
const std::vector<Shape>& unsharded_out_shapes =
out_tupled ? program_shape.result().tuple_shapes()
: std::vector<Shape>{program_shape.result()};
const std::vector<Shape>& sharded_out_shapes =
out_tupled ? sharded_shapes.second.tuple_shapes()
: std::vector<Shape>{sharded_shapes.second};
if (unsharded_arg_shapes.size() != arg_layout_modes.size()) {
return InvalidArgument(
"LayoutModesToXlaShapes got mismatched number of arguments and layout "
"modes (%d vs %d)",
unsharded_arg_shapes.size(), arg_layout_modes.size());
}
if (sharded_arg_shapes.size() != arg_layout_modes.s | #include "xla/service/gpu/llvm_gpu_backend/utils.h"
#include <memory>
#include <string>
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
std::string SaxpyIRFile() {
return tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"llvm_gpu_backend", "tests_data", "saxpy.ll");
}
TEST(UtilsTest, TestLoadIRModule) {
llvm::LLVMContext llvm_context;
std::string test_srcdir = tsl::testing::TensorFlowSrcRoot();
std::unique_ptr<llvm::Module> module =
LoadIRModule(SaxpyIRFile(), &llvm_context);
ASSERT_NE(nullptr, module);
ASSERT_NE(std::string::npos, module->getModuleIdentifier().find("saxpy.ll"));
ASSERT_NE(nullptr, module->getFunction("cuda_saxpy"));
}
TEST(UtilsTest, TestReplaceFilenameExtension) {
ASSERT_EQ(ReplaceFilenameExtension("baz.tx", "cc"), "baz.cc");
ASSERT_EQ(ReplaceFilenameExtension("/foo/baz.txt", "cc"), "/foo/baz.cc");
ASSERT_EQ(ReplaceFilenameExtension("/foo/baz.", "-nvptx.dummy"),
"/foo/baz.-nvptx.dummy");
ASSERT_EQ(ReplaceFilenameExtension("/foo/baz", "cc"), "/foo/baz.cc");
}
}
}
} |
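The test above pins down the expected behavior of ReplaceFilenameExtension: the text after the last '.' is swapped for the new extension, and a name with no dot simply gains one. As a reading aid, here is a minimal standalone sketch consistent with those expectations; the helper name and the absl-based implementation are assumptions, not the actual xla/service/gpu source.

#include <cstddef>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"

// Sketch only: keeps everything up to the last '.' and appends the new
// extension; a filename without any '.' just gets ".<new_extension>".
std::string ReplaceFilenameExtensionSketch(absl::string_view filename,
                                           absl::string_view new_extension) {
  size_t pos = filename.rfind('.');
  absl::string_view stem =
      pos == absl::string_view::npos ? filename : filename.substr(0, pos);
  return absl::StrCat(stem, ".", new_extension);
}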
834 | cpp | tensorflow/tensorflow | tool_params | tensorflow/lite/tools/tool_params.cc | tensorflow/lite/tools/tool_params_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_TOOL_PARAMS_H_
#define TENSORFLOW_LITE_TOOLS_TOOL_PARAMS_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace tflite {
namespace tools {
template <typename T>
class TypedToolParam;
class ToolParam {
protected:
enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
template <typename T>
static ParamType GetValueType();
public:
template <typename T>
static std::unique_ptr<ToolParam> Create(const T& default_value,
int position = 0) {
auto* param = new TypedToolParam<T>(default_value);
param->SetPosition(position);
return std::unique_ptr<ToolParam>(param);
}
template <typename T>
TypedToolParam<T>* AsTyped() {
AssertHasSameType(GetValueType<T>(), type_);
return static_cast<TypedToolParam<T>*>(this);
}
template <typename T>
const TypedToolParam<T>* AsConstTyped() const {
AssertHasSameType(GetValueType<T>(), type_);
return static_cast<const TypedToolParam<T>*>(this);
}
virtual ~ToolParam() {}
explicit ToolParam(ParamType type)
: has_value_set_(false), position_(0), type_(type) {}
bool HasValueSet() const { return has_value_set_; }
int GetPosition() const { return position_; }
void SetPosition(int position) { position_ = position; }
virtual void Set(const ToolParam&) {}
virtual std::unique_ptr<ToolParam> Clone() const = 0;
protected:
bool has_value_set_;
int position_;
private:
static void AssertHasSameType(ParamType a, ParamType b);
const ParamType type_;
};
template <typename T>
class TypedToolParam : public ToolParam {
public:
explicit TypedToolParam(const T& value)
: ToolParam(GetValueType<T>()), value_(value) {}
void Set(const T& value) {
value_ = value;
has_value_set_ = true;
}
const T& Get() const { return value_; }
void Set(const ToolParam& other) override {
Set(other.AsConstTyped<T>()->Get());
SetPosition(other.AsConstTyped<T>()->GetPosition());
}
std::unique_ptr<ToolParam> Clone() const override {
return ToolParam::Create<T>(value_, position_);
}
private:
T value_;
};
class ToolParams {
public:
void AddParam(const std::string& name, std::unique_ptr<ToolParam> value) {
params_[name] = std::move(value);
}
void RemoveParam(const std::string& name) { params_.erase(name); }
bool HasParam(const std::string& name) const {
return params_.find(name) != params_.end();
}
bool Empty() const { return params_.empty(); }
const ToolParam* GetParam(const std::string& name) const {
const auto& entry = params_.find(name);
if (entry == params_.end()) return nullptr;
return entry->second.get();
}
template <typename T>
void Set(const std::string& name, const T& value, int position = 0) {
AssertParamExists(name);
params_.at(name)->AsTyped<T>()->Set(value);
params_.at(name)->AsTyped<T>()->SetPosition(position);
}
template <typename T>
bool HasValueSet(const std::string& name) const {
AssertParamExists(name);
return params_.at(name)->AsConstTyped<T>()->HasValueSet();
}
template <typename T>
int GetPosition(const std::string& name) const {
AssertParamExists(name);
return params_.at(name)->AsConstTyped<T>()->GetPosition();
}
template <typename T>
T Get(const std::string& name) const {
AssertParamExists(name);
return params_.at(name)->AsConstTyped<T>()->Get();
}
void Set(const ToolParams& other);
void Merge(const ToolParams& other, bool overwrite = false);
private:
void AssertParamExists(const std::string& name) const;
std::unordered_map<std::string, std::unique_ptr<ToolParam>> params_;
};
#define LOG_TOOL_PARAM(params, type, name, description, verbose) \
do { \
TFLITE_MAY_LOG(INFO, (verbose) || params.HasValueSet<type>(name)) \
<< description << ": [" << params.Get<type>(name) << "]"; \
} while (0)
}
}
#endif
#include "tensorflow/lite/tools/tool_params.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace tools {
void ToolParam::AssertHasSameType(ToolParam::ParamType a,
ToolParam::ParamType b) {
TFLITE_TOOLS_CHECK(a == b) << "Type mismatch while accessing parameter.";
}
template <>
ToolParam::ParamType ToolParam::GetValueType<int32_t>() {
return ToolParam::ParamType::TYPE_INT32;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<bool>() {
return ToolParam::ParamType::TYPE_BOOL;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<float>() {
return ToolParam::ParamType::TYPE_FLOAT;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<std::string>() {
return ToolParam::ParamType::TYPE_STRING;
}
void ToolParams::AssertParamExists(const std::string& name) const {
TFLITE_TOOLS_CHECK(HasParam(name)) << name << " was not found.";
}
void ToolParams::Set(const ToolParams& other) {
for (const auto& param : params_) {
const ToolParam* other_param = other.GetParam(param.first);
if (other_param == nullptr) continue;
param.second->Set(*other_param);
}
}
void ToolParams::Merge(const ToolParams& other, bool overwrite) {
for (const auto& one : other.params_) {
auto it = params_.find(one.first);
if (it == params_.end()) {
AddParam(one.first, one.second->Clone());
} else if (overwrite) {
it->second->Set(*one.second);
}
}
}
}
} | #include "tensorflow/lite/tools/tool_params.h"
#include <gtest/gtest.h>
namespace tflite {
namespace tools {
namespace {
TEST(ToolParams, SetTest) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true, 1));
params.Set(others);
EXPECT_EQ(19, params.Get<int>("some-int1"));
EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
EXPECT_TRUE(params.HasValueSet<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_EQ(0, params.GetPosition<int>("some-int2"));
EXPECT_FALSE(params.HasValueSet<int>("some-int2"));
EXPECT_FALSE(params.HasParam("some-bool"));
}
TEST(ToolParams, MergeTestOverwriteTrue) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true ));
params.Merge(others, true );
EXPECT_EQ(19, params.Get<int>("some-int1"));
EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_TRUE(params.Get<bool>("some-bool"));
}
TEST(ToolParams, MergeTestOverwriteFalse) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true ));
params.Merge(others);
EXPECT_EQ(13, params.Get<int>("some-int1"));
EXPECT_EQ(0, params.GetPosition<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_TRUE(params.Get<bool>("some-bool"));
}
}
}
} |
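As a quick orientation to the ToolParams API exercised above, a short usage sketch follows; the parameter names ("num_runs", "graph") are hypothetical and only illustrate the typed create/set/get pattern the class enforces.

#include <cstdint>
#include <string>
#include "tensorflow/lite/tools/tool_params.h"

// Sketch: declare parameters with defaults, override one, then read back.
void ConfigureToolSketch() {
  tflite::tools::ToolParams params;
  params.AddParam("num_runs", tflite::tools::ToolParam::Create<int32_t>(50));
  params.AddParam("graph", tflite::tools::ToolParam::Create<std::string>(""));
  params.Set<std::string>("graph", "model.tflite", /*position=*/1);
  int32_t num_runs = params.Get<int32_t>("num_runs");             // 50, default
  bool graph_was_set = params.HasValueSet<std::string>("graph");  // true
  (void)num_runs;
  (void)graph_was_set;
}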
835 | cpp | tensorflow/tensorflow | gen_op_registration | tensorflow/lite/tools/gen_op_registration.cc | tensorflow/lite/tools/gen_op_registration_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_GEN_OP_REGISTRATION_H_
#define TENSORFLOW_LITE_TOOLS_GEN_OP_REGISTRATION_H_
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
string NormalizeCustomOpName(const string& op);
typedef std::map<string, std::pair<int, int>> RegisteredOpMap;
void ReadOpsFromModel(const ::tflite::Model* model,
RegisteredOpMap* builtin_ops,
RegisteredOpMap* custom_ops);
}
#endif
#include "tensorflow/lite/tools/gen_op_registration.h"
#include <algorithm>
#include <string>
#include <vector>
#include "re2/re2.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
string NormalizeCustomOpName(const string& op) {
string method(op);
RE2::GlobalReplace(&method, "([a-z])([A-Z])", "\\1_\\2");
std::transform(method.begin(), method.end(), method.begin(), ::toupper);
return method;
}
void ReadOpsFromModel(const ::tflite::Model* model,
tflite::RegisteredOpMap* builtin_ops,
tflite::RegisteredOpMap* custom_ops) {
if (!model) return;
auto opcodes = model->operator_codes();
if (!opcodes) return;
for (const auto* opcode : *opcodes) {
const int version = opcode->version();
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
auto iter_and_bool = builtin_ops->insert(
std::make_pair(tflite::EnumNameBuiltinOperator(builtin_code),
std::make_pair(version, version)));
auto& versions = iter_and_bool.first->second;
versions.first = std::min(versions.first, version);
versions.second = std::max(versions.second, version);
} else {
auto iter_and_bool = custom_ops->insert(std::make_pair(
opcode->custom_code()->c_str(), std::make_pair(version, version)));
auto& versions = iter_and_bool.first->second;
versions.first = std::min(versions.first, version);
versions.second = std::max(versions.second, version);
}
}
}
} | #include "tensorflow/lite/tools/gen_op_registration.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
using ::testing::ElementsAreArray;
namespace tflite {
class GenOpRegistrationTest : public ::testing::Test {
protected:
GenOpRegistrationTest() {}
void ReadOps(const string& model_path) {
auto model = FlatBufferModel::BuildFromFile(model_path.data());
if (model) {
ReadOpsFromModel(model->GetModel(), &builtin_ops_, &custom_ops_);
}
}
std::map<string, std::pair<int, int>> builtin_ops_;
std::map<string, std::pair<int, int>> custom_ops_;
};
TEST_F(GenOpRegistrationTest, TestNonExistentFiles) {
ReadOps("/tmp/tflite_model_1234");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestModels) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {1, 1}}};
RegisteredOpMap custom_expected{{"testing_op", {1, 1}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestVersionedModels) {
ReadOps("tensorflow/lite/testdata/test_model_versioned_ops.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {3, 3}}};
RegisteredOpMap custom_expected{{"testing_op", {2, 2}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestBothModels) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
ReadOps("tensorflow/lite/testdata/test_model_versioned_ops.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {1, 3}}};
RegisteredOpMap custom_expected{{"testing_op", {1, 2}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestEmptyModels) {
ReadOps("tensorflow/lite/testdata/empty_model.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestZeroSubgraphs) {
ReadOps("tensorflow/lite/testdata/0_subgraphs.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestBrokenMmap) {
ReadOps("tensorflow/lite/testdata/test_model_broken.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestNormalizeCustomOpName) {
std::vector<std::pair<string, string>> testcase = {
{"CustomOp", "CUSTOM_OP"},
{"a", "A"},
{"custom_op", "CUSTOM_OP"},
{"customop", "CUSTOMOP"},
};
for (const auto& test : testcase) {
EXPECT_EQ(NormalizeCustomOpName(test.first), test.second);
}
}
} |
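The [min, max] version ranges that TestBothModels checks come from the insert-then-clamp pattern inside ReadOpsFromModel. Isolated as a standalone sketch (the helper name is made up for illustration), the accumulation looks like this:

#include <algorithm>
#include <map>
#include <string>
#include <utility>

using OpVersionRangeMap = std::map<std::string, std::pair<int, int>>;

// Sketch of the range accumulation in ReadOpsFromModel: first/second hold the
// minimum and maximum operator version observed across all models read.
void RecordOpVersion(OpVersionRangeMap* ops, const std::string& name,
                     int version) {
  auto iter_and_inserted =
      ops->insert(std::make_pair(name, std::make_pair(version, version)));
  auto& versions = iter_and_inserted.first->second;
  versions.first = std::min(versions.first, version);
  versions.second = std::max(versions.second, version);
}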
836 | cpp | tensorflow/tensorflow | op_version | tensorflow/lite/tools/versioning/op_version.cc | tensorflow/lite/tools/versioning/op_version_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_VERSIONING_OP_VERSION_H_
#define TENSORFLOW_LITE_TOOLS_VERSIONING_OP_VERSION_H_
#include <vector>
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
int GetBuiltinOperatorVersion(const OpSignature& op_sig);
void UpdateOpVersion(uint8_t* model_buffer_pointer);
}
#endif
#include "tensorflow/lite/tools/versioning/op_version.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace {
bool NeedBroadcastForBinaryInputs(const OpSignature& op_sig) {
if (op_sig.inputs.size() < 2) {
return false;
}
return (op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims);
}
int GetInputMaxDims(const OpSignature& op_sig) {
int max_dims = 0;
for (auto& input : op_sig.inputs) {
if (input.dims.size() > max_dims) {
max_dims = input.dims.size();
}
}
return max_dims;
}
}
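// Returns the minimum operator version required to represent the given op
// signature; wider dtypes, extra inputs, and newer builtin options all raise
// the reported version.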
int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
switch (op_sig.op) {
case BuiltinOperator_CONV_2D: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto conv_params =
reinterpret_cast<TfLiteConvParams*>(op_sig.builtin_data);
TFLITE_DCHECK(conv_params != nullptr);
if (conv_params->quantized_bias_type) {
return 8;
}
}
if (op_sig.ext_options.conv_2d.is_grouped_convolution) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 7;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (op_sig.ext_options.conv_2d.is_per_channel_quantized) {
return 5;
}
return 2;
}
return 1;
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(1).type == kTfLiteInt16) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized) {
return 6;
}
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 7;
}
auto depthwise_conv_params =
reinterpret_cast<TfLiteDepthwiseConvParams*>(op_sig.builtin_data);
TFLITE_DCHECK(depthwise_conv_params != nullptr);
if (depthwise_conv_params->dilation_width_factor != 1 ||
depthwise_conv_params->dilation_height_factor != 1) {
return 2;
}
return 1;
}
case BuiltinOperator_FAKE_QUANT: {
auto fake_quant_params =
reinterpret_cast<TfLiteFakeQuantParams*>(op_sig.builtin_data);
TFLITE_DCHECK(fake_quant_params != nullptr);
if (fake_quant_params->narrow_range) {
return 2;
}
return 1;
}
case BuiltinOperator_FULLY_CONNECTED: {
auto fully_connected_params =
reinterpret_cast<TfLiteFullyConnectedParams*>(op_sig.builtin_data);
TFLITE_DCHECK(fully_connected_params != nullptr);
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32 &&
op_sig.ext_options.fully_connected.is_per_channel_quantized) {
return 12;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
if (fully_connected_params->quantized_bias_type) {
return 11;
}
}
if (op_sig.ext_options.fully_connected.sparse_weight) {
return 8;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 7;
}
if (op_sig.inputs.size() == 2) {
return 6;
}
if (fully_connected_params->keep_num_dims) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 10;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (fully_connected_params->asymmetric_quantize_inputs) {
return 9;
}
return 3;
}
if (fully_connected_params->weights_format ==
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
return 2;
}
return 1;
}
case BuiltinOperator_GATHER: {
if (op_sig.inputs.at(0).type == kTfLiteInt4) {
return 7;
}
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 6;
}
auto gather_params =
reinterpret_cast<TfLiteGatherParams*>(op_sig.builtin_data);
if (gather_params && gather_params->batch_dims != 0) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SVDF: {
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto svdf_params =
reinterpret_cast<TfLiteSVDFParams*>(op_sig.builtin_data);
if (svdf_params && svdf_params->asymmetric_quantize_inputs) {
return 4;
}
return 2;
}
return 1;
}
case BuiltinOperator_SIGN:
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return 2;
}
return 1;
case BuiltinOperator_MUL:
if ((op_sig.inputs.at(0).type == kTfLiteInt16 &&
!op_sig.ext_options.mul.input_quantized) ||
op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 7;
}
if (op_sig.inputs.at(0).type == kTfLiteComplex64) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt64) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
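// A rescale multiplier (input1_scale * input2_scale / output_scale) of 1.0 or
// more requires version 3.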
if (op_sig.ext_options.mul.input1_scale != 0 &&
op_sig.ext_options.mul.input2_scale != 0 &&
op_sig.ext_options.mul.output_scale != 0 &&
(op_sig.ext_options.mul.input1_scale *
op_sig.ext_options.mul.input2_scale /
op_sig.ext_options.mul.output_scale) >= 1.0) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_MAX_POOL_2D:
case BuiltinOperator_AVERAGE_POOL_2D:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TRANSPOSE:
if (op_sig.inputs.at(0).dims.size() > 5) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (op_sig.inputs.at(0).dims.size() > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TRANSPOSE_CONV: {
auto transpose_conv_params =
reinterpret_cast<TfLiteTransposeConvParams*>(op_sig.builtin_data);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
TFLITE_DCHECK(transpose_conv_params != nullptr);
if (transpose_conv_params->quantized_bias_type) {
return 5;
}
}
if (transpose_conv_params != nullptr &&
transpose_conv_params->activation) {
return 4;
}
if (op_sig.inputs.size() == 4 &&
op_sig.inputs.at(3).type != kTfLiteNoType) {
return 3;
}
if (op_sig.inputs.at(1).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_LSTM: {
auto lstm_params =
reinterpret_cast<TfLiteLSTMParams*>(op_sig.builtin_data);
if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 5;
}
TFLITE_DCHECK(lstm_params != nullptr);
if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (lstm_params->asymmetric_quantize_inputs) {
return 4;
}
return 3;
}
if (lstm_params->kernel_type == kTfLiteLSTMBasicKernel) {
return 2;
}
return 1;
}
case BuiltinOperator_SPLIT:
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(1).type == kTfLiteInt32) {
return 3;
}
if (op_sig.inputs.at(1).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SPARSE_TO_DENSE:
if (op_sig.inputs.at(2).type == kTfLiteInt8 ||
op_sig.inputs.at(2).type == kTfLiteUInt8) {
return 3;
}
if (op_sig.inputs.at(2).type == kTfLiteInt64) {
return 2;
}
return 1;
case BuiltinOperator_SLICE:
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 6;
}
if (op_sig.inputs.at(0).dims.size() > 4) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_UNPACK:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
return 1;
case BuiltinOperator_DEQUANTIZE:
if (op_sig.inputs.at(0).type == kTfLiteInt4) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteFloat16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
if (op_sig.ext_options.dequantize.is_per_channel_quantized) {
return 5;
}
return 2;
}
return 1;
case BuiltinOperator_QUANTIZE:
if (op_sig.inputs.at(0).type == kTfLiteInt4 ||
op_sig.outputs.at(0).type == kTfLiteInt4) {
return 4;
}
if (op_sig.ext_options.quantize.is_per_channel_quantized) {
return 3;
}
if (op_sig.outputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_FLOOR_DIV:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32) {
return 2;
}
return 1;
case BuiltinOperator_FLOOR_MOD:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_L2_NORMALIZATION:
if (op_sig.outputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_ABS:
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return op_sig.ext_options.abs.input_quantized ? 3 : 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
case BuiltinOperator_RELU:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
case BuiltinOperator_STRIDED_SLICE: {
auto strided_slice_params =
reinterpret_cast<TfLiteStridedSliceParams*>(op_sig.builtin_data);
TFLITE_DCHECK(strided_slice_params != nullptr);
if (strided_slice_params->offset == true) {
return 8;
}
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 7;
}
if (strided_slice_params->ellipsis_mask != 0 ||
strided_slice_params->new_axis_mask != 0) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 5;
}
if (op_sig.ext_options.strided_slice.num_dims > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_REVERSE_V2:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 2;
}
return 1;
case BuiltinOperator_RESIZE_BILINEAR: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
auto resize_bilinear_params =
reinterpret_cast<TfLiteResizeBilinearParams*>(op_sig.builtin_data);
TFLITE_DCHECK(resize_bilinear_params != nullptr);
if (resize_bilinear_params->half_pixel_centers) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
auto resize_nearest_neighbor_params =
reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
op_sig.builtin_data);
TFLITE_DCHECK(resize_nearest_neighbor_params != nullptr);
if (resize_nearest_neighbor_params->half_pixel_centers ||
resize_nearest_neighbor_params->align_corners) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_MAXIMUM:
case BuiltinOperator_MINIMUM:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_PACK:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
return 1;
case BuiltinOperator_TILE:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_SQUEEZE:
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_SPACE_TO_BATCH_ND:
case BuiltinOperator_BATCH_TO_SPACE_ND:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).dims.size() != 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_ADD: {
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt16 &&
!op_sig.ext_options.add.input_quantized) {
return 5;
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto add_params =
reinterpret_cast<TfLiteAddParams*>(op_sig.builtin_data);
if (add_params && !add_params->pot_scale_int16) {
return 3;
}
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SUB: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto sub_params =
reinterpret_cast<TfLiteSubParams*>(op_sig.builtin_data);
if (sub_params && !sub_params->pot_scale_int16) {
return 5;
}
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
return 4;
}
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_GATHER_ND:
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 5;
}
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (!op_sig.inputs.empty() &&
(op_sig.inputs.at(0).type == kTfLiteInt16)) {
return 3;
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_DIV:
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 2;
}
return 1;
case BuiltinOperator_TANH:
case BuiltinOperator_LOGISTIC:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_FILL:
if (op_sig.inputs.size() >= 2) {
if (op_sig.inputs.at(1).type == kTfLiteFloat16) return 4;
if (op_sig.inputs.at(1).type == kTfLiteInt8 ||
op_sig.inputs.at(1).type == kTfLiteInt16) {
return 3;
} else if ((op_sig.inputs.at(1).type == kTfLiteBool ||
op_sig.inputs.at(1).type == kTfLiteString)) {
return 2;
}
}
return 1;
case BuiltinOperator_EQUAL:
if (!op_sig.inputs.empty()) {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
}
return 1;
case BuiltinOperator_NOT_EQUAL:
if (!op_sig.inputs.empty()) {
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
}
return 1;
case BuiltinOperator_LEAKY_RELU:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_RANGE:
if (op_sig.inputs.at(0).type == kTfLiteInt64) {
return 2;
}
return 1;
case BuiltinOperator_BATCH_MATMUL: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto batch_mat_mul_params =
reinterpret_cast<TfLiteBatchMatMulParams*>(op_sig.builtin_data);
if (batch_mat_mul_params &&
batch_mat_mul_params->asymmetric_quantize_inputs) {
return 4;
}
}
return 1;
}
case BuiltinOperator_PAD:
case BuiltinOperator_PADV2:
if (op_sig.inputs.at(0).dims.size() > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_CONCATENATION:
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SOFTMAX:
case BuiltinOperator_MEAN:
case BuiltinOperator_MIRROR_PAD:
case BuiltinOperator_REDUCE_MAX:
case BuiltinOperator_REDUCE_MIN:
case BuiltinOperator_RELU6:
case BuiltinOperator_RSQRT:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto rnn_params =
reinterpret_cast<TfLiteRNNParams*>(op_sig.builtin_data);
if (rnn_params && rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto sequence_rnn_params =
reinterpret_cast<TfLiteSequenceRNNParams*>(op_sig.builtin_data);
if (sequence_rnn_params &&
sequence_rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto bidirectional_sequence_rnn_params =
reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
op_sig.builtin_data);
if (bidirectional_sequence_rnn_params &&
bidirectional_sequence_rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto bidirectional_sequence_lstm_params =
reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
op_sig.builtin_data);
if (bidirectional_sequence_lstm_params &&
bidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
auto unidirectional_sequence_lstm_params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
op_sig.builtin_data);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (unidirectional_sequence_lstm_params &&
unidirectional_sequence_lstm_params->diagonal_recurrent_tensors) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (unidirectional_sequence_lstm_params &&
unidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
return 3;
}
return 2;
}
return 1;
}
case BuiltinOperator_ARG_MAX:
case BuiltinOperator_ARG_MIN:
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SELECT: {
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
if (op_sig.inputs.at(0).dims.size() == 5 ||
op_sig.inputs.at(1).dims.size() == 5 ||
op_sig.inputs.at(2).dims.size() == 5)
return 3;
if (op_sig.inputs.at(0).type == kTfLiteInt8 | #include "tensorflow/lite/tools/versioning/op_version.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const std::vector<TfLiteType>& types) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (auto type : types) {
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const std::vector<TfLiteType>& types, int rank) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (auto type : types) {
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
for (int i = 0; i < rank; i++) {
tensor_spec.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
tensor_specs.push_back(tensor_spec);
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type, const int dim) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
for (int i = 0; i < dim; i++) {
tensor_spec.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec);
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type, const int dim1, const int dim2) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec1 = {};
tensor_spec1.type = type;
for (int i = 0; i < dim1; i++) {
tensor_spec1.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec1);
OpSignatureTensorSpec tensor_spec2 = {};
tensor_spec2.type = type;
for (int i = 0; i < dim2; i++) {
tensor_spec2.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec2);
return tensor_specs;
}
}
TEST(OpVersionTest, VersioningSpareToDense) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8, kTfLiteInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8, kTfLiteUInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteInt64, kTfLiteInt64}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
void SimpleVersioningTest(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
void SimpleVersioningTestExtended(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
SimpleVersioningTest(op);
}
void SimpleOutputVersioningTest(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = std::vector<OpSignatureTensorSpec>{},
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = op,
.inputs = std::vector<OpSignatureTensorSpec>{},
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningEqualTest) {
SimpleVersioningTest(BuiltinOperator_EQUAL);
OpSignature fake_op_sig = {
.op = BuiltinOperator_EQUAL,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningNotEqualTest) {
SimpleVersioningTest(BuiltinOperator_NOT_EQUAL);
OpSignature fake_op_sig = {
.op = BuiltinOperator_NOT_EQUAL,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningLessTest) {
SimpleVersioningTest(BuiltinOperator_LESS);
}
TEST(OpVersionTest, VersioningLessEqualTest) {
SimpleVersioningTest(BuiltinOperator_LESS_EQUAL);
}
TEST(OpVersionTest, VersioningGreaterTest) {
SimpleVersioningTest(BuiltinOperator_GREATER);
}
TEST(OpVersionTest, VersioningGreaterEqualTest) {
SimpleVersioningTest(BuiltinOperator_GREATER_EQUAL);
}
TEST(OpVersionTest, VersioningSpaceToBatchNDTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SPACE_TO_BATCH_ND,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningLogSoftmaxTest) {
SimpleVersioningTest(BuiltinOperator_LOG_SOFTMAX);
}
TEST(OpVersionTest, VersioningPackTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningUnpackTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningRangeTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_RANGE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningReluTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningBatchToSpaceNDTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_BATCH_TO_SPACE_ND,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningTanhTest) {
SimpleVersioningTest(BuiltinOperator_TANH);
}
TEST(OpVersionTest, VersioningStridedSliceTest) {
TfLiteStridedSliceParams strided_slice_params = {};
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_STRIDED_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
fake_op_sig.builtin_data = reinterpret_cast<void*>(&strided_slice_params);
strided_slice_params.ellipsis_mask = 0;
strided_slice_params.new_axis_mask = 2;
fake_op_sig.ext_options.strided_slice.num_dims = 5;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
strided_slice_params.new_axis_mask = 0;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.ext_options.strided_slice.num_dims = 4;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
strided_slice_params.offset = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
}
TEST(OpVersionTest, VersioningSpaceToDepthTest) {
SimpleVersioningTest(BuiltinOperator_SPACE_TO_DEPTH);
}
TEST(OpVersionTest, VersioningSliceTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteString, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
}
TEST(OpVersionTest, VersioningLogisticTest) {
SimpleVersioningTest(BuiltinOperator_SPACE_TO_DEPTH);
}
TEST(OpVersionTest, VersioningL2NormTest) {
SimpleOutputVersioningTest(BuiltinOperator_L2_NORMALIZATION);
}
TEST(OpVersionTest, VersioningMaxTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MAXIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_MAXIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningMinTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MINIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_MINIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningMeanTest) {
SimpleVersioningTestExtended(BuiltinOperator_MEAN);
}
TEST(OpVersionTest, VersioningSumTest) {
SimpleVersioningTest(BuiltinOperator_SUM);
}
TEST(OpVersionTest, VersioningReduceMinTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MIN);
}
TEST(OpVersionTest, VersioningReduceMaxTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MAX);
}
TEST(OpVersionTest, VersioningMirrorPadTest) {
SimpleVersioningTestExtended(BuiltinOperator_MIRROR_PAD);
}
TEST(OpVersionTest, VersioningReduceProdTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_REDUCE_PROD;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningAddTest) {
TfLiteAddParams add_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_ADD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&add_params)};
add_params.pot_scale_int16 = false;
fake_op_sig.ext_options.add.input_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.ext_options.add.input_quantized = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
SimpleVersioningTest(BuiltinOperator_ADD);
}
TEST(OpVersionTest, VersioningSubTest) {
TfLiteSubParams sub_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_SUB,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&sub_params)};
sub_params.pot_scale_int16 = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
SimpleVersioningTest(BuiltinOperator_SUB);
}
TEST(OpVersionTest, VersioningMUL7TestInt16) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
fake_op_sig.ext_options.mul.input_quantized = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
}
TEST(OpVersionTest, VersioningMUL7TestUInt32) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
}
TEST(OpVersionTest, VersioningMUL6Test) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteComplex64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
}
TEST(OpVersionTest, VersioningMUL5Test) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningSub4Test) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SUB,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
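// Helper: builds a MUL signature with the given tensor type whose output scale
// is input_scale / multiplier, then checks the resulting operator version.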
void SimpleMulVersioningTest(TfLiteType data_type, float multiplier,
int version) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{data_type, data_type}),
.outputs = CreateOpSignatureTensorSpecs(data_type),
};
fake_op_sig.ext_options.mul = {1.0f, 1.0f, 1.0f / multiplier};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), version);
}
TEST(OpVersionTest, VersioningMulTest) {
SimpleMulVersioningTest(kTfLiteUInt8, 0.5f, 1);
SimpleMulVersioningTest(kTfLiteInt8, 0.5f, 2);
SimpleMulVersioningTest(kTfLiteInt8, 2.0f, 3);
}
TEST(OpVersionTest, VersioningPadTest) {
SimpleVersioningTest(BuiltinOperator_PAD);
}
TEST(OpVersionTest, VersioningPadV2Test) {
SimpleVersioningTest(BuiltinOperator_PADV2);
}
TEST(OpVersionTest, VersioningConcatenationTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_CONCATENATION;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningSelectTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt32, kTfLiteUInt32, kTfLiteUInt32}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8, kTfLiteUInt8}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8, kTfLiteInt8}, 4);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32},
4);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningSelectV2Test) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT_V2;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt32, kTfLiteUInt32, kTfLiteUInt32}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT_V2;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32, kTfLiteInt32}, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningRelu6Test) {
SimpleVersioningTestExtended(BuiltinOperator_RELU6);
}
TEST(OpVersionTest, VersioningFullyConnectedTest) {
TfLiteFullyConnectedParams fully_connected_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatDefault;
fake_op_sig.ext_options.fully_connected.sparse_weight = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.asymmetric_quantize_inputs = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fully_connected_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 9);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 11);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fake_op_sig.ext_options.fully_connected.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 12);
}
TEST(OpVersionTest, VersioningDequantizeTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.ext_options.dequantize.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningQuantizeTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_QUANTIZE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.ext_options.quantize.is_per_channel_quantized = false;
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.ext_options.quantize.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningConv2DTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
fake_op_sig.ext_options.conv_2d.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig.op = BuiltinOperator_CONV_2D;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8});
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.ext_options.conv_2d.is_grouped_convolution = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
TfLiteConvParams conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&conv_params),
};
conv_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
}
TEST(OpVersionTest, VersioningFloorDivOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningFloorModOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_FLOOR_MOD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_MOD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningTransposeConvOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteUInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt8, kTfLiteInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
const auto none_type = kTfLiteNoType;
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, none_type}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
TfLiteTransposeConvParams transpose_conv_params = {};
transpose_conv_params.activation = kTfLiteActRelu;
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, none_type}),
.builtin_data = reinterpret_cast<void*>(&transpose_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
transpose_conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&transpose_conv_params),
};
transpose_conv_params.qu |
837 | cpp | tensorflow/tensorflow | op_signature | tensorflow/lite/tools/versioning/op_signature.cc | tensorflow/lite/tools/versioning/op_signature_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_VERSIONING_OP_SIGNATURE_H_
#define TENSORFLOW_LITE_TOOLS_VERSIONING_OP_SIGNATURE_H_
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
typedef struct {
TfLiteType type;
std::vector<int32_t> dims;
bool is_const;
bool is_shape_dynamic;
} OpSignatureTensorSpec;
typedef struct {
BuiltinOperator op;
std::vector<OpSignatureTensorSpec> inputs;
std::vector<OpSignatureTensorSpec> outputs;
void* builtin_data;
int version;
const void* custom_initial_data;
std::string custom_name;
union {
struct {
bool is_per_channel_quantized;
bool is_grouped_convolution;
} conv_2d;
struct {
bool is_per_channel_quantized;
} depthwise_conv_2d;
struct {
bool sparse_weight;
bool is_per_channel_quantized;
} fully_connected;
struct {
float input1_scale;
float input2_scale;
float output_scale;
bool input_quantized;
} mul;
struct {
int32_t num_dims;
} strided_slice;
struct {
bool input_quantized;
} abs;
struct {
bool is_per_channel_quantized;
} dequantize;
struct {
bool is_per_channel_quantized;
} quantize;
struct {
bool input_quantized;
} add;
} ext_options;
} OpSignature;
OpSignature GetOpSignature(const OperatorCode* op_code, const Operator* op,
const SubGraph* subgraph, const Model* model);
OpSignature GetOpSignature(const TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration);
}
#endif
#include "tensorflow/lite/tools/versioning/op_signature.h"
#include <cstdlib>
#include <cstring>
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/stderr_reporter.h"
namespace tflite {
namespace {
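// BuiltinDataAllocator that simply forwards to malloc/free; used when
// unpacking builtin operator parameters from the flatbuffer.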
class MallocDataAllocator : public BuiltinDataAllocator {
public:
void* Allocate(size_t size, size_t alignment_hint) override {
return malloc(size);
}
void Deallocate(void* data) override { free(data); }
};
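// Returns the number of dimensions of the idx-th input tensor of `op`,
// or 0 if the tensor has no shape information.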
inline int GetNumDims(const SubGraph* subgraph, const Operator* op, int idx) {
const flatbuffers::Vector<int32_t>* ret =
subgraph->tensors()->Get(op->inputs()->Get(idx))->shape();
if (ret) {
return ret->size();
} else {
return 0;
}
}
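// Builds an OpSignatureTensorSpec (type, dims, constness, dynamic-shape flag)
// for each tensor index in `tensors`, reading from the serialized model.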
std::vector<OpSignatureTensorSpec> GetOpSignatureTensorSpecs(
const flatbuffers::Vector<int32_t>* tensors, const SubGraph* subgraph,
const Model* model) {
std::vector<OpSignatureTensorSpec> tensor_specs;
if (!tensors) {
return tensor_specs;
}
StderrReporter error_reporter;
for (int32_t i = 0; i < tensors->Length(); ++i) {
int32_t tensor_no = tensors->Get(i);
OpSignatureTensorSpec tensor_spec = {kTfLiteNoType};
if (tensor_no >= 0) {
if (subgraph->tensors() && tensor_no < subgraph->tensors()->Length()) {
auto* fb_tensor = subgraph->tensors()->Get(tensor_no);
ConvertTensorType(fb_tensor->type(), &tensor_spec.type,
&error_reporter);
auto buffer_idx = fb_tensor->buffer();
if (buffer_idx != 0 && buffer_idx < model->buffers()->Length()) {
auto* buffer = model->buffers()->Get(buffer_idx);
if (buffer->data() && buffer->data()->size() != 0) {
tensor_spec.is_const = true;
}
}
const flatbuffers::Vector<int32_t>* shape_vec = fb_tensor->shape();
if (shape_vec) {
for (int32_t j = 0; j < shape_vec->Length(); ++j) {
tensor_spec.dims.push_back(shape_vec->Get(j));
}
}
const flatbuffers::Vector<int32_t>* shape_signature_vec =
fb_tensor->shape_signature();
tensor_spec.is_shape_dynamic = false;
if (shape_signature_vec) {
for (int32_t j = 0; j < shape_signature_vec->Length(); ++j) {
if (shape_signature_vec->Get(j) == -1) {
tensor_spec.is_shape_dynamic = true;
break;
}
}
}
}
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
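// Overload that reads the same per-tensor information from a live
// TfLiteContext instead of a serialized model.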
std::vector<OpSignatureTensorSpec> GetOpSignatureTensorSpecs(
TfLiteIntArray* tensors, const TfLiteContext* context,
const TfLiteNode* tflite_node) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (int32_t i = 0; i < tensors->size; ++i) {
int32_t tensor_no = tensors->data[i];
OpSignatureTensorSpec tensor_spec = {kTfLiteNoType};
if (tensor_no >= 0) {
const TfLiteTensor* tfl_tensor;
if (context->tensors != nullptr) {
tfl_tensor = &context->tensors[tensor_no];
} else {
tfl_tensor = context->GetTensor(context, tensor_no);
}
if (tfl_tensor != nullptr) {
tensor_spec.type = tfl_tensor->type;
tensor_spec.is_const = (tfl_tensor->allocation_type == kTfLiteMmapRo);
if (tfl_tensor->dims) {
for (int32_t j = 0; j < tfl_tensor->dims->size; ++j) {
tensor_spec.dims.push_back(tfl_tensor->dims->data[j]);
}
}
tensor_spec.is_shape_dynamic = HasUnspecifiedDimension(tfl_tensor);
}
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
}
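// Builds the OpSignature of a serialized operator, including the per-op
// ext_options consulted by the versioning logic.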
OpSignature GetOpSignature(const OperatorCode* op_code, const Operator* op,
const SubGraph* subgraph, const Model* model) {
auto builtin_code = GetBuiltinCode(op_code);
OpSignature op_sig = {builtin_code};
std::memset(&op_sig.ext_options, 0, sizeof(op_sig.ext_options));
if (builtin_code != BuiltinOperator_CUSTOM) {
StderrReporter error_reporter;
MallocDataAllocator allocator;
ParseOpData(op, builtin_code, &error_reporter, &allocator,
&op_sig.builtin_data);
} else {
op_sig.custom_name = op_code->custom_code()->str();
}
switch (builtin_code) {
case BuiltinOperator_DEPTHWISE_CONV_2D: {
const Tensor* filter_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const QuantizationParameters* filter_quant =
filter_tensor->quantization();
int num_channels = filter_tensor->shape()->Get(3);
if (filter_quant && filter_quant->scale() &&
filter_quant->scale()->Length() &&
filter_quant->scale()->Length() == num_channels) {
op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized = true;
}
} break;
case BuiltinOperator_FULLY_CONNECTED: {
const Tensor* weight_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
op_sig.ext_options.fully_connected.sparse_weight =
(weight_tensor->sparsity() != nullptr);
const QuantizationParameters* weight_quant =
weight_tensor->quantization();
if (weight_quant && weight_quant->scale() &&
weight_quant->scale()->size() && weight_tensor->shape() &&
weight_tensor->shape()->size()) {
op_sig.ext_options.fully_connected.is_per_channel_quantized =
weight_quant->scale()->size() > 1 &&
weight_quant->scale()->size() == weight_tensor->shape()->Get(0);
}
} break;
case BuiltinOperator_MUL: {
if (op->inputs()->Length() < 2 || op->outputs()->Length() < 1) {
break;
}
const Tensor* input1_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const Tensor* input2_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const Tensor* output_tensor =
subgraph->tensors()->Get(op->outputs()->Get(0));
const QuantizationParameters* input1_quant =
input1_tensor->quantization();
const QuantizationParameters* input2_quant = input2_tensor->quantization();
const QuantizationParameters* output_quant =
output_tensor->quantization();
if (input1_quant && input1_quant->scale() &&
input1_quant->scale()->Length() && input2_quant &&
input2_quant->scale() && input2_quant->scale()->Length() &&
output_quant && output_quant->scale() &&
output_quant->scale()->Length()) {
op_sig.ext_options.mul.input1_scale = input1_quant->scale()->Get(0);
op_sig.ext_options.mul.input2_scale = input2_quant->scale()->Get(0);
op_sig.ext_options.mul.output_scale = output_quant->scale()->Get(0);
}
if (input1_quant || input2_quant) {
op_sig.ext_options.mul.input_quantized = true;
}
} break;
case BuiltinOperator_CONV_2D: {
const Tensor* input_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const Tensor* filter_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const QuantizationParameters* filter_quant =
filter_tensor->quantization();
int num_filters = filter_tensor->shape()->Get(0);
if (filter_quant && filter_quant->scale() &&
filter_quant->scale()->Length() &&
filter_quant->scale()->Length() == num_filters) {
op_sig.ext_options.conv_2d.is_per_channel_quantized = true;
}
if (input_tensor->shape() && input_tensor->shape()->size()) {
int num_input_channels = input_tensor->shape()->Get(3);
int num_filter_input_channels = filter_tensor->shape()->Get(3);
op_sig.ext_options.conv_2d.is_grouped_convolution =
num_input_channels != num_filter_input_channels;
} else {
op_sig.ext_options.conv_2d.is_grouped_convolution = false;
}
} break;
case BuiltinOperator_STRIDED_SLICE: {
op_sig.ext_options.strided_slice.num_dims = GetNumDims(subgraph, op, 0);
} break;
case BuiltinOperator_ABS: {
if (subgraph->tensors()->Get(op->inputs()->Get(0))->quantization()) {
op_sig.ext_options.abs.input_quantized = true;
}
} break;
case BuiltinOperator_DEQUANTIZE: {
const Tensor* input_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const QuantizationParameters* input_quant = input_tensor->quantization();
if (input_quant && input_quant->scale() &&
input_quant->scale()->Length() > 1 &&
input_quant->scale()->Length() ==
input_tensor->shape()->Get(input_quant->quantized_dimension())) {
op_sig.ext_options.dequantize.is_per_channel_quantized = true;
}
} break;
case BuiltinOperator_QUANTIZE: {
const Tensor* output_tensor =
subgraph->tensors()->Get(op->outputs()->Get(0));
const QuantizationParameters* output_quant =
output_tensor->quantization();
if (output_quant && output_quant->scale() &&
output_quant->scale()->Length() > 1 &&
output_quant->scale()->Length() ==
output_tensor->shape()->Get(
output_quant->quantized_dimension())) {
op_sig.ext_options.quantize.is_per_channel_quantized = true;
}
} break;
case BuiltinOperator_ADD: {
if (subgraph->tensors()->Get(op->inputs()->Get(0))->quantization()) {
op_sig.ext_options.add.input_quantized = true;
}
} break;
default:
break;
}
op_sig.inputs = GetOpSignatureTensorSpecs(op->inputs(), subgraph, model);
op_sig.outputs = GetOpSignatureTensorSpecs(op->outputs(), subgraph, model);
op_sig.version = op_code->version();
return op_sig;
}
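// Runtime overload: builds the OpSignature from a TfLiteNode and its
// registration.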
OpSignature GetOpSignature(const TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
OpSignature op_sig = {
static_cast<BuiltinOperator>(registration->builtin_code)};
op_sig.builtin_data = node->builtin_data;
if (op_sig.op == BuiltinOperator_CUSTOM) {
op_sig.custom_name = registration->custom_name;
op_sig.custom_initial_data = node->custom_initial_data;
}
std::memset(&op_sig.ext_options, 0, sizeof(op_sig.ext_options));
op_sig.inputs = GetOpSignatureTensorSpecs(node->inputs, context, node);
op_sig.outputs = GetOpSignatureTensorSpecs(node->outputs, context, node);
op_sig.version = registration->version;
return op_sig;
}
} | #include "tensorflow/lite/tools/versioning/op_signature.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/model_builder.h"
namespace tflite {
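// Test double that fakes a three-node execution plan so GetOpSignature can be
// exercised against a TfLiteContext; the node under test is nodes_[1].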
class StubTfLiteContext : public TfLiteContext {
public:
StubTfLiteContext(const int builtin_code, const int op_version,
const int num_inputs)
: TfLiteContext({0}) {
exec_plan_ = TfLiteIntArrayCreate(3);
for (int i = 0; i < 3; ++i) exec_plan_->data[i] = i;
int tensor_no = 0;
std::memset(nodes_, 0, sizeof(nodes_));
std::memset(registrations_, 0, sizeof(registrations_));
nodes_[0].inputs = TfLiteIntArrayCreate(1);
nodes_[0].inputs->data[0] = tensor_no++;
nodes_[0].outputs = TfLiteIntArrayCreate(1);
nodes_[0].outputs->data[0] = tensor_no;
nodes_[0].builtin_data = nullptr;
nodes_[1].inputs = TfLiteIntArrayCreate(num_inputs);
for (int i = 0; i < num_inputs; i++) {
nodes_[1].inputs->data[i] = tensor_no++;
}
nodes_[1].outputs = TfLiteIntArrayCreate(1);
nodes_[1].outputs->data[0] = tensor_no;
nodes_[1].builtin_data = malloc(1024);
std::memset(nodes_[1].builtin_data, 0, 1024);
nodes_[2].inputs = TfLiteIntArrayCreate(1);
nodes_[2].inputs->data[0] = tensor_no++;
nodes_[2].outputs = TfLiteIntArrayCreate(1);
nodes_[2].outputs->data[0] = tensor_no++;
nodes_[2].builtin_data = nullptr;
tensors_.resize(tensor_no);
for (size_t i = 0; i < tensors_.size(); i++) {
std::memset(&tensors_[i], 0, sizeof(tensors_[i]));
tensors_[i].buffer_handle = kTfLiteNullBufferHandle;
tensors_[i].type = kTfLiteFloat32;
tensors_[i].dims = TfLiteIntArrayCreate(4);
for (int d = 0; d < 4; d++) {
tensors_[i].dims->data[d] = 1;
}
}
tensors = tensors_.data();
tensors_size = tensors_.size();
registrations_[0].builtin_code = kTfLiteBuiltinAdd;
registrations_[1].builtin_code = builtin_code;
registrations_[1].version = op_version;
registrations_[2].builtin_code = kTfLiteBuiltinAdd;
this->GetExecutionPlan = StubGetExecutionPlan;
this->GetNodeAndRegistration = StubGetNodeAndRegistration;
}
~StubTfLiteContext() {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
if (node.builtin_data) {
free(node.builtin_data);
}
}
for (auto& tensor : tensors_) {
TfLiteIntArrayFree(tensor.dims);
}
TfLiteIntArrayFree(exec_plan_);
}
TfLiteIntArray* exec_plan() const { return exec_plan_; }
TfLiteNode* node() { return &nodes_[1]; }
TfLiteRegistration* registration() { return ®istrations_[1]; }
TfLiteNode* node(int node_index) { return &nodes_[node_index]; }
TfLiteRegistration* registration(int reg_index) {
return ®istrations_[reg_index];
}
TfLiteTensor* tensor(int tensor_index) { return &tensors_[tensor_index]; }
private:
static TfLiteStatus StubGetExecutionPlan(TfLiteContext* context,
TfLiteIntArray** execution_plan) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*execution_plan = stub->exec_plan();
return kTfLiteOk;
}
static TfLiteStatus StubGetNodeAndRegistration(
TfLiteContext* context, int node_index, TfLiteNode** node,
TfLiteRegistration** registration) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*node = stub->node(node_index);
*registration = stub->registration(node_index);
return kTfLiteOk;
}
TfLiteIntArray* exec_plan_;
TfLiteNode nodes_[3];
TfLiteRegistration registrations_[3];
std::vector<TfLiteTensor> tensors_;
};
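// Checks signatures extracted from operators in serialized .tflite test
// models.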
TEST(GetOpSignature, FlatBufferModel) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
auto model = fb_model->GetModel();
auto subgraphs = model->subgraphs();
const SubGraph* subgraph = subgraphs->Get(0);
const Operator* op1 = subgraph->operators()->Get(0);
const OperatorCode* op_code1 =
model->operator_codes()->Get(op1->opcode_index());
OpSignature op_sig = GetOpSignature(op_code1, op1, subgraph, model);
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_FALSE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
free(op_sig.builtin_data);
const Operator* op2 = subgraph->operators()->Get(1);
const OperatorCode* op_code2 =
model->operator_codes()->Get(op2->opcode_index());
op_sig = GetOpSignature(op_code2, op2, subgraph, model);
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_FALSE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
free(op_sig.builtin_data);
const std::string& full_path3 = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/multi_signatures.bin");
auto fb_model3 = FlatBufferModel::BuildFromFile(full_path3.data());
ASSERT_TRUE(fb_model3);
auto model3 = fb_model3->GetModel();
auto subgraphs3 = model3->subgraphs();
const SubGraph* subgraph3 = subgraphs3->Get(0);
const Operator* op3 = subgraph3->operators()->Get(0);
const OperatorCode* op_code3 =
model3->operator_codes()->Get(op3->opcode_index());
op_sig = GetOpSignature(op_code3, op3, subgraph3, model3);
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 1);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_TRUE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 1);
EXPECT_TRUE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
free(op_sig.builtin_data);
}
TEST(GetOpSignature, TfLiteContext) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAdd,
1,
4);
OpSignature op_sig =
GetOpSignature(context.get(), context->node(), context->registration());
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_FALSE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
}
} |
838 | cpp | tensorflow/tensorflow | runtime_version | tensorflow/lite/tools/versioning/runtime_version.cc | tensorflow/lite/tools/versioning/runtime_version_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_VERSIONING_RUNTIME_VERSION_H_
#define TENSORFLOW_LITE_TOOLS_VERSIONING_RUNTIME_VERSION_H_
#include <string>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
namespace tflite {
void UpdateMinimumRuntimeVersionForModel(uint8_t* model_buffer_pointer);
std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
int op_version);
bool CompareRuntimeVersion(const std::string&, const std::string&);
}
#endif
#include "tensorflow/lite/tools/versioning/runtime_version.h"
#include <cstring>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
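// Returns true if version string v1 precedes v2, comparing numeric components
// left to right; an empty or shorter prefix compares as lower.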
bool CompareRuntimeVersion(const std::string& v1, const std::string& v2) {
const std::vector<std::string> vec1 = absl::StrSplit(v1, '.');
const std::vector<std::string> vec2 = absl::StrSplit(v2, '.');
int i = 0;
while (i < vec1.size() && i < vec2.size()) {
int v1_val, v2_val;
if (absl::SimpleAtoi(vec1[i], &v1_val) &&
absl::SimpleAtoi(vec2[i], &v2_val)) {
if (v1_val != v2_val) return v1_val < v2_val;
}
++i;
}
return i < vec2.size();
}
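// Looks up the minimum TF Lite runtime version that supports `op_code` at
// `op_version`; returns an empty string for unknown pairs.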
std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
int op_version) {
static const std::map<std::pair<BuiltinOperator, int>,
std::string>* op_version_map =
new std::map<std::pair<BuiltinOperator, int>, std::string>(
{{{BuiltinOperator_AVERAGE_POOL_2D, 1}, "1.5.0"},
{{BuiltinOperator_AVERAGE_POOL_2D, 2}, "1.14.0"},
{{BuiltinOperator_AVERAGE_POOL_2D, 3}, "2.3.0"},
{{BuiltinOperator_BATCH_MATMUL, 1}, "2.3.0"},
{{BuiltinOperator_BATCH_MATMUL, 2}, "2.3.0"},
{{BuiltinOperator_BATCH_MATMUL, 3}, "2.4.0"},
{{BuiltinOperator_BATCH_MATMUL, 4}, "2.5.0"},
{{BuiltinOperator_BROADCAST_TO, 2}, "2.5.0"},
{{BuiltinOperator_BROADCAST_TO, 3}, "2.5.0"},
{{BuiltinOperator_CONV_2D, 1}, "1.5.0"},
{{BuiltinOperator_CONV_2D, 2}, "1.14.0"},
{{BuiltinOperator_CONV_2D, 3}, "1.14.0"},
{{BuiltinOperator_CONV_2D, 4}, "2.3.0"},
{{BuiltinOperator_CONV_2D, 5}, "2.4.0"},
{{BuiltinOperator_CONV_2D, 6}, "2.9.0"},
{{BuiltinOperator_CONV_2D, 7}, "2.11.0"},
{{BuiltinOperator_CONV_2D, 8}, "2.15.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 1}, "1.5.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 2}, "1.12.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 3}, "1.14.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 4}, "2.2.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 5}, "2.3.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 6}, "2.3.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 7}, "2.11.0"},
{{BuiltinOperator_ADD, 1}, "1.5.0"},
{{BuiltinOperator_ADD, 2}, "1.14.0"},
{{BuiltinOperator_ADD, 3}, "2.4.0"},
{{BuiltinOperator_ADD, 4}, "2.6.0"},
{{BuiltinOperator_ADD, 5}, "2.13.0"},
{{BuiltinOperator_ADD_N, 1}, "1.14.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 1}, "1.6.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 2}, "1.14.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 3}, "2.3.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 4}, "2.12.0"},
{{BuiltinOperator_SUB, 1}, "1.6.0"},
{{BuiltinOperator_SUB, 2}, "1.14.0"},
{{BuiltinOperator_SUB, 3}, "2.3.0"},
{{BuiltinOperator_SUB, 4}, "2.4.0"},
{{BuiltinOperator_SUB, 5}, "2.4.0"},
{{BuiltinOperator_DENSIFY, 1}, "2.2.0"},
{{BuiltinOperator_DIV, 1}, "1.6.0"},
{{BuiltinOperator_DIV, 2}, "2.3.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 1}, "1.6.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 2}, "1.14.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 3}, "2.3.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 4}, "2.12.0"},
{{BuiltinOperator_CAST, 1}, "1.5.0"},
{{BuiltinOperator_CAST, 2}, "2.7.0"},
{{BuiltinOperator_CAST, 3}, "2.8.0"},
{{BuiltinOperator_CAST, 4}, "2.9.0"},
{{BuiltinOperator_CAST, 5}, "2.12.0"},
{{BuiltinOperator_CAST, 6}, "2.15.0"},
{{BuiltinOperator_CONCATENATION, 1}, "1.5.0"},
{{BuiltinOperator_CONCATENATION, 2}, "1.14.0"},
{{BuiltinOperator_CONCATENATION, 3}, "2.3.0"},
{{BuiltinOperator_CONCATENATION, 4}, "2.14.0"},
{{BuiltinOperator_DEPTH_TO_SPACE, 1}, "2.1.0"},
{{BuiltinOperator_DEPTH_TO_SPACE, 2}, "2.5.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 1}, "1.13.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 2}, "1.14.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 3}, "1.14.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, 1}, "1.5.0"},
{{BuiltinOperator_FAKE_QUANT, 1}, "1.5.0"},
{{BuiltinOperator_FAKE_QUANT, 2}, "1.10.0"},
{{BuiltinOperator_FULLY_CONNECTED, 1}, "1.5.0"},
{{BuiltinOperator_FULLY_CONNECTED, 2}, "1.10.0"},
{{BuiltinOperator_FULLY_CONNECTED, 3}, "1.14.0"},
{{BuiltinOperator_FULLY_CONNECTED, 4}, "1.14.0"},
{{BuiltinOperator_FULLY_CONNECTED, 5}, "2.0.0"},
{{BuiltinOperator_FULLY_CONNECTED, 6}, "2.1.0"},
{{BuiltinOperator_FULLY_CONNECTED, 7}, "2.3.0"},
{{BuiltinOperator_FULLY_CONNECTED, 8}, "2.3.0"},
{{BuiltinOperator_FULLY_CONNECTED, 9}, "2.3.0"},
{{BuiltinOperator_FULLY_CONNECTED, 10}, "2.11.0"},
{{BuiltinOperator_FULLY_CONNECTED, 11}, "2.15.0"},
{{BuiltinOperator_FULLY_CONNECTED, 12}, "2.17.0"},
{{BuiltinOperator_GATHER, 1}, "1.6.0"},
{{BuiltinOperator_GATHER, 2}, "1.14.0"},
{{BuiltinOperator_GATHER, 3}, "1.15.0"},
{{BuiltinOperator_GATHER, 4}, "2.4.0"},
{{BuiltinOperator_GATHER, 5}, "2.5.0"},
{{BuiltinOperator_GATHER, 6}, "2.13.0"},
{{BuiltinOperator_GATHER, 7}, "2.15.0"},
{{BuiltinOperator_GATHER_ND, 1}, "1.14.0"},
{{BuiltinOperator_GATHER_ND, 2}, "2.3.0"},
{{BuiltinOperator_GATHER_ND, 3}, "2.5.0"},
{{BuiltinOperator_GATHER_ND, 4}, "2.13.0"},
{{BuiltinOperator_GATHER_ND, 5}, "2.16.0"},
{{BuiltinOperator_HASHTABLE_LOOKUP, 1}, "1.5.0"},
{{BuiltinOperator_SVDF, 1}, "1.5.0"},
{{BuiltinOperator_SVDF, 2}, "1.14.0"},
{{BuiltinOperator_SVDF, 3}, "2.2.0"},
{{BuiltinOperator_SVDF, 4}, "2.3.0"},
{{BuiltinOperator_L2_NORMALIZATION, 1}, "1.5.0"},
{{BuiltinOperator_L2_NORMALIZATION, 2}, "1.14.0"},
{{BuiltinOperator_L2_POOL_2D, 1}, "1.5.0"},
{{BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, 1}, "1.5.0"},
{{BuiltinOperator_MAX_POOL_2D, 1}, "1.5.0"},
{{BuiltinOperator_MAX_POOL_2D, 2}, "1.14.0"},
{{BuiltinOperator_MAX_POOL_2D, 3}, "2.3.0"},
{{BuiltinOperator_MAXIMUM, 1}, "1.14.0"},
{{BuiltinOperator_MAXIMUM, 2}, "1.14.0"},
{{BuiltinOperator_MAXIMUM, 3}, "2.3.0"},
{{BuiltinOperator_MAXIMUM, 4}, "2.3.0"},
{{BuiltinOperator_MINIMUM, 1}, "1.14.0"},
{{BuiltinOperator_MINIMUM, 2}, "1.14.0"},
{{BuiltinOperator_MINIMUM, 3}, "2.3.0"},
{{BuiltinOperator_MINIMUM, 4}, "2.3.0"},
{{BuiltinOperator_MUL, 1}, "1.5.0"},
{{BuiltinOperator_MUL, 2}, "1.14.0"},
{{BuiltinOperator_MUL, 3}, "1.15.0"},
{{BuiltinOperator_MUL, 4}, "2.3.0"},
{{BuiltinOperator_MUL, 5}, "2.6.0"},
{{BuiltinOperator_MUL, 6}, "2.11.0"},
{{BuiltinOperator_MUL, 7}, "2.13.0"},
{{BuiltinOperator_NON_MAX_SUPPRESSION_V4, 1}, "2.1.0"},
{{BuiltinOperator_NON_MAX_SUPPRESSION_V5, 1}, "2.1.0"},
{{BuiltinOperator_PAD, 1}, "1.5.0"},
{{BuiltinOperator_PAD, 2}, "1.14.0"},
{{BuiltinOperator_PAD, 3}, "2.4.0"},
{{BuiltinOperator_PAD, 4}, "2.6.0"},
{{BuiltinOperator_TILE, 1}, "1.10.1"},
{{BuiltinOperator_TILE, 2}, "2.2.0"},
{{BuiltinOperator_TILE, 3}, "2.8.0"},
{{BuiltinOperator_PADV2, 1}, "1.9.0"},
{{BuiltinOperator_PADV2, 2}, "1.14.0"},
{{BuiltinOperator_PADV2, 3}, "2.4.0"},
{{BuiltinOperator_PADV2, 4}, "2.6.0"},
{{BuiltinOperator_RESHAPE, 1}, "1.5.0"},
{{BuiltinOperator_SOFTMAX, 1}, "1.5.0"},
{{BuiltinOperator_SOFTMAX, 2}, "1.14.0"},
{{BuiltinOperator_SOFTMAX, 3}, "2.3.0"},
{{BuiltinOperator_SPACE_TO_DEPTH, 1}, "1.5.0"},
{{BuiltinOperator_SPACE_TO_DEPTH, 2}, "1.14.0"},
{{BuiltinOperator_TRANSPOSE, 1}, "1.6.0"},
{{BuiltinOperator_TRANSPOSE, 2}, "1.14.0"},
{{BuiltinOperator_TRANSPOSE, 3}, "1.15.0"},
{{BuiltinOperator_TRANSPOSE, 4}, "2.3.0"},
{{BuiltinOperator_TRANSPOSE, 5}, "2.4.0"},
{{BuiltinOperator_TRANSPOSE, 6}, "2.12.0"},
{{BuiltinOperator_LSTM, 1}, "1.7.0"},
{{BuiltinOperator_LSTM, 2}, "1.10.0"},
{{BuiltinOperator_LSTM, 3}, "1.14.0"},
{{BuiltinOperator_LSTM, 4}, "2.3.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 1}, "1.13.1"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 2}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 3}, "2.3.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 4}, "2.12.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, 1}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, 2}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, 3}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 1}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 2}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 3}, "2.3.0"},
{{BuiltinOperator_MEAN, 1}, "1.6.0"},
{{BuiltinOperator_MEAN, 2}, "1.14.0"},
{{BuiltinOperator_MEAN, 3}, "2.4.0"},
{{BuiltinOperator_SUM, 1}, "1.10.0"},
{{BuiltinOperator_SUM, 2}, "1.15.0"},
{{BuiltinOperator_REDUCE_MAX, 1}, "1.11.0"},
{{BuiltinOperator_REDUCE_MAX, 2}, "1.14.0"},
{{BuiltinOperator_REDUCE_MAX, 3}, "2.5.0"},
{{BuiltinOperator_REDUCE_MIN, 1}, "1.11.0"},
{{BuiltinOperator_REDUCE_MIN, 2}, "1.14.0"},
{{BuiltinOperator_REDUCE_MIN, 3}, "2.5.0"},
{{BuiltinOperator_REDUCE_PROD, 1}, "1.11.0"},
{{BuiltinOperator_REDUCE_PROD, 2}, "2.6.0"},
{{BuiltinOperator_REDUCE_ANY, 1}, "1.11.0"},
{{BuiltinOperator_RELU6, 1}, "1.5.0"},
{{BuiltinOperator_RELU6, 2}, "1.14.0"},
{{BuiltinOperator_RELU6, 3}, "2.5.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 1}, "1.7.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 2}, "1.14.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 3}, "2.2.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 4}, "2.5.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 1}, "1.13.1"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 2}, "1.14.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 3}, "2.3.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 4}, "2.4.0"},
{{BuiltinOperator_RNN, 1}, "1.5.0"},
{{BuiltinOperator_RNN, 2}, "1.14.0"},
{{BuiltinOperator_RNN, 3}, "2.3.0"},
{{BuiltinOperator_SKIP_GRAM, 1}, "1.5.0"},
{{BuiltinOperator_SQUEEZE, 1}, "1.6.0"},
{{BuiltinOperator_SQUEEZE, 2}, "2.5.0"},
{{BuiltinOperator_SPLIT, 1}, "1.5.0"},
{{BuiltinOperator_SPLIT, 2}, "1.14.0"},
{{BuiltinOperator_SPLIT, 3}, "1.14.0"},
{{BuiltinOperator_SPLIT, 4}, "2.3.0"},
{{BuiltinOperator_SPLIT_V, 1}, "1.13.1"},
{{BuiltinOperator_SPLIT_V, 2}, "2.3.0"},
{{BuiltinOperator_STRIDED_SLICE, 1}, "1.6.0"},
{{BuiltinOperator_STRIDED_SLICE, 2}, "1.14.0"},
{{BuiltinOperator_STRIDED_SLICE, 3}, "2.1.0"},
{{BuiltinOperator_STRIDED_SLICE, 4}, "2.2.0"},
{{BuiltinOperator_STRIDED_SLICE, 5}, "2.5.0"},
{{BuiltinOperator_STRIDED_SLICE, 6}, "2.6.0"},
{{BuiltinOperator_STRIDED_SLICE, 7}, "2.14.0"},
{{BuiltinOperator_STRIDED_SLICE, 8}, "2.14.0"},
{{BuiltinOperator_TOPK_V2, 1}, "1.7.0"},
{{BuiltinOperator_TOPK_V2, 2}, "1.14.0"},
{{BuiltinOperator_TOPK_V2, 3}, "2.13.0"},
{{BuiltinOperator_ARG_MAX, 1}, "1.9.0"},
{{BuiltinOperator_ARG_MAX, 2}, "1.14.0"},
{{BuiltinOperator_ARG_MAX, 3}, "2.9.0"},
{{BuiltinOperator_ARG_MIN, 1}, "1.9.0"},
{{BuiltinOperator_ARG_MIN, 2}, "1.14.0"},
{{BuiltinOperator_ARG_MIN, 3}, "2.9.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 1}, "1.9.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 2}, "2.2.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 3}, "2.3.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 4}, "2.13.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 5}, "2.15.0"},
{{BuiltinOperator_SPARSE_TO_DENSE, 1}, "1.9.0"},
{{BuiltinOperator_SPARSE_TO_DENSE, 2}, "1.14.0"},
{{BuiltinOperator_SPARSE_TO_DENSE, 3}, "1.15.0"},
{{BuiltinOperator_EXPAND_DIMS, 1}, "1.10.0"},
{{BuiltinOperator_PACK, 1}, "1.11.0"},
{{BuiltinOperator_PACK, 2}, "1.14.0"},
{{BuiltinOperator_PACK, 3}, "2.3.0"},
{{BuiltinOperator_PACK, 4}, "2.13.0"},
{{BuiltinOperator_SHAPE, 1}, "1.10.0"},
{{BuiltinOperator_SLICE, 1}, "1.14.0"},
{{BuiltinOperator_SLICE, 2}, "1.14.0"},
{{BuiltinOperator_SLICE, 3}, "1.14.0"},
{{BuiltinOperator_SLICE, 4}, "2.4.0"},
{{BuiltinOperator_SLICE, 5}, "2.5.0"},
{{BuiltinOperator_SLICE, 6}, "2.14.0"},
{{BuiltinOperator_TANH, 1}, "1.14.0"},
{{BuiltinOperator_TANH, 2}, "1.14.0"},
{{BuiltinOperator_TANH, 3}, "2.3.0"},
{{BuiltinOperator_ONE_HOT, 1}, "1.11.0"},
{{BuiltinOperator_UNPACK, 1}, "1.11.0"},
{{BuiltinOperator_UNPACK, 2}, "1.14.0"},
{{BuiltinOperator_UNPACK, 3}, "2.2.0"},
{{BuiltinOperator_UNPACK, 4}, "2.3.0"},
{{BuiltinOperator_LEAKY_RELU, 1}, "1.13.1"},
{{BuiltinOperator_LEAKY_RELU, 2}, "2.3.0"},
{{BuiltinOperator_LOGISTIC, 1}, "1.14.0"},
{{BuiltinOperator_LOGISTIC, 2}, "1.14.0"},
{{BuiltinOperator_LOGISTIC, 3}, "2.3.0"},
{{BuiltinOperator_LOG_SOFTMAX, 1}, "1.14.0"},
{{BuiltinOperator_LOG_SOFTMAX, 2}, "1.14.0"},
{{BuiltinOperator_LSH_PROJECTION, 1}, "1.5.0"},
{{BuiltinOperator_SQUARED_DIFFERENCE, 1}, "1.13.1"},
{{BuiltinOperator_SQUARED_DIFFERENCE, 2}, "2.5.0"},
{{BuiltinOperator_MIRROR_PAD, 1}, "1.13.1"},
{{BuiltinOperator_MIRROR_PAD, 2}, "2.3.0"},
{{BuiltinOperator_MIRROR_PAD, 3}, "2.12.0"},
{{BuiltinOperator_UNIQUE, 1}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 1}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 2}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 3}, "2.3.0"},
{{BuiltinOperator_WHERE, 1}, "1.14.0"},
{{BuiltinOperator_DEQUANTIZE, 1}, "1.13.1"},
{{BuiltinOperator_DEQUANTIZE, 2}, "1.14.0"},
{{BuiltinOperator_DEQUANTIZE, 3}, "1.15.0"},
{{BuiltinOperator_DEQUANTIZE, 4}, "2.2.0"},
{{BuiltinOperator_DEQUANTIZE, 5}, "2.7.0"},
{{BuiltinOperator_DEQUANTIZE, 6}, "2.18.0"},
{{BuiltinOperator_REVERSE_SEQUENCE, 1}, "1.14.0"},
{{BuiltinOperator_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_EQUAL, 3}, "2.3.0"},
{{BuiltinOperator_EQUAL, 4}, "2.13.0"},
{{BuiltinOperator_NOT_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_NOT_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_NOT_EQUAL, 3}, "2.3.0"},
{{BuiltinOperator_GREATER, 1}, "1.14.0"},
{{BuiltinOperator_GREATER, 2}, "1.14.0"},
{{BuiltinOperator_GREATER_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_GREATER_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_GREATER_EQUAL, 3}, "2.13.0"},
{{BuiltinOperator_LESS, 1}, "1.14.0"},
{{BuiltinOperator_LESS, 2}, "1.14.0"},
{{BuiltinOperator_LESS, 3}, "2.13.0"},
{{BuiltinOperator_LESS_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_LESS_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_SCATTER_ND, 1}, "2.1.0"},
{{BuiltinOperator_SEGMENT_SUM, 1}, "2.2.0"},
{{BuiltinOperator_SELECT, 1}, "1.14.0"},
{{BuiltinOperator_SELECT, 2}, "1.14.0"},
{{BuiltinOperator_SELECT, 3}, "2.12.0"},
{{BuiltinOperator_SELECT, 4}, "2.12.0"},
{{BuiltinOperator_SELECT_V2, 1}, "2.2.0"},
{{BuiltinOperator_SELECT_V2, 2}, "2.12.0"},
{{BuiltinOperator_IF, 1}, "1.15.0"},
{{BuiltinOperator_FLOOR_DIV, 1}, "1.14.0"},
{{BuiltinOperator_FLOOR_DIV, 2}, "1.14.0"},
{{BuiltinOperator_FLOOR_DIV, 3}, "2.13.0"},
{{BuiltinOperator_FLOOR, 1}, "1.9.0"},
{{BuiltinOperator_CEIL, 1}, "1.14.0"},
{{BuiltinOperator_MATRIX_DIAG, 1}, "1.14.0"},
{{BuiltinOperator_MATRIX_SET_DIAG, 1}, "1.14.0"},
{{BuiltinOperator_ELU, 1}, "1.14.0"},
{{BuiltinOperator_QUANTIZE, 1}, "1.14.0"},
{{BuiltinOperator_QUANTIZE, 2}, "1.15.0"},
{{BuiltinOperator_QUANTIZE, 3}, "2.7.0"},
{{BuiltinOperator_ROUND, 1}, "1.14.0"},
{{BuiltinOperator_RELU, 1}, "1.5.0"},
{{BuiltinOperator_RELU, 2}, "2.1.0"},
{{BuiltinOperator_RELU, 3}, "2.5.0"},
{{BuiltinOperator_RELU_N1_TO_1, 1}, "1.5.0"},
{{BuiltinOperator_RELU_0_TO_1, 1}, "2.10.0"},
{{BuiltinOperator_PRELU, 1}, "1.8.0"},
{{BuiltinOperator_EXP, 1}, "1.7.0"},
{{BuiltinOperator_EXP, 2}, "2.12.0"},
{{BuiltinOperator_COS, 1}, "1.14.0"},
{{BuiltinOperator_NEG, 1}, "1.9.0"},
{{BuiltinOperator_POW, 1}, "1.10.0"},
{{BuiltinOperator_LOGICAL_OR, 1}, "1.11.0"},
{{BuiltinOperator_LOGICAL_AND, 1}, "1.11.0"},
{{BuiltinOperator_LOGICAL_NOT, 1}, "1.11.0"},
{{BuiltinOperator_FLOOR_MOD, 1}, "1.13.0"},
{{BuiltinOperator_FLOOR_MOD, 2}, "2.13.0"},
{{BuiltinOperator_RANGE, 1}, "1.13.0"},
{{BuiltinOperator_RANGE, 2}, "2.14.0"},
{{BuiltinOperator_SIN, 1}, "1.9.0"},
{{BuiltinOperator_LOG, 1}, "1.14.0"},
{{BuiltinOperator_LOG, 2}, "2.15.0"},
{{BuiltinOperator_SQRT, 1}, "1.10.0"},
{{BuiltinOperator_RSQRT, 1}, "1.10.0"},
{{BuiltinOperator_RSQRT, 2}, "2.5.0"},
{{BuiltinOperator_RSQRT, 3}, "2.15.0"},
{{BuiltinOperator_SQUARE, 1}, "1.12.0"},
{{BuiltinOperator_ZEROS_LIKE, 1}, "1.12.0"},
{{BuiltinOperator_ABS, 1}, "1.13.0"},
{{BuiltinOperator_ABS, 2}, "2.4.0"},
{{BuiltinOperator_ABS, 3}, "2.5.0"},
{{BuiltinOperator_ABS, 4}, "2.6.0"},
{{BuiltinOperator_ABS, 5}, "2.12.0"},
{{BuiltinOperator_HARD_SWISH, 1}, "1.15.0"},
{{BuiltinOperator_FILL, 1}, "1.13.0"},
{{BuiltinOperator_FILL, 2}, "2.3.0"},
{{BuiltinOperator_FILL, 3}, "2.5.0"},
{{BuiltinOperator_FILL, 4}, "2.12.0"},
{{BuiltinOperator_REVERSE_V2, 1}, "1.14.0"},
{{BuiltinOperator_REVERSE_V2, 2}, "2.2.0"},
{{BuiltinOperator_REVERSE_V2, 3}, "2.5.0"},
{{BuiltinOperator_RANK, 1}, "1.14.0"},
{{BuiltinOperator_WHILE, 1}, "1.15.0"},
{{BuiltinOperator_CUMSUM, 1}, "2.4.0"},
{{BuiltinOperator_CALL_ONCE, 1}, "2.5.0"},
{{BuiltinOperator_RFFT2D, 1}, "2.5.0"},
{{BuiltinOperator_CONV_3D, 1}, "2.5.0"},
{{BuiltinOperator_IMAG, 1}, "2.5.0"},
{{BuiltinOperator_REAL, 1}, "2.5.0"},
{{BuiltinOperator_COMPLEX_ABS, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE_FIND, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE_IMPORT, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE_SIZE, 1}, "2.5.0"},
{{BuiltinOperator_REDUCE_ALL, 1}, "2.6.0"},
{{BuiltinOperator_CONV_3D_TRANSPOSE, 1}, "2.6.0"},
{{BuiltinOperator_VAR_HANDLE, 1}, "2.6.0"},
{{BuiltinOperator_READ_VARIABLE, 1}, "2.6.0"},
{{BuiltinOperator_ASSIGN_VARIABLE, 1}, "2.6.0"},
{{BuiltinOperator_BROADCAST_ARGS, 1}, "2.6.0"},
{{BuiltinOperator_RANDOM_STANDARD_NORMAL, 1}, "2.8.0"},
{{BuiltinOperator_BUCKETIZE, 1}, "2.8.0"},
{{BuiltinOperator_WHERE, 2}, "2.8.0"},
{{BuiltinOperator_RANDOM_UNIFORM, 1}, "2.8.0"},
{{BuiltinOperator_MULTINOMIAL, 1}, "2.8.0"},
{{BuiltinOperator_GELU, 1}, "2.9.0"},
{{BuiltinOperator_GELU, 2}, "2.9.0"},
{{BuiltinOperator_DYNAMIC_UPDATE_SLICE, 1}, "2.9.0"},
{{BuiltinOperator_DYNAMIC_UPDATE_SLICE, 2}, "2.17.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_PROD, 1}, "2.10.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_MAX, 1}, "2.10.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_MIN, 1}, "2.11.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_SUM, 1}, "2.10.0"},
{{BuiltinOperator_ATAN2, 1}, "2.10.0"},
{{BuiltinOperator_SIGN, 1}, "2.11.0"},
{{BuiltinOperator_SIGN, 2}, "2.12.0"},
{{BuiltinOperator_BITCAST, 1}, "2.13.0"},
{{BuiltinOperator_BITWISE_XOR, 1}, "2.13.0"},
{{BuiltinOperator_RIGHT_SHIFT, 1}, "2.13.0"},
{{BuiltinOperator_STABLEHLO_SCATTER, 1}, "2.15.0"},
{{BuiltinOperator_DILATE, 1}, "2.15.0"},
{{BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR, 1}, "2.15.0"},
{{BuiltinOperator_REDUCE_WINDOW, 1}, "2.15.0"},
{{BuiltinOperator_STABLEHLO_GATHER, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_ADD, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_MULTIPLY, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_REDUCE_WINDOW, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_MAXIMUM, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_MINIMUM, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_PAD, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_COMPOSITE, 1}, "2.17.0"}});
std::pair<BuiltinOperator, int> version_key = {op_code, op_version};
auto it = op_version_map->find(version_key);
if (it == op_version_map->end()) {
return std::string();
}
return it->second;
}
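// Scans all operators in the model and records the highest required runtime
// version in the "min_runtime_version" metadata buffer (the write is skipped
// if the version string is 16 bytes or longer).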
void UpdateMinimumRuntimeVersionForModel(uint8_t* model_buffer_pointer) {
auto model = GetMutableModel(model_buffer_pointer);
std::string model_min_version;
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
const OperatorCode* op_code =
model->operator_codes()->Get(op->opcode_index());
std::string runtime_version = FindMinimumRuntimeVersionForOp(
GetBuiltinCode(op_code), op_code->version());
if (runtime_version.empty()) {
continue;
}
if (CompareRuntimeVersion(model_min_version, runtime_version)) {
model_min_version = runtime_version;
}
}
}
if (model_min_version.size() >= 16) {
TFLITE_LOG(TFLITE_LOG_WARNING,
"Skip writing minimum runtime version string since it's "
"longer than 16 bytes.");
return;
}
for (int i = 0; i < model->metadata()->size(); ++i) {
if (model->metadata()->Get(i)->name()->str() == "min_runtime_version") {
auto buffer = model->metadata()->Get(i)->buffer();
auto buffer_data =
model->mutable_buffers()->GetMutableObject(buffer)->mutable_data();
memset(buffer_data->data(), 0, buffer_data->size());
memcpy(buffer_data->data(), model_min_version.data(),
model_min_version.size());
break;
}
}
}
} | #include "tensorflow/lite/tools/versioning/runtime_version.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
TEST(OpVersionTest, CompareRuntimeVersion) {
EXPECT_TRUE(CompareRuntimeVersion("1.9", "1.13"));
EXPECT_FALSE(CompareRuntimeVersion("1.13", "1.13"));
EXPECT_TRUE(CompareRuntimeVersion("1.14", "1.14.1"));
EXPECT_FALSE(CompareRuntimeVersion("1.14.1", "1.14"));
EXPECT_FALSE(CompareRuntimeVersion("1.14.1", "1.9"));
EXPECT_FALSE(CompareRuntimeVersion("1.0.9", "1.0.8"));
EXPECT_FALSE(CompareRuntimeVersion("2.1.0", "1.2.0"));
EXPECT_TRUE(CompareRuntimeVersion("", "1.13"));
EXPECT_FALSE(CompareRuntimeVersion("", ""));
}
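// Every (op, version) pair known to the builtin op resolver must have an entry
// in the runtime version map; the failure message names the missing pair.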
TEST(OpVersionTest, OpversionMissing) {
tflite::ops::builtin::BuiltinOpResolver resolver;
for (int id = BuiltinOperator_MIN; id <= BuiltinOperator_MAX; ++id) {
for (int version = 1;; ++version) {
auto op_code = static_cast<tflite::BuiltinOperator>(id);
if (resolver.FindOp(op_code, version) == nullptr) break;
std::string runtime_version =
FindMinimumRuntimeVersionForOp(op_code, version);
EXPECT_NE(runtime_version, "")
<< "Please add the version " << version << " of "
<< tflite::EnumNamesBuiltinOperator()[op_code]
<< " to runtime_version.cc";
}
}
}
} |
839 | cpp | tensorflow/tensorflow | signature_def_util | tensorflow/lite/tools/signature/signature_def_util.cc | tensorflow/lite/tools/signature/signature_def_util_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_SIGNATURE_SIGNATURE_DEF_UTIL_H_
#define TENSORFLOW_LITE_TOOLS_SIGNATURE_SIGNATURE_DEF_UTIL_H_
#include <string>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
constexpr char kSignatureDefsMetadataName[] = "signature_defs_metadata";
tensorflow::Status SetSignatureDefMap(
const Model* model,
const std::map<std::string, tensorflow::SignatureDef>& signature_def_map,
std::string* model_data_with_signature_defs);
bool HasSignatureDef(const Model* model, const std::string& signature_key);
tensorflow::Status GetSignatureDefMap(
const Model* model,
std::map<std::string, tensorflow::SignatureDef>* signature_def_map);
tensorflow::Status ClearSignatureDefMap(const Model* model,
std::string* model_data);
}
#endif
#include "tensorflow/lite/tools/signature/signature_def_util.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using tensorflow::Status;
using SerializedSignatureDefMap = std::map<std::string, std::string>;
using SignatureDefMap = std::map<std::string, tensorflow::SignatureDef>;
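// Returns the metadata entry named kSignatureDefsMetadataName, or nullptr if
// the model has no metadata or no such entry.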
const Metadata* GetSignatureDefMetadata(const Model* model) {
if (!model || !model->metadata()) {
return nullptr;
}
for (int i = 0; i < model->metadata()->size(); ++i) {
const Metadata* metadata = model->metadata()->Get(i);
if (metadata->name()->str() == kSignatureDefsMetadataName) {
return metadata;
}
}
return nullptr;
}
Status ReadSignatureDefMap(const Model* model, const Metadata* metadata,
SerializedSignatureDefMap* map) {
if (!model || !metadata || !map) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
const flatbuffers::Vector<uint8_t>* flatbuffer_data =
model->buffers()->Get(metadata->buffer())->data();
const auto signature_defs =
flexbuffers::GetRoot(flatbuffer_data->data(), flatbuffer_data->size())
.AsMap();
for (int i = 0; i < signature_defs.Keys().size(); ++i) {
const std::string key = signature_defs.Keys()[i].AsString().c_str();
(*map)[key] = signature_defs[key].AsString().c_str();
}
return absl::OkStatus();
}
}
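// Serializes signature_def_map into a FlexBuffer map of
// {signature key -> serialized SignatureDef proto}, stores it in a model
// buffer referenced by the signature-defs metadata entry (reusing the
// existing entry if present, otherwise appending a new buffer and metadata
// entry), and writes the repacked model into *model_data_with_signature_def.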
Status SetSignatureDefMap(const Model* model,
const SignatureDefMap& signature_def_map,
std::string* model_data_with_signature_def) {
if (!model || !model_data_with_signature_def) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
if (signature_def_map.empty()) {
return tensorflow::errors::InvalidArgument(
"signature_def_map should not be empty");
}
flexbuffers::Builder fbb;
const size_t start_map = fbb.StartMap();
auto mutable_model = std::make_unique<ModelT>();
model->UnPackTo(mutable_model.get(), nullptr);
int buffer_id = mutable_model->buffers.size();
const Metadata* metadata = GetSignatureDefMetadata(model);
if (metadata) {
buffer_id = metadata->buffer();
} else {
auto buffer = std::make_unique<BufferT>();
mutable_model->buffers.emplace_back(std::move(buffer));
auto sigdef_metadata = std::make_unique<MetadataT>();
sigdef_metadata->buffer = buffer_id;
sigdef_metadata->name = kSignatureDefsMetadataName;
mutable_model->metadata.emplace_back(std::move(sigdef_metadata));
}
for (const auto& entry : signature_def_map) {
fbb.String(entry.first.c_str(), entry.second.SerializeAsString());
}
fbb.EndMap(start_map);
fbb.Finish();
mutable_model->buffers[buffer_id]->data = fbb.GetBuffer();
flatbuffers::FlatBufferBuilder builder;
auto packed_model = Model::Pack(builder, mutable_model.get());
FinishModelBuffer(builder, packed_model);
*model_data_with_signature_def =
std::string(reinterpret_cast<const char*>(builder.GetBufferPointer()),
builder.GetSize());
return absl::OkStatus();
}
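// Returns true if the model carries signature-defs metadata that contains an
// entry for signature_key.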
bool HasSignatureDef(const Model* model, const std::string& signature_key) {
if (!model) {
return false;
}
const Metadata* metadata = GetSignatureDefMetadata(model);
if (!metadata) {
return false;
}
SerializedSignatureDefMap signature_defs;
if (ReadSignatureDefMap(model, metadata, &signature_defs) !=
absl::OkStatus()) {
return false;
}
return (signature_defs.find(signature_key) != signature_defs.end());
}
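// Parses every serialized SignatureDef stored in the model's signature-defs
// metadata into *signature_def_map; the map is left untouched when the model
// has no such metadata.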
Status GetSignatureDefMap(const Model* model,
SignatureDefMap* signature_def_map) {
if (!model || !signature_def_map) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
SignatureDefMap retrieved_signature_def_map;
const Metadata* metadata = GetSignatureDefMetadata(model);
if (metadata) {
SerializedSignatureDefMap signature_defs;
auto status = ReadSignatureDefMap(model, metadata, &signature_defs);
if (status != absl::OkStatus()) {
return tensorflow::errors::Internal("Error reading signature def map: ",
status.message());
}
for (const auto& entry : signature_defs) {
tensorflow::SignatureDef signature_def;
if (!signature_def.ParseFromString(entry.second)) {
return tensorflow::errors::Internal(
"Cannot parse signature def found in flatbuffer.");
}
retrieved_signature_def_map[entry.first] = signature_def;
}
*signature_def_map = retrieved_signature_def_map;
}
return absl::OkStatus();
}
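// Removes the signature-defs metadata entry and its backing buffer from the
// model and writes the repacked model into *model_data.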
Status ClearSignatureDefMap(const Model* model, std::string* model_data) {
if (!model || !model_data) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
auto mutable_model = std::make_unique<ModelT>();
model->UnPackTo(mutable_model.get(), nullptr);
for (int id = 0; id < model->metadata()->size(); ++id) {
const Metadata* metadata = model->metadata()->Get(id);
if (metadata->name()->str() == kSignatureDefsMetadataName) {
auto* buffers = &(mutable_model->buffers);
buffers->erase(buffers->begin() + metadata->buffer());
mutable_model->metadata.erase(mutable_model->metadata.begin() + id);
break;
}
}
flatbuffers::FlatBufferBuilder builder;
auto packed_model = Model::Pack(builder, mutable_model.get());
FinishModelBuffer(builder, packed_model);
*model_data =
std::string(reinterpret_cast<const char*>(builder.GetBufferPointer()),
builder.GetSize());
return absl::OkStatus();
}
} | #include "tensorflow/lite/tools/signature/signature_def_util.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/cc/saved_model/signature_constants.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
using tensorflow::kClassifyMethodName;
using tensorflow::kDefaultServingSignatureDefKey;
using tensorflow::kPredictMethodName;
using tensorflow::SignatureDef;
using tensorflow::Status;
constexpr char kSignatureInput[] = "input";
constexpr char kSignatureOutput[] = "output";
constexpr char kTestFilePath[] = "tensorflow/lite/testdata/add.bin";
class SimpleSignatureDefUtilTest : public testing::Test {
protected:
void SetUp() override {
flatbuffer_model_ = FlatBufferModel::BuildFromFile(kTestFilePath);
ASSERT_NE(flatbuffer_model_, nullptr);
model_ = flatbuffer_model_->GetModel();
ASSERT_NE(model_, nullptr);
}
SignatureDef GetTestSignatureDef() {
auto signature_def = SignatureDef();
tensorflow::TensorInfo input_tensor;
tensorflow::TensorInfo output_tensor;
*input_tensor.mutable_name() = kSignatureInput;
*output_tensor.mutable_name() = kSignatureOutput;
*signature_def.mutable_method_name() = kClassifyMethodName;
(*signature_def.mutable_inputs())[kSignatureInput] = input_tensor;
(*signature_def.mutable_outputs())[kSignatureOutput] = output_tensor;
return signature_def;
}
std::unique_ptr<FlatBufferModel> flatbuffer_model_;
const Model* model_;
};
TEST_F(SimpleSignatureDefUtilTest, SetSignatureDefTest) {
SignatureDef expected_signature_def = GetTestSignatureDef();
std::string model_output;
const std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
SignatureDef test_signature_def =
test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
}
TEST_F(SimpleSignatureDefUtilTest, OverwriteSignatureDefTest) {
auto expected_signature_def = GetTestSignatureDef();
std::string model_output;
std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
SignatureDef test_signature_def =
test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
*expected_signature_def.mutable_method_name() = kPredictMethodName;
expected_signature_def_map.erase(
expected_signature_def_map.find(kDefaultServingSignatureDefKey));
constexpr char kTestSignatureDefKey[] = "ServingTest";
expected_signature_def_map[kTestSignatureDefKey] = expected_signature_def;
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(add_model, expected_signature_def_map, &model_output));
const Model* final_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_FALSE(HasSignatureDef(final_model, kDefaultServingSignatureDefKey));
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(final_model, &test_signature_def_map));
EXPECT_NE(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
EXPECT_TRUE(HasSignatureDef(final_model, kTestSignatureDefKey));
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(final_model, &test_signature_def_map));
test_signature_def = test_signature_def_map[kTestSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
}
TEST_F(SimpleSignatureDefUtilTest, GetSignatureDefTest) {
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(model_, &test_signature_def_map));
EXPECT_FALSE(HasSignatureDef(model_, kDefaultServingSignatureDefKey));
}
TEST_F(SimpleSignatureDefUtilTest, ClearSignatureDefTest) {
const int expected_num_buffers = model_->buffers()->size();
auto expected_signature_def = GetTestSignatureDef();
std::string model_output;
std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
SignatureDef test_signature_def;
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
test_signature_def = test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
EXPECT_EQ(absl::OkStatus(), ClearSignatureDefMap(add_model, &model_output));
const Model* clear_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_FALSE(HasSignatureDef(clear_model, kDefaultServingSignatureDefKey));
EXPECT_EQ(expected_num_buffers, clear_model->buffers()->size());
}
TEST_F(SimpleSignatureDefUtilTest, SetSignatureDefErrorsTest) {
std::map<string, SignatureDef> test_signature_def_map;
std::string model_output;
EXPECT_TRUE(tensorflow::errors::IsInvalidArgument(
SetSignatureDefMap(model_, test_signature_def_map, &model_output)));
SignatureDef test_signature_def;
test_signature_def_map[kDefaultServingSignatureDefKey] = test_signature_def;
EXPECT_TRUE(tensorflow::errors::IsInvalidArgument(
SetSignatureDefMap(model_, test_signature_def_map, nullptr)));
}
}
} |
840 | cpp | tensorflow/tensorflow | benchmark_model | tensorflow/lite/tools/benchmark/benchmark_model.cc | tensorflow/tools/benchmark/benchmark_model_test.cc | #ifndef TENSORFLOW_TOOLS_BENCHMARK_BENCHMARK_MODEL_H_
#define TENSORFLOW_TOOLS_BENCHMARK_BENCHMARK_MODEL_H_
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/stat_summarizer.h"
namespace tensorflow {
namespace benchmark_model {
struct InputLayerInfo {
string name;
DataType data_type;
TensorShape shape;
std::vector<float> initialization_values;
};
Status InitializeSession(int num_threads, const string& graph,
std::unique_ptr<Session>* session,
std::unique_ptr<GraphDef>* graph_def);
Status RunBenchmark(const std::vector<InputLayerInfo>& inputs,
const std::vector<string>& outputs,
const std::vector<string>& targets, Session* session,
StatSummarizer* stats, int64_t* inference_time_us);
Status TimeMultipleRuns(double sleep_seconds, int num_runs, double max_time_s,
const std::vector<InputLayerInfo>& inputs,
const std::vector<string>& outputs,
const std::vector<string>& targets, Session* session,
StatSummarizer* stats, int64_t* total_time_us,
int64_t* actual_num_runs);
int Main(int argc, char** argv);
}
}
#endif
#include "tensorflow/tools/benchmark/benchmark_model.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/core/util/reporter.h"
#include "tensorflow/core/util/stat_summarizer.h"
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace benchmark_model {
namespace {
Status InitializeVariables(Session* session,
const std::vector<string>& init_ops) {
LOG(INFO) << "Initializing graph variables";
for (const string& init_op : init_ops) {
TF_RETURN_IF_ERROR(session->Run({}, {}, {init_op}, nullptr));
}
return absl::OkStatus();
}
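// Zero-fills the tensor, then copies any provided initialization values into
// its leading elements, cast to T.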
template <class T>
void InitializeTensor(const std::vector<float>& initialization_values,
Tensor* input_tensor) {
auto type_tensor = input_tensor->flat<T>();
type_tensor = type_tensor.constant(0);
if (!initialization_values.empty()) {
for (int i = 0; i < initialization_values.size(); ++i) {
type_tensor(i) = static_cast<T>(initialization_values[i]);
}
}
}
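// Builds one named tensor per InputLayerInfo entry, using its declared type,
// shape and optional initialization values; string tensors are filled with
// empty strings.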
void CreateTensorsFromInputInfo(
const std::vector<InputLayerInfo>& inputs,
std::vector<std::pair<string, tensorflow::Tensor> >* input_tensors) {
for (const InputLayerInfo& input : inputs) {
Tensor input_tensor(input.data_type, input.shape);
switch (input.data_type) {
case DT_INT32: {
InitializeTensor<int32>(input.initialization_values, &input_tensor);
break;
}
case DT_INT64: {
InitializeTensor<int64>(input.initialization_values, &input_tensor);
break;
}
case DT_FLOAT: {
InitializeTensor<float>(input.initialization_values, &input_tensor);
break;
}
case DT_QUINT8: {
InitializeTensor<quint8>(input.initialization_values, &input_tensor);
break;
}
case DT_UINT8: {
InitializeTensor<uint8>(input.initialization_values, &input_tensor);
break;
}
case DT_BOOL: {
InitializeTensor<bool>(input.initialization_values, &input_tensor);
break;
}
case DT_STRING: {
if (!input.initialization_values.empty()) {
LOG(FATAL) << "Initialization values are not supported for strings";
}
auto type_tensor = input_tensor.flat<tstring>();
type_tensor = type_tensor.constant("");
break;
}
default:
LOG(FATAL) << "Unsupported input type: "
<< DataTypeString(input.data_type);
}
input_tensors->push_back({input.name, input_tensor});
}
}
Status GetOutputShapes(const std::vector<InputLayerInfo>& inputs,
const std::set<string>& wanted_shapes, Session* session,
std::unordered_map<string, TensorShape>* node_shapes) {
std::vector<std::pair<string, tensorflow::Tensor> > input_tensors;
CreateTensorsFromInputInfo(inputs, &input_tensors);
std::vector<tensorflow::Tensor> output_tensors;
std::vector<string> output_tensor_names;
for (const string& wanted_shape : wanted_shapes) {
bool is_input = false;
for (const std::pair<string, tensorflow::Tensor>& input_tensor :
input_tensors) {
if (input_tensor.first == wanted_shape) {
(*node_shapes)[wanted_shape] = input_tensor.second.shape();
is_input = true;
break;
}
}
if (!is_input) {
output_tensor_names.push_back(wanted_shape);
}
}
TF_RETURN_IF_ERROR(
session->Run(input_tensors, output_tensor_names, {}, &output_tensors));
CHECK_EQ(output_tensors.size(), output_tensor_names.size());
for (int i = 0; i < output_tensor_names.size(); ++i) {
const string& wanted_shape_name = output_tensor_names[i];
const TensorShape& found_shape = output_tensors[i].shape();
(*node_shapes)[wanted_shape_name] = found_shape;
}
return absl::OkStatus();
}
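// Estimates the FLOPs of Conv2D, MatMul (and their quantized variants) and
// DepthwiseConv2dNative nodes. The graph is run once to discover the shapes
// involved; for example, each Conv2D contributes
// 2 * output_elements * filter_height * filter_width * input_depth FLOPs.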
Status CalculateFlops(const GraphDef& graph,
const std::vector<InputLayerInfo>& inputs,
Session* session, int64_t* total_flops,
std::unordered_map<string, int64_t>* flops_by_op) {
std::unordered_set<string> floppable_ops = {
"Conv2D", "MatMul", "QuantizedConv2D", "QuantizedMatMul",
"DepthwiseConv2dNative"};
std::set<string> wanted_shapes;
for (const NodeDef& node : graph.node()) {
if (floppable_ops.count(node.op())) {
for (const string& input : node.input()) {
wanted_shapes.insert(input);
}
wanted_shapes.insert(node.name());
}
}
std::unordered_map<string, TensorShape> found_shapes;
TF_RETURN_IF_ERROR(
GetOutputShapes(inputs, wanted_shapes, session, &found_shapes));
*total_flops = 0;
for (const NodeDef& node : graph.node()) {
if (floppable_ops.count(node.op())) {
int64_t current_flops = 0;
if ((node.op() == "Conv2D") || (node.op() == "QuantizedConv2D")) {
const TensorShape& filter_shape = found_shapes[node.input(1)];
const TensorShape& output_shape = found_shapes[node.name()];
int64_t filter_height = filter_shape.dim_size(0);
int64_t filter_width = filter_shape.dim_size(1);
int64_t filter_in_depth = filter_shape.dim_size(2);
int64_t output_count = output_shape.num_elements();
current_flops =
output_count * filter_in_depth * filter_height * filter_width * 2;
} else if ((node.op() == "MatMul") || (node.op() == "QuantizedMatMul")) {
const bool transpose_a = node.attr().at("transpose_a").b();
const TensorShape& a_shape = found_shapes[node.input(0)];
const TensorShape& output_shape = found_shapes[node.name()];
int64_t k;
if (transpose_a) {
k = a_shape.dim_size(0);
} else {
k = a_shape.dim_size(1);
}
int64_t output_count = output_shape.num_elements();
current_flops = k * output_count * 2;
} else if (node.op() == "DepthwiseConv2dNative") {
const TensorShape& filter_shape = found_shapes[node.input(1)];
const TensorShape& output_shape = found_shapes[node.name()];
int64_t filter_height = filter_shape.dim_size(0);
int64_t filter_width = filter_shape.dim_size(1);
int64_t output_count = output_shape.num_elements();
current_flops = output_count * filter_height * filter_width * 2;
}
(*flops_by_op)[node.op()] += current_flops;
*total_flops += current_flops;
}
}
return absl::OkStatus();
}
void RecordBenchmarkEntry(const string& output_prefix,
const string& benchmark_name, const string& postfix,
int num_runs, double total_time_s,
double throughput = -1.0) {
std::stringstream stream;
stream << benchmark_name;
if (!postfix.empty()) {
stream << "_" << postfix;
}
TestReporter node_reporter(output_prefix, stream.str());
TF_QCHECK_OK(node_reporter.Initialize());
TF_QCHECK_OK(
node_reporter.Benchmark(num_runs, -1.0, total_time_s, throughput));
TF_QCHECK_OK(node_reporter.Close());
}
void SleepSeconds(double sleep_seconds) {
if (sleep_seconds <= 0.0) {
return;
}
#ifdef PLATFORM_WINDOWS
Env::Default()->SleepForMicroseconds(sleep_seconds * 1000 * 1000);
#else
timespec req;
req.tv_sec = static_cast<time_t>(sleep_seconds);
req.tv_nsec = (sleep_seconds - req.tv_sec) * 1000000000;
nanosleep(&req, nullptr);
#endif
}
}
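// Creates a Session configured with the requested thread count and loads the
// GraphDef from `graph`, trying binary proto first and falling back to text
// proto.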
Status InitializeSession(int num_threads, const string& graph,
std::unique_ptr<Session>* session,
std::unique_ptr<GraphDef>* graph_def) {
LOG(INFO) << "Loading TensorFlow.";
tensorflow::SessionOptions options;
tensorflow::ConfigProto& config = options.config;
if (num_threads > 0) {
config.set_intra_op_parallelism_threads(num_threads);
config.set_inter_op_parallelism_threads(num_threads);
}
LOG(INFO) << "Got config, " << config.device_count_size() << " devices";
session->reset(tensorflow::NewSession(options));
graph_def->reset(new GraphDef());
Status s = ReadBinaryProto(Env::Default(), graph, graph_def->get());
if (!s.ok()) {
s = ReadTextProto(Env::Default(), graph, graph_def->get());
}
if (!s.ok()) {
LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
return s;
}
s = (*session)->Create(*(graph_def->get()));
if (!s.ok()) {
LOG(ERROR) << "Could not create TensorFlow Session: " << s;
return s;
}
return absl::OkStatus();
}
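// Runs a single inference with the given inputs, outputs and targets and
// records the wall time in *inference_time_us. When `stats` is non-null a
// full trace is requested and fed to the StatSummarizer.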
Status RunBenchmark(const std::vector<InputLayerInfo>& inputs,
const std::vector<string>& outputs,
const std::vector<string>& targets, Session* session,
StatSummarizer* stats, int64_t* inference_time_us) {
std::vector<std::pair<string, tensorflow::Tensor> > input_tensors;
CreateTensorsFromInputInfo(inputs, &input_tensors);
std::vector<tensorflow::Tensor> output_tensors;
tensorflow::Status s;
RunOptions run_options;
if (stats != nullptr) {
run_options.set_trace_level(RunOptions::FULL_TRACE);
}
RunMetadata run_metadata;
const int64_t start_time = Env::Default()->NowMicros();
s = session->Run(run_options, input_tensors, outputs, targets,
&output_tensors, &run_metadata);
const int64_t end_time = Env::Default()->NowMicros();
*inference_time_us = end_time - start_time;
if (!s.ok()) {
LOG(ERROR) << "Error during inference: " << s;
return s;
}
if (stats != nullptr) {
assert(run_metadata.has_step_stats());
const StepStats& step_stats = run_metadata.step_stats();
stats->ProcessStepStats(step_stats);
}
return s;
}
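// Repeats RunBenchmark until num_runs iterations complete (num_runs <= 0
// means unbounded) or the accumulated run time exceeds max_time_s, sleeping
// sleep_seconds between runs and accumulating the totals.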
Status TimeMultipleRuns(double sleep_seconds, int num_runs, double max_time_s,
const std::vector<InputLayerInfo>& inputs,
const std::vector<string>& outputs,
const std::vector<string>& targets, Session* session,
StatSummarizer* stats, int64_t* total_time_us,
int64_t* actual_num_runs) {
*total_time_us = 0;
LOG(INFO) << "Running benchmark for max " << num_runs << " iterations, max "
<< max_time_s << " seconds "
<< (stats != nullptr ? "with" : "without")
<< " detailed stat logging, with " << sleep_seconds
<< "s sleep between inferences";
Stat<int64_t> stat;
const bool until_max_time = num_runs <= 0;
for (int i = 0; until_max_time || i < num_runs; ++i) {
int64_t time;
Status run_status =
RunBenchmark(inputs, outputs, targets, session, stats, &time);
stat.UpdateStat(time);
(*total_time_us) += time;
++(*actual_num_runs);
if (max_time_s > 0.0 && (*total_time_us / 1000000.0) > max_time_s) {
break;
}
if (!run_status.ok()) {
LOG(INFO) << "Failed on run " << i;
return run_status;
}
if (sleep_seconds > 0.0) {
SleepSeconds(sleep_seconds);
}
}
std::stringstream stream;
stat.OutputToStream(&stream);
LOG(INFO) << stream.str() << std::endl;
return absl::OkStatus();
}
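// Example invocation (all paths, layer names and sizes are illustrative):
//   benchmark_model --graph=/tmp/inception.pb \
//     --input_layer=input:0 --input_layer_shape=1,224,224,3 \
//     --input_layer_type=float --output_layer=output:0 --max_num_runs=50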
int Main(int argc, char** argv) {
string graph = "/data/local/tmp/tensorflow_inception_graph.pb";
string init_ops_string = "";
string input_layer_string = "input:0";
string input_layer_shape_string = "1,224,224,3";
string input_layer_type_string = "float";
string input_layer_values_string = "";
string output_layer_string = "output:0";
string target_layer_string = "";
int max_num_runs = 1000;
string max_time = "10.0";
string inference_delay = "-1.0";
string inter_benchmark_delay = "-1.0";
int num_threads = -1;
string benchmark_name = "";
string output_prefix = "";
bool show_sizes = false;
bool show_run_order = true;
int run_order_limit = 0;
bool show_time = true;
int time_limit = 10;
bool show_memory = true;
int memory_limit = 10;
bool show_type = true;
bool show_summary = true;
bool show_flops = false;
int warmup_runs = 1;
std::vector<Flag> flag_list = {
Flag("graph", &graph, "graph file name"),
Flag("init_ops", &init_ops_string, "init ops"),
Flag("input_layer", &input_layer_string, "input layer names"),
Flag("input_layer_shape", &input_layer_shape_string, "input layer shape"),
Flag("input_layer_type", &input_layer_type_string, "input layer type"),
Flag("input_layer_values", &input_layer_values_string,
"values to initialize the inputs with"),
Flag("output_layer", &output_layer_string, "output layer name"),
Flag("target_layer", &target_layer_string, "target layer name"),
Flag("max_num_runs", &max_num_runs, "number of runs max"),
Flag("max_time", &max_time, "length to run max"),
Flag("inference_delay", &inference_delay,
"delay between runs in seconds"),
Flag("inter_benchmark_delay", &inter_benchmark_delay,
"delay between benchmarks in seconds"),
Flag("num_threads", &num_threads, "number of threads"),
Flag("benchmark_name", &benchmark_name, "benchmark name"),
Flag("output_prefix", &output_prefix, "benchmark output prefix"),
Flag("show_sizes", &show_sizes, "whether to show sizes"),
Flag("show_run_order", &show_run_order,
"whether to list stats by run order"),
Flag("run_order_limit", &run_order_limit,
"how many items to show by run order"),
Flag("show_time", &show_time, "whether to list stats by time taken"),
Flag("time_limit", &time_limit, "how many items to show by time taken"),
Flag("show_memory", &show_memory, "whether to list stats by memory used"),
Flag("memory_limit", &memory_limit,
"how many items to show by memory used"),
Flag("show_type", &show_type, "whether to list stats by op type"),
Flag("show_summary", &show_summary,
"whether to show a summary of the stats"),
Flag("show_flops", &show_flops, "whether to estimate the model's FLOPs"),
Flag("warmup_runs", &warmup_runs, "how many runs to initialize model"),
};
string usage = Flags::Usage(argv[0], flag_list);
const bool parse_result = Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
LOG(ERROR) << usage;
return -1;
}
std::vector<string> init_ops = str_util::Split(init_ops_string, ',');
std::vector<string> input_layers = str_util::Split(input_layer_string, ',');
std::vector<string> input_layer_shapes =
str_util::Split(input_layer_shape_string, ':');
std::vector<string> input_layer_types =
str_util::Split(input_layer_type_string, ',');
std::vector<string> input_layer_values =
str_util::Split(input_layer_values_string, ':');
std::vector<string> output_layers = str_util::Split(output_layer_string, ',');
std::vector<string> target_layers = str_util::Split(target_layer_string, ',');
if ((input_layers.size() != input_layer_shapes.size()) ||
(input_layers.size() != input_layer_types.size())) {
LOG(ERROR) << "There must be the same number of items in --input_layer,"
<< " --input_layer_shape, and --input_layer_type, for example"
<< " --input_layer=input1,input2 --input_layer_type=float,float "
<< " --input_layer_shape=1,224,224,4:1,20";
LOG(ERROR) << "--input_layer=" << input_layer_string << " ("
<< input_layers.size() << " items)";
LOG(ERROR) << "--input_layer_type=" << input_layer_type_string << " ("
<< input_layer_types.size() << " items)";
LOG(ERROR) << "--input_layer_shape=" << input_layer_shape_string << " ("
<< input_layer_shapes.size() << " items)";
return -1;
}
const size_t inputs_count = input_layers.size();
::tensorflow::port::InitMain(argv[0], &argc, &argv);
if (argc > 1) {
LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
return -1;
}
LOG(INFO) << "Graph: [" << graph << "]";
LOG(INFO) << "Init ops:" << init_ops_string;
LOG(INFO) << "Input layers: [" << input_layer_string << "]";
LOG(INFO) << "Input shapes: [" << input_layer_shape_string << "]";
LOG(INFO) << "Input types: [" << input_layer_type_string << "]";
LOG(INFO) << "Output layers: [" << output_layer_string << "]";
LOG(INFO) << "Target layers: [" << target_layer_string << "]";
LOG(INFO) << "Num runs: [" << max_num_runs << "]";
LOG(INFO) << "Inter-inference delay (seconds): [" << inference_delay << "]";
LOG(INFO) << "Inter-benchmark delay (seconds): [" << inter_benchmark_delay
<< "]";
LOG(INFO) << "Num threads: [" << num_threads << "]";
LOG(INFO) << "Benchmark name: [" << benchmark_name << "]";
LOG(INFO) << "Output prefix: [" << output_prefix << "]";
LOG(INFO) << "Show sizes: [" << show_sizes << "]";
LOG(INFO) << "Warmup runs: [" << warmup_runs << "]";
std::unique_ptr<Session> session;
std::unique_ptr<StatSummarizer> stats;
std::unique_ptr<GraphDef> graph_def;
int64_t initialization_start_us = Env::Default()->NowMicros();
Status initialize_status =
InitializeSession(num_threads, graph, &session, &graph_def);
int64_t initialization_end_us = Env::Default()->NowMicros();
double initialization_time_s =
(initialization_end_us - initialization_start_us) / 1000000.0;
LOG(INFO) << "Initialized session in " << initialization_time_s << "s";
if (!initialize_status.ok()) {
return -1;
}
if (!init_ops.empty()) {
Status initialize_variables_status =
InitializeVariables(session.get(), init_ops);
if (!initialize_variables_status.ok()) {
LOG(ERROR) << "Graph variables initialization failed with "
<< initialize_variables_status;
return -1;
}
}
StatSummarizerOptions stats_options;
stats_options.show_run_order = show_run_order;
stats_options.run_order_limit = run_order_limit;
stats_options.show_time = show_time;
stats_options.time_limit = time_limit;
stats_options.show_memory = show_memory;
stats_options.memory_limit = memory_limit;
stats_options.show_type = show_type;
stats_options.show_summary = show_summary;
stats.reset(new tensorflow::StatSummarizer(stats_options));
const double inter_inference_sleep_seconds =
std::strtod(inference_delay.c_str(), nullptr);
const double inter_benchmark_sleep_seconds =
std::strtod(inter_benchmark_delay.c_str(), nullptr);
const double max_benchmark_time_seconds =
std::strtod(max_time.c_str(), nullptr);
std::vector<InputLayerInfo> inputs;
for (int n = 0; n < inputs_count; ++n) {
InputLayerInfo input;
CHECK(DataTypeFromString(input_layer_types[n], &input.data_type))
<< input_layer_types[n] << " was an invalid type";
std::vector<string> split_layer_shapes =
str_util::Split(input_layer_shapes[n], ',');
for (const string& layer_shape : split_layer_shapes) {
int32_t tmp;
CHECK(strings::safe_strto32(layer_shape, &tmp))
<< "Incorrect size string specified: " << input_layer_shapes[n];
if (tmp == -1) {
LOG(ERROR) << "Any unknown sizes in the shapes (-1's) must be replaced"
<< " with the size you want to benchmark with.";
return -1;
} else {
input.shape.AddDim(tmp);
}
}
input.name = input_layers[n];
if (n < input_layer_values.size()) {
std::vector<string> string_tokens =
str_util::Split(input_layer_values[n], ',');
input.initialization_values.clear();
input.initialization_values.reserve(string_tokens.size());
for (const string& str_val : string_tokens) {
float val;
CHECK(strings::safe_strtof(str_val, &val))
<< "Incorrect initialization values string specified: "
<< input_layer_values[n];
input.initialization_values.push_back(val);
}
}
inputs.push_back(input);
}
int64_t warmup_time_us = 0;
int64_t num_warmup_runs = 0;
if (warmup_runs > 0) {
Status warmup_time_status =
TimeMultipleRuns(inter_inference_sleep_seconds, warmup_runs, -1.0,
inputs, output_layers, target_layers, session.get(),
nullptr, &warmup_time_us, &num_warmup_runs);
if (!warmup_time_status.ok()) {
LOG(ERROR) << "Timing failed with " << warmup_time_status;
return -1;
}
}
SleepSeconds(inter_benchmark_sleep_seconds);
int64_t no_stat_time_us = 0;
int64_t no_stat_num_runs = 0;
Status no_stat_time_status = TimeMultipleRuns(
inter_inference_sleep_seconds, max_num_runs, max_benchmark_time_seconds,
inputs, output_layers, target_layers, session.get(), nullptr,
&no_stat_time_us, &no_stat_num_runs);
const double no_stat_wall_time = no_stat_time_us / 1000000.0;
if (!no_stat_time_status.ok()) {
LOG(ERROR) << "Timing failed with " << no_stat_time_status;
return -1;
}
SleepSeconds(inter_benchmark_sleep_seconds);
int64_t stat_time_us = 0;
int64_t stat_num_runs = 0;
Status stat_time_status = TimeMultipleRuns(
inter_inference_sleep_seconds, max_num_runs, max_benchmark_time_seconds,
inputs, output_layers, target_layers, session.get(), stats.get(),
&stat_time_us, &stat_num_runs);
if (!stat_time_status.ok()) {
LOG(ERROR) << "Timing failed with " << stat_time_status;
return -1;
}
LOG(INFO) << "Average inference timings in us: "
<< "Warmup: "
<< (warmup_runs > 0 ? warmup_time_us / warmup_runs : 0) << ", "
<< "no stats: " << no_stat_time_us / no_stat_num_runs << ", "
<< "with stats: " << stat_time_us / stat_num_runs;
stats->PrintStepStats();
if (show_sizes) {
stats->PrintOutputs();
}
if (show_flops) {
int64_t total_flops;
std::unordered_map<string, int64_t> flops_by_op;
Status flop_status = CalculateFlops(*graph_def, inputs, session.get(),
&total_flops, &flops_by_op);
if (!flop_status.ok()) {
LOG(ERROR) << "FLOPs calculation failed with " << flop_status;
return -1;
}
string pretty_flops;
if (total_flops < 1000) {
pretty_flops = strings::StrCat(total_flops, " FLOPs");
} else if (total_flops < (1000 * 1000)) {
const float rounded_flops = (total_flops / 1000.0f);
pretty_flops = strings::StrCat(rounded_flops, "k FLOPs");
} else if (total_flops < (1000 * 1000 * 1000)) {
const float rounded_flops = round(total_flops / 1000.0f) / 1000.0f;
pretty_flops = strings::StrCat(rounded_flops, " million FLOPs");
} else {
const float rounded_flops =
round(total_flops / (1000.0f * 1000.0f)) / 1000.0f;
pretty_flops = strings::StrCat(rounded_flops, " billion FLOPs");
}
LOG(INFO) << "FLOPs estimate: " << strings::HumanReadableNum(total_flops);
const double mean_run_time = no_stat_wall_time / no_stat_num_runs;
LOG(INFO) << "FLOPs/second: "
<< strings::HumanReadableNum(
static_cast<int64_t>(total_flops / mean_run_time));
}
if (!benchmark_name.empty() && !output_prefix.empty()) {
int64_t total_size = inputs[0].shape.num_elements();
const double throughput =
DataTypeSize(inputs[0].data_type) * total_size * no_stat_num_runs /
static_cast<double>(no_stat_wall_time) / (1024 * 1024);
RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs,
no_stat_wall_time, throughput);
RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1,
initialization_time_s);
RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference",
warmup_runs, warmup_time_us / 1000000.0);
RecordBenchmarkEntry(
output_prefix, benchmark_name, "meta-init-plus-first-inference", 1,
initialization_time_s + (warmup_time_us / 1000000.0) / warmup_runs);
std::map<std::string, int64_t> node_type_map_count;
std::map<std::string, int64_t> node_type_map_time;
std::map<std::string, int64_t> node_type_map_memory;
std::map<std::string, int64_t> node_type_map_times_called;
int64_t accumulated_us;
stats->ComputeStatsByType(&node_type_map_count, &node_type_map_time,
&node_type_map_memory,
&node_type_map_times_called, &accumulated_us);
for (const auto& time : node_type_map_time) {
LOG(INFO) << "Outputting: [" << time.first << "]";
RecordBenchmarkEntry(output_prefix, benchmark_name, time.first,
stat_num_runs,
(time.second * stat_num_runs) / 1000000.0f);
}
}
return 0;
}
}
} | #include "tensorflow/tools/benchmark/benchmark_model.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/stat_summarizer.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
void CreateTestGraph(const ::tensorflow::Scope& root,
benchmark_model::InputLayerInfo* input,
string* output_name, GraphDef* graph_def) {
const int input_width = 400;
const int input_height = 10;
input->shape = TensorShape({input_width, input_height});
input->data_type = DT_FLOAT;
const TensorShape constant_shape({input_height, input_width});
Tensor constant_tensor(DT_FLOAT, constant_shape);
test::FillFn<float>(&constant_tensor, [](int) -> float { return 3.0; });
auto placeholder =
ops::Placeholder(root, DT_FLOAT, ops::Placeholder::Shape(input->shape));
input->name = placeholder.node()->name();
auto m = ops::MatMul(root, placeholder, constant_tensor);
*output_name = m.node()->name();
TF_ASSERT_OK(root.ToGraphDef(graph_def));
}
TEST(BenchmarkModelTest, InitializeAndRun) {
const string dir = testing::TmpDir();
const string filename_pb = io::JoinPath(dir, "graphdef.pb");
auto root = Scope::NewRootScope().ExitOnError();
benchmark_model::InputLayerInfo input;
string output_name;
GraphDef graph_def;
CreateTestGraph(root, &input, &output_name, &graph_def);
string graph_def_serialized;
graph_def.SerializeToString(&graph_def_serialized);
TF_ASSERT_OK(
WriteStringToFile(Env::Default(), filename_pb, graph_def_serialized));
std::unique_ptr<Session> session;
std::unique_ptr<GraphDef> loaded_graph_def;
TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_pb, &session,
&loaded_graph_def));
std::unique_ptr<StatSummarizer> stats;
stats.reset(new tensorflow::StatSummarizer(*(loaded_graph_def.get())));
int64_t time;
int64_t num_runs = 0;
TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
&time, &num_runs));
ASSERT_EQ(num_runs, 10);
}
TEST(BenchmarkModeTest, TextProto) {
const string dir = testing::TmpDir();
const string filename_txt = io::JoinPath(dir, "graphdef.pb.txt");
auto root = Scope::NewRootScope().ExitOnError();
benchmark_model::InputLayerInfo input;
string output_name;
GraphDef graph_def;
CreateTestGraph(root, &input, &output_name, &graph_def);
TF_ASSERT_OK(WriteTextProto(Env::Default(), filename_txt, graph_def));
std::unique_ptr<Session> session;
std::unique_ptr<GraphDef> loaded_graph_def;
TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_txt, &session,
&loaded_graph_def));
std::unique_ptr<StatSummarizer> stats;
stats.reset(new tensorflow::StatSummarizer(*(loaded_graph_def.get())));
int64_t time;
int64_t num_runs = 0;
TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
&time, &num_runs));
ASSERT_EQ(num_runs, 10);
}
}
} |
841 | cpp | tensorflow/tensorflow | benchmark_tflite_model | tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc | tensorflow/lite/tools/benchmark/benchmark_tflite_model_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_TFLITE_MODEL_H_
#define TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_TFLITE_MODEL_H_
#include <algorithm>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/signature_runner.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/model_loader.h"
#include "tensorflow/lite/tools/utils.h"
namespace tflite {
namespace benchmark {
TfLiteStatus SplitInputLayerNameAndValueFile(
const std::string& name_and_value_file,
std::pair<std::string, std::string>& name_file_pair);
class BenchmarkInterpreterRunner {
public:
BenchmarkInterpreterRunner(tflite::Interpreter* const interpreter,
tflite::SignatureRunner* const signature_runner,
tflite::Subgraph* const subgraph)
: interpreter_(interpreter), subgraph_(subgraph) {
if (signature_runner != nullptr) {
signature_runner_.reset(signature_runner);
}
}
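  // The wrapped SignatureRunner is owned by the Interpreter, so it is
  // released here rather than deleted by the unique_ptr.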
~BenchmarkInterpreterRunner() {
if (signature_runner_ != nullptr) {
signature_runner_.release();
}
}
static std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
Create(tflite::Interpreter* interpreter, std::string signature_key);
TfLiteStatus AllocateTensors();
TfLiteStatus Invoke();
const std::vector<int>& execution_plan() const;
const std::vector<int>& inputs() const;
const std::vector<int>& outputs() const;
TfLiteTensor* tensor(int tensor_index);
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration(
int node_index) const;
TfLiteStatus ResizeInputTensor(int tensor_index,
const std::vector<int>& new_size);
private:
BenchmarkInterpreterRunner() = delete;
tflite::Interpreter* const interpreter_ = nullptr;
std::unique_ptr<tflite::SignatureRunner> signature_runner_;
tflite::Subgraph* const subgraph_ = nullptr;
};
class BenchmarkTfLiteModel : public BenchmarkModel {
public:
struct InputLayerInfo {
InputLayerInfo() : has_value_range(false), low(0), high(0) {}
std::string name;
std::vector<int> shape;
bool has_value_range;
int low;
int high;
std::string input_file_path;
};
explicit BenchmarkTfLiteModel(BenchmarkParams params = DefaultParams());
~BenchmarkTfLiteModel() override;
std::vector<Flag> GetFlags() override;
void LogParams() override;
TfLiteStatus ValidateParams() override;
uint64_t ComputeInputBytes() override;
TfLiteStatus Init() override;
TfLiteStatus RunImpl() override;
static BenchmarkParams DefaultParams();
protected:
TfLiteStatus PrepareInputData() override;
TfLiteStatus ResetInputsAndOutputs() override;
int64_t MayGetModelFileSize() override;
virtual TfLiteStatus LoadModel();
virtual std::unique_ptr<tflite::OpResolver> GetOpResolver() const;
virtual TfLiteStatus InitInterpreter();
virtual std::unique_ptr<BenchmarkListener> MayCreateProfilingListener() const;
void CleanUp();
utils::InputTensorData LoadInputTensorData(
const TfLiteTensor& t, const std::string& input_file_path);
std::vector<InputLayerInfo> inputs_;
std::vector<utils::InputTensorData> inputs_data_;
std::unique_ptr<tflite::FlatBufferModel> model_;
std::unique_ptr<tflite::Interpreter> interpreter_;
std::unique_ptr<BenchmarkInterpreterRunner> interpreter_runner_;
std::unique_ptr<tflite::ExternalCpuBackendContext> external_context_;
private:
utils::InputTensorData CreateRandomTensorData(
const TfLiteTensor& t, const InputLayerInfo* layer_info);
void AddOwnedListener(std::unique_ptr<BenchmarkListener> listener) {
if (listener == nullptr) return;
owned_listeners_.emplace_back(std::move(listener));
AddListener(owned_listeners_.back().get());
}
std::vector<std::unique_ptr<BenchmarkListener>> owned_listeners_;
std::mt19937 random_engine_;
std::vector<Interpreter::TfLiteDelegatePtr> owned_delegates_;
BenchmarkLoggingListener log_output_;
std::unique_ptr<tools::ModelLoader> model_loader_;
};
}
}
#endif
#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "ruy/profiler/profiler.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/signature_runner.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/benchmark/profiling_listener.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/model_loader.h"
#include "tensorflow/lite/tools/utils.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {}
namespace tflite {
namespace benchmark {
namespace {
using utils::InputTensorData;
using utils::VoidUniquePtr;
#if defined(TFLITE_PROFILING_ENABLED)
constexpr bool kOpProfilingEnabledDefault = true;
#else
constexpr bool kOpProfilingEnabledDefault = false;
#endif
constexpr char kOpProfilingOutputModeStdout[] = "stdout";
constexpr char kOpProfilingOutputModeCsv[] = "csv";
constexpr char kOpProfilingOutputModeProto[] = "proto";
const char* kOpProfilingOutputModes[] = {kOpProfilingOutputModeStdout,
kOpProfilingOutputModeCsv,
kOpProfilingOutputModeProto};
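// Scopes a ruy profiling session around the benchmark run.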
class RuyProfileListener : public BenchmarkListener {
public:
void OnBenchmarkStart(const BenchmarkParams& params) override;
void OnBenchmarkEnd(const BenchmarkResults& results) override;
private:
std::unique_ptr<ruy::profiler::ScopeProfile> ruy_profile_;
};
void RuyProfileListener::OnBenchmarkStart(const BenchmarkParams& params) {
ruy_profile_ = std::make_unique<ruy::profiler::ScopeProfile>();
}
void RuyProfileListener::OnBenchmarkEnd(const BenchmarkResults& results) {
ruy_profile_ = nullptr;
}
class InterpreterStatePrinter : public BenchmarkListener {
public:
explicit InterpreterStatePrinter(Interpreter* interpreter)
: interpreter_(interpreter) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
params_ = ¶ms;
if (params_->Get<bool>("print_preinvoke_state")) {
TFLITE_LOG(INFO) << "\n====Printing out TfLite interpreter pre-invoke "
"state begins====";
tflite::PrintInterpreterState(
interpreter_, params_->Get<int32_t>("tensor_name_display_length"),
params_->Get<int32_t>("tensor_type_display_length"),
params_->Get<int32_t>("alloc_type_display_length"));
TFLITE_LOG(INFO) << "====Printing out TfLite interpreter pre-invoke "
"state ends====\n";
}
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
if (params_->Get<bool>("print_postinvoke_state")) {
TFLITE_LOG(INFO) << "\n====Printing out TfLite interpreter post-invoke "
"state begins====";
tflite::PrintInterpreterState(
interpreter_, params_->Get<int32_t>("tensor_name_display_length"),
params_->Get<int32_t>("tensor_type_display_length"),
params_->Get<int32_t>("alloc_type_display_length"));
TFLITE_LOG(INFO) << "====Printing out TfLite interpreter post-invoke "
"state ends====\n";
}
}
private:
Interpreter* const interpreter_ = nullptr;
const BenchmarkParams* params_ = nullptr;
};
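// Writes the raw bytes of every output tensor to the file named by
// --output_filepath (if any) once the benchmark ends.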
class OutputSaver : public BenchmarkListener {
public:
explicit OutputSaver(BenchmarkInterpreterRunner* runner)
: interpreter_runner_(runner) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
params_ = ¶ms;
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
std::string path = params_->Get<std::string>("output_filepath");
if (path.empty()) return;
std::ofstream ofs(path, std::ofstream::out);
if (ofs.good()) {
for (int i = 0; i < interpreter_runner_->outputs().size(); i++) {
int tensor_index = interpreter_runner_->outputs()[i];
ofs.write(interpreter_runner_->tensor(tensor_index)->data.raw,
interpreter_runner_->tensor(tensor_index)->bytes);
}
ofs.close();
}
}
private:
BenchmarkInterpreterRunner* const interpreter_runner_;
const BenchmarkParams* params_ = nullptr;
};
std::vector<std::string> Split(const std::string& str, const char delim) {
if (str.empty()) {
return {};
}
return absl::StrSplit(str, delim);
}
int GetNumElements(const TfLiteIntArray* dim_array) {
int num_elements = 1;
for (size_t i = 0; i < dim_array->size; i++) {
num_elements *= dim_array->data[i];
}
return num_elements;
}
void FillRandomString(tflite::DynamicBuffer* buffer,
const TfLiteIntArray* dim_array,
const std::function<std::string()>& random_func) {
int num_elements = GetNumElements(dim_array);
for (int i = 0; i < num_elements; ++i) {
auto str = random_func();
buffer->AddString(str.data(), str.length());
}
}
int FindLayerInfoIndex(std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info,
const std::string& input_name,
const string& names_string) {
for (int i = 0; i < info->size(); ++i) {
if (info->at(i).name == input_name) {
return i;
}
}
TFLITE_LOG(FATAL) << "Cannot find the corresponding input_layer name("
<< input_name << ") in --input_layer as " << names_string;
return -1;
}
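// Parses --input_layer_value_range entries of the form "name,low,high",
// colon-separated per input, e.g. (illustrative)
// --input_layer_value_range=input1,1,10:input2,0,255.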
TfLiteStatus PopulateInputValueRanges(
const std::string& names_string, const std::string& value_ranges_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
std::vector<std::string> value_ranges = Split(value_ranges_string, ':');
for (const auto& val : value_ranges) {
std::vector<std::string> name_range = Split(val, ',');
if (name_range.size() != 3) {
TFLITE_LOG(ERROR) << "Wrong input value range item specified: " << val;
return kTfLiteError;
}
int layer_info_idx = FindLayerInfoIndex(info, name_range[0], names_string);
int low, high;
bool has_low = absl::SimpleAtoi(name_range[1], &low);
bool has_high = absl::SimpleAtoi(name_range[2], &high);
if (!has_low || !has_high || low > high) {
TFLITE_LOG(ERROR)
<< "Wrong low and high value of the input value range specified: "
<< val;
return kTfLiteError;
}
info->at(layer_info_idx).has_value_range = true;
info->at(layer_info_idx).low = low;
info->at(layer_info_idx).high = high;
}
return kTfLiteOk;
}
TfLiteStatus PopulateInputValueFiles(
const std::string& names_string, const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
std::vector<std::string> value_files = Split(value_files_string, ',');
for (const auto& val : value_files) {
std::pair<std::string, std::string> name_file_pair;
TfLiteStatus status = SplitInputLayerNameAndValueFile(val, name_file_pair);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Wrong input value file item specified: " << val;
TFLITE_LOG(ERROR) << status;
return status;
}
int layer_info_idx =
FindLayerInfoIndex(info, name_file_pair.first, names_string);
if (info->at(layer_info_idx).has_value_range) {
TFLITE_LOG(WARN)
<< "The input_name:" << info->at(layer_info_idx).name
<< " appears both in input_layer_value_files and "
"input_layer_value_range. The input_layer_value_range of the "
"input_name will be ignored.";
}
info->at(layer_info_idx).input_file_path = name_file_pair.second;
}
return kTfLiteOk;
}
TfLiteStatus PopulateInputLayerInfo(
const std::string& names_string, const std::string& shapes_string,
const std::string& value_ranges_string,
const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
info->clear();
std::vector<std::string> names = Split(names_string, ',');
std::vector<std::string> shapes = Split(shapes_string, ':');
if (names.size() != shapes.size()) {
TFLITE_LOG(ERROR)
<< "The number of items in --input_layer_shape (" << shapes_string
<< ", with " << shapes.size()
<< " items) must match the number of items in --input_layer ("
<< names_string << ", with " << names.size()
<< " items). For example --input_layer=input1,input2 "
"--input_layer_shape=1,224,224,4:1,20";
return kTfLiteError;
}
for (int i = 0; i < names.size(); ++i) {
info->push_back(BenchmarkTfLiteModel::InputLayerInfo());
BenchmarkTfLiteModel::InputLayerInfo& input = info->back();
input.name = names[i];
TFLITE_TOOLS_CHECK(util::SplitAndParse(shapes[i], ',', &input.shape))
<< "Incorrect size string specified: " << shapes[i];
for (int dim : input.shape) {
if (dim == -1) {
TFLITE_LOG(ERROR)
<< "Any unknown sizes in the shapes (-1's) must be replaced"
<< " with the size you want to benchmark with.";
return kTfLiteError;
}
}
}
TF_LITE_ENSURE_STATUS(
PopulateInputValueRanges(names_string, value_ranges_string, info));
TF_LITE_ENSURE_STATUS(
PopulateInputValueFiles(names_string, value_files_string, info));
return kTfLiteOk;
}
std::shared_ptr<profiling::ProfileSummaryFormatter>
CreateProfileSummaryFormatter(const std::string& output_mode) {
if (output_mode == kOpProfilingOutputModeCsv) {
return std::make_shared<profiling::ProfileSummaryCSVFormatter>();
} else if (output_mode == kOpProfilingOutputModeProto) {
return std::make_shared<profiling::ProfileSummaryProtoFormatter>();
} else {
return std::make_shared<profiling::ProfileSummaryDefaultFormatter>();
}
}
}
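// Splits a "name:file" pair on its single unescaped ':' delimiter; "::"
// escapes a literal ':' inside either part, e.g. (illustrative)
// "input::0:/data/input0.bin" -> {"input:0", "/data/input0.bin"}.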
TfLiteStatus SplitInputLayerNameAndValueFile(
const std::string& name_and_value_file,
std::pair<std::string, std::string>& name_file_pair) {
int delim_index = -1;
  // Guard against size_t underflow when the input string is empty.
  for (int i = 0; i + 1 < static_cast<int>(name_and_value_file.length()); ++i) {
if (name_and_value_file[i] == ':') {
if (name_and_value_file[i + 1] == ':') {
++i;
} else {
if (delim_index == -1) {
delim_index = i;
} else {
TFLITE_LOG(ERROR)
<< name_and_value_file << " contains more than one delimiter.";
return kTfLiteError;
}
}
}
}
if (delim_index == -1) {
TFLITE_LOG(ERROR) << name_and_value_file
<< " doesn't contain any delimiter.";
return kTfLiteError;
}
name_file_pair.first = absl::StrReplaceAll(
name_and_value_file.substr(0, delim_index), {{"::", ":"}});
name_file_pair.second = absl::StrReplaceAll(
name_and_value_file.substr(delim_index + 1), {{"::", ":"}});
return kTfLiteOk;
}
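// Decides what to benchmark. When a signature key is supplied it is resolved
// against the model's signatures and the matching SignatureRunner plus its
// subgraph are wrapped (an unknown key on a model with signatures yields an
// error); when no key is given the interpreter's primary subgraph is used.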
std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
BenchmarkInterpreterRunner::Create(tflite::Interpreter* const interpreter,
std::string signature_key) {
if (!signature_key.empty()) {
const std::vector<const std::string*>& keys = interpreter->signature_keys();
bool found = std::any_of(
keys.begin(), keys.end(),
[&signature_key](const auto& k) { return *k == signature_key; });
if (keys.size() > 1 && (signature_key.empty() || !found)) {
TFLITE_LOG(ERROR)
<< "Signature not specified or incorrect for graph with multiple "
"signatures. Pass one of the following to the flag "
"\"--signature_to_run_for\"";
for (const std::string* k : keys) {
TFLITE_LOG(ERROR) << " #> Signature key: " << *k;
}
return {kTfLiteError, nullptr};
} else if (keys.size() == 1 && signature_key.empty()) {
signature_key = *keys[0];
}
if (!signature_key.empty() && !keys.empty()) {
TFLITE_LOG(INFO) << "Using signature: " << signature_key;
auto signature_runner =
interpreter->GetSignatureRunner(signature_key.c_str());
if (signature_runner == nullptr) {
return {kTfLiteError, nullptr};
} else {
int subgraph_index =
interpreter->GetSubgraphIndexFromSignature(signature_key.c_str());
return {kTfLiteOk, std::make_unique<BenchmarkInterpreterRunner>(
interpreter, signature_runner,
interpreter->subgraph(subgraph_index))};
}
}
}
return {kTfLiteOk, std::make_unique<BenchmarkInterpreterRunner>(
interpreter, nullptr, nullptr)};
}
TfLiteStatus BenchmarkInterpreterRunner::AllocateTensors() {
if (signature_runner_ != nullptr) {
return signature_runner_->AllocateTensors();
} else {
return interpreter_->AllocateTensors();
}
}
TfLiteStatus BenchmarkInterpreterRunner::Invoke() {
if (signature_runner_ != nullptr) {
return signature_runner_->Invoke();
} else {
return interpreter_->Invoke();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::execution_plan() const {
if (signature_runner_ != nullptr) {
return subgraph_->execution_plan();
} else {
return interpreter_->execution_plan();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::inputs() const {
if (signature_runner_ != nullptr) {
return subgraph_->inputs();
} else {
return interpreter_->inputs();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::outputs() const {
if (signature_runner_ != nullptr) {
return subgraph_->outputs();
} else {
return interpreter_->outputs();
}
}
TfLiteTensor* BenchmarkInterpreterRunner::tensor(int tensor_index) {
if (signature_runner_ != nullptr) {
return subgraph_->tensor(tensor_index);
} else {
return interpreter_->tensor(tensor_index);
}
}
const std::pair<TfLiteNode, TfLiteRegistration>*
BenchmarkInterpreterRunner::node_and_registration(int node_index) const {
if (signature_runner_ != nullptr) {
return subgraph_->node_and_registration(node_index);
} else {
return interpreter_->node_and_registration(node_index);
}
}
TfLiteStatus BenchmarkInterpreterRunner::ResizeInputTensor(
int tensor_index, const std::vector<int>& new_size) {
if (signature_runner_ != nullptr) {
return subgraph_->ResizeInputTensor(tensor_index, new_size);
} else {
return interpreter_->ResizeInputTensor(tensor_index, new_size);
}
}
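// Baseline parameter set; callers override entries before constructing the
// tool. A minimal sketch (the model path is illustrative), mirroring the unit
// tests further below:
//   BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
//   params.Set<std::string>("graph", "/data/local/tmp/model.tflite");
//   BenchmarkTfLiteModel benchmark(std::move(params));
//   benchmark.Run();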
BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
BenchmarkParams default_params = BenchmarkModel::DefaultParams();
default_params.AddParam("graph", BenchmarkParam::Create<std::string>(""));
default_params.AddParam("signature_to_run_for",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("list_signatures",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("input_layer",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_shape",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_range",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_files",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("allow_fp16", BenchmarkParam::Create<bool>(false));
default_params.AddParam("require_full_delegation",
BenchmarkParam::Create<bool>(false));
default_params.AddParam(
"enable_op_profiling",
BenchmarkParam::Create<bool>(kOpProfilingEnabledDefault));
default_params.AddParam(
"op_profiling_output_mode",
BenchmarkParam::Create<std::string>(kOpProfilingOutputModeStdout));
default_params.AddParam("op_profiling_output_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("max_profiling_buffer_entries",
BenchmarkParam::Create<int32_t>(1024));
default_params.AddParam("allow_dynamic_profiling_buffer_increase",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("profiling_output_csv_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("print_preinvoke_state",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("print_postinvoke_state",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("release_dynamic_tensors",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("optimize_memory_for_large_tensors",
BenchmarkParam::Create<int32_t>(0));
default_params.AddParam("disable_delegate_clustering",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("enable_builtin_cast_constant_cache",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("output_filepath",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("tensor_name_display_length",
BenchmarkParam::Create<int32_t>(25));
default_params.AddParam("tensor_type_display_length",
BenchmarkParam::Create<int32_t>(15));
default_params.AddParam("alloc_type_display_length",
BenchmarkParam::Create<int32_t>(18));
tools::ProvidedDelegateList delegate_providers(&default_params);
delegate_providers.AddAllDelegateParams();
return default_params;
}
BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
: BenchmarkModel(std::move(params)),
random_engine_(std::random_device()()) {
AddListener(&log_output_);
}
void BenchmarkTfLiteModel::CleanUp() {
inputs_data_.clear();
}
BenchmarkTfLiteModel::~BenchmarkTfLiteModel() {
CleanUp();
interpreter_runner_.reset();
interpreter_.reset();
}
std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
std::vector<Flag> flags = BenchmarkModel::GetFlags();
std::vector<Flag> specific_flags = {
CreateFlag<std::string>("graph", ¶ms_, "graph file name"),
CreateFlag<std::string>("input_layer", ¶ms_, "input layer names"),
CreateFlag<std::string>("input_layer_shape", ¶ms_,
"input layer shape"),
CreateFlag<std::string>(
"input_layer_value_range", ¶ms_,
"A map-like string representing value range for *integer* input "
"layers. Each item is separated by ':', and the item value consists "
"of input layer name and integer-only range values (both low and "
"high are inclusive) separated by ',', e.g. input1,1,2:input2,0,254"),
CreateFlag<std::string>(
"input_layer_value_files", ¶ms_,
"A map-like string representing value file. Each item is separated "
"by ',', and the item value consists "
"of input layer name and value file path separated by ':', e.g. "
"input1:file_path1,input2:file_path2. In case the input layer name "
"contains ':' e.g. \"input:0\", escape it with \"\\:\". If the "
"input_name appears both in input_layer_value_range and "
"input_layer_value_files, input_layer_value_range of the input_name "
"will be ignored. The file format is binary and it should be array "
"format or null separated strings format."),
CreateFlag<bool>("allow_fp16", ¶ms_, "allow fp16"),
CreateFlag<bool>("require_full_delegation", ¶ms_,
"require delegate to run the entire graph"),
CreateFlag<bool>("enable_op_profiling", ¶ms_, "enable op profiling"),
CreateFlag<std::string>(
"op_profiling_output_mode", ¶ms_,
"Output mode for op profiling results. Supported values are: "
"'stdout', 'csv' and 'proto'."),
CreateFlag<std::string>("op_profiling_output_file", ¶ms_,
"Output file for op profiling results."),
CreateFlag<int32_t>("max_profiling_buffer_entries", ¶ms_,
"max initial profiling buffer entries"),
CreateFlag<bool>("allow_dynamic_profiling_buffer_increase", ¶ms_,
"allow dynamic increase on profiling buffer entries"),
CreateFlag<std::string>("profiling_output_csv_file", ¶ms_,
"[DEPRECATED: Use op_profiling_output_file and "
"op_profiling_output_mode instead] File path to "
"export profile data as CSV, if not set "
"prints to stdout."),
CreateFlag<bool>(
"print_preinvoke_state", ¶ms_,
"print out the interpreter internals just before calling Invoke. The "
"internals will include allocated memory size of each tensor etc."),
CreateFlag<bool>(
"print_postinvoke_state", ¶ms_,
"print out the interpreter internals just before benchmark completes "
"(i.e. after all repeated Invoke calls complete). The internals will "
"include allocated memory size of each tensor etc."),
CreateFlag<bool>("release_dynamic_tensors", ¶ms_,
"Ensure dynamic tensor's memory is released when they "
"are not used."),
CreateFlag<int32_t>(
"optimize_memory_for_large_tensors", ¶ms_, | #include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace benchmark {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
class TestBenchmarkListener : public BenchmarkListener {
public:
void OnBenchmarkEnd(const BenchmarkResults& results) override {
results_ = results;
}
BenchmarkResults results_;
};
TEST(BenchmarkTfLiteModelTest, GetModelSizeFromPathSucceeded) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
params.Set<std::string>("graph", kModelPath);
params.Set<int>("num_runs", 1);
params.Set<int>("warmup_runs", 0);
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
TestBenchmarkListener listener;
benchmark.AddListener(&listener);
benchmark.Run();
EXPECT_GE(listener.results_.model_size_mb(), 0);
}
TEST(BenchmarkTfLiteModelTest, GetModelSizeFromFileDescriptorSucceeded) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
int fd = open(kModelPath, O_RDONLY);
ASSERT_GE(fd, 0);
int model_offset = 0;
struct stat stat_buf = {0};
ASSERT_EQ(fstat(fd, &stat_buf), 0);
params.Set<std::string>("graph", absl::StrCat("fd:", fd, ":", model_offset,
":", stat_buf.st_size));
params.Set<int>("num_runs", 1);
params.Set<int>("warmup_runs", 0);
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
TestBenchmarkListener listener;
benchmark.AddListener(&listener);
benchmark.Run();
EXPECT_EQ(listener.results_.model_size_mb(), stat_buf.st_size / 1e6);
}
TEST(BenchmarkTfLiteModelTest, ResizeInputWithDelegate) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
params.Set<std::string>("graph", kModelPath);
params.Set<bool>("use_xnnpack", true);
params.Set<std::string>("input_layer", "input_87");
params.Set<std::string>("input_layer_shape", "2,224,224,3");
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
EXPECT_EQ(benchmark.Run(), kTfLiteOk);
}
}  // namespace
}  // namespace benchmark
} |
842 | cpp | tensorflow/tensorflow | benchmark_utils | tensorflow/lite/tools/benchmark/benchmark_utils.cc | tensorflow/lite/tools/benchmark/benchmark_utils_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_UTILS_H_
#define TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_UTILS_H_
#include <sstream>
#include <string>
#include <vector>
namespace tflite {
namespace benchmark {
namespace util {
void SleepForSeconds(double sleep_seconds);
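// Splits `str` on `delim` and parses each piece as T, appending to `values`;
// returns false as soon as a piece fails to parse. For example,
// SplitAndParse("1,2", ',', &ints) fills {1, 2} (see the unit tests below).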
template <typename T>
bool SplitAndParse(const std::string& str, char delim, std::vector<T>* values) {
std::istringstream input(str);
for (std::string line; std::getline(input, line, delim);) {
std::istringstream to_parse(line);
T val;
to_parse >> val;
if (!to_parse.eof() && !to_parse.good()) {
return false;
}
values->emplace_back(val);
}
return true;
}
}  // namespace util
}  // namespace benchmark
}  // namespace tflite
#endif  // TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_UTILS_H_
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace benchmark {
namespace util {
void SleepForSeconds(double sleep_seconds) {
if (sleep_seconds <= 0.0) {
return;
}
tflite::profiling::time::SleepForMicros(
static_cast<uint64_t>(sleep_seconds * 1e6));
}
}  // namespace util
}  // namespace benchmark
} | #include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace benchmark {
namespace {
TEST(BenchmarkHelpersTest, SleepForNegativeSeconds) {
const auto start_ts = tflite::profiling::time::NowMicros();
util::SleepForSeconds(-5.0);
const auto end_ts = tflite::profiling::time::NowMicros();
EXPECT_LT(end_ts - start_ts, 1000000);
}
TEST(BenchmarkHelpersTest, SleepForSomeSeconds) {
const auto start_ts = tflite::profiling::time::NowMicros();
util::SleepForSeconds(2.0);
const auto end_ts = tflite::profiling::time::NowMicros();
EXPECT_GT(end_ts - start_ts, 1900000);
}
TEST(BenchmarkHelpersTest, SplitAndParseFailed) {
std::vector<int> results;
const bool splitted = util::SplitAndParse("hello;world", ';', &results);
EXPECT_FALSE(splitted);
}
TEST(BenchmarkHelpersTest, SplitAndParseString) {
std::vector<std::string> results;
const bool splitted = util::SplitAndParse("hello,world", ',', &results);
EXPECT_TRUE(splitted);
EXPECT_EQ(2, results.size());
EXPECT_EQ("hello", results[0]);
EXPECT_EQ("world", results[1]);
}
TEST(BenchmarkHelpersTest, SplitAndParseInts) {
std::vector<int> results;
const bool splitted = util::SplitAndParse("1,2", ',', &results);
EXPECT_TRUE(splitted);
EXPECT_EQ(2, results.size());
EXPECT_EQ(1, results[0]);
EXPECT_EQ(2, results[1]);
}
}  // namespace
}  // namespace benchmark
} |
843 | cpp | tensorflow/tensorflow | latency_benchmark | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.cc | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/latency_benchmark_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_BENCHMARK_EXPERIMENTAL_DELEGATE_PERFORMANCE_ANDROID_SRC_MAIN_NATIVE_LATENCY_BENCHMARK_H_
#define TENSORFLOW_LITE_TOOLS_BENCHMARK_EXPERIMENTAL_DELEGATE_PERFORMANCE_ANDROID_SRC_MAIN_NATIVE_LATENCY_BENCHMARK_H_
#include <string>
#include <vector>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
proto::benchmark::LatencyResults Benchmark(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path, int model_fd, size_t model_offset,
size_t model_size, const std::vector<std::string>& args);
}  // namespace latency
}  // namespace benchmark
}  // namespace tflite
#endif  // TENSORFLOW_LITE_TOOLS_BENCHMARK_EXPERIMENTAL_DELEGATE_PERFORMANCE_ANDROID_SRC_MAIN_NATIVE_LATENCY_BENCHMARK_H_
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.h"
#include <errno.h>
#include <sys/stat.h>
#include <fstream>
#include <iterator>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
namespace {
static constexpr char kBenchmarkToolName[] = "(BenchmarkModelAndroid)";
class DelegatePerformanceReportingListener : public BenchmarkListener {
public:
void OnBenchmarkStart(const BenchmarkParams& unused) override {
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_START);
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
ReportResult(results);
}
void ReportFailure(TfLiteStatus status) {
std::string status_msg =
status == kTfLiteError
? "TFLite error"
: (status == kTfLiteDelegateError ? "TFLite delegate error"
: "unexpected TFLite status");
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Benchmark failed due to %s with status code %d.",
status_msg.c_str(), status);
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_ERROR);
results_proto_.mutable_error()->mutable_error_code()->set_tflite_error(
status);
results_proto_.mutable_error()->set_error_message(status_msg);
}
const proto::benchmark::LatencyResults& GetResults() {
return results_proto_;
}
private:
void ReportResult(const BenchmarkResults& results) {
tensorflow::Stat<int64_t> warmup_us = results.warmup_time_us();
tensorflow::Stat<int64_t> inference_us = results.inference_time_us();
profiling::memory::MemoryUsage init_mem_usage = results.init_mem_usage();
profiling::memory::MemoryUsage overall_mem_usage =
results.overall_mem_usage();
if (results.model_size_mb() > 0) {
AddMetric("model_size_megabyte",
results.model_size_mb());
}
AddMetric("initialization_latency_us",
results.startup_latency_us());
AddMetric("warmup_latency_average_us", warmup_us.avg());
AddMetric("warmup_latency_min_us", warmup_us.min());
AddMetric("warmup_latency_max_us", warmup_us.max());
AddMetric("warmup_latency_standard_deviation_us",
warmup_us.std_deviation());
AddMetric("inference_latency_average_us",
inference_us.avg());
AddMetric("inference_latency_min_us",
inference_us.min());
AddMetric("inference_latency_max_us",
inference_us.max());
AddMetric("inference_latency_standard_deviation_us",
inference_us.std_deviation());
AddMetric("initialization_memory_max_rss_mebibyte",
init_mem_usage.mem_footprint_kb / 1024.0);
AddMetric("initialization_memory_total_allocated_mebibyte",
init_mem_usage.total_allocated_bytes / 1024.0 / 1024.0);
AddMetric(
"initialization_memory_in_use_mebibyte",
init_mem_usage.in_use_allocated_bytes / 1024.0 / 1024.0);
AddMetric("overall_memory_max_rss_mebibyte",
overall_mem_usage.mem_footprint_kb / 1024.0);
AddMetric(
"overall_memory_total_allocated_mebibyte",
overall_mem_usage.total_allocated_bytes / 1024.0 / 1024.0);
AddMetric(
"overall_memory_in_use_mebibyte",
overall_mem_usage.in_use_allocated_bytes / 1024.0 / 1024.0);
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_END);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Benchmark finished.");
}
void AddMetric(std::string name, float value) {
proto::benchmark::BenchmarkMetric* metric = results_proto_.add_metrics();
metric->set_name(name);
metric->set_value(value);
}
proto::benchmark::LatencyResults results_proto_;
};
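// Translates a TFLiteSettings flatbuffer into the equivalent benchmark-tool
// flags. For instance, XNNPACK settings with num_threads set become
// {"--use_xnnpack=true", "--num_threads=<n>"}, while a stable-delegate
// configuration is forwarded as a single
// --stable_delegate_settings_file=<path> argument.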
std::vector<std::string> ParseArgumentsFromTfLiteSettings(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path) {
std::vector<std::string> args;
if (tflite_settings_path.empty()) {
return args;
}
if (tflite_settings.stable_delegate_loader_settings()) {
args.push_back(absl::StrFormat("--stable_delegate_settings_file=%s",
tflite_settings_path));
return args;
}
switch (tflite_settings.delegate()) {
case Delegate_XNNPACK: {
args.push_back("--use_xnnpack=true");
if (tflite_settings.xnnpack_settings()) {
if (tflite_settings.xnnpack_settings()->num_threads()) {
args.push_back(absl::StrFormat(
"--num_threads=%d",
tflite_settings.xnnpack_settings()->num_threads()));
}
if (tflite_settings.xnnpack_settings()->flags() ==
XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16) {
args.push_back("--xnnpack_force_fp16=true");
}
}
return args;
}
case Delegate_GPU: {
args.push_back("--use_gpu=true");
const tflite::GPUSettings* gpu_settings = tflite_settings.gpu_settings();
if (gpu_settings) {
if (gpu_settings->is_precision_loss_allowed()) {
args.push_back("--gpu_precision_loss_allowed=true");
}
if (gpu_settings->enable_quantized_inference()) {
args.push_back("--gpu_experimental_enable_quant=true");
}
if (gpu_settings->inference_preference() ==
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED) {
args.push_back("--gpu_inference_for_sustained_speed=true");
}
if (gpu_settings->force_backend() == GPUBackend_OPENCL) {
args.push_back("--gpu_backend=cl");
} else if (gpu_settings->force_backend() == GPUBackend_OPENGL) {
args.push_back("--gpu_backend=gl");
}
if (gpu_settings->cache_directory()) {
args.push_back(
absl::StrFormat("--delegate_serialize_dir=%s",
gpu_settings->cache_directory()->c_str()));
}
if (gpu_settings->model_token()) {
args.push_back(absl::StrFormat("--delegate_serialize_token=%s",
gpu_settings->model_token()->c_str()));
}
}
break;
}
case Delegate_EDGETPU: {
args.push_back("--use_edgetpu=true");
break;
}
default:
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Delegate type %s is not enabled by the latency module.",
EnumNameDelegate(tflite_settings.delegate()));
break;
}
if (tflite_settings.disable_default_delegates()) {
args.push_back("--use_xnnpack=false");
}
return args;
}
}  // namespace
proto::benchmark::LatencyResults Benchmark(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path, int model_fd, size_t model_offset,
size_t model_size, const std::vector<std::string>& args) {
std::vector<char*> argv;
argv.push_back(const_cast<char*>(kBenchmarkToolName));
std::string arg_graph =
absl::StrCat("--graph=fd:", model_fd, ":", model_offset, ":", model_size);
argv.push_back(const_cast<char*>(arg_graph.data()));
std::vector<std::string> args_from_tflite_settings =
ParseArgumentsFromTfLiteSettings(tflite_settings, tflite_settings_path);
for (const std::string& arg : args_from_tflite_settings) {
argv.push_back(const_cast<char*>(arg.data()));
}
for (const std::string& arg : args) {
argv.push_back(const_cast<char*>(arg.data()));
}
BenchmarkTfLiteModel benchmark;
DelegatePerformanceReportingListener delegatePerformanceReporting;
benchmark.AddListener(&delegatePerformanceReporting);
TfLiteStatus status = benchmark.Run(argv.size(), argv.data());
if (status != kTfLiteOk) {
delegatePerformanceReporting.ReportFailure(status);
}
return delegatePerformanceReporting.GetResults();
}
}  // namespace latency
}  // namespace benchmark
} | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.h"
#include <fcntl.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
static constexpr char kSettingsFilePath[] =
"tensorflow/lite/tools/delegates/experimental/stable_delegate/"
"test_sample_stable_delegate_settings.json";
class LatencyBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
model_fp_ = fopen(kModelPath, "rb");
ASSERT_TRUE(model_fp_ != nullptr);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_END), 0);
model_size_ = ftell(model_fp_);
ASSERT_NE(model_size_, -1);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_SET), 0);
settings_ = parser_.Parse(kSettingsFilePath);
}
delegates::utils::TfLiteSettingsJsonParser parser_;
const TFLiteSettings* settings_;
size_t model_size_;
FILE* model_fp_;
std::vector<std::string> args_;
};
TEST_F(LatencyBenchmarkTest, FailedWithNullFileDescriptor) {
EXPECT_TRUE(Benchmark(*settings_, kSettingsFilePath,
0, 0,
0, args_)
.has_error());
}
TEST_F(LatencyBenchmarkTest, FailedWithInvalidNumThreadsSettings) {
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings =
CreateXNNPackSettings(fbb, -3);
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_delegate(Delegate_XNNPACK);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_TRUE(Benchmark(*settings,
"example_path",
fileno(model_fp_),
0, model_size_, args_)
.has_error());
}
TEST_F(LatencyBenchmarkTest, SucceedWithEmptyTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
TEST_F(LatencyBenchmarkTest, SucceedWithCpuTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_disable_default_delegates(true);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
#ifdef __ANDROID__
TEST_F(LatencyBenchmarkTest, SucceedWithGpuTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_delegate(Delegate_GPU);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
#endif
TEST_F(LatencyBenchmarkTest, SucceedWithSampleStableDelegate) {
EXPECT_EQ(Benchmark(*settings_, kSettingsFilePath, fileno(model_fp_),
0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
TEST_F(LatencyBenchmarkTest,
SucceedWithSampleStableDelegateAndBenchmarkToolArguments) {
std::vector<std::string> args = {"--warmup_runs=10"};
EXPECT_EQ(Benchmark(*settings_, kSettingsFilePath, fileno(model_fp_),
0, model_size_, args)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
}  // namespace
}  // namespace latency
}  // namespace benchmark
} |
844 | cpp | tensorflow/tensorflow | accuracy_benchmark | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.cc | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/accuracy_benchmark_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_BENCHMARK_EXPERIMENTAL_DELEGATE_PERFORMANCE_ANDROID_SRC_MAIN_NATIVE_ACCURACY_BENCHMARK_H_
#define TENSORFLOW_LITE_TOOLS_BENCHMARK_EXPERIMENTAL_DELEGATE_PERFORMANCE_ANDROID_SRC_MAIN_NATIVE_ACCURACY_BENCHMARK_H_
#include <cstddef>
#include <string>
#include <vector>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
namespace benchmark {
namespace accuracy {
flatbuffers::Offset<BenchmarkEvent> Benchmark(
flatbuffers::FlatBufferBuilder& fbb, const TFLiteSettings& tflite_settings,
int model_fd, size_t model_offset, size_t model_size,
const char* result_path_chars);
}  // namespace accuracy
}  // namespace benchmark
}  // namespace tflite
#endif  // TENSORFLOW_LITE_TOOLS_BENCHMARK_EXPERIMENTAL_DELEGATE_PERFORMANCE_ANDROID_SRC_MAIN_NATIVE_ACCURACY_BENCHMARK_H_
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.h"
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <cstddef>
#include <string>
#include <vector>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/c/c_api.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/status_codes.h"
namespace tflite {
namespace benchmark {
namespace accuracy {
namespace {
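// The mini-benchmark returns its results as a concatenation of size-prefixed
// flatbuffers; this helper walks that buffer, reading one BenchmarkEvent per
// prefix and advancing by the payload size plus the 4-byte length prefix.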
std::vector<const tflite::BenchmarkEvent*> ToBenchmarkEvents(uint8_t* data,
size_t size) {
std::vector<const tflite::BenchmarkEvent*> results;
uint8_t* current_root = data;
while (current_root < data + size) {
flatbuffers::uoffset_t current_size =
flatbuffers::GetPrefixedSize(current_root);
results.push_back(
flatbuffers::GetSizePrefixedRoot<tflite::BenchmarkEvent>(current_root));
current_root += current_size + sizeof(flatbuffers::uoffset_t);
}
TFLITE_CHECK_EQ(current_root, data + size);
return results;
}
}  // namespace
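// Assembles a MinibenchmarkSettings flatbuffer (delegate settings, model file
// descriptor, storage paths) and runs blocking validation. Exactly one result
// event is expected; any other count is reported as
// kBenchmarkResultCountMismatch at the inference stage.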
flatbuffers::Offset<BenchmarkEvent> Benchmark(
flatbuffers::FlatBufferBuilder& fbb, const TFLiteSettings& tflite_settings,
int model_fd, size_t model_offset, size_t model_size,
const char* result_path_chars) {
std::string result_path(result_path_chars);
std::string storage_path = result_path + "/storage_path.fb";
int return_code = std::remove(storage_path.c_str());
if (return_code) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Failed to remove storage file (%s): %s.",
storage_path.c_str(), strerror(errno));
}
flatbuffers::FlatBufferBuilder mini_benchmark_fbb;
TFLiteSettingsT tflite_settings_t;
tflite_settings.UnPackTo(&tflite_settings_t);
flatbuffers::Offset<TFLiteSettings> tflite_settings_offset =
CreateTFLiteSettings(mini_benchmark_fbb, &tflite_settings_t);
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TFLiteSettings>>>
tflite_settings_vector_offset =
mini_benchmark_fbb.CreateVector({tflite_settings_offset});
ModelFileBuilder model_file_builder(mini_benchmark_fbb);
model_file_builder.add_fd(model_fd);
model_file_builder.add_offset(model_offset);
model_file_builder.add_length(model_size);
flatbuffers::Offset<ModelFile> model_file_offset =
model_file_builder.Finish();
flatbuffers::Offset<BenchmarkStoragePaths> storage_paths_offset =
CreateBenchmarkStoragePaths(mini_benchmark_fbb,
mini_benchmark_fbb.CreateString(storage_path),
mini_benchmark_fbb.CreateString(result_path));
flatbuffers::Offset<ValidationSettings> validation_settings_offset =
CreateValidationSettings(mini_benchmark_fbb,
5000);
mini_benchmark_fbb.Finish(CreateMinibenchmarkSettings(
mini_benchmark_fbb, tflite_settings_vector_offset, model_file_offset,
storage_paths_offset, validation_settings_offset));
TfLiteMiniBenchmarkSettings* settings = TfLiteMiniBenchmarkSettingsCreate();
TfLiteMiniBenchmarkSettingsSetFlatBufferData(
settings, mini_benchmark_fbb.GetBufferPointer(),
mini_benchmark_fbb.GetSize());
TfLiteMiniBenchmarkResult* result =
TfLiteBlockingValidatorRunnerTriggerValidation(settings);
std::vector<const BenchmarkEvent*> events =
ToBenchmarkEvents(TfLiteMiniBenchmarkResultFlatBufferData(result),
TfLiteMiniBenchmarkResultFlatBufferDataSize(result));
TfLiteMiniBenchmarkSettingsFree(settings);
if (events.size() != 1) {
TfLiteMiniBenchmarkResultFree(result);
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Number of result events (%zu) doesn't match the expectation (%zu).",
events.size(), 1);
flatbuffers::Offset<BenchmarkError> error =
CreateBenchmarkError(fbb, BenchmarkStage_INFERENCE,
kBenchmarkResultCountMismatch);
BenchmarkEventBuilder builder(fbb);
builder.add_event_type(BenchmarkEventType_ERROR);
builder.add_error(error);
return builder.Finish();
}
BenchmarkEventT benchmark_event;
events[0]->UnPackTo(&benchmark_event);
TfLiteMiniBenchmarkResultFree(result);
return CreateBenchmarkEvent(fbb, &benchmark_event);
}
}  // namespace accuracy
}  // namespace benchmark
} | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.h"
#include <fcntl.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/status_codes.h"
namespace tflite {
namespace benchmark {
namespace accuracy {
namespace {
class AccuracyBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
acceleration::MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (!should_perform_test_) {
return;
}
std::string embedded_model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_FALSE(embedded_model_path.empty());
model_fp_ = fopen(embedded_model_path.c_str(), "rb");
ASSERT_NE(model_fp_, nullptr);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_END), 0);
model_size_ = ftell(model_fp_);
ASSERT_NE(model_size_, -1);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_SET), 0);
result_path_ = ::testing::TempDir();
}
void TearDown() override { fclose(model_fp_); }
std::string result_path_;
size_t model_size_;
FILE* model_fp_;
bool should_perform_test_ = true;
};
TEST_F(AccuracyBenchmarkTest, FailedWithInvalidModelFileDescriptor) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
delegates::utils::TfLiteSettingsJsonParser parser;
flatbuffers::FlatBufferBuilder builder;
std::vector<std::string> args;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_sample_stable_delegate_settings.json");
flatbuffers::Offset<BenchmarkEvent> offset =
Benchmark(builder, *tflite_settings, 0,
0, 0, result_path_.c_str());
builder.Finish(offset);
const BenchmarkEvent* event =
flatbuffers::GetRoot<BenchmarkEvent>(builder.GetBufferPointer());
ASSERT_NE(event, nullptr);
EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR);
ASSERT_NE(event->error(), nullptr);
EXPECT_EQ(event->error()->stage(), BenchmarkStage_INFERENCE);
EXPECT_EQ(event->error()->exit_code(),
DelegatePerformanceBenchmarkStatus::kBenchmarkResultCountMismatch);
}
TEST_F(AccuracyBenchmarkTest, SucceedWithSampleStableDelegate) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
delegates::utils::TfLiteSettingsJsonParser parser;
flatbuffers::FlatBufferBuilder builder;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_sample_stable_delegate_settings.json");
flatbuffers::Offset<BenchmarkEvent> offset = Benchmark(
builder, *tflite_settings, fileno(model_fp_),
0, model_size_, result_path_.c_str());
builder.Finish(offset);
const BenchmarkEvent* event =
flatbuffers::GetRoot<BenchmarkEvent>(builder.GetBufferPointer());
ASSERT_NE(event, nullptr);
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_EQ(event->error(), nullptr);
}
TEST_F(AccuracyBenchmarkTest, SucceedWithEmbeddedValidationAndXNNPack) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
delegates::utils::TfLiteSettingsJsonParser parser;
flatbuffers::FlatBufferBuilder builder;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
flatbuffers::Offset<BenchmarkEvent> offset = Benchmark(
builder, *tflite_settings, fileno(model_fp_),
0, model_size_, result_path_.c_str());
builder.Finish(offset);
const BenchmarkEvent* event =
flatbuffers::GetRoot<BenchmarkEvent>(builder.GetBufferPointer());
ASSERT_NE(event, nullptr);
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_EQ(event->error(), nullptr);
}
}  // namespace
}  // namespace accuracy
}  // namespace benchmark
} |
845 | cpp | tensorflow/tensorflow | quantize_model | tensorflow/lite/tools/optimize/quantize_model.cc | tensorflow/lite/tools/optimize/quantize_model_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_PYTHON_QUANTIZE_MODEL_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_PYTHON_QUANTIZE_MODEL_H_
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace tensorflow {
namespace quantization {
inline constexpr absl::string_view kTfQuantPtqPreCalibrationStepName =
"tf_quant_ptq_pre_calibration";
inline constexpr absl::string_view kTfQuantPtqPostCalibrationStepName =
"tf_quant_ptq_post_calibration";
inline constexpr absl::string_view kTfQuantQatStepName = "tf_quant_qat";
inline constexpr absl::string_view kTfQuantPtqDynamicRangeStepName =
"tf_quant_ptq_dynamic_range";
inline constexpr absl::string_view kTfQuantWeightOnlyStepName =
"tf_quant_weight_only";
absl::StatusOr<ExportedModel> QuantizeQatModel(
absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationOptions& quantization_options);
absl::StatusOr<ExportedModel> QuantizeDynamicRangePtq(
absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationOptions& quantization_options);
absl::StatusOr<ExportedModel> QuantizeWeightOnly(
absl::string_view saved_model_path,
const QuantizationOptions& quantization_options);
absl::StatusOr<ExportedModel> QuantizeStaticRangePtq(
absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationOptions& quantization_options,
const absl::flat_hash_map<std::string, SignatureDef>& signature_def_map,
const PyFunctionLibrary& py_function_library,
const absl::flat_hash_map<std::string, RepresentativeDatasetFile>&
representative_dataset_file_map_serialized);
}  // namespace quantization
}  // namespace tensorflow
#endif  // TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_PYTHON_QUANTIZE_MODEL_H_
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.h"
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/context.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/debugger.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/post_calibration.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/unfreeze_constants.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::mlir::quant::stablehlo::ConvertMlirModuleToExportedModel;
using ::mlir::quant::stablehlo::CreateMlirContextForQuantization;
using ::mlir::quant::stablehlo::ExportOptions;
using ::mlir::quant::stablehlo::FunctionAlias;
using ::mlir::quant::stablehlo::FunctionName;
using ::mlir::quant::stablehlo::GetFunctionAliases;
using ::mlir::quant::stablehlo::kExportStepSuffix;
using ::mlir::quant::stablehlo::PostCalibrationComponent;
using ::mlir::quant::stablehlo::PreCalibrationComponent;
using ::mlir::quant::stablehlo::RunCalibrationPasses;
using ::mlir::quant::stablehlo::UpdateFunctionAliases;
using ::mlir::quant::stablehlo::WeightOnlyPtqComponent;
using ::stablehlo::quantization::AddCalibrationStatistics;
using ::stablehlo::quantization::ChangeToQuantizedFilename;
using ::stablehlo::quantization::DebuggerConfig;
using ::stablehlo::quantization::ExpandPresets;
using ::stablehlo::quantization::IsCalibrationRequired;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::CreateTmpDir;
using ::stablehlo::quantization::io::GetLocalTmpFileName;
using ::tensorflow::quantization::PyFunctionLibrary;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ImportAndPreprocessSavedModel(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags, mlir::MLIRContext *context,
const bool is_inliner_run, const bool run_tf_to_stablehlo,
const bool deserialize_xla_call_module,
absl::flat_hash_map<std::string, std::string> &function_aliases) {
MLIRImportOptions import_options;
import_options.upgrade_legacy = true;
import_options.lift_variables = false;
import_options.include_variables_in_initializers = true;
auto bundle = std::make_unique<SavedModelBundle>();
std::vector<std::string> exported_names = signature_keys;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
SavedModelSignatureDefsToMlirImport(saved_model_path, tags,
absl::MakeSpan(exported_names),
context, import_options, &bundle);
if (!module.status().ok()) {
return absl::InternalError(absl::StrCat("Failed to import SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
UpdateFunctionAliases(function_aliases, *module_ref);
absl::flat_hash_set<std::string> aliased_function_names;
absl::c_for_each(function_aliases, [&](const auto &aliases) {
return aliased_function_names.insert(aliases.first);
});
TF_RETURN_IF_ERROR(PreprocessAndFreezeGraph(
kDefaultTfQuantMlirDumpFilePrefix,
is_inliner_run,
aliased_function_names, module_ref.get(), context,
bundle ? bundle->GetSession() : nullptr, run_tf_to_stablehlo,
deserialize_xla_call_module));
return module_ref;
}
absl::StatusOr<ExportedModel> ModuleOpToExportedModel(
mlir::ModuleOp module_op, mlir::MLIRContext *ctx,
absl::string_view step_name, const bool unfreeze_constants,
const absl::flat_hash_map<std::string, std::string> &function_aliases) {
TF_ASSIGN_OR_RETURN(const std::string checkpoint_dir, GetLocalTmpFileName());
const auto export_opts =
ExportOptions{true,
unfreeze_constants, checkpoint_dir,
absl::StrCat(step_name, kExportStepSuffix)};
TF_ASSIGN_OR_RETURN(const llvm::SmallVector<AssetFileDef> asset_file_defs,
RunExportPasses(export_opts, *ctx, module_op));
return ConvertMlirModuleToExportedModel(
module_op, checkpoint_dir, function_aliases,
{asset_file_defs.begin(), asset_file_defs.end()});
}
absl::StatusOr<ExportedModel> ExportCalibrationModel(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases,
absl::string_view calibration_data_dir) {
mlir::OwningOpRef<mlir::ModuleOp> cloned_module_ref(module_op.clone());
TF_RETURN_IF_ERROR(
RunCalibrationPasses(*cloned_module_ref, *context, calibration_data_dir,
quantization_options.calibration_options()
.force_regenerate_calibration_data()));
if (!IsCalibrationRequired(*cloned_module_ref)) return ExportedModel();
absl::StatusOr<ExportedModel> exported_model = ModuleOpToExportedModel(
*cloned_module_ref, context, kTfQuantPtqPreCalibrationStepName,
!quantization_options.freeze_all_variables(),
function_aliases);
if (!exported_model.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to export calibration model: ",
exported_model.status().message()));
}
return *exported_model;
}
absl::StatusOr<ExportedModel> ExportDebuggingModel(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases) {
mlir::OwningOpRef<mlir::ModuleOp> cloned_module_ref(module_op.clone());
absl::StatusOr<ExportedModel> exported_model = ModuleOpToExportedModel(
*cloned_module_ref, context, kTfQuantPtqPreCalibrationStepName,
!quantization_options.freeze_all_variables(),
function_aliases);
if (!exported_model.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to export debugging model: ",
exported_model.status().message()));
}
return *exported_model;
}
QuantizationConfig GetQuantizationConfigForStaticRangePtq(
const QuantizationOptions &quantization_options) {
QuantizationConfig quantization_config{};
quantization_config.mutable_static_range_ptq_preset()
->set_enable_per_channel_quantized_weight(
quantization_options.enable_per_channel_quantization());
quantization_config.mutable_pipeline_config()->set_unpack_quantized_types(
true);
*quantization_config.mutable_debugger_config() =
quantization_options.debugger_config();
quantization_config.mutable_static_range_ptq_preset();
*quantization_config.mutable_calibration_options() =
quantization_options.calibration_options();
return ExpandPresets(PopulateDefaults(quantization_config));
}
QuantizationConfig GetQuantizationConfigForWeightOnlyPtq(
const QuantizationOptions &quantization_options) {
QuantizationConfig quantization_config{};
quantization_config.mutable_weight_only_ptq_preset();
quantization_config.mutable_pipeline_config()->set_unpack_quantized_types(
true);
*quantization_config.mutable_debugger_config() =
quantization_options.debugger_config();
return ExpandPresets(PopulateDefaults(quantization_config));
}
absl::StatusOr<ExportedModel> QuantizePtqModelPreCalibrationImpl(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases,
absl::string_view calibration_data_dir) {
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
if (is_stablehlo) {
const QuantizationConfig quantization_config =
GetQuantizationConfigForStaticRangePtq(quantization_options);
PreCalibrationComponent pre_calibration_component(context);
TF_ASSIGN_OR_RETURN(module_op, pre_calibration_component.Run(
module_op, quantization_config));
} else {
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantPtqPreCalibrationStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizePtqPreCalibrationPasses(pm, quantization_options);
},
*context, module_op));
}
return ExportCalibrationModel(module_op, context, quantization_options,
function_aliases, calibration_data_dir);
}
absl::StatusOr<ExportedModel> QuantizePtqModelPostCalibrationImpl(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases) {
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
if (is_stablehlo) {
const QuantizationConfig quantization_config =
GetQuantizationConfigForStaticRangePtq(quantization_options);
PostCalibrationComponent post_calibration_component(context);
TF_ASSIGN_OR_RETURN(module_op, post_calibration_component.Run(
module_op, quantization_config));
} else {
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantPtqPostCalibrationStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizePtqPostCalibrationPasses(
pm, quantization_options, kTfQuantPtqPostCalibrationStepName);
},
*context, module_op));
}
return ModuleOpToExportedModel(
module_op, context, kTfQuantPtqPostCalibrationStepName,
!quantization_options.freeze_all_variables(),
function_aliases);
}
}  // namespace
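// The exported entry points below share one flow: import and preprocess the
// SavedModel, run the pass pipeline for the requested quantization step, and
// export the module back to an ExportedModel (unfreezing constants unless
// freeze_all_variables is set).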
absl::StatusOr<ExportedModel> QuantizeQatModel(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags,
const QuantizationOptions &quantization_options) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(saved_model_path, tags);
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path, signature_keys, tags, context.get(),
true,
false,
false, *function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantQatStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizeQatPasses(pm, quantization_options, kTfQuantQatStepName);
},
*context, *module_ref));
return ModuleOpToExportedModel(
*module_ref, context.get(), kTfQuantQatStepName,
!quantization_options.freeze_all_variables(),
*function_aliases);
}
absl::StatusOr<ExportedModel> QuantizeDynamicRangePtq(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags,
const QuantizationOptions &quantization_options) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(saved_model_path, tags);
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path, signature_keys, tags, context.get(),
true,
false, false,
*function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantPtqDynamicRangeStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizePtqDynamicRangePasses(pm, quantization_options,
kTfQuantPtqDynamicRangeStepName);
},
*context, *module_ref));
return ModuleOpToExportedModel(
*module_ref, context.get(), kTfQuantPtqDynamicRangeStepName,
!quantization_options.freeze_all_variables(),
*function_aliases);
}
absl::StatusOr<ExportedModel> QuantizeWeightOnly(
absl::string_view saved_model_path,
const QuantizationOptions &quantization_options) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(
saved_model_path, {quantization_options.tags().begin(),
quantization_options.tags().end()});
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path,
{quantization_options.signature_keys().begin(),
quantization_options.signature_keys().end()},
{quantization_options.tags().begin(),
quantization_options.tags().end()},
context.get(), true,
is_stablehlo,
false, *function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
if (is_stablehlo) {
const QuantizationConfig quantization_config =
GetQuantizationConfigForWeightOnlyPtq(quantization_options);
WeightOnlyPtqComponent weight_only_ptq_component(context.get());
TF_ASSIGN_OR_RETURN(*module_ref, weight_only_ptq_component.Run(
*module_ref, quantization_config));
} else {
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantWeightOnlyStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizeWeightOnlyPasses(pm, quantization_options,
kTfQuantWeightOnlyStepName);
},
*context, *module_ref));
}
return ModuleOpToExportedModel(
*module_ref, context.get(), kTfQuantWeightOnlyStepName,
!quantization_options.freeze_all_variables(),
*function_aliases);
}
absl::StatusOr<ExportedModel> QuantizeStaticRangePtq(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, SignatureDef> &signature_def_map,
const PyFunctionLibrary &py_function_library,
const absl::flat_hash_map<std::string, RepresentativeDatasetFile>
&representative_dataset_file_map_serialized) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(saved_model_path, tags);
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path, signature_keys, tags, context.get(),
true,
is_stablehlo,
false, *function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
std::string calibration_data_dir =
quantization_options.calibration_options().calibration_data_dir();
if (calibration_data_dir.empty()) {
TF_ASSIGN_OR_RETURN(calibration_data_dir, CreateTmpDir());
}
TF_ASSIGN_OR_RETURN(ExportedModel calibration_exported_model,
QuantizePtqModelPreCalibrationImpl(
*module_ref, context.get(), quantization_options,
*function_aliases, calibration_data_dir));
if (calibration_exported_model.has_graph_def()) {
TF_ASSIGN_OR_RETURN(std::string calibration_saved_model_dir,
CreateTmpDir());
py_function_library.SaveExportedModel(
calibration_saved_model_dir, calibration_exported_model,
saved_model_path, tags, signature_def_map);
py_function_library.RunCalibration(
calibration_saved_model_dir, signature_keys, tags,
quantization_options.force_graph_mode_calibration(),
representative_dataset_file_map_serialized);
}
if (absl::Status status = AddCalibrationStatistics(
*module_ref, calibration_data_dir,
quantization_options.calibration_options(), py_function_library);
!status.ok()) {
LOG(WARNING) << "Some CustomAggregator ops do not have min or max "
"values. Parts of the graph are not quantized. "
<< status;
}
if (quantization_options.has_debugger_config() &&
quantization_options.debugger_config().debugger_type() ==
DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL) {
TF_ASSIGN_OR_RETURN(
ExportedModel debugging_exported_model,
ExportDebuggingModel(*module_ref, context.get(), quantization_options,
*function_aliases));
ChangeToQuantizedFilename(*module_ref);
absl::string_view unquantized_dump_model_path =
quantization_options.debugger_config().unquantized_dump_model_path();
py_function_library.SaveExportedModel(
unquantized_dump_model_path, debugging_exported_model, saved_model_path,
tags, signature_def_map);
}
return QuantizePtqModelPostCalibrationImpl(
*module_ref, context.get(), quantization_options, *function_aliases);
}
}  // namespace quantization
} | #include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/test_util.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/model_builder.h"
#include "tsl/lib/core/status_test_util.h"
namespace {
tensorflow::string* g_test_model_dir = nullptr;
}  // namespace
namespace tflite {
namespace optimize {
namespace {
using testing::Eq;
using testing::FloatEq;
using testing::FloatNear;
using testing::IsEmpty;
using testing::NotNull;
using testing::SizeIs;
ModelT UnPackFlatBufferModel(const Model& flatbuffer_model) {
ModelT model;
flatbuffer_model.UnPackTo(&model);
return model;
}
absl::Status QuantizeModel(
ModelT* model, const TensorType& input_type, const TensorType& output_type,
const bool allow_float,
const std::unordered_set<std::string>& operator_names,
const TensorType& activations_type, std::string& output_buffer,
const bool disable_per_channel = false,
const absl::flat_hash_set<std::string>& blocked_ops = {},
const absl::flat_hash_set<std::string>& blocked_nodes = {},
const bool disable_per_channel_for_dense_layers = false) {
TensorType inference_tensor_type = activations_type;
const bool fully_quantize = !allow_float;
flatbuffers::FlatBufferBuilder input_builder;
tflite::FinishModelBuffer(input_builder,
tflite::Model::Pack(input_builder, model));
const std::string input_buffer(
reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
input_builder.GetSize());
auto status = mlir::lite::QuantizeModel(
input_buffer, input_type, output_type, inference_tensor_type,
{}, disable_per_channel, fully_quantize, output_buffer,
false, false,
true, blocked_ops, blocked_nodes,
false,
disable_per_channel_for_dense_layers);
if (!status.ok()) {
return status;
}
auto flatbuffer_model = FlatBufferModel::BuildFromBuffer(
output_buffer.data(), output_buffer.size());
*model = UnPackFlatBufferModel(*flatbuffer_model->GetModel());
return absl::OkStatus();
}
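// Convenience overloads of QuantizeModel that fill in common defaults for the
// operator set, activation type, float fallback, and input/output types.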
absl::Status QuantizeModel(ModelT* model, const TensorType& input_type,
const TensorType& output_type, bool allow_float,
std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type, allow_float,
{}, TensorType_INT8, output_buffer);
}
absl::Status QuantizeModel(ModelT* model, const TensorType& input_type,
const TensorType& output_type,
std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type,
false, output_buffer);
}
absl::Status QuantizeModel(ModelT* model, std::string& output_buffer) {
return QuantizeModel(model, TensorType_FLOAT32, TensorType_FLOAT32,
true, output_buffer);
}
absl::Status QuantizeModelAllOperators(
ModelT* model, const TensorType& input_type, const TensorType& output_type,
bool allow_float, const TensorType& activations_type,
bool disable_per_channel, std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type, allow_float,
{}, activations_type, output_buffer,
disable_per_channel);
}
absl::Status QuantizeModelAllOperators(ModelT* model,
const TensorType& input_type,
const TensorType& output_type,
bool allow_float,
const TensorType& activations_type,
std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type, allow_float,
{}, activations_type, output_buffer);
}
absl::Status QuantizeModelAllOperators(
ModelT* model, const TensorType& input_type, const TensorType& output_type,
bool allow_float, const TensorType& activations_type,
std::string& output_buffer, bool disable_per_channel_for_dense_layers) {
return QuantizeModel(model, input_type, output_type, allow_float,
{}, activations_type, output_buffer,
false,
{},
{},
disable_per_channel_for_dense_layers);
}
std::unique_ptr<FlatBufferModel> ReadModel(const std::string& model_name) {
auto model_path = tensorflow::io::JoinPath(*g_test_model_dir, model_name);
return FlatBufferModel::BuildFromFile(model_path.c_str());
}
template <typename T>
std::vector<T> GetAsVector(const flatbuffers::Vector<T>* vec) {
return std::vector<T>(vec->begin(), vec->end());
}
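// Checks that the quantized scale equals (max - min) / (2^bit_num - 1), where
// min/max come from the float tensor's recorded range widened to include zero
// (and mirrored around zero in the symmetric case).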
void VerifyQuantizationScale(
const QuantizationParameters& float_quant_params,
const QuantizationParametersT& quantized_quant_params, const int bit_num,
const bool symmetric) {
const float eps = 1e-7;
ASSERT_THAT(*float_quant_params.min(), SizeIs(1));
ASSERT_THAT(*float_quant_params.max(), SizeIs(1));
float float_min = std::min(0.f, float_quant_params.min()->Get(0));
float float_max = std::max(0.f, float_quant_params.max()->Get(0));
if (symmetric) {
float_max = std::max(std::abs(float_min), std::abs(float_max));
float_min = -float_max;
}
ASSERT_THAT(quantized_quant_params.scale, SizeIs(1));
ASSERT_THAT(quantized_quant_params.zero_point, SizeIs(1));
float scale = (float_max - float_min) / ((1 << bit_num) - 1);
EXPECT_THAT(scale, FloatNear(quantized_quant_params.scale[0], eps));
}
class QuantizeModelTest : public testing::Test {
protected:
QuantizeModelTest() {
input_model_ =
ReadModel(::mlir::lite::internal::kConvModelWith0Plus10Weights);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
std::unique_ptr<FlatBufferModel> input_model_;
const Model* readonly_model_;
tflite::ModelT model_;
std::string output_buffer_;
};
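// Expects `tensor` to match `expected_tensor` in shape, type, is_variable
// flag and, where both carry quantization parameters, zero points and (above
// a small threshold) scales.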
void ExpectEqualTensor(TensorT* tensor, TensorT* expected_tensor) {
const float eps = 1e-7;
EXPECT_THAT(expected_tensor, NotNull());
EXPECT_THAT(tensor->is_variable, Eq(expected_tensor->is_variable));
EXPECT_THAT(tensor->shape, Eq(expected_tensor->shape));
EXPECT_THAT(tensor->type, Eq(expected_tensor->type));
const auto quantization_params = tensor->quantization.get();
const auto expected_quantization_params = expected_tensor->quantization.get();
if (quantization_params != nullptr &&
expected_quantization_params != nullptr) {
for (int i = 0; i < quantization_params->scale.size(); ++i) {
if (quantization_params->scale[i] > 3e-5) {
EXPECT_THAT(quantization_params->scale[i],
FloatNear(expected_quantization_params->scale[i], eps));
}
}
EXPECT_THAT(quantization_params->zero_point,
Eq(expected_quantization_params->zero_point));
}
}
TensorT* FindMatchingExpectedTensor(const SubGraphT& expected_graph,
const ModelT& expected_model,
const ModelT& quant_model,
const OperatorT& quant_op, int idx) {
const auto& builtin_code =
GetBuiltinCode(quant_model.operator_codes[quant_op.opcode_index].get());
for (const auto& expected_op : expected_graph.operators) {
const auto& op_code =
expected_model.operator_codes[expected_op->opcode_index].get();
const auto& expected_code = GetBuiltinCode(op_code);
if (expected_code == builtin_code) {
return expected_graph.tensors[expected_op->inputs[idx]].get();
}
}
return nullptr;
}
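// Walks every operator input in `model` and expects the corresponding tensor
// (and, for constant tensors, its buffer contents) to match `expected_model`.
// Used by the skip-quantization tests to verify the graph was left untouched.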
void ExpectSameModels(const ModelT& model, const ModelT& expected_model) {
ASSERT_THAT(model.subgraphs, SizeIs(expected_model.subgraphs.size()));
for (size_t subgraph_idx = 0; subgraph_idx < model.subgraphs.size();
subgraph_idx++) {
const auto graph = model.subgraphs[subgraph_idx].get();
const auto expected_graph = expected_model.subgraphs[subgraph_idx].get();
for (auto& op : graph->operators) {
for (int idx = 0; idx < op->inputs.size(); idx++) {
if (op->inputs[idx] < 0) {
continue;
}
const auto& tensor = graph->tensors[op->inputs[idx]];
auto* expected_tensor = FindMatchingExpectedTensor(
*expected_graph, expected_model, model, *op, idx);
if (!expected_tensor) {
continue;
}
ExpectEqualTensor(tensor.get(), expected_tensor);
if (expected_tensor->buffer > 0) {
const int buffer_idx = tensor->buffer;
const int expected_buffer_idx = expected_tensor->buffer;
const auto buffer = model.buffers[buffer_idx].get()->data;
const auto expected_buffer =
expected_model.buffers[expected_buffer_idx].get()->data;
EXPECT_THAT(buffer, Eq(expected_buffer));
}
}
}
}
}
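// Parameterized over the activation tensor type (currently only INT8). The
// fixture seeds the conv model's input and output tensors with a [0, 6]
// min/max range so quantization parameters can be derived without
// calibration data.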
class QuantizeConvModelTest : public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeConvModelTest() {
tensor_type_ = GetParam();
input_model_ =
ReadModel(::mlir::lite::internal::kConvModelWith0Plus10Weights);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
auto& subgraph = model_.subgraphs[0];
auto* input = subgraph->tensors[subgraph->inputs[0]].get();
auto* output = subgraph->tensors[subgraph->outputs[0]].get();
input->quantization = std::make_unique<QuantizationParametersT>();
output->quantization = std::make_unique<QuantizationParametersT>();
input->quantization->min.push_back(0.0);
output->quantization->min.push_back(0.0);
input->quantization->max.push_back(6.0);
output->quantization->max.push_back(6.0);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeConvModelTestInst, QuantizeConvModelTest,
testing::ValuesIn({TensorType_INT8}));
TEST_P(QuantizeConvModelTest, QuantizationSucceeds) {
TF_EXPECT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
false, tensor_type_,
output_buffer_));
const Model* output_model = GetModel(output_buffer_.data());
ASSERT_TRUE(output_model);
}
TEST_P(QuantizeConvModelTest, SkipUnspecifiedLayer) {
TF_EXPECT_OK(QuantizeModel(&model_, TensorType_FLOAT32, TensorType_FLOAT32,
true, {},
TensorType_FLOAT32, output_buffer_,
false, {"CONV_2D"}));
ModelT expected_model;
readonly_model_->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
TEST_P(QuantizeConvModelTest, SkipUnspecifiedLayerByName) {
TF_EXPECT_OK(QuantizeModel(&model_, TensorType_FLOAT32, TensorType_FLOAT32,
true, {},
TensorType_FLOAT32, output_buffer_,
false,
{}, {"output"}));
ModelT expected_model;
readonly_model_->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
TEST_P(QuantizeConvModelTest, GraphIsFullyQuantized) {
TF_EXPECT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
false, tensor_type_,
output_buffer_));
for (const auto& subgraph : model_.subgraphs) {
for (const auto& tensor : subgraph->tensors) {
EXPECT_TRUE(tensor->type == TensorType_INT32 ||
tensor->type == TensorType_INT8);
}
}
}
class QuantizeConvNoBiasModelTest : public QuantizeModelTest {
protected:
QuantizeConvNoBiasModelTest() {
input_model_ = ReadModel(::mlir::lite::internal::kConvModelWithNoBias);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
class QuantizeSplitModelTest : public QuantizeModelTest {
protected:
QuantizeSplitModelTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelSplit);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
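// Verifies that both outputs of a SPLIT inherit the input's quantization
// parameters (scale 1.0, zero point -128) after full int8 quantization.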
TEST_F(QuantizeSplitModelTest, QuantizeSplit) {
TF_EXPECT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
false, TensorType_INT8, output_buffer_));
const int32_t subgraph_idx = 0;
const auto& subgraph = model_.subgraphs[subgraph_idx];
const auto& readonly_subgraph =
readonly_model_->subgraphs()->Get(subgraph_idx);
EXPECT_THAT(*readonly_subgraph->operators(), SizeIs(2));
EXPECT_THAT(subgraph->operators, SizeIs(2));
const auto& split = subgraph->operators[0];
const auto& add = subgraph->operators[1];
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[split->opcode_index].get()),
Eq(BuiltinOperator_SPLIT));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[add->opcode_index].get()),
Eq(BuiltinOperator_ADD));
EXPECT_THAT(subgraph->tensors, SizeIs(5));
const int input_idx = 0;
EXPECT_THAT(subgraph->tensors[input_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[input_idx]->name, Eq("input"));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->zero_point[0],
Eq(-128));
const int output_idx = 4;
EXPECT_THAT(subgraph->tensors[output_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[output_idx]->name, Eq("output"));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->zero_point[0],
Eq(-128));
const int split0_idx = 2;
EXPECT_THAT(subgraph->tensors[split0_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[split0_idx]->name, Eq("split;split:1"));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->zero_point[0],
Eq(-128));
const int split1_idx = 3;
EXPECT_THAT(subgraph->tensors[split1_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[split1_idx]->name, Eq("split;split:11"));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->zero_point[0],
Eq(-128));
EXPECT_THAT(model_.operator_codes, SizeIs(2));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_SPLIT));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeConvModel2Test : public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeConvModel2Test() {
tensor_type_ = GetParam();
input_model_ =
ReadModel(::mlir::lite::internal::kConvModelWith0Plus10Weights);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
auto& subgraph = model_.subgraphs[0];
auto* input = subgraph->tensors[subgraph->inputs[0]].get();
auto* output = subgraph->tensors[subgraph->outputs[0]].get();
input->quantization = std::make_unique<QuantizationParametersT>();
output->quantization = std::make_unique<QuantizationParametersT>();
input->quantization->min.push_back(0.0);
output->quantization->min.push_back(0.0);
input->quantization->max.push_back(6.0);
output->quantization->max.push_back(6.0);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeConvModel2TestInst, QuantizeConvModel2Test,
testing::ValuesIn({TensorType_INT8}));
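// Checks per-channel quantization of Conv2D: int8 weights with one scale and
// a zero point of 0 per output channel, int32 bias whose per-channel scale is
// input_scale * weight_scale, and dequantized weights/bias that stay within
// half a scale step of the original float values.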
TEST_P(QuantizeConvModel2Test, VerifyConvQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
false, tensor_type_,
output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto conv_op = subgraph->operators[0].get();
const int input_tensor_idx = 0;
const int weights_tensor_idx = 1;
const int bias_tensor_index = 2;
const int output_tensor_idx = 0;
const auto bias_tensor =
subgraph->tensors[conv_op->inputs[bias_tensor_index]].get();
const auto input_tensor =
subgraph->tensors[conv_op->inputs[input_tensor_idx]].get();
const auto weights_tensor =
subgraph->tensors[conv_op->inputs[weights_tensor_idx]].get();
const auto output_tensor =
subgraph->tensors[conv_op->outputs[output_tensor_idx]].get();
EXPECT_THAT(bias_tensor->type,
Eq(tensor_type_ == TensorType_INT8 ? TensorType_INT32
: TensorType_INT64));
EXPECT_THAT(input_tensor->type, Eq(tensor_type_));
EXPECT_THAT(weights_tensor->type, Eq(TensorType_INT8));
ASSERT_TRUE(weights_tensor->quantization);
ASSERT_TRUE(bias_tensor->quantization);
ASSERT_TRUE(weights_tensor->quantization);
const std::vector<float>& bias_scales = bias_tensor->quantization->scale;
const std::vector<float>& weights_scales =
weights_tensor->quantization->scale;
const std::vector<int64_t>& weights_zero_points =
weights_tensor->quantization->zero_point;
const int out_channel_size = weights_tensor->shape[0];
ASSERT_THAT(bias_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_zero_points, SizeIs(out_channel_size));
ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1));
ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1));
const float eps = 1e-7;
for (size_t i = 0; i < out_channel_size; i++) {
EXPECT_THAT(bias_scales[i], FloatNear(input_tensor->quantization->scale[0] *
weights_scales[i],
eps));
}
const auto bias_buffer = model_.buffers[bias_tensor->buffer].get();
auto control_size = tensor_type_ == TensorType_INT8
? sizeof(int32_t) * bias_tensor->shape[0]
: sizeof(int64_t) * bias_tensor->shape[0];
const auto float_op =
readonly_model_->subgraphs()->Get(0)->operators()->Get(0);
const auto original_bias_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(2));
ASSERT_THAT(bias_buffer->data, SizeIs(control_size));
const auto original_bias_buffer =
readonly_model_->buffers()->Get(original_bias_tensor->buffer());
const float* bias_float_buffer =
reinterpret_cast<const float*>(original_bias_buffer->data()->data());
if (tensor_type_ == TensorType_INT8) {
int32_t* bias_values = reinterpret_cast<int32_t*>(bias_buffer->data.data());
for (size_t i = 0; i < out_channel_size; i++) {
auto dequantized_value = bias_values[i] * bias_scales[i];
EXPECT_THAT(dequantized_value,
FloatNear(bias_float_buffer[i], bias_scales[i] / 2));
}
}
const auto weights_buffer = model_.buffers[weights_tensor->buffer].get();
const auto original_weights_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(1));
const auto original_weights_buffer =
readonly_model_->buffers()->Get(original_weights_tensor->buffer());
const int8_t* weight_values =
reinterpret_cast<int8_t*>(weights_buffer->data.data());
const float* weights_float_buffer =
reinterpret_cast<const float*>(original_weights_buffer->data()->data());
ASSERT_THAT(sizeof(float) * weights_buffer->data.size(),
Eq(original_weights_buffer->data()->size()));
int num_values_in_channel = weights_buffer->data.size() / out_channel_size;
for (size_t channel_idx = 0; channel_idx < out_channel_size; channel_idx++) {
for (size_t j = 0; j < num_values_in_channel; j++) {
size_t element_idx = channel_idx * out_channel_size + j;
auto scale = weights_scales[channel_idx];
auto zero_point = weights_zero_points[channel_idx];
auto dequantized_value = weight_values[element_idx] * scale;
EXPECT_THAT(dequantized_value,
FloatNear(weights_float_buffer[element_idx], scale / 2));
EXPECT_THAT(zero_point, Eq(0));
}
}
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_CONV_2D));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(3));
}
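// Same checks as above, but with per-channel quantization disabled, so the
// weights and bias carry a single per-tensor scale and zero point.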
TEST_P(QuantizeConvModel2Test, VerifyConvDisablePerChannelQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
false, tensor_type_,
true,
output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto conv_op = subgraph->operators[0].get();
const int input_tensor_idx = 0;
const int weights_tensor_idx = 1;
const int bias_tensor_index = 2;
const int output_tensor_idx = 0;
const auto bias_tensor =
subgraph->tensors[conv_op->inputs[bias_tensor_index]].get();
const auto input_tensor =
subgraph->tensors[conv_op->inputs[input_tensor_idx]].get();
const auto weights_tensor =
subgraph->tensors[conv_op->inputs[weights_tensor_idx]].get();
const auto output_tensor =
subgraph->tensors[conv_op->outputs[output_tensor_idx]].get();
EXPECT_THAT(bias_tensor->type,
Eq(tensor_type_ == TensorType_INT8 ? TensorType_INT32
: TensorType_INT64));
EXPECT_THAT(input_tensor->type, Eq(tensor_type_));
EXPECT_THAT(weights_tensor->type, Eq(TensorType_INT8));
ASSERT_TRUE(weights_tensor->quantization);
ASSERT_TRUE(bias_tensor->quantization);
ASSERT_TRUE(weights_tensor->quantization);
const std::vector<float>& bias_scales = bias_tensor->quantization->scale;
const std::vector<float>& weights_scales =
weights_tensor->quantization->scale;
const std::vector<int64_t>& weights_zero_points =
weights_tensor->quantization->zero_point;
const int out_channel_size = 1;
ASSERT_THAT(bias_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_zero_points, SizeIs(out_channel_size));
ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1));
ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1));
const float eps = 1e-7;
for (size_t i = 0; i < out_channel_size; i++) {
EXPECT_THAT(bias_scales[i], FloatNear(input_tensor->quantization->scale[0] *
weights_scales[i],
eps));
}
const auto bias_buffer = model_.buffers[bias_tensor->buffer].get();
auto control_size = tensor_type_ == TensorType_INT8
? sizeof(int32_t) * bias_tensor->shape[0]
: sizeof(int64_t) * bias_tensor->shape[0];
ASSERT_THAT(bias_buffer->data, SizeIs(control_size));
const auto float_op =
readonly_model_->subgraphs()->Get(0)->operators()->Get(0);
const auto original_bias_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(2));
ASSERT_THAT(bias_buffer->data, SizeIs(control_size));
const auto original_bias_buffer =
readonly_model_->buffers()->Get(original_bias_tensor->buffer());
const float* bias_float_buffer =
reinterpret_cast<const float*>(original_bias_buffer->data()->data());
if (tensor_type_ == TensorType_INT8) {
int32_t* bias_values = reinterpret_cast<int32_t*>(bias_buffer->data.data());
for (size_t i = 0; i < out_channel_size; i++) {
auto dequantized_value = bias_values[i] * bias_scales[i];
EXPECT_THAT(dequantized_value,
FloatNear(bias_float_buffer[i], bias_scales[i] / 2));
}
}
const auto weights_buffer = model_.buffers[weights_tensor->buffer].get();
const auto original_weights_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(1));
const auto original_weights_buffer =
readonly_model_->buffers()->Get(original_weights_tensor->buffer());
const int8_t* weight_values =
reinterpret_cast<int8_t*>(weights_buffer->data.data());
const float* weights_float_buffer =
reinterpret_cast<const float*>(original_weights_buffer->data()->data());
ASSERT_THAT(sizeof(float) * weights_buffer->data.size(),
Eq(original_weights_buffer->data()->size()));
int num_values_in_channel = weights_buffer->data.size() / out_channel_size;
for (size_t channel_idx = 0; channel_idx < out_channel_size; channel_idx++) {
for (size_t j = 0; j < num_values_in_channel; j++) {
size_t element_idx = channel_idx * out_channel_size + j;
auto scale = weights_scales[channel_idx];
auto zero_point = weights_zero_points[channel_idx];
auto dequantized_value = weight_values[element_idx] * scale;
EXPECT_THAT(dequantized_value,
FloatNear(weights_float_buffer[element_idx], scale / 2));
EXPECT_THAT(zero_point, Eq(0));
}
}
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_CONV_2D));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(3));
}
class QuantizeSoftmaxTest : public QuantizeModelTest {
protected:
QuantizeSoftmaxTest() {
input_model_ =
ReadModel(::mlir::lite::internal::kSingleSoftmaxModelMinMinus5MaxPlus5);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
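// The softmax output range is fixed by the int8 kernel contract: scale 1/256
// and zero point -128, regardless of the recorded float min/max.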
TEST_F(QuantizeSoftmaxTest, VerifySoftmaxQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(op->opcode_index, Eq(0));
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_SOFTMAX));
ASSERT_THAT(op->inputs, SizeIs(1));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(op->inputs[0])->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(op->outputs[0])->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT8));
auto float_input_quant_params =
float_graph->tensors()->Get(op->inputs[0])->quantization();
auto input_quant_params =
subgraph->tensors[op->inputs[0]]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
8, false);
auto float_output_quant_params =
float_graph->tensors()->Get(op->outputs[0])->quantization();
auto output_quant_params =
subgraph->tensors[op->outputs[0]]->quantization.get();
ASSERT_THAT(*float_output_quant_params->min(), SizeIs(1));
ASSERT_THAT(*float_output_quant_params->max(), SizeIs(1));
ASSERT_THAT(output_quant_params->scale, SizeIs(1));
ASSERT_THAT(output_quant_params->zero_point, SizeIs(1));
ASSERT_THAT(1.0f / 256.0f, Eq(output_quant_params->scale[0]));
ASSERT_THAT(-128, Eq(output_quant_params->zero_point[0]));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_SOFTMAX));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeAvgPoolTest : public QuantizeModelTest {
protected:
QuantizeAvgPoolTest() {
input_model_ =
ReadModel(::mlir::lite::internal::kSingleAvgPoolModelMinMinus5MaxPlus5);
readonly_model_ = |
846 | cpp | tensorflow/tensorflow | model_utils | tensorflow/lite/tools/optimize/model_utils.cc | tensorflow/lite/tools/optimize/model_utils_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_MODEL_UTILS_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_MODEL_UTILS_H_
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace optimize {
namespace utils {
void MakeDequantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output);
void MakeQuantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output);
void MakeTensor(const string& name, const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, std::unique_ptr<TensorT>* tensor);
void MakeTensorWithQuantParam(const string& name,
const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, float scale,
int64_t zero_point,
std::unique_ptr<TensorT>* tensor);
bool QuantizationParametersExist(const TensorT* tensor);
bool HasBuffer(const ModelT* model, const SubGraphT* subgraph,
int tensor_index);
bool HasMinMax(const TensorT* tensor);
void SetOperatorCodeVersion(ModelT* model);
void WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes);
std::unique_ptr<flatbuffers::FlatBufferBuilder> FinishModel(
const tflite::ModelT* model);
std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
const string& model_filepath);
}
}
}
#endif
#include "tensorflow/lite/tools/optimize/model_utils.h"
#include <fstream>
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/tools/optimize/operator_property.h"
namespace tflite {
namespace optimize {
namespace utils {
namespace {
int32_t GetOrInsertOpCodeIndex(ModelT* model, const BuiltinOperator& op_code,
int32_t version) {
for (size_t i = 0; i < model->operator_codes.size(); ++i) {
if (GetBuiltinCode(model->operator_codes[i].get()) == op_code) {
return i;
}
}
model->operator_codes.push_back(std::make_unique<OperatorCodeT>());
int op_code_idx = model->operator_codes.size() - 1;
model->operator_codes[op_code_idx]->builtin_code = op_code;
model->operator_codes[op_code_idx]->deprecated_builtin_code =
ConvertBuiltinCodeToDeprecatedBuiltinCode(op_code);
model->operator_codes[op_code_idx]->version = version;
return op_code_idx;
}
}
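// Creates a DEQUANTIZE (version 2) or QUANTIZE (version 1) operator wiring
// `input` to `output`, registering the opcode in the model if it is not
// already present.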
void MakeDequantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output) {
OperatorT* op_raw = new OperatorT;
op_raw->opcode_index =
GetOrInsertOpCodeIndex(model, BuiltinOperator_DEQUANTIZE, 2);
op_raw->inputs = {input};
op_raw->outputs = {output};
op->reset(op_raw);
}
void MakeQuantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output) {
OperatorT* op_raw = new OperatorT;
op_raw->opcode_index =
GetOrInsertOpCodeIndex(model, BuiltinOperator_QUANTIZE, 1);
op_raw->inputs = {input};
op_raw->outputs = {output};
op->reset(op_raw);
}
void MakeTensor(const string& name, const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = shape;
if (!shape_signature.empty()) {
tensor_raw->shape_signature = shape_signature;
}
tensor_raw->type = type;
tensor->reset(tensor_raw);
}
void MakeTensorWithQuantParam(const string& name,
const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, float scale,
int64_t zero_point,
std::unique_ptr<TensorT>* tensor) {
MakeTensor(name, shape, shape_signature, type, tensor);
(*tensor)->quantization = std::make_unique<QuantizationParametersT>();
(*tensor)->quantization->scale.push_back(scale);
(*tensor)->quantization->zero_point.push_back(zero_point);
}
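// Returns true only if the tensor carries both quantization scales and zero
// points.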
bool QuantizationParametersExist(const TensorT* tensor) {
return tensor->quantization != nullptr &&
!tensor->quantization->scale.empty() &&
!tensor->quantization->zero_point.empty();
}
bool HasBuffer(const ModelT* model, const SubGraphT* subgraph,
int tensor_index) {
const int buffer_index = subgraph->tensors[tensor_index]->buffer;
BufferT* buffer = model->buffers[buffer_index].get();
if (buffer == nullptr || buffer->data.empty()) {
return false;
}
return true;
}
bool HasMinMax(const TensorT* tensor) {
return tensor->quantization && !tensor->quantization->min.empty() &&
!tensor->quantization->max.empty();
}
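// Raises each referenced operator code's version to at least the version
// required by the quantized form of the ops that use it.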
void SetOperatorCodeVersion(ModelT* model) {
for (int subgraph_idx = 0, end = model->subgraphs.size(); subgraph_idx < end;
subgraph_idx++) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (int op_idx = subgraph->operators.size() - 1; op_idx >= 0; op_idx--) {
OperatorT* op = subgraph->operators[op_idx].get();
OperatorCodeT* op_code = model->operator_codes[op->opcode_index].get();
operator_property::OperatorProperty property =
operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);
if (property.quantizable && op_code->version < property.version) {
op_code->version = property.version;
}
}
}
}
void WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes) {
std::fstream stream(out_file, std::ios::binary | std::ios::out);
for (size_t i = 0; i < num_bytes; i++) {
stream << bytes[i];
}
TFLITE_DCHECK(!stream.bad() && !stream.fail());
}
std::unique_ptr<flatbuffers::FlatBufferBuilder> FinishModel(
const tflite::ModelT* model) {
std::unique_ptr<flatbuffers::FlatBufferBuilder> builder(
new flatbuffers::FlatBufferBuilder());
auto packed_model = tflite::Model::Pack(*builder, model);
tflite::FinishModelBuffer(*builder, packed_model);
return builder;
}
std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
const string& model_filepath) {
auto fb_model =
tflite::FlatBufferModel::BuildFromFile(model_filepath.c_str());
auto tflite_model = fb_model->GetModel();
auto copied_model = std::make_unique<tflite::ModelT>();
tflite_model->UnPackTo(copied_model.get(), nullptr);
return copied_model;
}
}
}
} | #include "tensorflow/lite/tools/optimize/model_utils.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace optimize {
namespace utils {
namespace {
TEST(ModelUtilsTest, QuantizationParametersExist) {
TensorT tensor;
tensor.quantization = std::make_unique<QuantizationParametersT>();
tensor.quantization->scale.push_back(0.5);
tensor.quantization->scale.push_back(1.5);
EXPECT_FALSE(QuantizationParametersExist(&tensor));
tensor.quantization->zero_point.push_back(1);
tensor.quantization->zero_point.push_back(-1);
EXPECT_TRUE(QuantizationParametersExist(&tensor));
}
TEST(ModelUtilsTest, HasBuffer) {
tflite::ModelT model;
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto tensor = std::make_unique<tflite::TensorT>();
tensor->buffer = 0;
subgraph->tensors.push_back(std::move(tensor));
model.subgraphs.push_back(std::move(subgraph));
auto buffer = std::make_unique<tflite::BufferT>();
model.buffers.push_back(std::move(buffer));
EXPECT_FALSE(HasBuffer(&model, model.subgraphs[0].get(), 0));
model.buffers[0]->data = {0, 1, 2, 3};
EXPECT_TRUE(HasBuffer(&model, model.subgraphs[0].get(), 0));
}
TEST(ModelUtilsTest, HasMinMax) {
TensorT tensor;
tensor.quantization = std::make_unique<QuantizationParametersT>();
tensor.quantization->min.push_back(0.5);
EXPECT_FALSE(HasMinMax(&tensor));
tensor.quantization->max.push_back(1.5);
EXPECT_TRUE(HasMinMax(&tensor));
}
}
}
}
} |
847 | cpp | tensorflow/tensorflow | quantize_weights | tensorflow/lite/tools/optimize/quantize_weights.cc | tensorflow/lite/tools/optimize/quantize_weights_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_LITE_QUANTIZE_WEIGHTS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_LITE_QUANTIZE_WEIGHTS_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
namespace mlir {
namespace lite {
enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
struct CustomOpInfo {
std::vector<std::int32_t> quantizable_input_indices;
bool is_weight_only = false;
bool no_side_effect = true;
};
using BuiltinOperatorSet = absl::flat_hash_set<tflite::BuiltinOperator>;
using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;
absl::Status QuantizeWeights(
flatbuffers::FlatBufferBuilder* builder, const tflite::Model* input_model,
const tflite::TensorType& inference_type,
const absl::flat_hash_set<std::string>& denylisted_ops,
const CustomOpMap& custom_op_map,
int64_t minimum_elements_for_weights = 1024,
bool disable_per_channel = false, bool weight_only_quantization = false,
bool legacy_float_scale = false);
absl::Status QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
const tflite::Model* input_model,
int64_t weights_min_num_elements,
bool use_hybrid_evaluation = true);
absl::Status QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
const tflite::Model* input_model,
BufferType quant_type = BufferType::QUANTIZED_INT8,
bool use_updated_hybrid_scheme = true);
absl::Status QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
const tflite::Model* input_model,
int64_t weights_min_num_elements,
const CustomOpMap& custom_op_map,
bool use_updated_hybrid_scheme = true,
const BuiltinOperatorSet& op_denylist = {});
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
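// Graph transform that shrinks large float Const nodes (at least minimum_size
// elements): each one is replaced by a quint8 quantized Const plus min/max
// scalar Consts, followed by a Dequantize node that takes over the original
// node name so downstream consumers are unaffected.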
Status QuantizeWeights(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
int32_t minimum_size;
TF_RETURN_IF_ERROR(
context.GetOneInt32Parameter("minimum_size", 1024, &minimum_size));
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def, {"Const"},
[minimum_size](const NodeMatch& match,
const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& old_const_node = match.node;
if (!old_const_node.attr().count("dtype")) {
return errors::InvalidArgument("No 'dtype' attribute for Const node ",
old_const_node.name());
}
if (!old_const_node.attr().count("value")) {
return errors::InvalidArgument("No 'value' attribute for Const node ",
old_const_node.name());
}
const DataType old_dtype = old_const_node.attr().at("dtype").type();
Tensor old_tensor;
if (!old_tensor.FromProto(old_const_node.attr().at("value").tensor())) {
return errors::InvalidArgument("Decoding Tensor failed for node",
old_const_node.name());
}
const size_t num_elements = old_tensor.NumElements();
if ((old_dtype != DT_FLOAT) || (num_elements < minimum_size)) {
new_nodes->push_back(old_const_node);
return OkStatus();
}
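        // Scan for the value range of the weights; the range is then widened
        // to include zero and adjusted to avoid a degenerate (min == max)
        // interval. Note that the running maximum starts at lowest(), since
        // numeric_limits<float>::min() is the smallest positive float.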
const float* old_values = old_tensor.flat<float>().data();
float min = std::numeric_limits<float>::max();
        float max = std::numeric_limits<float>::lowest();
for (int i = 0; i < num_elements; ++i) {
const float value = old_values[i];
min = std::min(min, value);
max = std::max(max, value);
}
min = std::min(min, 0.0f);
max = std::max(0.0f, max);
if (min == max) {
if (std::abs(min) < 0.000001f) {
max = min + 1.0f;
} else if (min > 0) {
max = 2.0f * min;
} else {
max = min / 2.0f;
}
}
Tensor quantized_tensor(DT_QUINT8, old_tensor.shape());
FloatTensorToQuantizedInPlace<quint8>(old_tensor, min, max,
&quantized_tensor);
NodeDef quantized_const_node;
quantized_const_node.set_op("Const");
quantized_const_node.set_name(old_const_node.name() +
"_quantized_const");
SetNodeAttr("dtype", DT_QUINT8, &quantized_const_node);
SetNodeTensorAttr<float>("value", quantized_tensor,
&quantized_const_node);
new_nodes->push_back(quantized_const_node);
NodeDef min_node;
min_node.set_op("Const");
min_node.set_name(old_const_node.name() + "_quantized_min");
SetNodeAttr("dtype", DT_FLOAT, &min_node);
Tensor min_tensor(DT_FLOAT, {});
min_tensor.scalar<float>()() = min;
SetNodeTensorAttr<float>("value", min_tensor, &min_node);
new_nodes->push_back(min_node);
NodeDef max_node;
max_node.set_op("Const");
max_node.set_name(old_const_node.name() + "_quantized_max");
SetNodeAttr("dtype", DT_FLOAT, &max_node);
Tensor max_tensor(DT_FLOAT, {});
max_tensor.scalar<float>()() = max;
SetNodeTensorAttr<float>("value", max_tensor, &max_node);
new_nodes->push_back(max_node);
NodeDef dequantize_node;
dequantize_node.set_op("Dequantize");
dequantize_node.set_name(old_const_node.name());
SetNodeAttr("T", DT_QUINT8, &dequantize_node);
SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
AddNodeInput(quantized_const_node.name(), &dequantize_node);
AddNodeInput(min_node.name(), &dequantize_node);
AddNodeInput(max_node.name(), &dequantize_node);
new_nodes->push_back(dequantize_node);
return OkStatus();
},
{}, output_graph_def));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("quantize_weights", QuantizeWeights);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status QuantizeWeights(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
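// Builds a small Conv2D graph with a float constant weights tensor, runs the
// quantize_weights transform, and checks both the rewritten structure
// (a Dequantize fed by a quint8 Const) and that the quantized graph's output
// stays numerically close to the float original.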
class QuantizeWeightsTest : public ::testing::Test {
protected:
void BuildGraphDef(const TensorShape& input_shape,
std::initializer_list<float> input_values,
const TensorShape& weight_shape,
std::initializer_list<float> weight_values,
GraphDef* original_graph_def) {
auto root = tensorflow::Scope::DisabledShapeInferenceScope();
Tensor input_data(DT_FLOAT, input_shape);
test::FillValues<float>(&input_data, input_values);
Output input_op =
ops::Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, weight_shape);
test::FillValues<float>(&weights_data, weight_values);
Output weights_op = ops::Const(root.WithOpName("weights_op"),
Input::Initializer(weights_data));
Output conv_op = ops::Conv2D(root.WithOpName("output"), input_op,
weights_op, {1, 1, 1, 1}, "VALID");
TF_ASSERT_OK(root.ToGraphDef(original_graph_def));
}
void TestQuantizeWeights() {
GraphDef original_graph_def;
BuildGraphDef({1, 1, 6, 2},
{1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f},
{1, 2, 2, 10},
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f,
3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f,
0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f,
0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f},
&original_graph_def);
TransformFuncContext context;
context.output_names = {"output"};
context.params["minimum_size"] = {"16"};
GraphDef quantized_graph_def;
TF_ASSERT_OK(
QuantizeWeights(original_graph_def, context, &quantized_graph_def));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(quantized_graph_def, &node_lookup);
EXPECT_EQ(1, node_lookup.count("input_op"));
const NodeDef* q_input_op = node_lookup.at("input_op");
EXPECT_EQ(DT_FLOAT, q_input_op->attr().at("dtype").type());
EXPECT_EQ(1, node_lookup.count("weights_op"));
const NodeDef* q_weights_op = node_lookup.at("weights_op");
EXPECT_EQ("Dequantize", q_weights_op->op());
const string& weights_const_name =
NodeNameFromInput(q_weights_op->input(0));
EXPECT_EQ(1, node_lookup.count(weights_const_name));
const NodeDef* q_weights_const = node_lookup.at(weights_const_name);
EXPECT_EQ("Const", q_weights_const->op());
EXPECT_EQ(DT_QUINT8, q_weights_const->attr().at("dtype").type());
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
std::unique_ptr<Session> quantized_session(NewSession(SessionOptions()));
TF_ASSERT_OK(quantized_session->Create(quantized_graph_def));
std::vector<Tensor> quantized_outputs;
TF_ASSERT_OK(
quantized_session->Run({}, {"output"}, {}, &quantized_outputs));
test::ExpectTensorNear<float>(original_outputs[0], quantized_outputs[0],
0.5);
}
};
TEST_F(QuantizeWeightsTest, TestQuantizeWeights) { TestQuantizeWeights(); }
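// Quantization ranges must always include zero, even when every value in a
// constant is strictly negative (the input) or strictly positive (the
// weights).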
TEST_F(QuantizeWeightsTest, RangesAlwaysIncludeZero) {
GraphDef original_graph_def;
BuildGraphDef({1, 1, 4, 4},
{-1.0f, -4.0f, -2.0f, -5.0f, -1.0f, -4.0f, -2.0f, -5.0f, -1.0f,
-4.0f, -2.0f, -5.0f, -1.0f, -4.0f, -2.0f, -5.0f},
{1, 2, 2, 10},
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f,
3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f,
0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f,
0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f},
&original_graph_def);
TransformFuncContext context;
context.output_names = {"output"};
context.params["minimum_size"] = {"16"};
GraphDef quantized_graph_def;
TF_ASSERT_OK(
QuantizeWeights(original_graph_def, context, &quantized_graph_def));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(quantized_graph_def, &node_lookup);
auto expected_tensor = [](float value) {
Tensor tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&tensor, {value});
return tensor;
};
auto existing_tensor = [&node_lookup](string op) {
const NodeDef* node_def = node_lookup.at(op);
CHECK(node_def);
return GetNodeTensorAttr(*node_def, "value");
};
test::ExpectTensorNear<float>(
expected_tensor(-5.0), existing_tensor("input_op_quantized_min"), 1e-5);
test::ExpectTensorNear<float>(
expected_tensor(0.0), existing_tensor("input_op_quantized_max"), 1e-5);
test::ExpectTensorNear<float>(
expected_tensor(0.0), existing_tensor("weights_op_quantized_min"), 1e-5);
test::ExpectTensorNear<float>(
expected_tensor(4.0), existing_tensor("weights_op_quantized_max"), 1e-5);
}
}
} |
848 | cpp | tensorflow/tensorflow | quantization_utils | tensorflow/lite/tools/optimize/quantization_utils.cc | tensorflow/lite/tools/optimize/quantization_utils_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_QUANTIZATION_UTILS_H_
#define TENSORFLOW_CORE_KERNELS_QUANTIZATION_UTILS_H_
#include <cmath>
#define EIGEN_USE_THREADS
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define QUANTIZATION_UTILS_USE_NEON
#include <arm_neon.h>
#endif
#include <array>
#include "unsupported/Eigen/CXX11/Tensor"
#define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
#include "public/gemmlowp.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/threadpool.h"
namespace tensorflow {
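// Maps `input` to its integer code under the affine quantization defined by
// [range_min, range_max] for type T, without clamping to T's limits:
//   code = round(input * s) - round(range_min * s) + lowest(T),
//   s    = (2^bits - 1) / (range_max - range_min).
// A degenerate range (range_min == range_max) maps everything to lowest(T).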
template <class T>
inline int64_t FloatToQuantizedUnclamped(float input, float range_min,
float range_max) {
const int64_t lowest_quantized =
static_cast<double>(Eigen::NumTraits<T>::lowest());
if (range_min == range_max) {
return lowest_quantized;
}
const int number_of_bits = sizeof(T) * 8;
const int64_t number_of_steps = static_cast<int64_t>(1) << number_of_bits;
const double range_adjust = (number_of_steps / (number_of_steps - 1.0));
const double range = ((range_max - range_min) * range_adjust);
const double range_scale = (number_of_steps / range);
int64_t quantized =
(round(input * range_scale) - round(range_min * range_scale));
quantized += lowest_quantized;
return quantized;
}
template <>
inline int64_t FloatToQuantizedUnclamped<float>(float input, float range_min,
float range_max) {
return -1;
}
template <class T>
T FloatToQuantized(float input, float range_min, float range_max) {
if (std::is_same<T, float>::value) {
return input;
}
int64_t quantized = FloatToQuantizedUnclamped<T>(input, range_min, range_max);
const int64_t lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<T>::lowest());
const int64_t highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<T>::highest());
quantized = std::max(quantized, lowest_quantized);
quantized = std::min(quantized, highest_quantized);
return static_cast<T>(static_cast<int32>(quantized));
}
template <class T>
float QuantizedToFloat(T input, float range_min, float range_max) {
if (std::is_same<T, float>::value) {
return input;
}
if (range_min == range_max) {
return range_min;
}
const int number_of_bits = sizeof(T) * 8;
const int64_t number_of_steps = static_cast<int64_t>(1) << number_of_bits;
const double range_adjust = (number_of_steps / (number_of_steps - 1.0));
const double range = ((range_max - range_min) * range_adjust);
const double range_scale = (range / number_of_steps);
const int64_t lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<T>::lowest());
const double offset_input = static_cast<double>(input) - lowest_quantized;
const double range_min_rounded =
std::round(range_min / static_cast<float>(range_scale)) *
static_cast<float>(range_scale);
const double result = range_min_rounded + (offset_input * range_scale);
return static_cast<float>(result);
}
template <class T>
float FloatForOneQuantizedLevel(float range_min, float range_max) {
const int64_t highest = static_cast<int64_t>(Eigen::NumTraits<T>::highest());
const int64_t lowest = static_cast<int64_t>(Eigen::NumTraits<T>::lowest());
const float float_for_one_quantized_level =
(range_max - range_min) / (highest - lowest);
return float_for_one_quantized_level;
}
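// Given the float ranges of two quantized operands, computes the output range
// for their product so that one output quantization step equals the product
// of one step of each operand.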
template <class T1, class T2, class T3>
void QuantizationRangeForMultiplication(float min_a, float max_a, float min_b,
float max_b, float* min_c,
float* max_c) {
const float a_float_for_one_quant_level =
FloatForOneQuantizedLevel<T1>(min_a, max_a);
const float b_float_for_one_quant_level =
FloatForOneQuantizedLevel<T2>(min_b, max_b);
const int64_t c_highest =
static_cast<int64_t>(Eigen::NumTraits<T3>::highest());
const int64_t c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest());
const float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
*min_c = c_float_for_one_quant_level * c_lowest;
*max_c = c_float_for_one_quant_level * c_highest;
}
#define DEQUANTIZE_WITH_EIGEN(input_array, q2f) \
((q2f.range_min_rounded - q2f.lowest_quantized() * q2f.range_scale) + \
input_array.template cast<float>() * q2f.range_scale)
#define QUANTIZE_WITH_EIGEN(input_array, f2q, OutputType) \
((input_array * f2q.range_scale).round() - \
(f2q.range_min_scaled - f2q.lowest_quantized())) \
.cwiseMax(f2q.lower_bound_float()) \
.cwiseMin(f2q.upper_bound_float()) \
.template cast<int32>() \
.template cast<OutputType>()
template <typename T>
struct QuantizedToFloatStruct {
static constexpr int number_of_bits = sizeof(T) * 8;
static constexpr int64_t number_of_steps = static_cast<int64_t>(1)
<< number_of_bits;
static float lowest_quantized() {
return static_cast<float>(Eigen::NumTraits<T>::lowest());
}
QuantizedToFloatStruct(float range_min, float range_max)
: range_min(range_min),
range_scale((range_max - range_min) / (number_of_steps - 1.0)),
range_min_rounded(range_max == range_min
? range_min
: std::round(range_min / range_scale) *
range_scale) {}
const float range_min;
const float range_scale;
const float range_min_rounded;
};
template <typename T>
struct FloatToQuantizedStruct {
static constexpr int number_of_bits = sizeof(T) * 8;
static constexpr int64_t number_of_steps = static_cast<int64_t>(1)
<< number_of_bits;
static constexpr double range_adjust =
(number_of_steps / (number_of_steps - 1.0));
static float lower_bound_float() {
return Eigen::numext::maxi(
static_cast<float>(Eigen::NumTraits<T>::lowest()), -2.147483648e+09f);
}
static float upper_bound_float() {
return Eigen::numext::mini(
static_cast<float>(Eigen::NumTraits<T>::highest()), +2.147483520e+09f);
}
static float lowest_quantized() {
return static_cast<float>(Eigen::NumTraits<T>::lowest());
}
FloatToQuantizedStruct(float range_min, float range_max)
: range_min(range_min),
range_scale(range_max == range_min
? 0.0
: (number_of_steps - 1.0) / (range_max - range_min)),
range_min_scaled(std::round(range_min * range_scale)) {}
const float range_min;
const float range_scale;
const float range_min_scaled;
};
template <class T1, class T2>
inline T2 RequantizeInNewRange(T1 input, float min_input, float max_input,
float min_new, float max_new) {
const float input_float = QuantizedToFloat<T1>(input, min_input, max_input);
return FloatToQuantized<T2>(input_float, min_new, max_new);
}
template <class T1, class T2>
inline void RequantizeManyInNewRange(const T1* input, int64_t count,
float min_input, float max_input,
float min_output, float max_output,
T2* output) {
for (size_t index = 0; index < count; ++index) {
const float input_float =
QuantizedToFloat<T1>(input[index], min_input, max_input);
output[index] = FloatToQuantized<T2>(input_float, min_output, max_output);
}
}
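// Scalar reference implementation of qint32 -> quint8 requantization. It
// folds the two affine maps into a single fixed-point multiply/add with
// fp_shift fractional bits and clamps the result to [0, 255].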
inline void RequantizeManyInNewRangeReference(const qint32* input,
int64_t count, float min_input,
float max_input, float min_output,
float max_output,
quint8* output) {
const int fp_shift = 16;
const float input_range = max_input - min_input;
const float output_range = max_output - min_output;
const float recip_output_range =
output_range == 0.0 ? 0.0 : (255.0 / output_range);
const float input_rezero = (min_input + max_input) / 2.0;
const int64_t range_scale_fp =
output_range == 0.0 ? 0.0
: static_cast<int64_t>(255.0 * (1 << fp_shift) *
input_range / output_range);
const int64_t input_offset_fp =
static_cast<int64_t>(input_rezero * recip_output_range * (1 << fp_shift));
const int64_t output_offset_fp =
output_range == 0.0
? 0
: std::lround((1 << fp_shift) * (min_output * 255.0) / output_range);
const int64_t rounding_delta = 1 << (fp_shift - 1);
for (int64_t index = 0; index < count; ++index) {
const int64_t input_value = static_cast<int64_t>(input[index]);
const int64_t fp_value =
((input_value * range_scale_fp) >> 32) + input_offset_fp;
const int64_t offset_intermediate = fp_value - output_offset_fp;
const int64_t round_intermediate = offset_intermediate + rounding_delta;
int64_t quantized_int64 = round_intermediate >> fp_shift;
quantized_int64 = std::max(quantized_int64, int64_t{0});
quantized_int64 = std::min(quantized_int64, int64_t{255});
output[index] = static_cast<quint8>(static_cast<int32>(quantized_int64));
}
}
inline void RequantizeManyInNewRange8To32BitReference(
const quint8* input, int64_t count, float min_input, float max_input,
float min_output, float max_output, qint32* output) {
const float code_0_float = QuantizedToFloat<quint8>(0, min_input, max_input);
const float code_1_float = QuantizedToFloat<quint8>(1, min_input, max_input);
const int64_t code_0_int64 =
FloatToQuantizedUnclamped<qint32>(code_0_float, min_output, max_output);
const int64_t code_1_int64 =
FloatToQuantizedUnclamped<qint32>(code_1_float, min_output, max_output);
const int32_t mult_int32 = code_1_int64 - code_0_int64;
const int64_t lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64_t highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
for (int64_t i = 0; i < count; ++i) {
const int64_t input_value = static_cast<int64_t>(input[i]);
int64_t output_value = code_0_int64 + (input_value * mult_int32);
output_value = std::max(output_value, lowest_quantized);
output_value = std::min(output_value, highest_quantized);
output[i] = static_cast<int32>(output_value);
}
}
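// NEON implementations. The qint32 -> quint8 specialization below processes
// eight values per iteration and falls back to the scalar reference when the
// ratio of input range to output range is too large for the fixed-point
// representation to stay accurate.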
#ifdef QUANTIZATION_UTILS_USE_NEON
inline void RequantizeManyInNewRangeNeon(const qint32* input, int64 count,
float min_input, float max_input,
float min_output, float max_output,
quint8* output) {
const int fp_shift = 16;
const float input_range = max_input - min_input;
const float output_range = max_output - min_output;
const float recip_output_range =
output_range == 0.0 ? 0.0 : (255.0 / output_range);
const float input_rezero = (min_input + max_input) / 2.0;
const int32 range_scale_fp =
output_range == 0.0 ? 0.0
: static_cast<int32>(255.0 * (1 << (fp_shift - 16)) *
input_range / output_range);
const int32 input_offset_fp =
static_cast<int32>(input_rezero * recip_output_range * (1 << fp_shift));
const int32 output_offset_fp =
output_range == 0.0
? 0
: static_cast<int32>((1 << fp_shift) * (min_output * 255.0) /
output_range);
const int32 rounding_delta = 1 << (fp_shift - 1);
const int32x4_t range_scale_fp_32x4 = vmovq_n_s32(range_scale_fp);
const int32x4_t input_offset_fp_32x4 = vmovq_n_s32(input_offset_fp);
const int32x4_t output_offset_fp_32x4 = vmovq_n_s32(output_offset_fp);
const int32x4_t rounding_delta_32x4 = vmovq_n_s32(rounding_delta);
int64 index = 0;
for (; index < (count - 7); index += 8) {
const int32* input_ptr = &(input->value) + index;
const int32x4_t input_value_low_32x4 = vld1q_s32(input_ptr);
const int32x4_t input_value_high_32x4 = vld1q_s32(input_ptr + 4);
const int32x4_t fp_value_low_32x4 = vaddq_s32(
input_offset_fp_32x4,
vmulq_s32(vshrq_n_s32(input_value_low_32x4, 16), range_scale_fp_32x4));
const int32x4_t fp_value_high_32x4 = vaddq_s32(
input_offset_fp_32x4,
vmulq_s32(vshrq_n_s32(input_value_high_32x4, 16), range_scale_fp_32x4));
const int32x4_t offset_intermediate_low_32x4 =
vsubq_s32(fp_value_low_32x4, output_offset_fp_32x4);
const int32x4_t offset_intermediate_high_32x4 =
vsubq_s32(fp_value_high_32x4, output_offset_fp_32x4);
const int32x4_t round_intermediate_low_32x4 =
vaddq_s32(offset_intermediate_low_32x4, rounding_delta_32x4);
const int32x4_t round_intermediate_high_32x4 =
vaddq_s32(offset_intermediate_high_32x4, rounding_delta_32x4);
const int16x4_t quantized_low_16x4 =
vqmovn_s32(vshrq_n_s32(round_intermediate_low_32x4, fp_shift));
const int16x4_t quantized_high_16x4 =
vqmovn_s32(vshrq_n_s32(round_intermediate_high_32x4, fp_shift));
const uint8x8_t quantized_8x8 =
vqmovun_s16(vcombine_s16(quantized_low_16x4, quantized_high_16x4));
uint8* output_ptr = &(output->value) + index;
vst1_u8(output_ptr, quantized_8x8);
}
for (; index < count; ++index) {
const int32 input_value = static_cast<int32>(input[index]);
const int32 fp_value =
static_cast<int32>(
(static_cast<int32>(input_value >> 16) * (range_scale_fp))) +
input_offset_fp;
const int32 offset_intermediate = fp_value - output_offset_fp;
const int32 round_intermediate = offset_intermediate + rounding_delta;
int32 quantized_int32 = round_intermediate >> fp_shift;
quantized_int32 = std::max(quantized_int32, 0);
quantized_int32 = std::min(quantized_int32, 255);
output[index] = static_cast<quint8>(static_cast<int32>(quantized_int32));
}
}
template <>
inline void RequantizeManyInNewRange<qint32, quint8>(
const qint32* input, int64 count, float min_input, float max_input,
float min_output, float max_output, quint8* output) {
const float input_range = max_input - min_input;
const float output_range = max_output - min_output;
if ((input_range / output_range) > 16384.0f) {
RequantizeManyInNewRangeReference(input, count, min_input, max_input,
min_output, max_output, output);
} else {
RequantizeManyInNewRangeNeon(input, count, min_input, max_input, min_output,
max_output, output);
}
}
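// Signed division by 2^POW on NEON vectors. The *PowRound variants round half
// away from zero (rounding shift on the absolute value, then restoring the
// sign); the plain variants truncate toward zero.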
template <int POW>
inline int16x8_t Divide16x8PowRound(const int16x8_t val) {
const int16x8_t val_sign = vshrq_n_s16(val, 15);
const int16x8_t val_xor = veorq_s16(val, val_sign);
const int16x8_t val_pos = vsubq_s16(val_xor, val_sign);
const int16x8_t shifted_val_pos = vrshrq_n_s16(val_pos, POW);
const int16x8_t shifted_val_pos_xor = veorq_s16(shifted_val_pos, val_sign);
const int16x8_t shifted_val = vsubq_s16(shifted_val_pos_xor, val_sign);
return shifted_val;
}
template <int POW>
inline int64x2_t Divide64x2PowRound(const int64x2_t val) {
const int64x2_t val_sign = vshrq_n_s64(val, 63);
const int64x2_t val_xor = veorq_s64(val, val_sign);
const int64x2_t val_pos = vsubq_s64(val_xor, val_sign);
const int64x2_t shifted_val_pos = vrshrq_n_s64(val_pos, POW);
const int64x2_t shifted_val_pos_xor = veorq_s64(shifted_val_pos, val_sign);
const int64x2_t shifted_val = vsubq_s64(shifted_val_pos_xor, val_sign);
return shifted_val;
}
template <int POW>
inline int16x8_t Divide16x8Pow(const int16x8_t val) {
static constexpr int16 FIRST_BIT_VAL = 0x0000000000000001;
static const int16x8_t FIRST_BIT = vmovq_n_s16(FIRST_BIT_VAL);
const int16x8_t val_sign = vshrq_n_s16(val, 15);
const int16x8_t neg_offset = vandq_s16(val_sign, FIRST_BIT);
const int16x8_t val_with_offset = vsubq_s16(val, neg_offset);
const int16x8_t shifted_wo_offset =
vsraq_n_s16(neg_offset, val_with_offset, POW);
return shifted_wo_offset;
}
template <int POW>
inline int64x2_t Divide64x2Pow(const int64x2_t val) {
static constexpr int64 FIRST_BIT_VAL = 0x0000000000000001;
static const int64x2_t FIRST_BIT = vmovq_n_s64(FIRST_BIT_VAL);
const int64x2_t val_sign = vshrq_n_s64(val, 63);
const int64x2_t neg_offset = vandq_s64(val_sign, FIRST_BIT);
const int64x2_t val_with_offset = vsubq_s64(val, neg_offset);
const int64x2_t shifted_wo_offset =
vsraq_n_s64(neg_offset, val_with_offset, POW);
return shifted_wo_offset;
}
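// ComputeLerp32x2 and ComputeLerp8x8 evaluate a bilinear interpolation in
// fixed point: top = top_left + (top_right - top_left) * x_lerp, bottom is
// formed the same way, and the result is top + (bottom - top) * y_lerp. The
// lerp weights are expected in Q(RESOLUTION) fixed point; intermediates are
// widened and scaled by 2^RESOLUTION, then divided back down with the
// rounding helpers above.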
template <int RESOLUTION>
inline int32x2_t ComputeLerp32x2(const int32x2_t top_left,
const int32x2_t top_right,
const int32x2_t bottom_left,
const int32x2_t bottom_right,
const int32x2_t x_lerp,
const int32x2_t y_lerp) {
static_assert(RESOLUTION < 31, "RESOLUTION must be less than 31");
constexpr int32 RESOLUTION_MULT32 = (1 << RESOLUTION);
static const int32x2_t RESOLUTION_MULT32x2 = vmov_n_s32(RESOLUTION_MULT32);
const int64x2_t top_left_x_res = vmull_s32(top_left, RESOLUTION_MULT32x2);
const int64x2_t bottom_left_x_res =
vmull_s32(bottom_left, RESOLUTION_MULT32x2);
const int32x2_t top_right_sub_top_left = vsub_s32(top_right, top_left);
const int64x2_t top_x_res =
vmlal_s32(top_left_x_res, top_right_sub_top_left, x_lerp);
const int32x2_t bottom_right_sub_bottom_left =
vsub_s32(bottom_right, bottom_left);
const int64x2_t bottom_x_res =
vmlal_s32(bottom_left_x_res, bottom_right_sub_bottom_left, x_lerp);
const int64x2_t bottom_sub_top_x_res = vsubq_s64(bottom_x_res, top_x_res);
const int64x2_t bottom_sub_top =
Divide64x2Pow<RESOLUTION>(bottom_sub_top_x_res);
const int32x2_t bottom_sub_top_32 = vqmovn_s64(bottom_sub_top);
const int64x2_t top_add_bottom_sub_top_mul_ylerp_x_res =
vmlal_s32(top_x_res, bottom_sub_top_32, y_lerp);
const int64x2_t retval =
Divide64x2PowRound<RESOLUTION>(top_add_bottom_sub_top_mul_ylerp_x_res);
const int32x2_t retval32 = vqmovn_s64(retval);
return retval32;
}
template <int RESOLUTION>
inline uint8x8_t ComputeLerp8x8(const uint8x8_t top_left8x8,
const uint8x8_t top_right8x8,
const uint8x8_t bottom_left8x8,
const uint8x8_t bottom_right8x8,
const int16x8_t x_lerp,
const int16x8_t y_lerp) {
static_assert(RESOLUTION < 8, "RESOLUTION must be less than 8");
constexpr uint8 RESOLUTION_MULT_VAL = (1 << RESOLUTION);
static const uint8x8_t RESOLUTION_MULT = vdup_n_u8(RESOLUTION_MULT_VAL);
const int16x8_t top_left_x_res =
vreinterpretq_s16_u16(vmull_u8(top_left8x8, RESOLUTION_MULT));
const int16x8_t bottom_left_x_res =
vreinterpretq_s16_u16(vmull_u8(bottom_left8x8, RESOLUTION_MULT));
const int16x8_t top_right_sub_top_left =
vreinterpretq_s16_u16(vsubl_u8(top_right8x8, top_left8x8));
const int16x8_t top_x_res =
vmlaq_s16(top_left_x_res, top_right_sub_top_left, x_lerp);
const int16x8_t bottom_right_sub_bottom_left =
vreinterpretq_s16_u16(vsubl_u8(bottom_right8x8, bottom_left8x8));
const int16x8_t bottom_x_res =
vmlaq_s16(bottom_left_x_res, bottom_right_sub_bottom_left, x_lerp);
const int16x8_t bottom_sub_top_x_res = vsubq_s16(bottom_x_res, top_x_res);
const int16x8_t bottom_sub_top =
Divide16x8Pow<RESOLUTION>(bottom_sub_top_x_res);
const int16x8_t top_add_bottom_sub_top_mul_ylerp_x_res =
vmlaq_s16(top_x_res, bottom_sub_top, y_lerp);
const int16x8_t retval16 =
Divide16x8PowRound<RESOLUTION>(top_add_bottom_sub_top_mul_ylerp_x_res);
const uint8x8_t retval = vmovn_u16(vreinterpretq_u16_s16(retval16));
return retval;
}
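// Requantize8x8To32Neon widens 8 uint8 codes to 32 bits and applies the affine
// map input_0 + value * input_mult with 64-bit accumulation, then saturates
// back down to two int32x4 vectors holding the low and high halves of the
// original 8 lanes.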
inline std::array<int32x4_t, 2> Requantize8x8To32Neon(
const uint8* input_ptr, const int64x2_t input_0_64x2,
const int32x2_t input_mult_32x2) {
const uint8x8_t input_value_8x8 = vld1_u8(input_ptr);
const int16x8_t input_value_16x8 =
vreinterpretq_s16_u16(vmovl_u8(input_value_8x8));
const int16x4_t input_value_low_16x4 = vget_low_s16(input_value_16x8);
const int16x4_t input_value_high_16x4 = vget_high_s16(input_value_16x8);
const int32x4_t input_value_low_32x4 = vmovl_s16(input_value_low_16x4);
const int32x4_t input_value_high_32x4 = vmovl_s16(input_value_high_16x4);
const int32x2_t input_value_low_low_32x2 = vget_low_s32(input_value_low_32x4);
const int32x2_t input_value_low_high_32x2 =
vget_high_s32(input_value_low_32x4);
const int32x2_t input_value_high_low_32x2 =
vget_low_s32(input_value_high_32x4);
const int32x2_t input_value_high_high_32x2 =
vget_high_s32(input_value_high_32x4);
const int64x2_t mult_result_low_low_64x2 =
vmlal_s32(input_0_64x2, input_value_low_low_32x2, input_mult_32x2);
const int64x2_t mult_result_low_high_64x2 =
vmlal_s32(input_0_64x2, input_value_low_high_32x2, input_mult_32x2);
const int64x2_t mult_result_high_low_64x2 =
vmlal_s32(input_0_64x2, input_value_high_low_32x2, input_mult_32x2);
const int64x2_t mult_result_high_high_64x2 =
vmlal_s32(input_0_64x2, input_value_high_high_32x2, input_mult_32x2);
const int32x2_t output_value_low_low_32x2 =
vqmovn_s64(mult_result_low_low_64x2);
const int32x2_t output_value_low_high_32x2 =
vqmovn_s64(mult_result_low_high_64x2);
const int32x2_t output_value_high_low_32x2 =
vqmovn_s64(mult_result_high_low_64x2);
const int32x2_t output_value_high_high_32x2 =
vqmovn_s64(mult_result_high_high_64x2);
const int32x4_t output_value_low_32x4 =
vcombine_s32(output_value_low_low_32x2, output_value_low_high_32x2);
const int32x4_t output_value_high_32x4 =
vcombine_s32(output_value_high_low_32x2, output_value_high_high_32x2);
return std::array<int32x4_t, 2>{
{output_value_low_32x4, output_value_high_32x4}};
}
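// quint8 -> qint32 requantization is an affine map, so it is fully determined
// by the qint32 codes of input codes 0 and 1: output = code_0 + input *
// (code_1 - code_0). The specialization below computes those two anchor codes
// once, runs the NEON helper above on blocks of 8 values, and handles any
// remainder with a scalar loop clamped to the qint32 range.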
template <>
inline void RequantizeManyInNewRange<quint8, qint32>(
const quint8* input, int64 count, float min_input, float max_input,
float min_output, float max_output, qint32* output) {
const float code_0_float = QuantizedToFloat<quint8>(0, min_input, max_input);
const float code_1_float = QuantizedToFloat<quint8>(1, min_input, max_input);
const int64 code_0_int64 =
FloatToQuantizedUnclamped<qint32>(code_0_float, min_output, max_output);
const int64 code_1_int64 =
FloatToQuantizedUnclamped<qint32>(code_1_float, min_output, max_output);
const int32 mult_int32 = static_cast<int32>(code_1_int64 - code_0_int64);
const int64x2_t code_0_64x2 = vmovq_n_s64(code_0_int64);
const int32x2_t mult_32x2 = vmov_n_s32(mult_int32);
int64 i = 0;
for (; i < (count - 7); i += 8) {
const uint8* input_ptr = &(input->value) + i;
int32* output_ptr = &(output->value) + i;
const std::array<int32x4_t, 2> output_value =
Requantize8x8To32Neon(input_ptr, code_0_64x2, mult_32x2);
vst1q_s32(output_ptr + 0, output_value[0]);
vst1q_s32(output_ptr + 4, output_value[1]);
}
const int64 lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64 highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
for (; i < count; ++i) {
const int64 input_value = static_cast<int64_t>(input[i]);
int64 output_value = code_0_int64 + (input_value * mult_int32);
output_value = std::max(output_value, lowest_quantized);
output_value = std::min(output_value, highest_quantized);
output[i] = static_cast<int32>(output_value);
}
}
#else
template <>
inline void RequantizeManyInNewRange<qint32, quint8>(
const qint32* input, int64_t count, float min_input, float max_input,
float min_output, float max_output, quint8* output) {
RequantizeManyInNewRangeReference(input, count, min_input, max_input,
min_output, max_output, output);
}
template <>
inline void RequantizeManyInNewRange<quint8, qint32>(
const quint8* input, int64_t count, float min_input, float max_input,
float min_output, float max_output, qint32* output) {
RequantizeManyInNewRange8To32BitReference(input, count, min_input, max_input,
min_output, max_output, output);
}
#endif
template <int shift>
struct int64_right_shift_op {
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const int64_t operator()(const int64_t a) const {
return a >> shift;
}
};
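// Generic Eigen path: dequantize the whole tensor to float with an Eigen
// expression and immediately quantize into the output range, letting the
// ThreadPoolDevice parallelize the element-wise work. The qint32 -> quint8
// specialization that follows avoids the float round trip by performing the
// same affine rescale in 64-bit fixed point.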
template <class T1, class T2>
inline void RequantizeManyInNewRangeUsingEigen(
const Eigen::ThreadPoolDevice& device, const Tensor& input, float min_input,
float max_input, float min_output, float max_output, Tensor* output) {
auto input_array = input.flat<T1>();
QuantizedToFloatStruct<T1> q2f(min_input, max_input);
auto input_float = DEQUANTIZE_WITH_EIGEN(input_array, q2f);
FloatToQuantizedStruct<T2> f2q(min_output, max_output);
auto input_requantized = QUANTIZE_WITH_EIGEN(input_float, f2q, T2);
output->flat<T2>().device(device) = input_requantized;
}
template <>
inline void RequantizeManyInNewRangeUsingEigen<qint32, quint8>(
const Eigen::ThreadPoolDevice& device, const Tensor& input, float min_input,
float max_input, float min_output, float max_output, Tensor* output) {
const int fp_shift = 16;
const float input_range = max_input - min_input;
const float output_range = max_output - min_output;
const float recip_output_range =
output_range == 0.0 ? 0.0 : (255.0 / output_range);
const float input_rezero = (min_input + max_input) / 2.0;
const int64_t range_scale_fp =
output_range == 0.0 ? 0.0
: static_cast<int64_t>(255.0 * (1 << fp_shift) *
input_range / output_range);
const int64_t input_offset_fp =
static_cast<int64_t>(input_rezero * recip_output_range * (1 << fp_shift));
const int64_t output_offset_fp =
output_range == 0.0
? 0
: std::lround((1 << fp_shift) * (min_output * 255.0) / output_range);
const int64_t rounding_delta = 1 << (fp_shift - 1);
auto input_array = input.flat<qint32>();
auto fp_value = ((input_array.template cast<int64_t>() * range_scale_fp)
.unaryExpr(int64_right_shift_op<32>())) +
(input_offset_fp - output_offset_fp + rounding_delta);
auto intermediate = fp_ | #include <cmath>
#define EIGEN_USE_THREADS
#include <limits>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
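// Test helper: requantizes the qint32 'values_quantized' into quint8 codes in
// the output range (scalar/NEON path when 'eigen_device' is null, Eigen path
// otherwise) and checks every result against a float round-trip reference,
// allowing a difference of at most 'tolerance' quantized steps.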
void TestRequantizeMany(Eigen::ThreadPoolDevice* eigen_device, float input_min,
float input_max, float output_min, float output_max,
const std::vector<qint32>& values_quantized,
int tolerance = 1) {
const int values_count = values_quantized.size();
std::vector<quint8> expected_values;
expected_values.reserve(values_count);
for (int value_index = 0; value_index < values_count; ++value_index) {
expected_values.push_back(FloatToQuantized<quint8>(
QuantizedToFloat(values_quantized[value_index], input_min, input_max),
output_min, output_max));
}
Tensor i_tensor =
tensorflow::test::AsTensor(absl::Span<const qint32>(values_quantized));
Tensor o_tensor(DT_QUINT8, TensorShape{values_count});
auto output_values = o_tensor.flat<quint8>();
if (eigen_device == nullptr) {
auto input_array = i_tensor.flat<qint32>();
RequantizeManyInNewRange(input_array.data(), input_array.size(), input_min,
input_max, output_min, output_max,
output_values.data());
} else {
RequantizeManyInNewRangeUsingEigen<qint32, quint8>(
*eigen_device, i_tensor, input_min, input_max, output_min, output_max,
&o_tensor);
}
const string tolerance_str = strings::StrCat("+-", tolerance);
for (size_t value_index = 0; value_index < values_count; ++value_index) {
int e = expected_values[value_index];
int v = output_values(value_index);
ASSERT_TRUE(std::abs(e - v) <= tolerance)
<< "actual=" << v << ", expected=" << e << tolerance_str
<< ", values_quantized[" << value_index
<< "]=" << values_quantized[value_index] << ", input_min=" << input_min
<< ", input_max=" << input_max << ", output_min=" << output_min
<< ", output_max=" << output_max << ", value_index=" << value_index;
}
}
void TestRequantizeMany8To32Bit(float input_min, float input_max,
float output_min, float output_max,
const std::vector<quint8>& values_quantized,
int tolerance = 256) {
const int values_count = values_quantized.size();
std::vector<qint32> expected_values;
expected_values.reserve(values_count);
for (int value_index = 0; value_index < values_count; ++value_index) {
expected_values.push_back(FloatToQuantized<qint32>(
QuantizedToFloat(values_quantized[value_index], input_min, input_max),
output_min, output_max));
}
const Tensor i_tensor =
tensorflow::test::AsTensor(absl::Span<const quint8>(values_quantized));
Tensor o_tensor(DT_QINT32, TensorShape{values_count});
auto output_values = o_tensor.flat<qint32>();
const auto input_array = i_tensor.flat<quint8>();
RequantizeManyInNewRange(input_array.data(), input_array.size(), input_min,
input_max, output_min, output_max,
output_values.data());
const string tolerance_str = strings::StrCat("+-", tolerance);
for (int value_index = 0; value_index < values_count; ++value_index) {
const qint32 e = expected_values[value_index];
const qint32 v = output_values(value_index);
ASSERT_TRUE(std::abs(e - v) <= tolerance)
<< "actual=" << v << ", expected=" << e << tolerance_str
<< ", values_quantized[" << value_index
<< "]=" << values_quantized[value_index] << ", input_min=" << input_min
<< ", input_max=" << input_max << ", output_min=" << output_min
<< ", output_max=" << output_max << ", value_index=" << value_index;
}
}
void TestRequantizeManyInNewRange32To8Bit(
Eigen::ThreadPoolDevice* eigen_device) {
if (true) {
const size_t values_count = 6;
const float values[values_count] = {0.0f, 0.45f, 1.0f,
-1.0f, 127.0f, 255.0f};
const size_t ranges_count = 6;
const float ranges[ranges_count][4] = {
{0.0f, 255.0f, 0.0f, 255.0f},
{0.0f, 1.0f, 0.0f, 1.0f},
{-1.0f, 1.0f, -1.0f, 1.0f},
{-1.0f, 1.0f, -255.0f, 255.0f},
{3.0f, 3.0f, 0.0f, 255.0f},
{0.0f, 255.0f, 5.0f, 5.0f},
};
for (int i = 0; i < ranges_count; ++i) {
const auto& r = ranges[i];
std::vector<qint32> values_quantized;
for (int value_index = 0; value_index < values_count; ++value_index) {
const float v = values[value_index];
values_quantized.push_back(FloatToQuantized<qint32>(v, r[0], r[1]));
}
TestRequantizeMany(eigen_device, r[0], r[1], r[2], r[3],
values_quantized);
}
qint32 low = Eigen::NumTraits<qint32>::lowest();
qint32 high = Eigen::NumTraits<qint32>::highest();
std::vector<qint32> vals{low, high};
int num_steps = 14419;
qint32 step = static_cast<int32>((1LL << 32) / num_steps);
qint32 v = low + static_cast<qint32>(1);
for (int i = 0; i < num_steps; ++i) {
vals.push_back(v);
v += step;
}
TestRequantizeMany(eigen_device, -1.0f, 1.0f, -1.0f, 1.0f, vals);
TestRequantizeMany(eigen_device, -255.0f, 255.0f, -255.0f, 255.0f, vals);
TestRequantizeMany(eigen_device, -1.0f, 1.0f, -12345678.0f, 12345678.0f,
vals);
TestRequantizeMany(eigen_device, -1.0f, 12345678.0f, -12345678.0f,
12345678.0f, vals);
}
const float out_min = -29.1234;
const float out_max = 23.1234;
const float in_min = -1e6;
const float in_max = 1e6;
qint32 low = FloatToQuantized<qint32>(out_min, in_min, in_max);
qint32 high = FloatToQuantized<qint32>(out_max, in_min, in_max);
std::vector<qint32> vals;
vals.clear();
for (int32_t i = low; i <= high; ++i) vals.push_back(i);
TestRequantizeMany(eigen_device, in_min, in_max, out_min, out_max, vals);
}
void TestRequantizeManyInNewRange8To32Bit() {
const size_t values_count = 6;
const float values[values_count] = {0.0f, 0.45f, 1.0f, -1.0f, 127.0f, 255.0f};
const size_t ranges_count = 6;
const float ranges[ranges_count][4] = {
{0.0f, 255.0f, 0.0f, 255.0f},
{0.0f, 1.0f, 0.0f, 1.0f},
{-1.0f, 1.0f, -1.0f, 1.0f},
{-1.0f, 1.0f, -255.0f, 255.0f},
{3.0f, 3.0f, 0.0f, 255.0f},
{0.0f, 255.0f, 5.0f, 5.0f},
};
for (int i = 0; i < ranges_count; ++i) {
const auto& r = ranges[i];
std::vector<quint8> values_quantized;
for (int value_index = 0; value_index < values_count; ++value_index) {
const float v = values[value_index];
values_quantized.push_back(FloatToQuantized<quint8>(v, r[0], r[1]));
}
TestRequantizeMany8To32Bit(r[0], r[1], r[2], r[3], values_quantized);
}
int low = Eigen::NumTraits<quint8>::lowest();
int high = Eigen::NumTraits<quint8>::highest();
std::vector<quint8> vals;
for (int val = low; val <= high; ++val) {
vals.push_back(val);
}
TestRequantizeMany8To32Bit(-1.0f, 1.0f, -1.0f, 1.0f, vals);
TestRequantizeMany8To32Bit(-255.0f, 255.0f, -255.0f, 255.0f, vals);
TestRequantizeMany8To32Bit(-1.0f, 1.0f, -12345678.0f, 12345678.0f, vals);
TestRequantizeMany8To32Bit(-1.0f, 12345678.0f, -12345678.0f, 12345678.0f,
vals);
}
template <typename InputType, typename OutputType>
void TestRequantizeManyInNewRangeEigenVsNonEigen() {
thread::ThreadPool threadpool(Env::Default(), "test", 2 );
Eigen::ThreadPoolDevice eigen_device(threadpool.AsEigenThreadPool(),
2 );
const size_t ranges_count = 6;
const float ranges[ranges_count][4] = {
{0.0f, 255.0f, 0.0f, 255.0f},
{0.0f, 1.0f, 0.0f, 1.0f},
{-1.0f, 1.0f, -1.0f, 1.0f},
{-1.0f, 1.0f, -255.0f, 255.0f},
{3.0f, 3.0f, 0.0f, 255.0f},
{0.0f, 255.0f, 5.0f, 5.0f},
};
for (size_t range_index = 0; range_index < ranges_count; ++range_index) {
const float input_min = ranges[range_index][0];
const float input_max = ranges[range_index][1];
const float output_min = ranges[range_index][2];
const float output_max = ranges[range_index][3];
const int values_count = 10000;
random::PhiloxRandom philox(testing::RandomSeed(), 17);
random::SimplePhilox rnd(&philox);
std::vector<InputType> values_quantized;
for (int i = 0; i < values_count; ++i) {
float v = (rnd.RandFloat() * (input_max - input_min)) + input_min;
values_quantized.push_back(
FloatToQuantized<InputType>(v, input_min, input_max));
}
Tensor i_tensor = tensorflow::test::AsTensor(
gtl::ArraySlice<InputType>(values_quantized));
const auto i_array = i_tensor.flat<InputType>();
Tensor o_tensor_eigen(DataTypeToEnum<OutputType>::v(),
TensorShape{values_count});
auto output_values_eigen = o_tensor_eigen.flat<OutputType>();
Tensor o_tensor_ref(DataTypeToEnum<OutputType>::v(),
TensorShape{values_count});
auto output_values_ref = o_tensor_ref.flat<OutputType>();
RequantizeManyInNewRange(i_array.data(), i_array.size(), input_min,
input_max, output_min, output_max,
output_values_ref.data());
RequantizeManyInNewRangeUsingEigen<InputType, OutputType>(
eigen_device, i_tensor, input_min, input_max, output_min, output_max,
&o_tensor_eigen);
const int tolerance = 1;
for (int i = 0; i < values_quantized.size(); ++i) {
auto expected = output_values_ref(i);
auto actual = output_values_eigen(i);
ASSERT_TRUE(std::abs(expected - actual) <= tolerance)
<< "expected=" << expected << " actual=" << actual
<< " tolerance=" << tolerance << " v=" << values_quantized[i]
<< " i=" << i << " input_min=" << input_min
<< " input_max=" << input_max
<< " input_type=" << DataTypeString(DataTypeToEnum<InputType>::v())
<< " output_type=" << DataTypeString(DataTypeToEnum<OutputType>::v());
}
}
}
template <typename InputType, typename OutputType>
void TimeRequantizeManyInNewRange(int64_t num_elements, int64_t iterations,
bool use_eigen) {
const float input_min = -100.0f;
const float input_max = 100.0f;
const float output_min = -1000000.0f;
const float output_max = 1000000.0f;
random::PhiloxRandom philox(testing::RandomSeed(), 17);
random::SimplePhilox rnd(&philox);
std::vector<InputType> values_quantized;
for (int i = 0; i < num_elements; ++i) {
float v = (rnd.RandFloat() * (input_max - input_min)) + input_min;
values_quantized.push_back(
FloatToQuantized<InputType>(v, input_min, input_max));
}
thread::ThreadPool threadpool(Env::Default(), "test", 4 );
Eigen::ThreadPoolDevice eigen_device(threadpool.AsEigenThreadPool(),
4 );
Tensor i_tensor =
tensorflow::test::AsTensor(gtl::ArraySlice<InputType>(values_quantized));
const auto i_array = i_tensor.flat<InputType>();
Tensor o_tensor_eigen(DataTypeToEnum<OutputType>::v(),
TensorShape{num_elements});
Tensor o_tensor_ref(DataTypeToEnum<OutputType>::v(),
TensorShape{num_elements});
auto output_values_ref = o_tensor_ref.flat<OutputType>();
int64_t total_duration = 0;
for (int i = 0; i < iterations; ++i) {
const int64_t start_time = Env::Default()->NowMicros();
if (use_eigen) {
RequantizeManyInNewRangeUsingEigen<InputType, OutputType>(
eigen_device, i_tensor, input_min, input_max, output_min, output_max,
&o_tensor_eigen);
} else {
RequantizeManyInNewRange<InputType, OutputType>(
i_array.data(), i_array.size(), input_min, input_max, output_min,
output_max, output_values_ref.data());
}
const int64_t end_time = Env::Default()->NowMicros();
total_duration += end_time - start_time;
}
const int64_t one_run_duration = total_duration / iterations;
const int64_t num_ops = num_elements;
const double million_ops_per_second =
(iterations * num_ops) / static_cast<double>(total_duration);
LOG(INFO) << "TimeRequantizeManyInNewRange: " << num_elements
<< (use_eigen ? " eigen" : " ref") << ": iterations=" << iterations
<< ", MOps/s=" << million_ops_per_second
<< ", one_run_duration=" << one_run_duration
<< ", total_duration=" << total_duration;
}
template <typename T>
void TestFloatToQuantizedInPlaceUsingEigen(
Eigen::ThreadPoolDevice* eigen_device) {
typedef std::pair<float, float> FPair;
for (FPair min_and_max : std::vector<FPair>{FPair(-255.0f, 255.0f),
FPair(-1.0f, 1.0f),
FPair(-1.0f, 255.0f),
FPair(0.0f, 1e6),
FPair(0.0f, 1.0f),
FPair(-31.0f, 13.0f)}) {
const float f_min = min_and_max.first;
const float f_max = min_and_max.second;
const float f_range = f_max - f_min;
const int values_count = 50000;
Tensor input(DT_FLOAT, TensorShape{values_count});
auto input_array = input.flat<float>();
for (int i = 0; i < values_count; ++i) {
input_array(i) = f_min + f_range * i / (values_count - 1);
}
Tensor output(DataTypeToEnum<T>::v(), TensorShape{values_count});
FloatTensorToQuantizedInPlaceUsingEigen<T>(*eigen_device, input, f_min,
f_max, &output);
auto output_array = output.flat<T>();
const int tolerance = 1;
for (int i = 0; i < values_count; ++i) {
int32_t expected = FloatToQuantized<T>(input_array(i), f_min, f_max);
int32_t actual = output_array(i);
ASSERT_TRUE(std::abs(expected - actual) <= tolerance)
<< "expected=" << expected << " actual=" << actual
<< " tolerance=" << tolerance << " v=" << input_array(i) << " i=" << i
<< " f_min=" << f_min << " f_max=" << f_max
<< " type=" << DataTypeString(DataTypeToEnum<T>::v());
}
}
}
template <typename T>
void TestQuantizedToFloatInPlaceUsingEigen(
Eigen::ThreadPoolDevice* eigen_device) {
typedef std::pair<float, float> FPair;
for (FPair min_and_max : std::vector<FPair>{
FPair(-255.0f, 255.0f),
FPair(-1.0f, 1.0f),
FPair(-1.0f, 255.0f),
FPair(0.0f, 1e6),
FPair(0.0f, 1.0f),
FPair(-31.0f, 13.0f),
FPair(-5.89505e+08, 5.89505e+08),
}) {
const float f_min = min_and_max.first;
const float f_max = min_and_max.second;
const int values_count = sizeof(T) == 1 ? 256 : 50000;
Tensor input(DataTypeToEnum<T>::v(), TensorShape{values_count});
auto input_array = input.flat<T>();
const double q_range = static_cast<double>(Eigen::NumTraits<T>::highest()) -
Eigen::NumTraits<T>::lowest();
for (int i = 0; i < values_count; ++i) {
if (sizeof(T) == 1) {
input_array(i) = Eigen::NumTraits<T>::lowest() + i;
} else {
int64_t offset = static_cast<int64_t>(q_range / values_count * i);
input_array(i) = static_cast<int32>(
std::min<int64_t>(Eigen::NumTraits<T>::lowest() + offset,
Eigen::NumTraits<T>::highest()));
}
}
Tensor output(DT_FLOAT, TensorShape{values_count});
QuantizedTensorToFloatInPlaceUsingEigen<T>(*eigen_device, input, f_min,
f_max, &output);
auto output_array = output.flat<float>();
const double range = static_cast<double>(f_max) - f_min;
for (int i = 0; i < values_count; ++i) {
float expected = QuantizedToFloat<T>(input_array(i), f_min, f_max);
float actual = output_array(i);
ASSERT_NEAR(expected, actual, range * 1.1e-7)
<< "expected=" << expected << " actual=" << actual
<< " v=" << input_array(i) << " i=" << i << " f_min=" << f_min
<< " f_max=" << f_max
<< " type=" << DataTypeString(DataTypeToEnum<T>::v());
}
}
}
}  // namespace
void TestFloatToQuantized() {
EXPECT_EQ(quint8(0), FloatToQuantized<quint8>(0.0f, 0.0f, 1.0f));
EXPECT_EQ(quint8(0), FloatToQuantized<quint8>(0.0f, 0.0f, 2.0f));
EXPECT_EQ(quint8(128), FloatToQuantized<quint8>(0.5f, 0.0f, 1.0f));
EXPECT_EQ(quint8(128), FloatToQuantized<quint8>(1.0f, 0.0f, 2.0f));
EXPECT_EQ(quint8(255), FloatToQuantized<quint8>(1.0f, 0.0f, 1.0f));
EXPECT_EQ(quint8(255), FloatToQuantized<quint8>(2.0f, 0.0f, 2.0f));
EXPECT_EQ(quint8(0), FloatToQuantized<quint8>(-128.0f, -128.0f, 127.0f));
EXPECT_EQ(quint8(128), FloatToQuantized<quint8>(0.0f, -128.0f, 127.0f));
EXPECT_EQ(quint8(255), FloatToQuantized<quint8>(127.0f, -128.0f, 127.0f));
EXPECT_EQ(quint8(0), FloatToQuantized<quint8>(1.0f, 1.0f, 256.0f));
EXPECT_EQ(quint8(127), FloatToQuantized<quint8>(128.0f, 1.0f, 256.0f));
EXPECT_EQ(quint8(255), FloatToQuantized<quint8>(256.0f, 1.0f, 256.0f));
const int int32_min = std::numeric_limits<int>::min();
const int int32_max = std::numeric_limits<int>::max();
EXPECT_EQ(qint32(int32_min),
FloatToQuantized<qint32>(-128.0f, -128.0f, 128.0f));
EXPECT_EQ(qint32(0), FloatToQuantized<qint32>(0.0f, -128.0f, 128.0f));
EXPECT_EQ(qint32(int32_max),
FloatToQuantized<qint32>(128.0f, -128.0f, 128.0f));
}
void TestQuantizedToFloat() {
EXPECT_LT(fabsf(0.0f - QuantizedToFloat<quint8>(0, 0.0f, 1.0f)), 1 / 255.0f);
EXPECT_LT(fabsf(0.0f - QuantizedToFloat<quint8>(0, 0.0f, 2.0f)), 1 / 255.0f);
EXPECT_LT(fabsf(0.5f - QuantizedToFloat<quint8>(127, 0.0f, 1.0f)),
1 / 255.0f);
EXPECT_LT(fabsf(1.0f - QuantizedToFloat<quint8>(127, 0.0f, 2.0f)),
1 / 255.0f);
EXPECT_LT(fabsf(1.0f - QuantizedToFloat<quint8>(255, 0.0f, 1.0f)),
1 / 255.0f);
EXPECT_LT(fabsf(2.0f - QuantizedToFloat<quint8>(255, 0.0f, 2.0f)),
1 / 255.0f);
EXPECT_LT(fabsf(1.0f - QuantizedToFloat<quint8>(0, 1.0f, 256.0f)),
1 / 255.0f);
EXPECT_LT(fabsf(128.0f - QuantizedToFloat<quint8>(127, 1.0f, 256.0f)),
1 / 255.0f);
EXPECT_LT(fabsf(256.0f - QuantizedToFloat<quint8>(255, 1.0f, 256.0f)),
1 / 255.0f);
const int int32_min = std::numeric_limits<int>::min();
const int int32_max = std::numeric_limits<int>::max();
EXPECT_NEAR(-1.0f, QuantizedToFloat<qint32>(qint32(int32_min), -1.0f, 1.0f),
1e-5f);
EXPECT_NEAR(0.0f, QuantizedToFloat<qint32>(qint32(0), -1.0f, 1.0f), 1e-5f);
EXPECT_NEAR(1.0f, QuantizedToFloat<qint32>(qint32(int32_max), -1.0f, 1.0f),
1e-5f);
EXPECT_NEAR(32.0f, QuantizedToFloat<qint32>(qint32(32), int32_min, int32_max),
1.0);
}
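// Checks that quantization round trips are free of systematic bias: every
// uint8 code maps to float and back to the same code, and float values spaced
// one quantization step apart across an asymmetric range survive a
// quantize/dequantize round trip to within a small fraction of a step.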
void TestAvoidBias() {
for (int i = 0; i < 256; ++i) {
const float as_float = QuantizedToFloat<quint8>(i, 0.0f, 2.0f);
const int back_to_int = FloatToQuantized<quint8>(as_float, 0.0f, 2.0f);
EXPECT_EQ(i, back_to_int);
}
const float min = -0.1375f;
const float max = 1.1385f;
const float step_size = (max - min) / 255.0f;
const float tolerance = step_size / 1000.0f;
float first_float = std::ceil(min / step_size) * step_size;
for (float f = first_float; f <= max; f += step_size) {
const int as_int = FloatToQuantized<quint8>(f, min, max);
const float back_to_float = QuantizedToFloat<quint8>(as_int, min, max);
EXPECT_NEAR(f, back_to_float, tolerance);
}
}
void TestRequantizeInNewRange() {
const size_t values_count = 6;
const float values[values_count] = {0.0f, 0.5f, 1.0f, -1.0f, 127.0f, 255.0f};
const size_t ranges_count = 4;
const float ranges[ranges_count][4] = {
{0.0f, 255.0f, 0.0f, 255.0f},
{0.0f, 1.0f, 0.0f, 1.0f},
{-1.0f, 1.0f, -1.0f, 1.0f},
{-1.0f, 1.0f, -255.0f, 255.0f},
};
for (size_t value_index = 0; value_index < values_count; ++value_index) {
const float value_float = values[value_index];
for (size_t range_index = 0; range_index < ranges_count; ++range_index) {
const float input_min = ranges[range_index][0];
const float input_max = ranges[range_index][1];
const float output_min = ranges[range_index][2];
const float output_max = ranges[range_index][3];
const quint8 input_value =
FloatToQuantized<quint8>(value_float, input_min, input_max);
const qint32 expected_value = FloatToQuantized<qint32>(
QuantizedToFloat(input_value, input_min, input_max), output_min,
output_max);
EXPECT_EQ(expected_value,
(RequantizeInNewRange<quint8, qint32>(
input_value, input_min, input_max, output_min, output_max)))
<< "value_float=" << value_float << ", input_min=" << input_min
<< ", input_max=" << input_max << ", output_min=" << output_min
<< ", output_max=" << output_max;
}
}
}
void TestRequantizeInNewRangeRealData() {
const float input_min = -0.739539f;
const float input_max = 0.641057f;
const float output_min = -2381.49f;
const float output_max = 2207.6f;
const float value_as_float =
QuantizedToFloat<quint8>(83, input_min, input_max);
const quint8 value_as_quint8 =
FloatToQuantized<quint8>(value_as_float, input_min, input_max);
EXPECT_EQ(quint8(83), value_as_quint8);
const qint32 actual_output = RequantizeInNewRange<quint8, qint32>(
value_as_quint8, input_min, input_max, output_min, output_max);
const qint32 value_as_qint32 =
FloatToQuantized<qint32>(value_as_float, output_min, output_max);
EXPECT_LT(std::abs(value_as_qint32 - actual_output), 10);
}
void TestRequantizeInNewRange32To8Bit() {
const size_t values_count = 6;
const float values[values_count] = {0.0f, 0.45f, 1.0f, -1.0f, 127.0f, 255.0f};
const size_t ranges_count = 4;
const float ranges[ranges_count][4] = {
{0.0f, 255.0f, 0.0f, 255.0f},
{0.0f, 1.0f, 0.0f, 1.0f},
{-1.0f, 1.0f, -1.0f, 1.0f},
{-1.0f, 1.0f, -255.0f, 255.0f},
};
for (size_t value_index = 0; value_index < values_count; ++value_index) {
const float value_float = values[value_index];
for (size_t range_index = 0; range_index < ranges_count; ++range_index) {
const float input_min = ranges[range_index][0];
const float input_max = ranges[range_index][1];
const float output_min = ranges[range_index][2];
const float output_max = ranges[range_index][3];
const qint32 input_value =
FloatToQuantized<qint32>(value_float, input_min, input_max);
const quint8 expected_value = FloatToQuantized<quint8>(
QuantizedToFloat(input_value, input_min, input_max), output_min,
output_max);
EXPECT_EQ(expected_value,
(RequantizeInNewRange<qint32, quint8>(
input_value, input_min, input_max, output_min, output_max)))
<< "input_value=" << input_value << ", value_float=" << value_float
<< ", input_min=" << input_min << ", input_max=" << input_max
<< ", output_min=" << output_min << ", output_max=" << output_max;
}
}
}
void TestRequantizeManyInNewRange32To8Bit() {
TestRequantizeManyInNewRange32To8Bit(/*eigen_device=*/nullptr);
}
void TestRequantizeManyInNewRange32To8BitUsingEigen() {
thread::ThreadPool threadpool(Env::Default(), "test", 2 );
Eigen::ThreadPoolDevice eigen_device(threadpool.AsEigenThreadPool(),
2 );
TestRequantizeManyInNewRange32To8Bit(&eigen_device);
}
void TestRequantizeManyInNewRange32To8BitEigenVsNonEigen() {
TestRequantizeManyInNewRangeEigenVsNonEigen<qint32, quint8>();
}
void TestRequantizeManyInNewRange32To8BitSignedEigenVsNonEigen() {
TestRequantizeManyInNewRangeEigenVsNonEigen<qint32, qint8>();
}
void TestFloatTensorToQuantized() {
const int input_width = 3;
const int input_height = 3;
const float input_min = 0.0f;
const float input_max = 255.0f;
Tensor input(DT_FLOAT, TensorShape({input_height, input_width}));
test::FillValues<float>(&input, {1.0f, -1.0f, 10.0f, 10.25f, 127.0f, 255.0f,
512.0f, 0.0f, 23.0f});
Tensor expected(DT_QUINT8, TensorShape({input_height, input_width}));
test::FillValues<quint8>(&expected, {1, 0, 10, 10, 127, 255, 255, 0, 23});
Tensor output = FloatTensorToQuantized<quint8>(input, input_min, input_max);
test::ExpectTensorEqual<quint8>(expected, output);
}
void TestFloatToQuantizedInPlaceUsingEigen() {
thread::ThreadPool threadpool(Env::Default(), "test", 2 );
Eigen::ThreadPoolDevice eigen_device(threadpool.AsEigenThreadPool(),
2 );
TestFloatToQuantizedInPlaceUsingEigen<quint8>(&eigen_device);
TestFloatToQuantizedInPlaceUsingEigen<qint8>(&eigen_device);
TestFloatToQuantizedInPlaceUsingEigen<quint16>(&eigen_device);
TestFloatToQuantizedInPlaceUsingEigen<qint16>(&eigen_device);
}
void TestOverflowWithEigen() {
thread::ThreadPool threadpool(Env::Default(), "test", 2 );
Eigen::ThreadPoolDevice eigen_device(threadpool.AsEigenThreadPool(),
2 );
const int num_vals = 4;
const float input_min = 0.0f;
const float input_max = 2400.0f;
TensorShape shape({num_vals});
Tensor input(DT_FLOAT, shape);
test::FillValues<float>(&input, {-100.f, 0.f, 2400.0f, 2400.0f});
Tensor expected(DT_QINT32, shape);
test::FillValues<qint32>(
&expected,
{static_cast<int32>(-2147483648), static_cast<int32>(-2147483648),
static_cast<int32>(2147483520), static_cast<int32>(2147483520)});
FloatToQuantizedStruct<qint32> f2q(input_min, input_max);
Tensor output(DT_QINT32, shape);
auto input_array = input.flat<float>();
output.flat<qint32>() = QUANTIZE_WITH_EIGEN(input_array, f2q, qint32);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestQuantizedTensorToFloat() {
const int input_width = 3;
const int input_height = 3;
const float input_min = -128.0f;
const float input_max = 127.0f;
Tensor input(DT_QUINT8, TensorShape({input_height, input_width}));
test::FillValues<quint8>(&input, {0, 128, 255, 23, 24, 25, 243, 244, 245});
Tensor expected(DT_FLOAT, TensorShape({input_height, input_width}));
test::FillValues<float>(&expected, {-128.0f, 0.0f, 127.0f, -105.0f, -104.0f,
-103.0f, 115.0f, 116.0f, 117.0f});
Tensor output = QuantizedTensorToFloat<quint8>(input, input_min, input_max);
test::ExpectTensorEqual<float>(expected, output);
Tensor input32(DT_QINT32, TensorShape({input_height, input_width}));
float input_range = 1LL << 25;
int64_t num_levels = (1LL << 32) - 1;
float step_size =
static_cast<float>(static_cast<double>(input_range) / num_levels);
float q_compatible_min_value =
roundf(-(input_range / 2.0) / step_size) * step_size;
float q_compatible_max_value = q_compatible_min_value + input_range;
test::FillValues<qint32>(&input32, {-16384, 0, 16256, -13440, -13312, -13184,
14720, 14848, 14976});
Tensor output32 = QuantizedTensorToFloat<qint32>(
input32, q_compatible_min_value, q_compatible_max_value);
test::FillValues<float>(&expected, {-128.0f, 0.0f, 127.0f, -105.0f, -104.0f,
-103.0f, 115.0f, 116.0f, 117.0f});
const double kTolerance = .5 / 128.0;
test::ExpectTensorNear<float>(expected, output32, kTolerance);
}
void TestQuantizedToFloatInPlaceUsingEigen() {
thread::ThreadPool threadpool(Env::Default(), "test", 2 );
Eigen::ThreadPoolDevice eigen_device(threadpool.AsEigenThreadPool(),
2 );
TestQuantizedToFloatInPlaceUsingEigen<quint8>(&eigen_device);
TestQuantizedToFloatInPlaceUsingEigen<qint8>(&eigen_device);
TestQuantizedToFloatInPlaceUsingEigen<quint16>(&eigen_device);
TestQuantizedToFloatInPlaceUsingEigen<qint16>(&eigen_device);
TestQuantizedToFloatInPlaceUsingEigen<qint32>(&eigen_device);
}
void BenchmarkRequantizeManyInNewRange() {
TimeRequantizeManyInNewRange<qint32, quint8>(1000, 1000, false);
TimeRequantizeManyInNewRange<qint32, quint8>(1000, 1000, true);
TimeRequantizeManyInNewRange<qint32, quint8>(100000, 100, false);
TimeRequantizeManyInNewRange<qint32, qu |
849 | cpp | tensorflow/tensorflow | modify_model_interface | tensorflow/lite/tools/optimize/python/modify_model_interface.cc | tensorflow/lite/tools/optimize/modify_model_interface_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_MODIFY_MODEL_INTERFACE_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_MODIFY_MODEL_INTERFACE_H_
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace optimize {
TfLiteStatus ModifyModelInterface(flatbuffers::FlatBufferBuilder* builder,
ModelT* model, const TensorType& input_type,
const TensorType& output_type);
TfLiteStatus ModifyModelInterface(const string& input_file,
const string& output_file,
const TensorType& input_type,
const TensorType& output_type);
TfLiteStatus Uint8QuantizeModelInputsOutputs(
flatbuffers::FlatBufferBuilder* builder, const Model* input_model,
const std::unordered_map<string, std::pair<float, int32_t>>&
input_quant_params,
const std::unordered_map<string, std::pair<float, int32_t>>&
output_quant_params);
}  // namespace optimize
}  // namespace tflite
#endif
#include "tensorflow/lite/tools/optimize/modify_model_interface.h"
#include <string>
#include "pybind11/pybind11.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace pybind11 {
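// Python binding: exposes ModifyModelInterface(input_file, output_file,
// input_type, output_type) to Python, casting the integer type arguments to
// tflite::TensorType and returning the resulting TfLiteStatus as an int.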
PYBIND11_MODULE(_pywrap_modify_model_interface, m) {
m.def("modify_model_interface",
[](const std::string& input_file, const std::string& output_file,
const int input_type, const int output_type) -> int {
return tflite::optimize::ModifyModelInterface(
input_file, output_file,
static_cast<tflite::TensorType>(input_type),
static_cast<tflite::TensorType>(output_type));
});
}
} | #include "tensorflow/lite/tools/optimize/modify_model_interface.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace optimize {
namespace {
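// Builds an in-memory model with the pattern
//   float tensor_0 -> QUANTIZE -> tensor_1 -> FULLY_CONNECTED -> tensor_2
//     -> DEQUANTIZE -> float tensor_3
// where tensor_1 and tensor_2 use the requested quantized type; a
// two-input/two-output variant follows. The tests below expect the leading
// QUANTIZE and trailing DEQUANTIZE to be removed or rewritten once the model
// interface is modified.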
std::unique_ptr<ModelT> CreateQuantizedModelSingleInputOutput(
const TensorType& quantization_type) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto quant_op_code = std::make_unique<OperatorCodeT>();
auto quant_op = std::make_unique<OperatorT>();
auto fc_op_code = std::make_unique<OperatorCodeT>();
auto fc_op = std::make_unique<OperatorT>();
auto dequant_op_code = std::make_unique<OperatorCodeT>();
auto dequant_op = std::make_unique<OperatorT>();
model->subgraphs.push_back(std::move(subgraph));
quant_op_code->builtin_code = BuiltinOperator_QUANTIZE;
quant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_QUANTIZE);
quant_op_code->version = 2;
fc_op_code->builtin_code = BuiltinOperator_FULLY_CONNECTED;
fc_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_FULLY_CONNECTED);
fc_op_code->version = 2;
dequant_op_code->builtin_code = BuiltinOperator_DEQUANTIZE;
dequant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_DEQUANTIZE);
dequant_op_code->version = 2;
quant_op->opcode_index = 0;
quant_op->inputs = {0};
quant_op->outputs = {1};
fc_op->opcode_index = 1;
fc_op->inputs = {1};
fc_op->outputs = {2};
dequant_op->opcode_index = 2;
dequant_op->inputs = {2};
dequant_op->outputs = {3};
model->subgraphs[0]->operators.push_back(std::move(quant_op));
model->subgraphs[0]->operators.push_back(std::move(fc_op));
model->subgraphs[0]->operators.push_back(std::move(dequant_op));
model->operator_codes.push_back(std::move(quant_op_code));
model->operator_codes.push_back(std::move(fc_op_code));
model->operator_codes.push_back(std::move(dequant_op_code));
model->subgraphs[0]->inputs = {0};
model->subgraphs[0]->outputs = {3};
auto tensor_0 = std::make_unique<TensorT>();
tensor_0->name = "tensor_0";
tensor_0->shape = {};
tensor_0->type = TensorType_FLOAT32;
auto tensor_1 = std::make_unique<TensorT>();
tensor_1->quantization = std::make_unique<QuantizationParametersT>();
tensor_1->quantization->scale.push_back(0.35);
tensor_1->quantization->zero_point.push_back(28);
tensor_1->name = "tensor_1";
tensor_1->shape = {};
tensor_1->type = quantization_type;
auto tensor_2 = std::make_unique<TensorT>();
tensor_2->quantization = std::make_unique<QuantizationParametersT>();
tensor_2->quantization->scale.push_back(0.12);
tensor_2->quantization->zero_point.push_back(50);
tensor_2->name = "tensor_2";
tensor_2->shape = {};
tensor_2->type = quantization_type;
auto tensor_3 = std::make_unique<TensorT>();
tensor_3->name = "tensor_3";
tensor_3->shape = {};
tensor_3->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor_0));
model->subgraphs[0]->tensors.push_back(std::move(tensor_1));
model->subgraphs[0]->tensors.push_back(std::move(tensor_2));
model->subgraphs[0]->tensors.push_back(std::move(tensor_3));
model->buffers.push_back(std::move(buffer));
return model;
}
std::unique_ptr<ModelT> CreateQuantizedModelMultipleInputOutput(
const TensorType& quantization_type) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto quant_op_code = std::make_unique<OperatorCodeT>();
auto quant_op_1 = std::make_unique<OperatorT>();
auto quant_op_2 = std::make_unique<OperatorT>();
auto fc_op_code = std::make_unique<OperatorCodeT>();
auto fc_op = std::make_unique<OperatorT>();
auto dequant_op_code = std::make_unique<OperatorCodeT>();
auto dequant_op_1 = std::make_unique<OperatorT>();
auto dequant_op_2 = std::make_unique<OperatorT>();
model->subgraphs.push_back(std::move(subgraph));
quant_op_code->builtin_code = BuiltinOperator_QUANTIZE;
quant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_QUANTIZE);
quant_op_code->version = 2;
fc_op_code->builtin_code = BuiltinOperator_FULLY_CONNECTED;
fc_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_FULLY_CONNECTED);
fc_op_code->version = 2;
dequant_op_code->builtin_code = BuiltinOperator_DEQUANTIZE;
dequant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_DEQUANTIZE);
dequant_op_code->version = 2;
quant_op_1->opcode_index = 0;
quant_op_1->inputs = {0};
quant_op_1->outputs = {2};
quant_op_2->opcode_index = 0;
quant_op_2->inputs = {1};
quant_op_2->outputs = {3};
fc_op->opcode_index = 1;
fc_op->inputs = {2, 3};
fc_op->outputs = {4, 5};
dequant_op_1->opcode_index = 2;
dequant_op_1->inputs = {4};
dequant_op_1->outputs = {6};
dequant_op_2->opcode_index = 2;
dequant_op_2->inputs = {5};
dequant_op_2->outputs = {7};
model->subgraphs[0]->operators.push_back(std::move(quant_op_1));
model->subgraphs[0]->operators.push_back(std::move(quant_op_2));
model->subgraphs[0]->operators.push_back(std::move(fc_op));
model->subgraphs[0]->operators.push_back(std::move(dequant_op_1));
model->subgraphs[0]->operators.push_back(std::move(dequant_op_2));
model->operator_codes.push_back(std::move(quant_op_code));
model->operator_codes.push_back(std::move(fc_op_code));
model->operator_codes.push_back(std::move(dequant_op_code));
model->subgraphs[0]->inputs = {0, 1};
model->subgraphs[0]->outputs = {6, 7};
auto tensor_0 = std::make_unique<TensorT>();
tensor_0->name = "tensor_0";
tensor_0->shape = {};
tensor_0->type = TensorType_FLOAT32;
auto tensor_1 = std::make_unique<TensorT>();
tensor_1->name = "tensor_1";
tensor_1->shape = {};
tensor_1->type = TensorType_FLOAT32;
auto tensor_2 = std::make_unique<TensorT>();
tensor_2->quantization = std::make_unique<QuantizationParametersT>();
tensor_2->quantization->scale.push_back(0.35);
tensor_2->quantization->zero_point.push_back(28);
tensor_2->name = "tensor_2";
tensor_2->shape = {};
tensor_2->type = quantization_type;
auto tensor_3 = std::make_unique<TensorT>();
tensor_3->quantization = std::make_unique<QuantizationParametersT>();
tensor_3->quantization->scale.push_back(0.12);
tensor_3->quantization->zero_point.push_back(50);
tensor_3->name = "tensor_3";
tensor_3->shape = {};
tensor_3->type = quantization_type;
auto tensor_4 = std::make_unique<TensorT>();
tensor_4->quantization = std::make_unique<QuantizationParametersT>();
tensor_4->quantization->scale.push_back(0.45);
tensor_4->quantization->zero_point.push_back(28);
tensor_4->name = "tensor_4";
tensor_4->shape = {};
tensor_4->type = quantization_type;
auto tensor_5 = std::make_unique<TensorT>();
tensor_5->quantization = std::make_unique<QuantizationParametersT>();
tensor_5->quantization->scale.push_back(0.22);
tensor_5->quantization->zero_point.push_back(50);
tensor_5->name = "tensor_5";
tensor_5->shape = {};
tensor_5->type = quantization_type;
auto tensor_6 = std::make_unique<TensorT>();
tensor_6->name = "tensor_6";
tensor_6->shape = {};
tensor_6->type = TensorType_FLOAT32;
auto tensor_7 = std::make_unique<TensorT>();
tensor_7->name = "tensor_7";
tensor_7->shape = {};
tensor_7->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor_0));
model->subgraphs[0]->tensors.push_back(std::move(tensor_1));
model->subgraphs[0]->tensors.push_back(std::move(tensor_2));
model->subgraphs[0]->tensors.push_back(std::move(tensor_3));
model->subgraphs[0]->tensors.push_back(std::move(tensor_4));
model->subgraphs[0]->tensors.push_back(std::move(tensor_5));
model->subgraphs[0]->tensors.push_back(std::move(tensor_6));
model->subgraphs[0]->tensors.push_back(std::move(tensor_7));
model->buffers.push_back(std::move(buffer));
return model;
}
std::unique_ptr<ModelT> CreateFloatModel() {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto fc_op_code = std::make_unique<OperatorCodeT>();
auto fc_op = std::make_unique<OperatorT>();
model->subgraphs.push_back(std::move(subgraph));
fc_op_code->builtin_code = BuiltinOperator_FULLY_CONNECTED;
fc_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_FULLY_CONNECTED);
fc_op_code->version = 2;
fc_op->opcode_index = 0;
fc_op->inputs = {0};
fc_op->outputs = {1};
model->subgraphs[0]->operators.push_back(std::move(fc_op));
model->operator_codes.push_back(std::move(fc_op_code));
model->subgraphs[0]->inputs = {0};
model->subgraphs[0]->outputs = {1};
auto tensor_0 = std::make_unique<TensorT>();
tensor_0->name = "tensor_0";
tensor_0->shape = {};
tensor_0->type = TensorType_FLOAT32;
auto tensor_1 = std::make_unique<TensorT>();
tensor_1->name = "tensor_1";
tensor_1->shape = {};
tensor_1->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor_0));
model->subgraphs[0]->tensors.push_back(std::move(tensor_1));
model->buffers.push_back(std::move(buffer));
return model;
}
struct ModelInterface : ::testing::TestWithParam<tflite::TensorType> {};
TEST_P(ModelInterface, SingleInputOutput) {
TensorType quantization_type = GetParam();
auto model = CreateQuantizedModelSingleInputOutput(quantization_type);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), quantization_type,
quantization_type),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 3);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 1);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 2);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 1);
auto fc_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[fc_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_1");
EXPECT_EQ(input->type, quantization_type);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.35);
EXPECT_EQ(input->quantization->zero_point[0], 28);
auto output = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_2");
EXPECT_EQ(output->type, quantization_type);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.12);
EXPECT_EQ(output->quantization->zero_point[0], 50);
}
TEST_P(ModelInterface, MultipleInputOutput) {
TensorType quantization_type = GetParam();
auto model = CreateQuantizedModelMultipleInputOutput(quantization_type);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), quantization_type,
quantization_type),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 6);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 2);
EXPECT_EQ(model->subgraphs[0]->inputs[1], 3);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 4);
EXPECT_EQ(model->subgraphs[0]->outputs[1], 5);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 1);
auto fc_op = model->subgraphs[0]->operators[0].get();
auto input_1 = model->subgraphs[0]->tensors[fc_op->inputs[0]].get();
EXPECT_EQ(input_1->name, "tensor_2");
EXPECT_EQ(input_1->type, quantization_type);
EXPECT_FLOAT_EQ(input_1->quantization->scale[0], 0.35);
EXPECT_EQ(input_1->quantization->zero_point[0], 28);
auto input_2 = model->subgraphs[0]->tensors[fc_op->inputs[1]].get();
EXPECT_EQ(input_2->name, "tensor_3");
EXPECT_EQ(input_2->type, quantization_type);
EXPECT_FLOAT_EQ(input_2->quantization->scale[0], 0.12);
EXPECT_EQ(input_2->quantization->zero_point[0], 50);
auto output_1 = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output_1->name, "tensor_4");
EXPECT_EQ(output_1->type, quantization_type);
EXPECT_FLOAT_EQ(output_1->quantization->scale[0], 0.45);
EXPECT_EQ(output_1->quantization->zero_point[0], 28);
auto output_2 = model->subgraphs[0]->tensors[fc_op->outputs[1]].get();
EXPECT_EQ(output_2->name, "tensor_5");
EXPECT_EQ(output_2->type, quantization_type);
EXPECT_FLOAT_EQ(output_2->quantization->scale[0], 0.22);
EXPECT_EQ(output_2->quantization->zero_point[0], 50);
}
INSTANTIATE_TEST_SUITE_P(MultipleInputOutputTests, ModelInterface,
::testing::Values(TensorType_INT8, TensorType_INT16));
TEST(ModelInterface, MixedTypeSingleInputOutput) {
auto model = CreateQuantizedModelSingleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_UINT8,
TensorType_INT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 3);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 2);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 2);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[1]->opcode_index, 1);
auto quant_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[quant_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_0");
EXPECT_EQ(input->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.35);
EXPECT_EQ(input->quantization->zero_point[0], 156);
auto fc_op = model->subgraphs[0]->operators[1].get();
auto output = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_2");
EXPECT_EQ(output->type, TensorType_INT8);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.12);
EXPECT_EQ(output->quantization->zero_point[0], 50);
}
TEST(ModelInterface, Uint8SingleInputOutput) {
auto model = CreateQuantizedModelSingleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_UINT8,
TensorType_UINT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 4);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 3);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[1]->opcode_index, 1);
EXPECT_EQ(model->subgraphs[0]->operators[2]->opcode_index, 0);
auto input_quant_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[input_quant_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_0");
EXPECT_EQ(input->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.35);
EXPECT_EQ(input->quantization->zero_point[0], 156);
auto output_quant_op = model->subgraphs[0]->operators[2].get();
auto output = model->subgraphs[0]->tensors[output_quant_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_3");
EXPECT_EQ(output->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.12);
EXPECT_EQ(output->quantization->zero_point[0], 178);
}
TEST(ModelInterface, Uint8MultipleInputOutput) {
auto model = CreateQuantizedModelMultipleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_UINT8,
TensorType_UINT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 8);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->inputs[1], 1);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 6);
EXPECT_EQ(model->subgraphs[0]->outputs[1], 7);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 5);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[1]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[2]->opcode_index, 1);
EXPECT_EQ(model->subgraphs[0]->operators[3]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[4]->opcode_index, 0);
auto input_quant_1 = model->subgraphs[0]->operators[0].get();
auto input_1 = model->subgraphs[0]->tensors[input_quant_1->inputs[0]].get();
EXPECT_EQ(input_1->name, "tensor_0");
EXPECT_EQ(input_1->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input_1->quantization->scale[0], 0.35);
EXPECT_EQ(input_1->quantization->zero_point[0], 156);
auto input_quant_2 = model->subgraphs[0]->operators[1].get();
auto input_2 = model->subgraphs[0]->tensors[input_quant_2->inputs[0]].get();
EXPECT_EQ(input_2->name, "tensor_1");
EXPECT_EQ(input_2->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input_2->quantization->scale[0], 0.12);
EXPECT_EQ(input_2->quantization->zero_point[0], 178);
auto output_quant_1 = model->subgraphs[0]->operators[3].get();
auto output_1 =
model->subgraphs[0]->tensors[output_quant_1->outputs[0]].get();
EXPECT_EQ(output_1->name, "tensor_6");
EXPECT_EQ(output_1->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output_1->quantization->scale[0], 0.45);
EXPECT_EQ(output_1->quantization->zero_point[0], 156);
auto output_quant_2 = model->subgraphs[0]->operators[4].get();
auto output_2 =
model->subgraphs[0]->tensors[output_quant_2->outputs[0]].get();
EXPECT_EQ(output_2->name, "tensor_7");
EXPECT_EQ(output_2->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output_2->quantization->scale[0], 0.22);
EXPECT_EQ(output_2->quantization->zero_point[0], 178);
}
TEST(ModelInterface, Int8MultipleInputOutput) {
auto model = CreateQuantizedModelMultipleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_INT8,
TensorType_INT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 6);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 2);
EXPECT_EQ(model->subgraphs[0]->inputs[1], 3);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 4);
EXPECT_EQ(model->subgraphs[0]->outputs[1], 5);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 1);
auto fc_op = model->subgraphs[0]->operators[0].get();
auto input_1 = model->subgraphs[0]->tensors[fc_op->inputs[0]].get();
EXPECT_EQ(input_1->name, "tensor_2");
EXPECT_EQ(input_1->type, TensorType_INT8);
EXPECT_FLOAT_EQ(input_1->quantization->scale[0], 0.35);
EXPECT_EQ(input_1->quantization->zero_point[0], 28);
auto input_2 = model->subgraphs[0]->tensors[fc_op->inputs[1]].get();
EXPECT_EQ(input_2->name, "tensor_3");
EXPECT_EQ(input_2->type, TensorType_INT8);
EXPECT_FLOAT_EQ(input_2->quantization->scale[0], 0.12);
EXPECT_EQ(input_2->quantization->zero_point[0], 50);
auto output_1 = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output_1->name, "tensor_4");
EXPECT_EQ(output_1->type, TensorType_INT8);
EXPECT_FLOAT_EQ(output_1->quantization->scale[0], 0.45);
EXPECT_EQ(output_1->quantization->zero_point[0], 28);
auto output_2 = model->subgraphs[0]->tensors[fc_op->outputs[1]].get();
EXPECT_EQ(output_2->name, "tensor_5");
EXPECT_EQ(output_2->type, TensorType_INT8);
EXPECT_FLOAT_EQ(output_2->quantization->scale[0], 0.22);
EXPECT_EQ(output_2->quantization->zero_point[0], 50);
}
TEST(ModelInterface, Float) {
std::unique_ptr<ModelT> input_model_t = CreateFloatModel();
flatbuffers::FlatBufferBuilder builder_temp;
flatbuffers::Offset<Model> output_model_location =
Model::Pack(builder_temp, input_model_t.get());
FinishModelBuffer(builder_temp, output_model_location);
const uint8_t* buffer_temp = builder_temp.GetBufferPointer();
const Model* input_model = GetModel(buffer_temp);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(Uint8QuantizeModelInputsOutputs(&builder, input_model,
{{"tensor_0", {0.4, 2}}},
{{"tensor_1", {0.5, -5}}}),
kTfLiteOk);
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
std::unique_ptr<ModelT> model;
model.reset(output_model->UnPack());
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 4);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 1);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_FULLY_CONNECTED);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[1].get()),
BuiltinOperator_DEQUANTIZE);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[2].get()),
BuiltinOperator_QUANTIZE);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 3);
auto dequantize_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[dequantize_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_0_uint8");
EXPECT_EQ(input->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.4);
EXPECT_EQ(input->quantization->zero_point[0], 2);
auto quantize_op = model->subgraphs[0]->operators[2].get();
auto output = model->subgraphs[0]->tensors[quantize_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_1_uint8");
EXPECT_EQ(output->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.5);
EXPECT_EQ(output->quantization->zero_point[0], -5);
}
}
}
} |
850 | cpp | tensorflow/tensorflow | quantization_wrapper_utils | tensorflow/lite/tools/optimize/quantization_wrapper_utils.cc | tensorflow/lite/tools/optimize/quantization_wrapper_utils_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_QUANTIZATION_WRAPPER_UTILS_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_QUANTIZATION_WRAPPER_UTILS_H_
#include <string>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace optimize {
TfLiteStatus LoadModel(const string& path, ModelT* model);
TfLiteStatus AddIntermediateTensorsToFusedOp(
flatbuffers::FlatBufferBuilder* builder, ModelT* model);
bool WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes);
}
}
#endif
#include "tensorflow/lite/tools/optimize/quantization_wrapper_utils.h"
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/operator_property.h"
namespace tflite {
namespace impl {
class FlatBufferModel;
}
namespace optimize {
namespace {
#ifdef TFLITE_CUSTOM_LSTM
constexpr bool kUseCustomLSTM = true;
#else
constexpr bool kUseCustomLSTM = false;
#endif
void MakeTensor(const string& name, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = {0};
tensor_raw->type = TensorType_FLOAT32;
tensor->reset(tensor_raw);
}
string CreateTensorName(int op_index, int tensor_index) {
return "intermediate_" + std::to_string(op_index) + "_" +
std::to_string(tensor_index);
}
bool IntermediateTensorExists(ModelT* model) {
for (int subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
++subgraph_idx) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
OperatorT* op = subgraph->operators[op_idx].get();
if (!op->intermediates.empty()) {
return true;
}
}
}
return false;
}
}
TfLiteStatus LoadModel(const string& path, ModelT* model) {
auto input_model = impl::FlatBufferModel::BuildFromFile(path.c_str());
if (!input_model) {
return kTfLiteError;
}
auto readonly_model = input_model->GetModel();
if (!readonly_model) {
return kTfLiteError;
}
readonly_model->UnPackTo(model);
return kTfLiteOk;
}
TfLiteStatus AddIntermediateTensorsToFusedOp(
flatbuffers::FlatBufferBuilder* builder, ModelT* model) {
if (model->subgraphs.size() == 1 && model->subgraphs[0]->operators.empty()) {
return kTfLiteOk;
}
if (IntermediateTensorExists(model)) {
return kTfLiteOk;
}
for (int subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
++subgraph_idx) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
OperatorT* op = subgraph->operators[op_idx].get();
operator_property::OperatorProperty property =
operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);
if (property.intermediates.empty()) {
continue;
}
const int next_tensor_index = subgraph->tensors.size();
int num_intermediates = property.intermediates.size();
if (kUseCustomLSTM) {
num_intermediates = 12;
}
for (int i = 0; i < num_intermediates; ++i) {
std::unique_ptr<TensorT> intermediate_tensor;
auto name = CreateTensorName(op_idx, i);
MakeTensor(name, &intermediate_tensor);
subgraph->tensors.push_back(std::move(intermediate_tensor));
op->intermediates.push_back(next_tensor_index + i);
}
}
}
flatbuffers::Offset<Model> output_model_location =
Model::Pack(*builder, model);
FinishModelBuffer(*builder, output_model_location);
return kTfLiteOk;
}
bool WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes) {
std::fstream stream(out_file, std::ios::binary | std::ios::out);
for (size_t i = 0; i < num_bytes; i++) {
stream << bytes[i];
}
return (!stream.bad() && !stream.fail());
}
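// Illustrative end-to-end use of the three helpers above (a sketch only; the
// file paths are hypothetical and error handling is reduced to early returns):
//
//   ModelT model;
//   if (LoadModel("/tmp/model.tflite", &model) != kTfLiteOk) return;
//   flatbuffers::FlatBufferBuilder builder;
//   if (AddIntermediateTensorsToFusedOp(&builder, &model) != kTfLiteOk) return;
//   WriteFile("/tmp/model_with_intermediates.tflite",
//             builder.GetBufferPointer(), builder.GetSize());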
}
} | #include "tensorflow/lite/tools/optimize/quantization_wrapper_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace optimize {
namespace {
using ::testing::ElementsAreArray;
TEST(LstmPreprocess, Add2Tensors) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto lstm_op_code = std::make_unique<OperatorCodeT>();
auto lstm_op = std::make_unique<OperatorT>();
lstm_op_code->builtin_code = BuiltinOperator_LSTM;
lstm_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_LSTM);
lstm_op_code->version = 2;
lstm_op->opcode_index = 0;
lstm_op->inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
lstm_op->outputs = {24};
model->subgraphs.push_back(std::move(subgraph));
for (int i = 0; i < lstm_op->inputs.size(); ++i) {
const int index = lstm_op->inputs[i];
if (index == -1) {
continue;
}
auto tensor = std::make_unique<TensorT>();
tensor->name = "lstm_tensor" + std::to_string(index);
tensor->shape = {2, 3, 4};
tensor->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor));
}
model->subgraphs[0]->operators.push_back(std::move(lstm_op));
model->operator_codes.push_back(std::move(lstm_op_code));
model->buffers.push_back(std::move(buffer));
flatbuffers::FlatBufferBuilder builder;
tflite::optimize::AddIntermediateTensorsToFusedOp(&builder, model.get());
EXPECT_EQ(model->operator_codes.size(), 1);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 26);
EXPECT_EQ(model->buffers.size(), 1);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_LSTM);
EXPECT_EQ(model->subgraphs[0]->tensors[0]->name, "lstm_tensor0");
EXPECT_EQ(model->subgraphs[0]->tensors[21]->name, "intermediate_0_0");
EXPECT_EQ(model->subgraphs[0]->tensors[22]->name, "intermediate_0_1");
EXPECT_EQ(model->subgraphs[0]->tensors[23]->name, "intermediate_0_2");
EXPECT_EQ(model->subgraphs[0]->tensors[24]->name, "intermediate_0_3");
EXPECT_EQ(model->subgraphs[0]->tensors[25]->name, "intermediate_0_4");
EXPECT_THAT(
model->subgraphs[0]->operators[0]->inputs,
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->outputs,
ElementsAreArray({24}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->intermediates,
ElementsAreArray({21, 22, 23, 24, 25}));
tflite::optimize::AddIntermediateTensorsToFusedOp(&builder, model.get());
EXPECT_EQ(model->operator_codes.size(), 1);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 26);
EXPECT_EQ(model->buffers.size(), 1);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_LSTM);
EXPECT_EQ(model->subgraphs[0]->tensors[0]->name, "lstm_tensor0");
EXPECT_EQ(model->subgraphs[0]->tensors[21]->name, "intermediate_0_0");
EXPECT_EQ(model->subgraphs[0]->tensors[22]->name, "intermediate_0_1");
EXPECT_EQ(model->subgraphs[0]->tensors[23]->name, "intermediate_0_2");
EXPECT_EQ(model->subgraphs[0]->tensors[24]->name, "intermediate_0_3");
EXPECT_EQ(model->subgraphs[0]->tensors[25]->name, "intermediate_0_4");
EXPECT_THAT(
model->subgraphs[0]->operators[0]->inputs,
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->outputs,
ElementsAreArray({24}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->intermediates,
ElementsAreArray({21, 22, 23, 24, 25}));
}
}
}
}
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
851 | cpp | tensorflow/tensorflow | calibrator | tensorflow/lite/tools/optimize/calibration/calibrator.cc | tensorflow/lite/tools/optimize/calibration/calibrator_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_CALIBRATOR_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_CALIBRATOR_H_
#include <memory>
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
namespace tflite {
namespace optimize {
namespace calibration {
TfLiteStatus BuildLoggingInterpreter(
const FlatBufferModel& model, const OpResolver& op_resolver,
std::unique_ptr<Interpreter>* interpreter,
std::unique_ptr<CalibrationReader>* calibration_reader);
TfLiteStatus BuildLoggingInterpreter(
const tflite::Model* model, ErrorReporter* error_reporter,
const OpResolver& op_resolver, std::unique_ptr<Interpreter>* interpreter,
std::unique_ptr<CalibrationReader>* calibration_reader,
const Allocation* allocation = nullptr);
}
}
}
#endif
#include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/vector.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_logger.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
#include "tensorflow/lite/tools/optimize/calibration/custom_logging_ops/lstm.h"
#include "tensorflow/lite/tools/optimize/calibration/logging_op.h"
#include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
class Calibrator {
public:
Calibrator(const std::unordered_map<const TfLiteNode*, OperatorInfo>&
node_ptr_opinfo_map,
std::unique_ptr<LoggingOpResolver> logging_op_resolver,
ErrorReporter* error_reporter)
: node_ptr_opinfo_map_(node_ptr_opinfo_map),
logging_op_resolver_(std::move(logging_op_resolver)),
error_reporter_(error_reporter) {
logger_ = std::make_unique<Logger>();
}
KernelEvalFuncPtr GetKernelInvoke(const TfLiteNode* node) const;
Logger* GetLogger() const { return logger_.get(); }
ErrorReporter* GetErrorReporter() const { return error_reporter_; }
const OperatorInfo& GetOpInfo(const TfLiteNode* node) const {
return node_ptr_opinfo_map_.at(node);
}
std::vector<const TfLiteNode*> GetNodesUnderCalibration() {
std::vector<const TfLiteNode*> nodes;
nodes.reserve(node_ptr_opinfo_map_.size());
for (const auto& entry : node_ptr_opinfo_map_) {
nodes.push_back(entry.first);
}
return nodes;
}
private:
std::unordered_map<const TfLiteNode*, OperatorInfo> node_ptr_opinfo_map_;
std::unique_ptr<LoggingOpResolver> logging_op_resolver_;
const std::unordered_map<int, OperatorInfo> index_opinfo_;
std::unique_ptr<Logger> logger_;
ErrorReporter* error_reporter_;
};
KernelEvalFuncPtr Calibrator::GetKernelInvoke(const TfLiteNode* node) const {
auto op_info = node_ptr_opinfo_map_.at(node);
if (op_info.is_custom_op) {
return logging_op_resolver_->GetWrappedKernelInvoke(op_info.name.c_str(),
op_info.version);
}
return logging_op_resolver_->GetWrappedKernelInvoke(op_info.builtin_op_code,
op_info.version);
}
class GlobalCalibratorRegistry {
public:
Calibrator* GetCalibrator(const TfLiteNode* node) const {
if (node_to_calibrator_.find(node) == node_to_calibrator_.cend()) {
return nullptr;
}
return node_to_calibrator_.at(node);
}
void RemoveCalibrator(const TfLiteContext* context) {
Calibrator* calibrator = calibrator_registry_.at(context).get();
auto nodes = calibrator->GetNodesUnderCalibration();
for (auto node : nodes) {
node_to_calibrator_.erase(node);
}
calibrator_registry_.erase(context);
}
TfLiteStatus CreateCalibrator(
const TfLiteContext* context,
const std::unordered_map<const TfLiteNode*, OperatorInfo>& node_to_opinfo,
std::unique_ptr<LoggingOpResolver> logging_op_resolver,
Calibrator** calibrator_ptr, ErrorReporter* reporter) {
if (calibrator_registry_.find(context) != calibrator_registry_.cend()) {
reporter->Report(
"Failed to create calibrator, context already registered.");
return kTfLiteError;
}
auto calibrator = std::make_unique<Calibrator>(
node_to_opinfo, std::move(logging_op_resolver), reporter);
calibrator_registry_[context] = std::move(calibrator);
*calibrator_ptr = calibrator_registry_.at(context).get();
for (const auto& entry : node_to_opinfo) {
node_to_calibrator_[entry.first] = *calibrator_ptr;
}
return kTfLiteOk;
}
private:
absl::flat_hash_map<const TfLiteContext*, std::unique_ptr<Calibrator>>
calibrator_registry_;
absl::flat_hash_map<const TfLiteNode*, Calibrator*> node_to_calibrator_;
};
GlobalCalibratorRegistry* GetCalibratorRegistry() {
static GlobalCalibratorRegistry* registry = new GlobalCalibratorRegistry();
return registry;
}
logging_kernel_func_ptr GetLoggingEvalFunc(TfLiteContext* context,
TfLiteNode* node,
int builtin_op_code) {
switch (builtin_op_code) {
case BuiltinOperator_LSTM: {
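      // Fused LSTMs prepared with the custom-LSTM path (TFLITE_CUSTOM_LSTM in
      // quantization_wrapper_utils.cc, which adds 12 intermediate tensors per
      // op) are routed to the custom logging kernel; the standard fused LSTM
      // carries 5 intermediates and falls through to the builtin kernel.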
if (node->intermediates->size == 12) {
return tflite::optimize::calibration::custom::lstm_logging_kernel;
}
return tflite::optimize::calibration::builtin::lstm_logging_kernel;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
return tflite::optimize::calibration::builtin::
unidirectional_sequence_lstm_logging_kernel;
default:
return nullptr;
}
}
TfLiteStatus LoggingEval(TfLiteContext* context, TfLiteNode* node) {
Calibrator* calibrator = GetCalibratorRegistry()->GetCalibrator(node);
if (!calibrator) {
TF_LITE_KERNEL_LOG(context, "No calibrator found for context.");
return kTfLiteError;
}
auto kernel_invoke = calibrator->GetKernelInvoke(node);
auto logger = calibrator->GetLogger();
auto op_info = calibrator->GetOpInfo(node);
auto error_reporter = calibrator->GetErrorReporter();
for (int i : op_info.loggable_inputs) {
auto tensor = context->tensors[i];
TF_LITE_ENSURE_STATUS(
logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
tensor.bytes / sizeof(float), error_reporter));
}
auto builtin_op_code = calibrator->GetOpInfo(node).builtin_op_code;
auto kernel_invoke_intermediate =
GetLoggingEvalFunc(context, node, builtin_op_code);
if (kernel_invoke_intermediate == nullptr) {
TF_LITE_ENSURE_STATUS(kernel_invoke(context, node));
} else {
TF_LITE_ENSURE_STATUS(
kernel_invoke_intermediate(context, op_info.subgraph_index, node,
calibrator->GetLogger(), error_reporter));
}
for (int i : op_info.loggable_inputs) {
auto tensor = context->tensors[i];
TF_LITE_ENSURE_STATUS(
logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
tensor.bytes / sizeof(float), error_reporter));
}
for (int i : op_info.loggable_outputs) {
auto tensor = context->tensors[i];
TF_LITE_ENSURE_STATUS(
logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
tensor.bytes / sizeof(float), error_reporter));
}
return kTfLiteOk;
}
std::vector<int> GetLoggableTensorIndices(
const std::vector<int>& tensor_indices,
const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors,
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* tensor_buffers) {
std::vector<int> loggable;
for (auto tensor_index : tensor_indices) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
}
auto tensor = tensors->Get(tensor_index);
auto buffer_index = tensor->buffer();
const bool has_no_buffer =
(tensor_buffers->Get(buffer_index) == nullptr) ||
(tensor_buffers->Get(buffer_index)->data() == nullptr) ||
(tensor_buffers->Get(buffer_index)->data()->size() == 0);
if (has_no_buffer && tensor->type() == tflite::TensorType_FLOAT32) {
loggable.push_back(tensor_index);
}
}
return loggable;
}
TfLiteStatus GetNodeOpInfoMapAndContext(
const absl::flat_hash_map<std::tuple<int, int>, OperatorInfo>&
node_to_opinfo,
tflite::Interpreter* const interpreter,
std::unordered_map<const TfLiteNode*, OperatorInfo>* node_ptr_opinfo_map,
TfLiteContext** context) {
*context = interpreter->primary_subgraph().context();
TF_LITE_ENSURE(*context,
interpreter->execution_plan().size() <= node_to_opinfo.size());
for (const auto& entry : node_to_opinfo) {
auto op_info = entry.second;
int subgraph_index, op_index;
std::tie(subgraph_index, op_index) = entry.first;
const auto* node_and_reg =
interpreter->node_and_registration(subgraph_index, op_index);
op_info.registration = &node_and_reg->second;
node_ptr_opinfo_map->insert({&node_and_reg->first, op_info});
}
return kTfLiteOk;
}
string GetOpName(const tflite::OperatorCode& opcode) {
if (opcode.custom_code() != nullptr) {
return opcode.custom_code()->str();
}
return tflite::EnumNamesBuiltinOperator()[GetBuiltinCode(&opcode)];
}
class Reader : public CalibrationReader {
public:
Reader(const TfLiteContext* context, const Logger* logger)
: CalibrationReader(logger), context_(context) {}
~Reader() override { GetCalibratorRegistry()->RemoveCalibrator(context_); }
private:
const TfLiteContext* context_;
};
bool HasInputs(BuiltinOperator code) {
switch (code) {
case BuiltinOperator_CALL_ONCE:
case BuiltinOperator_VAR_HANDLE:
case BuiltinOperator_CUSTOM:
return false;
default:
return true;
}
}
bool HasOutputs(BuiltinOperator code) {
switch (code) {
case BuiltinOperator_ASSIGN_VARIABLE:
case BuiltinOperator_CALL_ONCE:
case BuiltinOperator_CUSTOM:
return false;
default:
return true;
}
}
}
TfLiteStatus BuildLoggingInterpreter(
const FlatBufferModel& model, const OpResolver& op_resolver,
std::unique_ptr<Interpreter>* interpreter,
std::unique_ptr<CalibrationReader>* calibration_reader) {
return BuildLoggingInterpreter(model.GetModel(), model.error_reporter(),
op_resolver, interpreter, calibration_reader,
model.allocation());
}
TfLiteStatus BuildLoggingInterpreter(
const tflite::Model* tflite_model, ErrorReporter* error_reporter,
const OpResolver& op_resolver, std::unique_ptr<Interpreter>* interpreter,
std::unique_ptr<CalibrationReader>* calibration_reader,
const Allocation* allocation) {
if (error_reporter == nullptr) {
error_reporter = DefaultErrorReporter();
}
auto subgraphs = tflite_model->subgraphs();
auto tensor_buffers = tflite_model->buffers();
absl::flat_hash_map<std::tuple<int, int>, OperatorInfo> node_to_opinfo;
BuiltinOpsSet builtin_op_and_versions;
CustomOpsSet custom_op_and_versions;
for (size_t subgraph_index = 0; subgraph_index < subgraphs->size();
subgraph_index++) {
auto subgraph = subgraphs->Get(subgraph_index);
auto operator_codes = tflite_model->operator_codes();
auto operators = subgraph->operators();
auto tensors = subgraph->tensors();
if (!operators) {
continue;
}
for (size_t i = 0; i < operators->size(); i++) {
OperatorInfo op_info;
op_info.subgraph_index = subgraph_index;
op_info.node_index = i;
auto op = operators->Get(i);
auto operator_code = operator_codes->Get(op->opcode_index());
op_info.builtin_op_code = GetBuiltinCode(operator_code);
op_info.name = GetOpName(*operator_code);
op_info.is_custom_op = operator_code->custom_code() != nullptr;
op_info.version = operator_code->version();
auto op_inputs = op->inputs();
auto op_outputs = op->outputs();
if (op_inputs) {
op_info.inputs = std::vector<int>(op_inputs->begin(), op_inputs->end());
} else if (HasInputs(op_info.builtin_op_code)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "Op %s missing inputs",
op_info.name.c_str());
}
if (op_outputs) {
op_info.outputs =
std::vector<int>(op_outputs->begin(), op_outputs->end());
} else if (HasOutputs(op_info.builtin_op_code)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "Op %s missing outputs",
op_info.name.c_str());
}
op_info.loggable_inputs =
GetLoggableTensorIndices(op_info.inputs, tensors, tensor_buffers);
op_info.loggable_outputs =
GetLoggableTensorIndices(op_info.outputs, tensors, tensor_buffers);
if (op_info.is_custom_op) {
op_info.registration =
op_resolver.FindOp(op_info.name.c_str(), operator_code->version());
custom_op_and_versions.insert(
{op_info.name.c_str(), operator_code->version()});
} else {
op_info.registration = op_resolver.FindOp(GetBuiltinCode(operator_code),
operator_code->version());
builtin_op_and_versions.insert(
{op_info.builtin_op_code, operator_code->version()});
}
std::tuple<int, int> key{subgraph_index, i};
node_to_opinfo[key] = op_info;
}
}
auto logging_op_resolver = std::make_unique<LoggingOpResolver>(
builtin_op_and_versions, custom_op_and_versions, op_resolver, LoggingEval,
error_reporter);
tflite::InterpreterBuilder(tflite_model, *logging_op_resolver, error_reporter,
nullptr,
allocation)(interpreter);
if (!(*interpreter)) {
error_reporter->Report("Failed to construct interpreter");
return kTfLiteError;
}
std::unordered_map<const TfLiteNode*, OperatorInfo> node_ptr_opinfo_map;
TfLiteContext* context = nullptr;
TF_LITE_ENSURE_STATUS(GetNodeOpInfoMapAndContext(
node_to_opinfo, interpreter->get(), &node_ptr_opinfo_map, &context));
Calibrator* calibrator = nullptr;
TF_LITE_ENSURE_STATUS(GetCalibratorRegistry()->CreateCalibrator(
context, node_ptr_opinfo_map, std::move(logging_op_resolver), &calibrator,
error_reporter));
*calibration_reader = std::unique_ptr<CalibrationReader>(
new Reader(context, calibrator->GetLogger()));
return kTfLiteOk;
}
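// Sketch of the intended calibration flow (illustrative only; it mirrors the
// unit tests, and `flatbuffer_model` stands for an already-loaded
// FlatBufferModel):
//
//   std::unique_ptr<Interpreter> interpreter;
//   std::unique_ptr<CalibrationReader> reader;
//   BuildLoggingInterpreter(*flatbuffer_model,
//                           ops::builtin::BuiltinOpResolver{}, &interpreter,
//                           &reader);
//   interpreter->AllocateTensors();
//   // ... fill the input tensors with representative data, then Invoke() ...
//   tflite::ModelT model_t;
//   flatbuffer_model->GetModel()->UnPackTo(&model_t);
//   reader->AddCalibrationToModel(&model_t, false);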
}
}
} | #include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
namespace {
tensorflow::string* g_test_model_dir = nullptr;
}
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
std::unique_ptr<FlatBufferModel> ReadModel(const string& model_name) {
auto model_path = tensorflow::io::JoinPath(*g_test_model_dir, model_name);
return FlatBufferModel::BuildFromFile(model_path.c_str());
}
TEST(CalibratorTest, CalibrationStatsAreCollected) {
auto model = ReadModel("multi_add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_TRUE(stats.empty());
status = interpreter->AllocateTensors();
ASSERT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
std::vector<float> ones(tensor_size, 1.0f);
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);
for (size_t i = 0; i < tensor_size; i++) {
EXPECT_NEAR(tensor->data.f[i], 6.0f, eps);
}
tensor = interpreter->tensor(interpreter->outputs()[1]);
for (size_t i = 0; i < tensor_size; i++) {
EXPECT_NEAR(tensor->data.f[i], 9.0f, eps);
}
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(7, stats.size());
for (int tensor_idx = 0; tensor_idx < 4; tensor_idx++) {
EXPECT_NEAR(stats.find({0, tensor_idx})->second.min, tensor_idx + 1, eps);
EXPECT_NEAR(stats.find({0, tensor_idx})->second.max, tensor_idx + 1, eps);
}
EXPECT_NEAR(stats.find({0, 4})->second.min, 5, eps);
EXPECT_NEAR(stats.find({0, 4})->second.max, 5, eps);
EXPECT_NEAR(stats.find({0, 5})->second.min, 6, eps);
EXPECT_NEAR(stats.find({0, 5})->second.max, 6, eps);
EXPECT_NEAR(stats.find({0, 6})->second.min, 9, eps);
EXPECT_NEAR(stats.find({0, 6})->second.max, 9, eps);
}
TEST(CalibratorTest, MultipleInvokes) {
auto model = ReadModel("multi_add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(7, stats.size());
const float expected_values[7] = {
1.0f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(stats.find({0, tensor_idx})->second.min,
expected_values[tensor_idx], eps);
EXPECT_NEAR(stats.find({0, tensor_idx})->second.max,
expected_values[tensor_idx], eps);
}
TfLiteTensor* input0 = interpreter->tensor(0);
input0->data.f[0] = 1.5f;
input0->data.f[1] = 0.5f;
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(7, stats.size());
EXPECT_NEAR(stats.find({0, 0})->second.min, 0.5f, eps);
EXPECT_NEAR(stats.find({0, 0})->second.max, 1.5f, eps);
for (int tensor_idx = 1; tensor_idx < 5; tensor_idx++) {
EXPECT_NEAR(stats.find({0, tensor_idx})->second.min,
expected_values[tensor_idx], eps);
EXPECT_NEAR(stats.find({0, tensor_idx})->second.max,
expected_values[tensor_idx], eps);
}
EXPECT_NEAR(stats.find({0, 5})->second.min, 5.5f, eps);
EXPECT_NEAR(stats.find({0, 5})->second.max, 6.5f, eps);
EXPECT_NEAR(stats.find({0, 6})->second.min, 9.0f, eps);
EXPECT_NEAR(stats.find({0, 6})->second.max, 9.0f, eps);
}
TEST(CalibratorTest, UpdateMinMax) {
auto flatbuffer_model = ReadModel("multi_add.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
auto input_0_quant_params =
std::make_unique<tflite::QuantizationParametersT>();
input_0_quant_params->min.push_back(0.5);
input_0_quant_params->max.push_back(1.5);
model.subgraphs[0]->tensors[0]->quantization =
std::move(input_0_quant_params);
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
const float expected_min[7] = {
0.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
const float expected_max[7] = {
1.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, true);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_min[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_max[tensor_idx], eps);
}
const float expected_value[7] = {
1.0f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, false);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_value[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_value[tensor_idx], eps);
}
}
TEST(CalibratorTest, HandleNanValues) {
auto flatbuffer_model = ReadModel("multi_add.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
if (j % 2 == 0) {
tensor->data.f[j] = NAN;
} else {
tensor->data.f[j] = i + 1;
}
}
}
auto input_0_quant_params =
std::make_unique<tflite::QuantizationParametersT>();
input_0_quant_params->min.push_back(0.5);
input_0_quant_params->max.push_back(1.5);
model.subgraphs[0]->tensors[0]->quantization =
std::move(input_0_quant_params);
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
const float expected_min[7] = {
0.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
const float expected_max[7] = {
1.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, true);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_min[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_max[tensor_idx], eps);
}
const float expected_value[7] = {
1.0f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, false);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_value[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_value[tensor_idx], eps);
}
}
TEST(CalibratorTest, LSTM) {
auto flatbuffer_model = ReadModel("lstm.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(status, kTfLiteOk);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const std::vector<float> lstm_input = {0.3, 0.2};
int input_tensor_idx = interpreter->inputs()[0];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
for (size_t j = 0; j < lstm_input.size(); j++) {
tensor->data.f[j] = lstm_input[j];
}
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {0.200000, 0.300000}},
{{0, 18}, {0.000000, 0.468415}},
{{0, 19}, {0.000000, 0.424350}},
{{0, 24}, {0.265968, 0.468415}},
{{0, 25}, {0.080045, 0.170588}},
{{0, 26}, {0.080045, 0.170588}},
{{0, 27}, {0.080045, 0.170588}},
{{0, 28}, {0.080045, 0.170588}},
{{0, 29}, {0.000000, 0.270944}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, UnidirectionalSequenceLSTM) {
auto flatbuffer_model = ReadModel("unidirectional_sequence_lstm.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
const std::vector<float> lstm_input = {0.3, 0.2, 0.9, 0.8};
int input_tensor_idx = interpreter->inputs()[0];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
for (size_t j = 0; j < lstm_input.size(); j++) {
tensor->data.f[j] = lstm_input[j];
}
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {0.200000, 0.900000}},
{{0, 18}, {0.000000, 0.520999}},
{{0, 19}, {0.000000, 0.711364}},
{{0, 24}, {0.247992, 0.520999}},
{{0, 25}, {0.080045, 0.824241}},
{{0, 26}, {0.080045, 0.824241}},
{{0, 27}, {0.080045, 0.824241}},
{{0, 28}, {0.080045, 0.824241}},
{{0, 29}, {0.000000, 0.413618}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, CustomLSTM) {
auto flatbuffer_model = ReadModel("custom_lstm.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
const std::vector<float> lstm_input = {0.3, 0.2, 0.9, 0.8};
int input_tensor_idx = interpreter->inputs()[0];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
for (size_t j = 0; j < lstm_input.size(); j++) {
tensor->data.f[j] = lstm_input[j];
}
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {0.200000, 0.300000}},
{{0, 18}, {0.000000, 0.468415}},
{{0, 19}, {0.000000, 0.424349}},
{{0, 24}, {0.265968, 0.468415}},
{{0, 25}, {0.080045, 0.170588}},
{{0, 26}, {0.080045, 0.170588}},
{{0, 27}, {0.000000, 0.000000}},
{{0, 28}, {0.080045, 0.170588}},
{{0, 29}, {0.080045, 0.170588}},
{{0, 30}, {0.000000, 0.000000}},
{{0, 31}, {0.080045, 0.170588}},
{{0, 32}, {0.080045, 0.170588}},
{{0, 33}, {0.000000, 0.000000}},
{{0, 34}, {0.080045, 0.170588}},
{{0, 35}, {0.080045, 0.170588}},
{{0, 36}, {0.000000, 0.000000}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, CalibrationWithMultipleSubgraphs) {
auto model = ReadModel("multi_subgraphs_while.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_TRUE(stats.empty());
status = interpreter->AllocateTensors();
ASSERT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(int));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(4, stats.size());
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {1.0, 1.0}},
{{0, 4}, {4.0, 4.0}},
{{2, 2}, {1.0, 2.0}},
{{2, 6}, {2.0, 4.0}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, CalibrationWithCallOnce) {
auto model = ReadModel("call_once_mul.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_TRUE(stats.empty());
status = interpreter->AllocateTensors();
ASSERT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(int));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(3, stats.size());
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {1.0, 1.0}},
{{0, 2}, {2.0, 2.0}},
{{0, 3}, {2.0, 2.0}}};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
}
}
}
}
int main(int argc, char** argv) {
tensorflow::string model_file;
const std::vector<tensorflow::Flag> flag_list = {
tensorflow::Flag("test_model_file", &model_file,
"Path to test tflite model file."),
};
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
    std::cerr << "The --test_model_file flag is required.\n";
std::abort();
}
g_test_model_dir =
new tensorflow::string(tensorflow::io::Dirname(model_file));
::tensorflow::port::InitMain(argv[0], &argc, &argv);
return RUN_ALL_TESTS();
} |
852 | cpp | tensorflow/tensorflow | logging_op_resolver | tensorflow/lite/tools/optimize/calibration/logging_op_resolver.cc | tensorflow/lite/tools/optimize/calibration/logging_op_resolver_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_LOGGING_OP_RESOLVER_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_LOGGING_OP_RESOLVER_H_
#include <set>
#include <unordered_map>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
namespace tflite {
namespace optimize {
namespace calibration {
class LoggingOpResolver : public OpResolver {
public:
LoggingOpResolver(const BuiltinOpsSet& builtin_ops_to_replace,
const CustomOpsSet& custom_ops_to_replace,
const OpResolver& base_resolver,
KernelEvalFuncPtr logging_eval_fn,
ErrorReporter* error_reporter);
const TfLiteRegistration* FindOp(BuiltinOperator op,
int version) const override;
KernelEvalFuncPtr GetWrappedKernelInvoke(BuiltinOperator op,
int version) const;
const TfLiteRegistration* FindOp(const char* op, int version) const override;
KernelEvalFuncPtr GetWrappedKernelInvoke(const char* op, int version) const;
private:
BuiltinOpsMap<std::unique_ptr<TfLiteRegistration>>
builtin_op_registration_map_;
BuiltinOpsMap<KernelEvalFuncPtr> builtin_op_evalfn_map_;
CustomOpsMap<std::unique_ptr<TfLiteRegistration>> custom_op_registration_map_;
CustomOpsMap<KernelEvalFuncPtr> custom_op_evalfn_map_;
};
}
}
}
#endif
#include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace optimize {
namespace calibration {
LoggingOpResolver::LoggingOpResolver(
const BuiltinOpsSet& builtin_ops_to_replace,
const CustomOpsSet& custom_ops_to_replace, const OpResolver& base_resolver,
KernelEvalFuncPtr logging_eval_fn, ErrorReporter* error_reporter) {
std::vector<std::string> unresolved_builtin_ops;
std::vector<std::string> unresolved_custom_ops;
for (const auto& op_and_version : builtin_ops_to_replace) {
const TfLiteRegistration* base_registration =
base_resolver.FindOp(op_and_version.first, op_and_version.second);
if (!base_registration) {
unresolved_builtin_ops.push_back(
EnumNameBuiltinOperator(op_and_version.first));
continue;
}
BuiltinOperatorKey key = op_and_version;
builtin_op_evalfn_map_[key] = base_registration->invoke;
auto logging_registration =
std::make_unique<TfLiteRegistration>(*base_registration);
logging_registration->invoke = logging_eval_fn;
builtin_op_registration_map_[key] = std::move(logging_registration);
}
for (const auto& op_and_version : custom_ops_to_replace) {
const TfLiteRegistration* base_registration = base_resolver.FindOp(
op_and_version.first.c_str(), op_and_version.second);
if (!base_registration) {
if (!IsFlexOp(op_and_version.first.c_str()))
unresolved_custom_ops.push_back(op_and_version.first.c_str());
continue;
}
CustomOperatorKey key = op_and_version;
custom_op_evalfn_map_[key] = base_registration->invoke;
auto logging_registration =
std::make_unique<TfLiteRegistration>(*base_registration);
logging_registration->invoke = logging_eval_fn;
custom_op_registration_map_[key] = std::move(logging_registration);
}
if (!unresolved_builtin_ops.empty() || !unresolved_custom_ops.empty()) {
if (!error_reporter) return;
std::string error_message =
"Failed to initialize op resolver for calibration:";
if (!unresolved_builtin_ops.empty())
absl::StrAppend(&error_message, "\nThere are unresolved builtin ops: [",
absl::StrJoin(unresolved_builtin_ops, ", "), "]");
if (!unresolved_custom_ops.empty()) {
absl::StrAppend(&error_message, "\nThere are unresolved custom ops: [",
absl::StrJoin(unresolved_custom_ops, ", "), "]");
}
TF_LITE_REPORT_ERROR(error_reporter, error_message.c_str());
}
}
const TfLiteRegistration* LoggingOpResolver::FindOp(BuiltinOperator op,
int version) const {
BuiltinOperatorKey key = {op, version};
if (builtin_op_registration_map_.find(key) !=
builtin_op_registration_map_.end()) {
return builtin_op_registration_map_.at(key).get();
}
return nullptr;
}
KernelEvalFuncPtr LoggingOpResolver::GetWrappedKernelInvoke(BuiltinOperator op,
int version) const {
return builtin_op_evalfn_map_.at({op, version});
}
const TfLiteRegistration* LoggingOpResolver::FindOp(const char* op,
int version) const {
CustomOperatorKey key = {op, version};
if (custom_op_registration_map_.find(key) !=
custom_op_registration_map_.end()) {
return custom_op_registration_map_.at(key).get();
}
return nullptr;
}
KernelEvalFuncPtr LoggingOpResolver::GetWrappedKernelInvoke(const char* op,
int version) const {
return custom_op_evalfn_map_.at({op, version});
}
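// Minimal usage sketch (illustrative; `base_resolver`, `LoggingEvalFn`, and
// `error_reporter` stand for caller-provided values): wrap selected kernels so
// that every invoke goes through the logging function while the original
// invoke pointers remain retrievable.
//
//   BuiltinOpsSet to_wrap = {{BuiltinOperator_CONV_2D, 1}};
//   LoggingOpResolver resolver(to_wrap, CustomOpsSet(), base_resolver,
//                              LoggingEvalFn, error_reporter);
//   const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
//   KernelEvalFuncPtr original =
//       resolver.GetWrappedKernelInvoke(BuiltinOperator_CONV_2D, 1);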
}
}
} | #include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus CustomPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus CustomEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus WrappingInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TEST(LoggingOpResolverTest, KernelInvokesAreReplaced) {
MutableOpResolver base_resolver;
TfLiteRegistration conv_registration = {};
conv_registration.prepare = ConvPrepare;
conv_registration.invoke = ConvEval;
base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
TfLiteRegistration add_registration = {};
add_registration.prepare = AddPrepare;
add_registration.invoke = AddEval;
base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
{BuiltinOperator_ADD, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_CONV_2D);
EXPECT_TRUE(reg->prepare == ConvPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
reg = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_ADD);
EXPECT_TRUE(reg->prepare == AddPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
}
TEST(LoggingOpResolverTest, OriginalKernelInvokesAreRetained) {
MutableOpResolver base_resolver;
TfLiteRegistration conv_registration = {};
conv_registration.prepare = ConvPrepare;
conv_registration.invoke = ConvEval;
base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
TfLiteRegistration add_registration = {};
add_registration.prepare = AddPrepare;
add_registration.invoke = AddEval;
base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
{BuiltinOperator_ADD, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
auto kernel_invoke =
resolver.GetWrappedKernelInvoke(BuiltinOperator_CONV_2D, 1);
EXPECT_TRUE(kernel_invoke == ConvEval);
kernel_invoke = resolver.GetWrappedKernelInvoke(BuiltinOperator_ADD, 1);
EXPECT_TRUE(kernel_invoke == AddEval);
}
TEST(LoggingOpResolverTest, OnlyOpsInReplacementSetAreReplaced) {
MutableOpResolver base_resolver;
TfLiteRegistration conv_registration = {};
conv_registration.prepare = ConvPrepare;
conv_registration.invoke = ConvEval;
base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
TfLiteRegistration add_registration = {};
add_registration.prepare = AddPrepare;
add_registration.invoke = AddEval;
base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_CONV_2D);
EXPECT_TRUE(reg->prepare == ConvPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
reg = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(nullptr, reg);
}
TEST(LoggingOpResolverTest, CustomOps) {
MutableOpResolver base_resolver;
TfLiteRegistration custom_registration = {};
custom_registration.prepare = CustomPrepare;
custom_registration.invoke = CustomEval;
std::string custom_op_name = "custom";
base_resolver.AddCustom(custom_op_name.c_str(), &custom_registration);
CustomOpsSet ops_to_replace = {
{custom_op_name, 1},
};
LoggingOpResolver resolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(custom_op_name.c_str(), 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_CUSTOM);
  EXPECT_STREQ(reg->custom_name, custom_op_name.c_str());
EXPECT_TRUE(reg->prepare == CustomPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
}
TEST(LoggingOpResolverTest, UnresolvedCustomOps) {
MutableOpResolver base_resolver;
std::string custom_op_name = "unresolved_custom_op";
CustomOpsSet ops_to_replace = {
{custom_op_name, 1},
};
LoggingOpResolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
WrappingInvoke, nullptr);
}
TEST(LoggingOpResolverTest, UnresolvedBuiltinOps) {
MutableOpResolver base_resolver;
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
{BuiltinOperator_ADD, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
}
TEST(LoggingOpResolverTest, FlexOps) {
MutableOpResolver base_resolver;
std::string custom_op_name = "FlexAdd";
CustomOpsSet ops_to_replace = {
{custom_op_name, 1},
};
LoggingOpResolver resolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(custom_op_name.c_str(), 1);
EXPECT_TRUE(!reg);
}
}
}
}
} |
853 | cpp | tensorflow/tensorflow | lstm | tensorflow/lite/kernels/lstm.cc | tensorflow/lite/kernels/lstm_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_LSTM_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_LSTM_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewLstmNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/lstm.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class LstmNodeShader : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
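    // The generated shader implements the BASIC LSTM cell: input_data_0 stacks
    // the four gate slices along the channel axis (each $workload_z$ channels
    // wide) and input_data_1 holds the previous cell state, so per element
    //   new_state  = sigmoid(gate_0) * tanh(gate_1) + sigmoid(gate_2) * prev_state
    //   activation = sigmoid(gate_3) * tanh(new_state)
    // value_0 receives the new state and value_1 the activation.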
std::string code = R"(
vec4 prev_state = $input_data_1[gid.x, gid.y, gid.z]$;
int c0 = 0 * $workload_z$;
int c1 = 1 * $workload_z$;
int c2 = 2 * $workload_z$;
int c3 = 3 * $workload_z$;
vec4 gate_0 = $input_data_0[gid.x, gid.y, gid.z + c0]$;
vec4 gate_1 = $input_data_0[gid.x, gid.y, gid.z + c1]$;
vec4 gate_2 = $input_data_0[gid.x, gid.y, gid.z + c2]$;
vec4 gate_3 = $input_data_0[gid.x, gid.y, gid.z + c3]$;
vec4 input_gate = 1.0f / (1.0f + exp(-1.0 * gate_0));
vec4 new_input = tanh(gate_1);
vec4 forget_gate = 1.0f / (1.0f + exp(-1.0 * gate_2));
vec4 output_gate = 1.0f / (1.0f + exp(-1.0 * gate_3));
vec4 new_state = input_gate * new_input + forget_gate * prev_state;
vec4 activation = output_gate * tanh(new_state);
value_0 = new_state;
value_1 = activation;
)";
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewLstmNodeShader() {
return std::make_unique<LstmNodeShader>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/lstm.h"
#include <cmath>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(LstmTest, BaseTest) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 1, 16);
TensorRef<BHWC> prev_state;
prev_state.type = DataType::FLOAT32;
prev_state.ref = 1;
prev_state.shape = BHWC(1, 1, 1, 4);
TensorRef<BHWC> output_state;
output_state.type = DataType::FLOAT32;
output_state.ref = 2;
output_state.shape = BHWC(1, 1, 1, 4);
TensorRef<BHWC> output_activation;
output_activation.type = DataType::FLOAT32;
output_activation.ref = 3;
output_activation.shape = BHWC(1, 1, 1, 4);
LstmAttributes attr;
attr.kernel_type = LstmKernelType::BASIC;
SingleOpModel model({ToString(OperationType::LSTM), attr},
{input, prev_state}, {output_state, output_activation});
std::vector input_data = {
-std::log(2.0f), -std::log(2.0f), -std::log(2.0f), -std::log(2.0f),
std::log(3.0f), std::log(3.0f), std::log(3.0f), std::log(3.0f),
-std::log(4.0f), -std::log(4.0f), -std::log(4.0f), -std::log(4.0f),
-std::log(5.0f), -std::log(5.0f), -std::log(5.0f), -std::log(5.0f)};
ASSERT_TRUE(model.PopulateTensor(0, std::move(input_data)));
ASSERT_TRUE(model.PopulateTensor(1, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewLstmNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{7.0 / 15.0, 10.0 / 15.0, 13.0 / 15.0, 16.0 / 15.0}));
EXPECT_THAT(
model.GetOutput(1),
Pointwise(FloatNear(1e-6), {(1.f / 6.f) * std::tanh(7.f / 15.f),
(1.f / 6.f) * std::tanh(10.f / 15.f),
(1.f / 6.f) * std::tanh(13.f / 15.f),
(1.f / 6.f) * std::tanh(16.f / 15.f)}));
}
}
}
}
} |
854 | cpp | tensorflow/tensorflow | evaluation_delegate_provider | tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc | tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_EVALUATION_DELEGATE_PROVIDER_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_EVALUATION_DELEGATE_PROVIDER_H_
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
using ProvidedDelegateList = tflite::tools::ProvidedDelegateList;
class DelegateProviders {
public:
DelegateProviders();
std::vector<Flag> GetFlags();
bool InitFromCmdlineArgs(int* argc, const char** argv);
const tools::ToolParams& GetAllParams() const { return params_; }
tools::ToolParams GetAllParams(const TfliteInferenceParams& params) const;
TfLiteDelegatePtr CreateDelegate(const std::string& name) const;
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates()
const {
return delegate_list_util_.CreateAllRankedDelegates();
}
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates(
const TfliteInferenceParams& params) const {
auto converted = GetAllParams(params);
ProvidedDelegateList util(&converted);
return util.CreateAllRankedDelegates();
}
private:
tools::ToolParams params_;
ProvidedDelegateList delegate_list_util_;
const std::unordered_map<std::string, int> delegates_map_;
};
TfliteInferenceParams::Delegate ParseStringToDelegateType(
const std::string& val);
TfLiteDelegatePtr CreateTfLiteDelegate(const TfliteInferenceParams& params,
std::string* error_msg = nullptr);
}
}
#endif
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include <string>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kNnapiDelegate[] = "nnapi";
constexpr char kGpuDelegate[] = "gpu";
constexpr char kHexagonDelegate[] = "hexagon";
constexpr char kXnnpackDelegate[] = "xnnpack";
constexpr char kCoremlDelegate[] = "coreml";
}
TfliteInferenceParams::Delegate ParseStringToDelegateType(
const std::string& val) {
if (val == kNnapiDelegate) return TfliteInferenceParams::NNAPI;
if (val == kGpuDelegate) return TfliteInferenceParams::GPU;
if (val == kHexagonDelegate) return TfliteInferenceParams::HEXAGON;
if (val == kXnnpackDelegate) return TfliteInferenceParams::XNNPACK;
if (val == kCoremlDelegate) return TfliteInferenceParams::COREML;
return TfliteInferenceParams::NONE;
}
TfLiteDelegatePtr CreateTfLiteDelegate(const TfliteInferenceParams& params,
std::string* error_msg) {
const auto type = params.delegate();
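  // Creates the delegate requested in `params`. On failure a null
  // TfLiteDelegatePtr is returned and `error_msg` (if provided) is populated;
  // NONE also yields a null delegate without an error message.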
switch (type) {
case TfliteInferenceParams::NNAPI: {
auto p = CreateNNAPIDelegate();
if (!p && error_msg) *error_msg = "NNAPI not supported";
return p;
}
case TfliteInferenceParams::GPU: {
auto p = CreateGPUDelegate();
if (!p && error_msg) *error_msg = "GPU delegate not supported.";
return p;
}
case TfliteInferenceParams::HEXAGON: {
auto p = CreateHexagonDelegate("",
false);
if (!p && error_msg) {
*error_msg =
"Hexagon delegate is not supported on the platform or required "
"libraries are missing.";
}
return p;
}
case TfliteInferenceParams::XNNPACK: {
auto p = CreateXNNPACKDelegate(params.num_threads(), false);
if (!p && error_msg) *error_msg = "XNNPACK delegate not supported.";
return p;
}
case TfliteInferenceParams::COREML: {
auto p = CreateCoreMlDelegate();
if (!p && error_msg) *error_msg = "CoreML delegate not supported.";
return p;
}
case TfliteInferenceParams::NONE:
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
default:
if (error_msg) {
*error_msg = "Creation of delegate type: " +
TfliteInferenceParams::Delegate_Name(type) +
" not supported yet.";
}
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
}
DelegateProviders::DelegateProviders()
: delegate_list_util_(¶ms_),
delegates_map_([=]() -> std::unordered_map<std::string, int> {
std::unordered_map<std::string, int> delegates_map;
const auto& providers = delegate_list_util_.providers();
for (int i = 0; i < providers.size(); ++i) {
delegates_map[providers[i]->GetName()] = i;
}
return delegates_map;
}()) {
delegate_list_util_.AddAllDelegateParams();
}
std::vector<Flag> DelegateProviders::GetFlags() {
std::vector<Flag> flags;
delegate_list_util_.AppendCmdlineFlags(flags);
return flags;
}
bool DelegateProviders::InitFromCmdlineArgs(int* argc, const char** argv) {
std::vector<Flag> flags = GetFlags();
bool parse_result = Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
TfLiteDelegatePtr DelegateProviders::CreateDelegate(
const std::string& name) const {
const auto it = delegates_map_.find(name);
if (it == delegates_map_.end()) {
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
const auto& providers = delegate_list_util_.providers();
return providers[it->second]->CreateTfLiteDelegate(params_);
}
tools::ToolParams DelegateProviders::GetAllParams(
const TfliteInferenceParams& params) const {
tools::ToolParams tool_params;
tool_params.Merge(params_, false);
if (params.has_num_threads()) {
tool_params.Set<int32_t>("num_threads", params.num_threads());
}
const auto type = params.delegate();
switch (type) {
case TfliteInferenceParams::NNAPI:
if (tool_params.HasParam("use_nnapi")) {
tool_params.Set<bool>("use_nnapi", true);
}
break;
case TfliteInferenceParams::GPU:
if (tool_params.HasParam("use_gpu")) {
tool_params.Set<bool>("use_gpu", true);
}
break;
case TfliteInferenceParams::HEXAGON:
if (tool_params.HasParam("use_hexagon")) {
tool_params.Set<bool>("use_hexagon", true);
}
break;
case TfliteInferenceParams::XNNPACK:
if (tool_params.HasParam("use_xnnpack")) {
tool_params.Set<bool>("use_xnnpack", true);
}
if (tool_params.HasParam("xnnpack_force_fp16")) {
tool_params.Set<bool>("xnnpack_force_fp16", true);
}
break;
case TfliteInferenceParams::COREML:
if (tool_params.HasParam("use_coreml")) {
tool_params.Set<bool>("use_coreml", true);
}
break;
default:
break;
}
return tool_params;
}
}
} | #include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
namespace {
TEST(EvaluationDelegateProviderTest, ParseStringToDelegateType) {
EXPECT_EQ(TfliteInferenceParams::NNAPI, ParseStringToDelegateType("nnapi"));
EXPECT_EQ(TfliteInferenceParams::GPU, ParseStringToDelegateType("gpu"));
EXPECT_EQ(TfliteInferenceParams::HEXAGON,
ParseStringToDelegateType("hexagon"));
EXPECT_EQ(TfliteInferenceParams::XNNPACK,
ParseStringToDelegateType("xnnpack"));
EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Gpu"));
EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Testing"));
}
TEST(EvaluationDelegateProviderTest, CreateTfLiteDelegate) {
TfliteInferenceParams params;
params.set_delegate(TfliteInferenceParams::NONE);
EXPECT_TRUE(!CreateTfLiteDelegate(params));
}
TEST(EvaluationDelegateProviderTest, DelegateProvidersParams) {
DelegateProviders providers;
const auto& params = providers.GetAllParams();
EXPECT_TRUE(params.HasParam("use_nnapi"));
EXPECT_TRUE(params.HasParam("use_gpu"));
int argc = 3;
const char* argv[] = {"program_name", "--use_gpu=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_gpu"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(EvaluationDelegateProviderTest, GetAllParamsWithTfliteInferenceParams) {
DelegateProviders providers;
int argc = 2;
const char* argv[] = {"program_name", "--num_threads=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
const auto& default_params = providers.GetAllParams();
EXPECT_EQ(1, default_params.Get<int>("num_threads"));
TfliteInferenceParams params;
params.set_delegate(TfliteInferenceParams::NONE);
params.set_num_threads(4);
tools::ToolParams tool_params = providers.GetAllParams(params);
EXPECT_EQ(4, tool_params.Get<int>("num_threads"));
EXPECT_EQ(1, argc);
}
}
}
} |
855 | cpp | tensorflow/tensorflow | topk_accuracy_eval_stage | tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.cc | tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_TOPK_ACCURACY_EVAL_STAGE_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_TOPK_ACCURACY_EVAL_STAGE_H_
#include <string>
#include <vector>
#include "tensorflow/lite/tools/evaluation/evaluation_stage.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
namespace tflite {
namespace evaluation {
class TopkAccuracyEvalStage : public EvaluationStage {
public:
explicit TopkAccuracyEvalStage(const EvaluationStageConfig& config)
: EvaluationStage(config) {}
TfLiteStatus Init() override;
TfLiteStatus Run() override;
EvaluationStageMetrics LatestMetrics() override;
~TopkAccuracyEvalStage() override {}
void SetTaskInfo(const std::vector<std::string>& all_labels,
TfLiteType model_output_type,
TfLiteIntArray* model_output_shape) {
ground_truth_labels_ = all_labels;
model_output_type_ = model_output_type;
model_output_shape_ = model_output_shape;
}
void SetEvalInputs(void* model_raw_output, std::string* ground_truth_label) {
model_output_ = model_raw_output;
ground_truth_label_ = ground_truth_label;
}
private:
void UpdateCounts(const std::vector<int>& topk_indices);
std::vector<std::string> ground_truth_labels_;
TfLiteType model_output_type_ = kTfLiteNoType;
TfLiteIntArray* model_output_shape_ = nullptr;
int num_total_labels_;
void* model_output_ = nullptr;
std::string* ground_truth_label_ = nullptr;
int num_runs_;
std::vector<int> accuracy_counts_;
};
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <algorithm>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
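// Returns the indices of the `k` largest values, ordered from highest to
// lowest (ties keep their original relative order).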
std::vector<int> GetTopKIndices(const std::vector<float>& values, int k) {
std::vector<int> indices(values.size());
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&values](int a, int b) { return values[a] > values[b]; });
indices.resize(k);
return indices;
}
}
TfLiteStatus TopkAccuracyEvalStage::Init() {
num_runs_ = 0;
auto& params = config_.specification().topk_accuracy_eval_params();
if (!params.has_k()) {
LOG(ERROR) << "Value of k not provided for TopkAccuracyEvalStage";
return kTfLiteError;
}
accuracy_counts_ = std::vector<int>(params.k(), 0);
if (ground_truth_labels_.empty()) {
LOG(ERROR) << "Ground-truth labels are empty";
return kTfLiteError;
}
num_total_labels_ = ground_truth_labels_.size();
if (params.k() > num_total_labels_) {
LOG(ERROR) << "k is too large";
return kTfLiteError;
}
if (!model_output_shape_) {
LOG(ERROR) << "Model output details not correctly set";
return kTfLiteError;
}
  if (model_output_shape_->size != 2 || model_output_shape_->data[0] != 1 ||
      model_output_shape_->data[1] != num_total_labels_) {
LOG(ERROR) << "Invalid model_output_shape_";
return kTfLiteError;
}
if (model_output_type_ != kTfLiteFloat32 &&
model_output_type_ != kTfLiteUInt8 && model_output_type_ != kTfLiteInt8) {
LOG(ERROR) << "model_output_type_ not supported";
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus TopkAccuracyEvalStage::Run() {
if (!model_output_) {
LOG(ERROR) << "model_output_ not set correctly";
return kTfLiteError;
}
if (!ground_truth_label_) {
LOG(ERROR) << "ground_truth_label_ not provided";
return kTfLiteError;
}
auto& params = config_.specification().topk_accuracy_eval_params();
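  // Copy the raw model output into a float probability vector. Quantized
  // outputs are compared without dequantization, which does not change the
  // top-k ordering.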
std::vector<float> probabilities;
probabilities.reserve(num_total_labels_);
if (model_output_type_ == kTfLiteFloat32) {
auto probs = static_cast<float*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
} else if (model_output_type_ == kTfLiteUInt8) {
auto probs = static_cast<uint8_t*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
} else if (model_output_type_ == kTfLiteInt8) {
auto probs = static_cast<int8_t*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
}
std::vector<int> top_k = GetTopKIndices(probabilities, params.k());
UpdateCounts(top_k);
return kTfLiteOk;
}
EvaluationStageMetrics TopkAccuracyEvalStage::LatestMetrics() {
EvaluationStageMetrics metrics;
if (num_runs_ == 0) return metrics;
metrics.set_num_runs(num_runs_);
auto* topk_metrics =
metrics.mutable_process_metrics()->mutable_topk_accuracy_metrics();
for (const auto& count : accuracy_counts_) {
topk_metrics->add_topk_accuracies(static_cast<float>(count) / num_runs_);
}
return metrics;
}
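// Cumulative top-k bookkeeping: if the ground truth matches the prediction at
// rank i, every bucket from i through k-1 counts this run as correct.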
void TopkAccuracyEvalStage::UpdateCounts(const std::vector<int>& topk_indices) {
for (size_t i = 0; i < topk_indices.size(); ++i) {
if (*ground_truth_label_ == ground_truth_labels_[topk_indices[i]]) {
for (size_t j = i; j < topk_indices.size(); j++) {
accuracy_counts_[j] += 1;
}
break;
}
}
num_runs_++;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTopkAccuracyEvalStageName[] = "topk_accuracy_eval_stage";
constexpr int kNumCategories = 1001;
EvaluationStageConfig GetTopkAccuracyEvalStageConfig() {
EvaluationStageConfig config;
config.set_name(kTopkAccuracyEvalStageName);
auto* params =
config.mutable_specification()->mutable_topk_accuracy_eval_params();
params->set_k(5);
return config;
}
template <typename T>
T* ResetOutputArray(T array[]) {
for (int i = 0; i < kNumCategories; i++) {
array[i] = 0;
}
return array;
}
std::vector<std::string> CreateGroundTruthLabels() {
std::vector<std::string> ground_truth_labels;
ground_truth_labels.reserve(kNumCategories);
for (int i = 0; i < kNumCategories; i++) {
ground_truth_labels.push_back(std::to_string(i));
}
return ground_truth_labels;
}
TEST(TopkAccuracyEvalStage, NoInitializers) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, NoK) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()
->mutable_topk_accuracy_eval_params()
->clear_k();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, NoGroundTruthLabels) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = {};
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, KTooLarge) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()->mutable_topk_accuracy_eval_params()->set_k(
10000);
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, WeirdModelOutputShape) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, UnsupportedModelOutputType) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteComplex64;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, NoInputs) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, InvalidGroundTruth) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[0] = 0.8;
stage.SetEvalInputs(tensor, nullptr);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, FloatTest_CorrectLabelsAtLastIndices) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[4] = 0.9;
tensor[3] = 0.8;
tensor[2] = 0.7;
tensor[1] = 0.6;
tensor[0] = 0.5;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
for (int i = 0; i < 4; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
EXPECT_FLOAT_EQ(0.5, accuracy_metrics.topk_accuracies(3));
for (int i = 0; i < 3; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
}
class CorrectTopkAccuracyEvalTest : public ::testing::Test {
protected:
template <typename T>
void VerifyCorrectBehaviorForType(T ground_truth_0_value,
T ground_truth_1_value,
TfLiteType model_output_type) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
stage.SetTaskInfo(ground_truth_labels, model_output_type,
model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(0, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_EQ(0, accuracy_metrics.topk_accuracies_size());
T array[kNumCategories];
T* tensor = ResetOutputArray(array);
tensor[0] = ground_truth_0_value;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
tensor[1] = ground_truth_1_value;
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
}
};
TEST_F(CorrectTopkAccuracyEvalTest, FloatTest) {
VerifyCorrectBehaviorForType(static_cast<float>(0.8), static_cast<float>(0.9),
kTfLiteFloat32);
}
TEST_F(CorrectTopkAccuracyEvalTest, Int8Test) {
VerifyCorrectBehaviorForType(static_cast<int8_t>(1), static_cast<int8_t>(2),
kTfLiteInt8);
}
TEST_F(CorrectTopkAccuracyEvalTest, UInt8Test) {
VerifyCorrectBehaviorForType(static_cast<uint8_t>(1), static_cast<uint8_t>(2),
kTfLiteUInt8);
}
}
}
} |
856 | cpp | tensorflow/tensorflow | inference_profiler_stage | tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.cc | tensorflow/lite/tools/evaluation/stages/inference_profiler_stage_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_INFERENCE_PROFILER_STAGE_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_INFERENCE_PROFILER_STAGE_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "xla/tsl/util/stats_calculator.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/evaluation_stage.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
namespace tflite {
namespace evaluation {
class InferenceProfilerStage : public EvaluationStage {
public:
explicit InferenceProfilerStage(const EvaluationStageConfig& config)
: EvaluationStage(config) {}
TfLiteStatus Init() override { return Init(nullptr); }
TfLiteStatus Init(const DelegateProviders* delegate_providers);
TfLiteStatus Run() override;
EvaluationStageMetrics LatestMetrics() override;
private:
std::unique_ptr<TfliteInferenceStage> reference_stage_;
std::unique_ptr<TfliteInferenceStage> test_stage_;
const TfLiteModelInfo* model_info_;
std::vector<int64_t> input_num_elements_;
std::vector<int64_t> output_num_elements_;
std::vector<tsl::Stat<float>> error_stats_;
std::vector<std::vector<float>> float_tensors_;
std::vector<std::vector<int8_t>> int8_tensors_;
std::vector<std::vector<uint8_t>> uint8_tensors_;
std::vector<std::vector<uint16_t>> float16_tensors_;
std::vector<std::vector<int64_t>> int64_tensors_;
};
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.h"
#include <cmath>
#include <limits>
#include <memory>
#include <random>
#include "fp16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr float kGaussianFloatMean = 0.5;
constexpr float kGaussianStdDev = 1.0 / 3;
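// Fills `data` with `num_elements` samples drawn from a Gaussian truncated to
// [0, 1) and rescaled to the [min, max) interval.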
template <typename T>
void GenerateRandomGaussianData(int64_t num_elements, float min, float max,
std::vector<T>* data) {
data->clear();
data->reserve(num_elements);
static std::normal_distribution<double> distribution(kGaussianFloatMean,
kGaussianStdDev);
static std::default_random_engine generator;
for (int i = 0; i < num_elements; ++i) {
auto rand_n = distribution(generator);
while (rand_n < 0 || rand_n >= 1) {
rand_n = distribution(generator);
}
auto rand_float = min + (max - min) * static_cast<float>(rand_n);
data->push_back(static_cast<T>(rand_float));
}
}
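// Mean absolute elementwise difference between reference and test outputs,
// computed after casting both to float.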
template <typename T>
float CalculateAverageError(T* reference, T* test, int64_t num_elements) {
float error = 0;
for (int i = 0; i < num_elements; i++) {
float test_value = static_cast<float>(test[i]);
float reference_value = static_cast<float>(reference[i]);
error += std::abs(test_value - reference_value);
}
error /= num_elements;
return error;
}
}
TfLiteStatus InferenceProfilerStage::Init(
const DelegateProviders* delegate_providers) {
test_stage_ = std::make_unique<TfliteInferenceStage>(config_);
if (test_stage_->Init(delegate_providers) != kTfLiteOk) return kTfLiteError;
LOG(INFO) << "Test interpreter has been initialized.";
EvaluationStageConfig reference_config;
reference_config.set_name("reference_inference");
auto* params = reference_config.mutable_specification()
->mutable_tflite_inference_params();
params->set_model_file_path(
config_.specification().tflite_inference_params().model_file_path());
params->set_invocations_per_run(
config_.specification().tflite_inference_params().invocations_per_run());
reference_stage_ = std::make_unique<TfliteInferenceStage>(reference_config);
if (reference_stage_->Init() != kTfLiteOk) return kTfLiteError;
LOG(INFO) << "Reference interpreter (1 thread on CPU) has been initialized.";
model_info_ = reference_stage_->GetModelInfo();
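  // For each model input: validate that its type is supported and pre-compute
  // the element count plus scratch buffers for the random test data.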
for (int i = 0; i < model_info_->inputs.size(); ++i) {
const TfLiteType model_input_type = model_info_->inputs[i]->type;
if (model_input_type == kTfLiteUInt8 || model_input_type == kTfLiteInt8 ||
model_input_type == kTfLiteInt64 ||
model_input_type == kTfLiteFloat32 ||
model_input_type == kTfLiteFloat16) {
} else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float16/float32/int8/uint8/int64 "
"input types";
return kTfLiteError;
}
auto* input_shape = model_info_->inputs[i]->dims;
int64_t total_num_elements = 1;
    for (int j = 0; j < input_shape->size; j++) {
      total_num_elements *= input_shape->data[j];
    }
input_num_elements_.push_back(total_num_elements);
float_tensors_.emplace_back();
uint8_tensors_.emplace_back();
int8_tensors_.emplace_back();
float16_tensors_.emplace_back();
int64_tensors_.emplace_back();
}
for (int i = 0; i < model_info_->outputs.size(); ++i) {
const TfLiteType model_output_type = model_info_->outputs[i]->type;
if (model_output_type == kTfLiteUInt8 || model_output_type == kTfLiteInt8 ||
model_output_type == kTfLiteFloat32) {
} else {
LOG(ERROR) << "InferenceProfilerStage only supports float32/int8/uint8 "
"output types";
return kTfLiteError;
}
auto* output_shape = model_info_->outputs[i]->dims;
int64_t total_num_elements = 1;
    for (int j = 0; j < output_shape->size; j++) {
      total_num_elements *= output_shape->data[j];
    }
output_num_elements_.push_back(total_num_elements);
error_stats_.emplace_back();
}
return kTfLiteOk;
}
TfLiteStatus InferenceProfilerStage::Run() {
std::vector<void*> input_ptrs;
for (int i = 0; i < model_info_->inputs.size(); ++i) {
const TfLiteType model_input_type = model_info_->inputs[i]->type;
if (model_input_type == kTfLiteUInt8) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(), &uint8_tensors_[i]);
input_ptrs.push_back(uint8_tensors_[i].data());
} else if (model_input_type == kTfLiteInt8) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max(), &int8_tensors_[i]);
input_ptrs.push_back(int8_tensors_[i].data());
} else if (model_input_type == kTfLiteInt64) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max(), &int64_tensors_[i]);
input_ptrs.push_back(int64_tensors_[i].data());
} else if (model_input_type == kTfLiteFloat32) {
GenerateRandomGaussianData(input_num_elements_[i], -1, 1,
&(float_tensors_[i]));
input_ptrs.push_back(float_tensors_[i].data());
    } else if (model_input_type == kTfLiteFloat16) {
      GenerateRandomGaussianData(input_num_elements_[i], -1, 1,
                                 &(float_tensors_[i]));
      // The per-input float16 buffers start out empty after Init(), so size
      // them before writing the converted values.
      float16_tensors_[i].resize(float_tensors_[i].size());
      for (size_t j = 0; j < float_tensors_[i].size(); j++) {
        float16_tensors_[i][j] =
            fp16_ieee_from_fp32_value(float_tensors_[i][j]);
      }
      input_ptrs.push_back(float16_tensors_[i].data());
} else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float16/float32/int8/uint8/int64 "
"input types";
return kTfLiteError;
}
}
test_stage_->SetInputs(input_ptrs);
reference_stage_->SetInputs(input_ptrs);
if (test_stage_->Run() != kTfLiteOk) return kTfLiteError;
if (reference_stage_->Run() != kTfLiteOk) return kTfLiteError;
for (int i = 0; i < model_info_->outputs.size(); ++i) {
const TfLiteType model_output_type = model_info_->outputs[i]->type;
void* reference_ptr = reference_stage_->GetOutputs()->at(i);
void* test_ptr = test_stage_->GetOutputs()->at(i);
float output_diff = 0;
if (model_output_type == kTfLiteUInt8) {
output_diff = CalculateAverageError(static_cast<uint8_t*>(reference_ptr),
static_cast<uint8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteInt8) {
output_diff = CalculateAverageError(static_cast<int8_t*>(reference_ptr),
static_cast<int8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteFloat32) {
output_diff = CalculateAverageError(static_cast<float*>(reference_ptr),
static_cast<float*>(test_ptr),
output_num_elements_[i]);
}
error_stats_[i].UpdateStat(output_diff);
}
return kTfLiteOk;
}
EvaluationStageMetrics InferenceProfilerStage::LatestMetrics() {
EvaluationStageMetrics metrics;
const auto& reference_metrics = reference_stage_->LatestMetrics();
metrics.set_num_runs(reference_metrics.num_runs());
auto* inference_profiler_metrics =
metrics.mutable_process_metrics()->mutable_inference_profiler_metrics();
*inference_profiler_metrics->mutable_reference_latency() =
reference_metrics.process_metrics().total_latency();
*inference_profiler_metrics->mutable_test_latency() =
test_stage_->LatestMetrics().process_metrics().total_latency();
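  // Per-output error statistics: average absolute difference between the test
  // configuration and the single-threaded CPU reference run.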
for (int i = 0; i < error_stats_.size(); ++i) {
AccuracyMetrics* diff = inference_profiler_metrics->add_output_errors();
diff->set_avg_value(error_stats_[i].avg());
diff->set_std_deviation(error_stats_[i].std_deviation());
diff->set_min_value(error_stats_[i].min());
if (error_stats_[i].avg() != 0) {
diff->set_max_value(error_stats_[i].max());
} else {
diff->set_max_value(0);
}
}
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kInferenceProfilerStageName[] = "inference_profiler_stage";
constexpr char kModelPath[] =
"tensorflow/lite/testdata/add_quantized.bin";
EvaluationStageConfig GetInferenceProfilerStageConfig(int num_threads = 1) {
EvaluationStageConfig config;
config.set_name(kInferenceProfilerStageName);
auto* params =
config.mutable_specification()->mutable_tflite_inference_params();
params->set_model_file_path(kModelPath);
params->set_invocations_per_run(2);
params->set_num_threads(num_threads);
return config;
}
TEST(InferenceProfilerStage, NoParams) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
config.mutable_specification()->clear_tflite_inference_params();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(InferenceProfilerStage, NoModelPath) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->clear_model_file_path();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(InferenceProfilerStage, NoOutputDiffForDefaultConfig) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(stage.Run(), kTfLiteOk);
}
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_TRUE(metrics.process_metrics().has_inference_profiler_metrics());
auto profiler_metrics =
metrics.process_metrics().inference_profiler_metrics();
EXPECT_TRUE(profiler_metrics.has_reference_latency());
EXPECT_TRUE(profiler_metrics.has_test_latency());
EXPECT_EQ(profiler_metrics.output_errors_size(), 1);
EXPECT_EQ(profiler_metrics.output_errors(0).avg_value(), 0);
}
}
}
} |
857 | cpp | tensorflow/tensorflow | tflite_inference_stage | tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.cc | tensorflow/lite/tools/evaluation/stages/tflite_inference_stage_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_TFLITE_INFERENCE_STAGE_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_TFLITE_INFERENCE_STAGE_H_
#include <stdint.h>
#include <vector>
#include "xla/tsl/util/stats_calculator.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/evaluation_stage.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
namespace tflite {
namespace evaluation {
struct TfLiteModelInfo {
std::vector<const TfLiteTensor*> inputs;
std::vector<const TfLiteTensor*> outputs;
};
class TfliteInferenceStage : public EvaluationStage {
public:
explicit TfliteInferenceStage(const EvaluationStageConfig& config)
: EvaluationStage(config) {}
TfLiteStatus Init() override { return Init(nullptr); }
TfLiteStatus Init(const DelegateProviders* delegate_providers);
TfLiteStatus Run() override;
EvaluationStageMetrics LatestMetrics() override;
~TfliteInferenceStage() override {}
void SetInputs(const std::vector<void*>& raw_input_ptrs) {
inputs_ = &raw_input_ptrs;
}
TfLiteStatus ResizeInputs(const std::vector<std::vector<int>>& shapes);
TfLiteStatus ApplyCustomDelegate(Interpreter::TfLiteDelegatePtr delegate);
const TfLiteModelInfo* GetModelInfo() const { return &model_info_; }
const std::vector<void*>* GetOutputs() const { return &outputs_; }
private:
void UpdateModelInfo();
std::unique_ptr<FlatBufferModel> model_;
std::unique_ptr<ops::builtin::BuiltinOpResolver> resolver_;
std::unique_ptr<Interpreter> interpreter_;
std::vector<Interpreter::TfLiteDelegatePtr> delegates_;
TfLiteModelInfo model_info_;
const std::vector<void*>* inputs_ = nullptr;
std::vector<void*> outputs_;
tsl::Stat<int64_t> latency_stats_;
};
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
#include <cstring>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {}
namespace tflite {
namespace evaluation {
namespace {
TfLiteModelInfo GetTfliteModelInfo(const Interpreter& interpreter) {
TfLiteModelInfo model_info;
for (int i : interpreter.inputs()) {
model_info.inputs.push_back(interpreter.tensor(i));
}
for (int i : interpreter.outputs()) {
model_info.outputs.push_back(interpreter.tensor(i));
}
return model_info;
}
}
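// Refreshes cached input/output tensor info and raw output pointers; called
// after any change to the graph (delegate applied or inputs resized).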
void TfliteInferenceStage::UpdateModelInfo() {
model_info_ = GetTfliteModelInfo(*interpreter_);
outputs_.clear();
outputs_.reserve(interpreter_->outputs().size());
for (int i : interpreter_->outputs()) {
TfLiteTensor* tensor = interpreter_->tensor(i);
outputs_.push_back(tensor->data.raw);
}
}
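// Resizes every non-string input tensor to the given shape, re-allocates
// tensors, and refreshes the cached model info.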
TfLiteStatus TfliteInferenceStage::ResizeInputs(
const std::vector<std::vector<int>>& shapes) {
const std::vector<int>& interpreter_inputs = interpreter_->inputs();
if (interpreter_inputs.size() != shapes.size()) {
LOG(ERROR) << "New shape is not compatible";
return kTfLiteError;
}
for (int j = 0; j < shapes.size(); ++j) {
int i = interpreter_inputs[j];
TfLiteTensor* t = interpreter_->tensor(i);
if (t->type != kTfLiteString) {
TF_LITE_ENSURE_STATUS(interpreter_->ResizeInputTensor(i, shapes[j]));
}
}
TF_LITE_ENSURE_STATUS(interpreter_->AllocateTensors());
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::ApplyCustomDelegate(
Interpreter::TfLiteDelegatePtr delegate) {
if (!interpreter_) {
LOG(ERROR) << "Stage not initialized before calling ApplyCustomDelegate";
return kTfLiteError;
}
if (!delegate) {
LOG(WARNING)
<< "Tried to apply null TfLiteDelegatePtr to TfliteInferenceStage";
return kTfLiteOk;
}
delegates_.push_back(std::move(delegate));
TF_LITE_ENSURE_STATUS(
interpreter_->ModifyGraphWithDelegate(delegates_.back().get()));
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::Init(
const DelegateProviders* delegate_providers) {
if (!config_.specification().has_tflite_inference_params()) {
LOG(ERROR) << "TfliteInferenceParams not provided";
return kTfLiteError;
}
auto& params = config_.specification().tflite_inference_params();
if (!params.has_model_file_path()) {
LOG(ERROR) << "Model path not provided";
return kTfLiteError;
}
std::ifstream model_check(params.model_file_path());
if (!model_check.good()) {
LOG(ERROR) << "Model file not found";
return kTfLiteError;
}
model_ = FlatBufferModel::BuildFromFile(params.model_file_path().c_str());
bool apply_default_delegates = true;
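  // Respect an explicit --use_xnnpack=false from the delegate providers: skip
  // the default XNNPACK delegate that BuiltinOpResolver would apply otherwise.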
if (delegate_providers != nullptr) {
const auto& provider_params = delegate_providers->GetAllParams();
if (provider_params.HasParam("use_xnnpack") &&
provider_params.HasValueSet<bool>("use_xnnpack") &&
!provider_params.Get<bool>("use_xnnpack")) {
apply_default_delegates = false;
}
}
if (apply_default_delegates) {
resolver_ = std::make_unique<ops::builtin::BuiltinOpResolver>();
} else {
resolver_ = std::make_unique<
ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
}
RegisterSelectedOps(resolver_.get());
InterpreterBuilder(*model_, *resolver_)(&interpreter_);
if (!interpreter_) {
LOG(ERROR) << "Could not build interpreter";
return kTfLiteError;
}
interpreter_->SetNumThreads(params.num_threads());
if (!delegate_providers) {
std::string error_message;
auto delegate = CreateTfLiteDelegate(params, &error_message);
if (delegate) {
delegates_.push_back(std::move(delegate));
LOG(INFO) << "Successfully created "
<< params.Delegate_Name(params.delegate()) << " delegate.";
} else {
LOG(WARNING) << error_message;
}
} else {
auto delegates = delegate_providers->CreateAllDelegates(params);
for (auto& one : delegates) delegates_.push_back(std::move(one.delegate));
}
for (int i = 0; i < delegates_.size(); ++i) {
if (interpreter_->ModifyGraphWithDelegate(delegates_[i].get()) !=
kTfLiteOk) {
LOG(FATAL) << "Failed to apply delegate " << i;
}
}
interpreter_->AllocateTensors();
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::Run() {
if (!inputs_) {
LOG(ERROR) << "Input data not set";
return kTfLiteError;
}
for (int i = 0; i < interpreter_->inputs().size(); ++i) {
TfLiteTensor* tensor = interpreter_->tensor(interpreter_->inputs()[i]);
tensor->data.raw = static_cast<char*>(inputs_->at(i));
}
auto& params = config_.specification().tflite_inference_params();
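  // Run the interpreter `invocations_per_run` times, recording the latency of
  // each invocation in microseconds.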
for (int i = 0; i < params.invocations_per_run(); ++i) {
int64_t start_us = profiling::time::NowMicros();
if (interpreter_->Invoke() != kTfLiteOk) {
LOG(ERROR) << "TFLite interpreter failed to invoke at run " << i;
return kTfLiteError;
}
latency_stats_.UpdateStat(profiling::time::NowMicros() - start_us);
}
return kTfLiteOk;
}
EvaluationStageMetrics TfliteInferenceStage::LatestMetrics() {
auto& params = config_.specification().tflite_inference_params();
EvaluationStageMetrics metrics;
auto* latency_metrics =
metrics.mutable_process_metrics()->mutable_total_latency();
latency_metrics->set_last_us(latency_stats_.newest());
latency_metrics->set_max_us(latency_stats_.max());
latency_metrics->set_min_us(latency_stats_.min());
latency_metrics->set_sum_us(latency_stats_.sum());
latency_metrics->set_avg_us(latency_stats_.avg());
latency_metrics->set_std_deviation_us(latency_stats_.std_deviation());
metrics.set_num_runs(
static_cast<int>(latency_stats_.count() / params.invocations_per_run()));
auto* inference_metrics =
metrics.mutable_process_metrics()->mutable_tflite_inference_metrics();
inference_metrics->set_num_inferences(latency_stats_.count());
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTfliteInferenceStageName[] = "tflite_inference_stage";
constexpr char kModelPath[] =
"tensorflow/lite/testdata/add_quantized.bin";
constexpr int kTotalElements = 1 * 8 * 8 * 3;
template <typename T>
T* SetValues(T array[], T value) {
for (int i = 0; i < kTotalElements; i++) {
array[i] = value;
}
return array;
}
EvaluationStageConfig GetTfliteInferenceStageConfig() {
EvaluationStageConfig config;
config.set_name(kTfliteInferenceStageName);
auto* params =
config.mutable_specification()->mutable_tflite_inference_params();
params->set_model_file_path(kModelPath);
params->set_invocations_per_run(2);
return config;
}
TEST(TfliteInferenceStage, NoParams) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()->clear_tflite_inference_params();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, NoModelPath) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->clear_model_file_path();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, IncorrectModelPath) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->set_model_file_path("xyz.tflite");
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, NoInputData) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TfliteInferenceStage, CorrectModelInfo) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const TfLiteModelInfo* model_info = stage.GetModelInfo();
EXPECT_EQ(model_info->inputs.size(), 1);
const TfLiteTensor* tensor = model_info->inputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, kTotalElements);
const TfLiteIntArray* input_shape = tensor->dims;
EXPECT_EQ(input_shape->data[0], 1);
EXPECT_EQ(input_shape->data[1], 8);
EXPECT_EQ(input_shape->data[2], 8);
EXPECT_EQ(input_shape->data[3], 3);
EXPECT_EQ(model_info->outputs.size(), 1);
tensor = model_info->outputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, kTotalElements);
const TfLiteIntArray* output_shape = tensor->dims;
EXPECT_EQ(output_shape->data[0], 1);
EXPECT_EQ(output_shape->data[1], 8);
EXPECT_EQ(output_shape->data[2], 8);
EXPECT_EQ(output_shape->data[3], 3);
}
TEST(TfliteInferenceStage, TestResizeModel) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.ResizeInputs({{3, 8, 8, 3}}), kTfLiteOk);
const TfLiteModelInfo* model_info = stage.GetModelInfo();
EXPECT_EQ(model_info->inputs.size(), 1);
const TfLiteTensor* tensor = model_info->inputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, 3 * kTotalElements);
const TfLiteIntArray* input_shape = tensor->dims;
EXPECT_EQ(input_shape->data[0], 3);
EXPECT_EQ(input_shape->data[1], 8);
EXPECT_EQ(input_shape->data[2], 8);
EXPECT_EQ(input_shape->data[3], 3);
EXPECT_EQ(model_info->outputs.size(), 1);
tensor = model_info->outputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, 3 * kTotalElements);
const TfLiteIntArray* output_shape = tensor->dims;
EXPECT_EQ(output_shape->data[0], 3);
EXPECT_EQ(output_shape->data[1], 8);
EXPECT_EQ(output_shape->data[2], 8);
EXPECT_EQ(output_shape->data[3], 3);
}
TEST(TfliteInferenceStage, CorrectOutput) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
uint8_t input_tensor[kTotalElements];
SetValues(input_tensor, static_cast<uint8_t>(2));
std::vector<void*> inputs;
inputs.push_back(input_tensor);
stage.SetInputs(inputs);
EXPECT_EQ(stage.Run(), kTfLiteOk);
uint8_t* output_tensor = static_cast<uint8_t*>(stage.GetOutputs()->at(0));
for (int i = 0; i < kTotalElements; i++) {
EXPECT_EQ(output_tensor[i], static_cast<uint8_t>(6));
}
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(metrics.num_runs(), 1);
const auto& latency = metrics.process_metrics().total_latency();
const auto max_latency = latency.max_us();
EXPECT_GT(max_latency, 0);
EXPECT_LT(max_latency, 1e7);
EXPECT_LE(latency.last_us(), max_latency);
EXPECT_LE(latency.min_us(), max_latency);
EXPECT_GE(latency.sum_us(), max_latency);
EXPECT_LE(latency.avg_us(), max_latency);
EXPECT_TRUE(latency.has_std_deviation_us());
EXPECT_EQ(
metrics.process_metrics().tflite_inference_metrics().num_inferences(), 2);
}
TEST(TfliteInferenceStage, CustomDelegate) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
Interpreter::TfLiteDelegatePtr test_delegate = CreateNNAPIDelegate();
EXPECT_NE(stage.ApplyCustomDelegate(std::move(test_delegate)), kTfLiteOk);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.ApplyCustomDelegate(std::move(test_delegate)), kTfLiteOk);
}
}
}
} |
858 | cpp | tensorflow/tensorflow | object_detection_average_precision_stage | tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.cc | tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_OBJECT_DETECTION_AVERAGE_PRECISION_STAGE_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_OBJECT_DETECTION_AVERAGE_PRECISION_STAGE_H_
#include <vector>
#include "tensorflow/lite/tools/evaluation/evaluation_stage.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
namespace tflite {
namespace evaluation {
class ObjectDetectionAveragePrecisionStage : public EvaluationStage {
public:
explicit ObjectDetectionAveragePrecisionStage(
const EvaluationStageConfig& config)
: EvaluationStage(config) {}
TfLiteStatus Init() override;
TfLiteStatus Run() override;
EvaluationStageMetrics LatestMetrics() override;
void SetEvalInputs(const ObjectDetectionResult& predicted_objects,
const ObjectDetectionResult& ground_truth_objects) {
predicted_objects_ = predicted_objects;
ground_truth_objects_ = ground_truth_objects;
}
private:
int num_classes_ = -1;
ObjectDetectionResult predicted_objects_;
ObjectDetectionResult ground_truth_objects_;
int current_image_index_ = 0;
std::vector<std::vector<image::Detection>> ground_truth_object_vectors_;
std::vector<std::vector<image::Detection>> predicted_object_vectors_;
};
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.h"
#include <stdint.h>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
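// Converts an ObjectInstance proto into the image::Detection struct consumed
// by image::AveragePrecision, tagging it with the given image id.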
image::Detection ConvertProtoToDetection(
const ObjectDetectionResult::ObjectInstance& input, int image_id) {
image::Detection detection;
detection.box.x.min = input.bounding_box().normalized_left();
detection.box.x.max = input.bounding_box().normalized_right();
detection.box.y.min = input.bounding_box().normalized_top();
detection.box.y.max = input.bounding_box().normalized_bottom();
detection.imgid = image_id;
detection.score = input.score();
return detection;
}
}
TfLiteStatus ObjectDetectionAveragePrecisionStage::Init() {
num_classes_ = config_.specification()
.object_detection_average_precision_params()
.num_classes();
if (num_classes_ <= 0) {
LOG(ERROR) << "num_classes cannot be <= 0";
return kTfLiteError;
}
for (int i = 0; i < num_classes_; ++i) {
ground_truth_object_vectors_.emplace_back();
predicted_object_vectors_.emplace_back();
}
return kTfLiteOk;
}
TfLiteStatus ObjectDetectionAveragePrecisionStage::Run() {
for (int i = 0; i < ground_truth_objects_.objects_size(); ++i) {
const int class_id = ground_truth_objects_.objects(i).class_id();
if (class_id >= num_classes_) {
LOG(ERROR) << "Encountered invalid class ID: " << class_id;
return kTfLiteError;
}
ground_truth_object_vectors_[class_id].push_back(ConvertProtoToDetection(
ground_truth_objects_.objects(i), current_image_index_));
}
for (int i = 0; i < predicted_objects_.objects_size(); ++i) {
const int class_id = predicted_objects_.objects(i).class_id();
if (class_id >= num_classes_) {
LOG(ERROR) << "Encountered invalid class ID: " << class_id;
return kTfLiteError;
}
predicted_object_vectors_[class_id].push_back(ConvertProtoToDetection(
predicted_objects_.objects(i), current_image_index_));
}
current_image_index_++;
return kTfLiteOk;
}
EvaluationStageMetrics ObjectDetectionAveragePrecisionStage::LatestMetrics() {
EvaluationStageMetrics metrics;
if (current_image_index_ == 0) return metrics;
metrics.set_num_runs(current_image_index_);
auto* ap_metrics = metrics.mutable_process_metrics()
->mutable_object_detection_average_precision_metrics();
auto& ap_params =
config_.specification().object_detection_average_precision_params();
std::vector<float> iou_thresholds;
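  // Default to the COCO-style IoU sweep 0.5, 0.55, ..., 0.95 when no
  // thresholds are configured.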
if (ap_params.iou_thresholds_size() == 0) {
float threshold = 0.5;
for (int i = 0; i < 10; ++i) {
iou_thresholds.push_back(threshold + i * 0.05);
}
} else {
for (auto& threshold : ap_params.iou_thresholds()) {
iou_thresholds.push_back(threshold);
}
}
image::AveragePrecision::Options opts;
opts.num_recall_points = ap_params.num_recall_points();
float ap_sum = 0;
int num_total_aps = 0;
for (float threshold : iou_thresholds) {
float threshold_ap_sum = 0;
int num_counted_classes = 0;
for (int i = 0; i < num_classes_; ++i) {
if (ground_truth_object_vectors_[i].empty() &&
predicted_object_vectors_[i].empty())
continue;
float ap_value = 0.0;
if (!ground_truth_object_vectors_[i].empty()) {
opts.iou_threshold = threshold;
ap_value = image::AveragePrecision(opts).FromBoxes(
ground_truth_object_vectors_[i], predicted_object_vectors_[i]);
}
ap_sum += ap_value;
num_total_aps += 1;
threshold_ap_sum += ap_value;
num_counted_classes += 1;
}
if (num_counted_classes == 0) continue;
auto* threshold_ap = ap_metrics->add_individual_average_precisions();
threshold_ap->set_average_precision(threshold_ap_sum / num_counted_classes);
threshold_ap->set_iou_threshold(threshold);
}
if (num_total_aps == 0) return metrics;
ap_metrics->set_overall_mean_average_precision(ap_sum / num_total_aps);
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kAveragePrecisionStageName[] =
"object_detection_average_precision";
EvaluationStageConfig GetAveragePrecisionStageConfig(int num_classes) {
EvaluationStageConfig config;
config.set_name(kAveragePrecisionStageName);
auto* params = config.mutable_specification()
->mutable_object_detection_average_precision_params();
params->add_iou_thresholds(0.5);
params->add_iou_thresholds(0.999);
params->set_num_classes(num_classes);
return config;
}
ObjectDetectionResult GetGroundTruthDetectionResult() {
ObjectDetectionResult ground_truth;
ground_truth.set_image_name("some_image.jpg");
auto* object_1 = ground_truth.add_objects();
object_1->set_class_id(1);
auto* object_1_bbox = object_1->mutable_bounding_box();
object_1_bbox->set_normalized_top(0.5);
object_1_bbox->set_normalized_bottom(1.0);
object_1_bbox->set_normalized_left(0.5);
object_1_bbox->set_normalized_right(1.0);
auto* object_2 = ground_truth.add_objects();
object_2->set_class_id(1);
auto* object_2_bbox = object_2->mutable_bounding_box();
object_2_bbox->set_normalized_top(0);
object_2_bbox->set_normalized_bottom(1.0);
object_2_bbox->set_normalized_left(0);
object_2_bbox->set_normalized_right(1.0);
auto* object_3 = ground_truth.add_objects();
object_3->set_class_id(2);
auto* object_3_bbox = object_3->mutable_bounding_box();
object_3_bbox->set_normalized_top(0.5);
object_3_bbox->set_normalized_bottom(1.0);
object_3_bbox->set_normalized_left(0.5);
object_3_bbox->set_normalized_right(1.0);
return ground_truth;
}
ObjectDetectionResult GetPredictedDetectionResult() {
ObjectDetectionResult predicted;
auto* object_1 = predicted.add_objects();
object_1->set_class_id(1);
object_1->set_score(0.8);
auto* object_1_bbox = object_1->mutable_bounding_box();
object_1_bbox->set_normalized_top(0.091);
object_1_bbox->set_normalized_bottom(1.0);
object_1_bbox->set_normalized_left(0.091);
object_1_bbox->set_normalized_right(1.0);
auto* object_2 = predicted.add_objects();
object_2->set_class_id(1);
object_2->set_score(0.9);
auto* object_2_bbox = object_2->mutable_bounding_box();
object_2_bbox->set_normalized_top(0.474);
object_2_bbox->set_normalized_bottom(1.0);
object_2_bbox->set_normalized_left(0.474);
object_2_bbox->set_normalized_right(1.0);
auto* object_3 = predicted.add_objects();
object_3->set_class_id(1);
object_3->set_score(0.95);
auto* object_3_bbox = object_3->mutable_bounding_box();
object_3_bbox->set_normalized_top(0.474);
object_3_bbox->set_normalized_bottom(1.0);
object_3_bbox->set_normalized_left(0.474);
object_3_bbox->set_normalized_right(1.0);
return predicted;
}
TEST(ObjectDetectionAveragePrecisionStage, ZeroClasses) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(0);
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ObjectDetectionAveragePrecisionStage, SampleInputs) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(3);
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const ObjectDetectionResult ground_truth = GetGroundTruthDetectionResult();
const ObjectDetectionResult predicted = GetPredictedDetectionResult();
stage.SetEvalInputs(ObjectDetectionResult(), ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
ObjectDetectionAveragePrecisionMetrics detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(), 0.0);
EXPECT_EQ(detection_metrics.individual_average_precisions_size(), 2);
stage.SetEvalInputs(ground_truth, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(),
0.50495052);
EXPECT_EQ(metrics.num_runs(), 2);
stage.SetEvalInputs(predicted, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).iou_threshold(), 0.5);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).average_precision(),
0.4841584);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(1).iou_threshold(),
0.999);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(1).average_precision(),
0.33663365);
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(),
0.41039604);
}
TEST(ObjectDetectionAveragePrecisionStage, DefaultIoUThresholds) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(3);
auto* params = config.mutable_specification()
->mutable_object_detection_average_precision_params();
params->clear_iou_thresholds();
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const ObjectDetectionResult ground_truth = GetGroundTruthDetectionResult();
const ObjectDetectionResult predicted = GetPredictedDetectionResult();
stage.SetEvalInputs(ground_truth, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
ObjectDetectionAveragePrecisionMetrics detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(), 1.0);
EXPECT_EQ(detection_metrics.individual_average_precisions_size(), 10);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).iou_threshold(), 0.5);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(9).iou_threshold(), 0.95);
}
}
}
} |
859 | cpp | tensorflow/tensorflow | image_preprocessing_stage | tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.cc | tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_IMAGE_PREPROCESSING_STAGE_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_IMAGE_PREPROCESSING_STAGE_H_
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/util/stats_calculator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/evaluation_stage.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/preprocessing_steps.pb.h"
namespace tflite {
namespace evaluation {
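// Preprocessing stage that decodes a JPEG or raw RGB image and applies the
// configured crop/resize/pad/normalize steps before inference.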
class ImagePreprocessingStage : public EvaluationStage {
public:
explicit ImagePreprocessingStage(const EvaluationStageConfig& config)
: EvaluationStage(config) {}
TfLiteStatus Init() override;
TfLiteStatus Run() override;
EvaluationStageMetrics LatestMetrics() override;
~ImagePreprocessingStage() override {}
void SetImagePath(std::string* image_path) { image_path_ = image_path; }
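  // Returns a pointer to the preprocessed data in the configured output type,
  // or nullptr if Run() has not completed successfully yet.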
void* GetPreprocessedImageData();
private:
std::string* image_path_ = nullptr;
TfLiteType output_type_;
tsl::Stat<int64_t> latency_stats_;
std::vector<float> float_preprocessed_image_;
std::vector<int8_t> int8_preprocessed_image_;
std::vector<uint8_t> uint8_preprocessed_image_;
};
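// Convenience builder that assembles an EvaluationStageConfig for
// ImagePreprocessingStage one step at a time.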
class ImagePreprocessingConfigBuilder {
public:
ImagePreprocessingConfigBuilder(const std::string& name,
TfLiteType output_type) {
config_.set_name(name);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->set_output_type(static_cast<int>(output_type));
}
void AddCroppingStep(float cropping_fraction,
bool use_square_cropping = false) {
ImagePreprocessingStepParams params;
params.mutable_cropping_params()->set_cropping_fraction(cropping_fraction);
params.mutable_cropping_params()->set_square_cropping(use_square_cropping);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
void AddCroppingStep(uint32_t width, uint32_t height,
bool use_square_cropping = false) {
ImagePreprocessingStepParams params;
params.mutable_cropping_params()->mutable_target_size()->set_height(height);
params.mutable_cropping_params()->mutable_target_size()->set_width(width);
params.mutable_cropping_params()->set_square_cropping(use_square_cropping);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
void AddResizingStep(uint32_t width, uint32_t height,
bool aspect_preserving) {
ImagePreprocessingStepParams params;
params.mutable_resizing_params()->set_aspect_preserving(aspect_preserving);
params.mutable_resizing_params()->mutable_target_size()->set_height(height);
params.mutable_resizing_params()->mutable_target_size()->set_width(width);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
void AddPaddingStep(uint32_t width, uint32_t height, int value) {
ImagePreprocessingStepParams params;
params.mutable_padding_params()->mutable_target_size()->set_height(height);
params.mutable_padding_params()->mutable_target_size()->set_width(width);
params.mutable_padding_params()->set_padding_value(value);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
void AddSquarePaddingStep(int value) {
ImagePreprocessingStepParams params;
params.mutable_padding_params()->set_square_padding(true);
params.mutable_padding_params()->set_padding_value(value);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
void AddPerChannelNormalizationStep(float r_mean, float g_mean, float b_mean,
float scale) {
ImagePreprocessingStepParams params;
params.mutable_normalization_params()->mutable_means()->set_r_mean(r_mean);
params.mutable_normalization_params()->mutable_means()->set_g_mean(g_mean);
params.mutable_normalization_params()->mutable_means()->set_b_mean(b_mean);
params.mutable_normalization_params()->set_scale(scale);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
void AddNormalizationStep(float mean, float scale) {
ImagePreprocessingStepParams params;
params.mutable_normalization_params()->set_channelwise_mean(mean);
params.mutable_normalization_params()->set_scale(scale);
config_.mutable_specification()
->mutable_image_preprocessing_params()
->mutable_steps()
->Add(std::move(params));
}
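  // Adds the normalization matching the output type: scale float32 to
  // [-1, 1], shift int8 by 128, leave uint8 untouched.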
void AddDefaultNormalizationStep() {
switch (
config_.specification().image_preprocessing_params().output_type()) {
case kTfLiteFloat32:
AddNormalizationStep(127.5, 1.0 / 127.5);
break;
case kTfLiteUInt8:
break;
case kTfLiteInt8:
AddNormalizationStep(128.0, 1.0);
break;
default:
LOG(ERROR) << "Type not supported";
break;
}
}
EvaluationStageConfig build() { return std::move(config_); }
private:
EvaluationStageConfig config_;
};
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <ios>
#include <iterator>
#include <memory>
#include <streambuf>
#include <string>
#include <vector>
#include "absl/base/casts.h"
#include "absl/strings/ascii.h"
#include "jpeglib.h"
#include "tensorflow/core/lib/jpeg/jpeg_mem.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/reference/pad.h"
#include "tensorflow/lite/kernels/internal/reference/resize_bilinear.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/preprocessing_steps.pb.h"
namespace tflite {
namespace evaluation {
namespace {
const int kNumChannels = 3;
inline int ImageArrayOffset(int height, int width, int h, int w, int c) {
return (h * width + w) * kNumChannels + c;
}
struct ImageData {
uint32_t width;
uint32_t height;
std::unique_ptr<std::vector<float>> data;
float GetData(int h, int w, int c) {
return data->at(ImageArrayOffset(height, width, h, w, c));
}
};
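// Loads a raw .rgb8 file byte-for-byte into a float buffer; width and height
// are not populated for raw images.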
inline void LoadImageRaw(std::string* filename, ImageData* image_data) {
std::ifstream stream(filename->c_str(), std::ios::in | std::ios::binary);
std::vector<uint8_t> raw_data((std::istreambuf_iterator<char>(stream)),
std::istreambuf_iterator<char>());
std::vector<float>* orig_image = new std::vector<float>();
orig_image->reserve(raw_data.size());
for (int i = 0; i < raw_data.size(); ++i) {
orig_image->push_back(static_cast<float>(raw_data[i]));
}
image_data->data.reset(orig_image);
}
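// Decodes a JPEG file into a 3-channel float buffer and records its
// width and height.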
inline void LoadImageJpeg(std::string* filename, ImageData* image_data) {
std::ifstream t(*filename);
std::string image_str((std::istreambuf_iterator<char>(t)),
std::istreambuf_iterator<char>());
const int fsize = image_str.size();
auto temp = absl::bit_cast<const uint8_t*>(image_str.data());
std::unique_ptr<uint8_t[]> original_image;
int original_width, original_height, original_channels;
tensorflow::jpeg::UncompressFlags flags;
flags.dct_method = JDCT_ISLOW;
flags.components = kNumChannels;
original_image.reset(Uncompress(temp, fsize, flags, &original_width,
&original_height, &original_channels,
nullptr));
image_data->width = original_width;
image_data->height = original_height;
int original_size = original_height * original_width * original_channels;
std::vector<float>* float_image = new std::vector<float>();
float_image->reserve(original_size);
for (int i = 0; i < original_size; ++i) {
float_image->push_back(static_cast<float>(original_image[i]));
}
image_data->data.reset(float_image);
}
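// Center-crops the image either by a fraction of each dimension or to an
// explicit target size; fraction-based square cropping uses the smaller side.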
inline void Crop(ImageData* image_data, const CroppingParams& crop_params) {
int crop_height, crop_width;
int input_width = image_data->width;
int input_height = image_data->height;
if (crop_params.has_cropping_fraction()) {
crop_height =
static_cast<int>(round(crop_params.cropping_fraction() * input_height));
crop_width =
static_cast<int>(round(crop_params.cropping_fraction() * input_width));
} else if (crop_params.has_target_size()) {
crop_height = crop_params.target_size().height();
crop_width = crop_params.target_size().width();
}
if (crop_params.has_cropping_fraction() && crop_params.square_cropping()) {
crop_height = std::min(crop_height, crop_width);
crop_width = crop_height;
}
int start_w = static_cast<int>(round((input_width - crop_width) / 2.0));
int start_h = static_cast<int>(round((input_height - crop_height) / 2.0));
std::vector<float>* cropped_image = new std::vector<float>();
cropped_image->reserve(crop_height * crop_width * kNumChannels);
for (int in_h = start_h; in_h < start_h + crop_height; ++in_h) {
for (int in_w = start_w; in_w < start_w + crop_width; ++in_w) {
for (int c = 0; c < kNumChannels; ++c) {
cropped_image->push_back(image_data->GetData(in_h, in_w, c));
}
}
}
image_data->height = crop_height;
image_data->width = crop_width;
image_data->data.reset(cropped_image);
}
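// Bilinear resize; when aspect_preserving is set the image is scaled by the
// larger ratio so both dimensions reach at least the target size.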
inline void ResizeBilinear(ImageData* image_data,
const ResizingParams& params) {
tflite::ResizeBilinearParams resize_params;
resize_params.align_corners = false;
resize_params.half_pixel_centers = false;
tflite::RuntimeShape input_shape({1, static_cast<int>(image_data->height),
static_cast<int>(image_data->width),
kNumChannels});
int output_height, output_width;
if (params.aspect_preserving()) {
float ratio_w =
params.target_size().width() / static_cast<float>(image_data->width);
float ratio_h =
params.target_size().height() / static_cast<float>(image_data->height);
if (ratio_w >= ratio_h) {
output_width = params.target_size().width();
output_height = static_cast<int>(round(image_data->height * ratio_w));
} else {
output_width = static_cast<int>(round(image_data->width * ratio_h));
output_height = params.target_size().height();
}
} else {
output_height = params.target_size().height();
output_width = params.target_size().width();
}
tflite::RuntimeShape output_size_dims({1, 1, 1, 2});
std::vector<int32_t> output_size_data = {output_height, output_width};
tflite::RuntimeShape output_shape(
{1, output_height, output_width, kNumChannels});
int output_size = output_width * output_height * kNumChannels;
std::vector<float>* output_data = new std::vector<float>(output_size, 0);
tflite::reference_ops::ResizeBilinear(
resize_params, input_shape, image_data->data->data(), output_size_dims,
output_size_data.data(), output_shape, output_data->data());
image_data->height = output_height;
image_data->width = output_width;
image_data->data.reset(output_data);
}
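// Pads the image with pad_value, splitting the extra rows and columns evenly
// between the two sides.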
inline void Pad(ImageData* image_data, const PaddingParams& params) {
int output_width = params.target_size().width();
int output_height = params.target_size().height();
int pad_value = params.padding_value();
tflite::PadParams pad_params;
pad_params.left_padding_count = 4;
std::uninitialized_fill_n(pad_params.left_padding, 4, 0);
pad_params.left_padding[1] =
static_cast<int>(round((output_height - image_data->height) / 2.0));
pad_params.left_padding[2] =
static_cast<int>(round((output_width - image_data->width) / 2.0));
pad_params.right_padding_count = 4;
std::uninitialized_fill_n(pad_params.right_padding, 4, 0);
pad_params.right_padding[1] =
output_height - pad_params.left_padding[1] - image_data->height;
pad_params.right_padding[2] =
output_width - pad_params.left_padding[2] - image_data->width;
tflite::RuntimeShape input_shape({1, static_cast<int>(image_data->height),
static_cast<int>(image_data->width),
kNumChannels});
tflite::RuntimeShape output_shape(
{1, output_height, output_width, kNumChannels});
int output_size = output_width * output_height * kNumChannels;
std::vector<float>* output_data = new std::vector<float>(output_size, 0);
tflite::reference_ops::Pad(pad_params, input_shape, image_data->data->data(),
&pad_value, output_shape, output_data->data());
image_data->height = output_height;
image_data->width = output_width;
image_data->data.reset(output_data);
}
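// Applies (pixel - mean) * scale, using either one channel-wise mean or
// separate R/G/B means.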
inline void Normalize(ImageData* image_data,
const NormalizationParams& params) {
float scale = params.scale();
float* data_end = image_data->data->data() + image_data->data->size();
if (params.has_channelwise_mean()) {
float mean = params.channelwise_mean();
for (float* data = image_data->data->data(); data < data_end; ++data) {
*data = (*data - mean) * scale;
}
} else {
float r_mean = params.means().r_mean();
float g_mean = params.means().g_mean();
float b_mean = params.means().b_mean();
for (float* data = image_data->data->data(); data < data_end;) {
*data = (*data - r_mean) * scale;
++data;
*data = (*data - g_mean) * scale;
++data;
*data = (*data - b_mean) * scale;
++data;
}
}
}
}
TfLiteStatus ImagePreprocessingStage::Init() {
if (!config_.has_specification() ||
!config_.specification().has_image_preprocessing_params()) {
LOG(ERROR) << "No preprocessing params";
return kTfLiteError;
}
const ImagePreprocessingParams& params =
config_.specification().image_preprocessing_params();
for (const ImagePreprocessingStepParams& param : params.steps()) {
if (param.has_cropping_params()) {
const CroppingParams& crop_params = param.cropping_params();
if (crop_params.has_cropping_fraction() &&
(crop_params.cropping_fraction() <= 0 ||
crop_params.cropping_fraction() > 1.0)) {
LOG(ERROR) << "Invalid cropping fraction";
return kTfLiteError;
}
}
}
output_type_ = static_cast<TfLiteType>(params.output_type());
return kTfLiteOk;
}
TfLiteStatus ImagePreprocessingStage::Run() {
if (!image_path_) {
LOG(ERROR) << "Image path not set";
return kTfLiteError;
}
ImageData image_data;
const ImagePreprocessingParams& params =
config_.specification().image_preprocessing_params();
int64_t start_us = profiling::time::NowMicros();
string image_ext = image_path_->substr(image_path_->find_last_of("."));
absl::AsciiStrToLower(&image_ext);
bool is_raw_image = (image_ext == ".rgb8");
if (image_ext == ".rgb8") {
LoadImageRaw(image_path_, &image_data);
} else if (image_ext == ".jpg" || image_ext == ".jpeg") {
LoadImageJpeg(image_path_, &image_data);
} else {
LOG(ERROR) << "Extension " << image_ext << " is not supported";
return kTfLiteError;
}
for (const ImagePreprocessingStepParams& param : params.steps()) {
if (param.has_cropping_params()) {
if (is_raw_image) {
LOG(WARNING) << "Image cropping will not be performed on raw images";
continue;
}
Crop(&image_data, param.cropping_params());
} else if (param.has_resizing_params()) {
if (is_raw_image) {
LOG(WARNING) << "Image resizing will not be performed on raw images";
continue;
}
ResizeBilinear(&image_data, param.resizing_params());
} else if (param.has_padding_params()) {
if (is_raw_image) {
LOG(WARNING) << "Image padding will not be performed on raw images";
continue;
}
Pad(&image_data, param.padding_params());
} else if (param.has_normalization_params()) {
Normalize(&image_data, param.normalization_params());
}
}
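  // Convert the float working buffer into the requested output type.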
if (output_type_ == kTfLiteUInt8) {
uint8_preprocessed_image_.clear();
uint8_preprocessed_image_.resize(image_data.data->size() +
16);
for (int i = 0; i < image_data.data->size(); ++i) {
uint8_preprocessed_image_[i] =
static_cast<uint8_t>(image_data.data->at(i));
}
} else if (output_type_ == kTfLiteInt8) {
int8_preprocessed_image_.clear();
int8_preprocessed_image_.resize(image_data.data->size() +
16);
for (int i = 0; i < image_data.data->size(); ++i) {
int8_preprocessed_image_[i] = static_cast<int8_t>(image_data.data->at(i));
}
} else if (output_type_ == kTfLiteFloat32) {
float_preprocessed_image_ = *image_data.data;
}
latency_stats_.UpdateStat(profiling::time::NowMicros() - start_us);
return kTfLiteOk;
}
void* ImagePreprocessingStage::GetPreprocessedImageData() {
if (latency_stats_.count() == 0) return nullptr;
if (output_type_ == kTfLiteUInt8) {
return uint8_preprocessed_image_.data();
} else if (output_type_ == kTfLiteInt8) {
return int8_preprocessed_image_.data();
} else if (output_type_ == kTfLiteFloat32) {
return float_preprocessed_image_.data();
}
return nullptr;
}
EvaluationStageMetrics ImagePreprocessingStage::LatestMetrics() {
EvaluationStageMetrics metrics;
auto* latency_metrics =
metrics.mutable_process_metrics()->mutable_total_latency();
latency_metrics->set_last_us(latency_stats_.newest());
latency_metrics->set_max_us(latency_stats_.max());
latency_metrics->set_min_us(latency_stats_.min());
latency_metrics->set_sum_us(latency_stats_.sum());
latency_metrics->set_avg_us(latency_stats_.avg());
metrics.set_num_runs(static_cast<int>(latency_stats_.count()));
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kImagePreprocessingStageName[] = "inception_preprocessing_stage";
constexpr char kTestImage[] =
"tensorflow/lite/tools/evaluation/stages/testdata/"
"grace_hopper.jpg";
constexpr int kImageDim = 224;
TEST(ImagePreprocessingStage, NoParams) {
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
EvaluationStageConfig config = builder.build();
config.mutable_specification()->clear_image_preprocessing_params();
ImagePreprocessingStage stage = ImagePreprocessingStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ImagePreprocessingStage, InvalidCroppingFraction) {
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddCroppingStep(-0.8);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ImagePreprocessingStage, ImagePathNotSet) {
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.Run(), kTfLiteError);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingFloat) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddNormalizationStep(127.5, 1.0 / 127.5);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
float* preprocessed_image_ptr =
static_cast<float*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[0], -0.74901962);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[1], -0.74901962);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[2], -0.68627453);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingFloat_NoCrop) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddResizingStep(224, 224, false);
builder.AddNormalizationStep(127.5, 1.0 / 127.5);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
float* preprocessed_image_ptr =
static_cast<float*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[0], -0.83529419);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[1], -0.7960785);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[2], -0.35686275);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingUInt8Quantized) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteUInt8);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
uint8_t* preprocessed_image_ptr =
static_cast<uint8_t*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], 32);
EXPECT_EQ(preprocessed_image_ptr[1], 32);
EXPECT_EQ(preprocessed_image_ptr[2], 40);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingInt8Quantized) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteInt8);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddNormalizationStep(128.0, 1.0);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
int8_t* preprocessed_image_ptr =
static_cast<int8_t*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], -96);
EXPECT_EQ(preprocessed_image_ptr[1], -96);
EXPECT_EQ(preprocessed_image_ptr[2], -88);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingPadding) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteInt8);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddPaddingStep(225, 225, 0);
builder.AddNormalizationStep(128.0, 1.0);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
int8_t* preprocessed_image_ptr =
static_cast<int8_t*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], -128);
EXPECT_EQ(preprocessed_image_ptr[224], -128);
EXPECT_EQ(preprocessed_image_ptr[225 * 3], -128);
EXPECT_EQ(preprocessed_image_ptr[225 * 3 + 3], -96);
EXPECT_EQ(preprocessed_image_ptr[225 * 3 + 4], -96);
EXPECT_EQ(preprocessed_image_ptr[225 * 3 + 5], -88);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingSubtractMean) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddPerChannelNormalizationStep(110.0, 120.0, 123.0, 1.0);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
float* preprocessed_image_ptr =
static_cast<float*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], -78);
EXPECT_EQ(preprocessed_image_ptr[1], -88);
EXPECT_EQ(preprocessed_image_ptr[2], -83);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
}
}
} |
860 | cpp | tensorflow/tensorflow | image_metrics | tensorflow/lite/tools/evaluation/stages/utils/image_metrics.cc | tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_UTILS_IMAGE_METRICS_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_UTILS_IMAGE_METRICS_H_
#include <stdint.h>
#include <vector>
namespace tflite {
namespace evaluation {
namespace image {
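// Axis-aligned box described by an interval on each axis, with IoU and
// overlap helpers.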
struct Box2D {
struct Interval {
float min = 0;
float max = 0;
Interval(float x, float y) {
min = x;
max = y;
}
Interval() {}
};
Interval x;
Interval y;
static float Length(const Interval& a);
static float Intersection(const Interval& a, const Interval& b);
float Area() const;
float Intersection(const Box2D& other) const;
float Union(const Box2D& other) const;
float IoU(const Box2D& other) const;
float Overlap(const Box2D& other) const;
};
enum IgnoreType {
kDontIgnore = 0,
kIgnoreOneMatch = 1,
kIgnoreAllMatches = 2,
};
struct Detection {
public:
bool difficult = false;
int64_t imgid = 0;
float score = 0;
Box2D box;
IgnoreType ignore = IgnoreType::kDontIgnore;
Detection() {}
Detection(bool d, int64_t id, float s, Box2D b)
: difficult(d), imgid(id), score(s), box(b) {}
Detection(bool d, int64_t id, float s, Box2D b, IgnoreType i)
: difficult(d), imgid(id), score(s), box(b), ignore(i) {}
};
struct PR {
float p = 0;
float r = 0;
PR(const float p_, const float r_) : p(p_), r(r_) {}
};
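// Computes average precision either from an existing precision-recall curve
// or directly from ground-truth and predicted detections.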
class AveragePrecision {
public:
struct Options {
float iou_threshold = 0.5;
int num_recall_points = 100;
};
AveragePrecision() : AveragePrecision(Options()) {}
explicit AveragePrecision(const Options& opts) : opts_(opts) {}
float FromPRCurve(const std::vector<PR>& pr,
std::vector<PR>* pr_out = nullptr);
float FromBoxes(const std::vector<Detection>& groundtruth,
const std::vector<Detection>& prediction,
std::vector<PR>* pr_out = nullptr);
private:
Options opts_;
};
}
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
#include <algorithm>
#include <cmath>
#include <list>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/logging.h"
namespace tflite {
namespace evaluation {
namespace image {
float Box2D::Length(const Box2D::Interval& a) {
return std::max(0.f, a.max - a.min);
}
float Box2D::Intersection(const Box2D::Interval& a, const Box2D::Interval& b) {
return Length(Interval{std::max(a.min, b.min), std::min(a.max, b.max)});
}
float Box2D::Area() const { return Length(x) * Length(y); }
float Box2D::Intersection(const Box2D& other) const {
return Intersection(x, other.x) * Intersection(y, other.y);
}
float Box2D::Union(const Box2D& other) const {
return Area() + other.Area() - Intersection(other);
}
float Box2D::IoU(const Box2D& other) const {
const float total = Union(other);
if (total > 0) {
return Intersection(other) / total;
} else {
return 0.0;
}
}
float Box2D::Overlap(const Box2D& other) const {
const float intersection = Intersection(other);
return intersection > 0 ? intersection / Area() : 0.0;
}
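// Computes AP by sampling the running-max precision at num_recall_points + 1
// evenly spaced recall levels; `pr` must be sorted by increasing recall.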
float AveragePrecision::FromPRCurve(const std::vector<PR>& pr,
std::vector<PR>* pr_out) {
float p = 0;
float sum = 0;
int r_level = opts_.num_recall_points;
for (int i = pr.size() - 1; i >= 0; --i) {
const PR& item = pr[i];
if (i > 0) {
if (item.r < pr[i - 1].r) {
LOG(ERROR) << "recall points are not in order: " << pr[i - 1].r << ", "
<< item.r;
return 0;
}
}
while (item.r * opts_.num_recall_points < r_level) {
const float recall =
static_cast<float>(r_level) / opts_.num_recall_points;
if (r_level < 0) {
LOG(ERROR) << "Number of recall points should be > 0";
return 0;
}
sum += p;
r_level -= 1;
if (pr_out != nullptr) {
pr_out->emplace_back(p, recall);
}
}
p = std::max(p, item.p);
}
for (; r_level >= 0; --r_level) {
const float recall = static_cast<float>(r_level) / opts_.num_recall_points;
sum += p;
if (pr_out != nullptr) {
pr_out->emplace_back(p, recall);
}
}
return sum / (1 + opts_.num_recall_points);
}
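// Matches predictions (highest score first) against per-image ground truth by
// best IoU, honoring difficult/ignore flags, then derives the P-R curve.
// Returns NAN when there is no countable ground truth.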
float AveragePrecision::FromBoxes(const std::vector<Detection>& groundtruth,
const std::vector<Detection>& prediction,
std::vector<PR>* pr_out) {
absl::flat_hash_map<int64_t, std::list<Detection>> gt;
int num_gt = 0;
for (auto& box : groundtruth) {
gt[box.imgid].push_back(box);
if (!box.difficult && box.ignore == kDontIgnore) {
++num_gt;
}
}
if (num_gt == 0) {
return NAN;
}
std::vector<Detection> pd = prediction;
std::sort(pd.begin(), pd.end(), [](const Detection& a, const Detection& b) {
return a.score > b.score;
});
std::vector<PR> pr;
int correct = 0;
int num_pd = 0;
for (int i = 0; i < pd.size(); ++i) {
const Detection& b = pd[i];
    auto* g = &gt[b.imgid];
auto best = g->end();
float best_iou = -INFINITY;
for (auto it = g->begin(); it != g->end(); ++it) {
const auto iou = b.box.IoU(it->box);
if (iou > best_iou) {
best = it;
best_iou = iou;
}
}
if ((best != g->end()) && (best_iou >= opts_.iou_threshold)) {
if (best->difficult) {
continue;
}
switch (best->ignore) {
case kDontIgnore: {
++correct;
++num_pd;
g->erase(best);
pr.push_back({static_cast<float>(correct) / num_pd,
static_cast<float>(correct) / num_gt});
break;
}
case kIgnoreOneMatch: {
g->erase(best);
break;
}
case kIgnoreAllMatches: {
break;
}
}
} else {
++num_pd;
pr.push_back({static_cast<float>(correct) / num_pd,
static_cast<float>(correct) / num_gt});
}
}
return FromPRCurve(pr, pr_out);
}
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
#include <stdint.h>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <gtest/gtest.h>
namespace tflite {
namespace evaluation {
namespace image {
float MaxP(float minr, const std::vector<PR>& prs) {
float p = 0;
for (auto& pr : prs) {
if (pr.r >= minr) p = std::max(p, pr.p);
}
return p;
}
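// Reference AP: average of the max precision at 101 evenly spaced recall
// levels.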
float ExpectedAP(const std::vector<PR>& prs) {
float sum = 0;
for (float r = 0; r <= 1.0; r += 0.01) {
sum += MaxP(r, prs);
}
return sum / 101;
}
float GenerateRandomFraction() {
return static_cast<float>(std::rand()) / RAND_MAX;
}
TEST(ImageMetricsTest, APBasic) {
std::vector<PR> prs;
  prs = {{1., 1.}, {0.5, 1.0}, {1. / 3, 1.0}};
EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6);
prs = {{1.0, 0.01}};
EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6);
prs = {{1.0, 0.2}, {1.0, 0.4}, {0.67, 0.4}, {0.5, 0.4}, {0.4, 0.4},
{0.5, 0.6}, {0.57, 0.8}, {0.5, 0.8}, {0.44, 0.8}, {0.5, 1.0}};
EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6);
}
TEST(ImageMetricsTest, APRandom) {
std::vector<PR> prs;
for (int i = 0; i < 5000; ++i) {
float p = GenerateRandomFraction();
float r = GenerateRandomFraction();
prs.push_back({p, r});
}
const float expected = ExpectedAP(prs);
std::sort(std::begin(prs), std::end(prs),
[](const PR& a, const PR& b) { return a.r < b.r; });
const float actual = AveragePrecision().FromPRCurve(prs);
EXPECT_NEAR(expected, actual, 1e-5);
}
TEST(ImageMetricsTest, BBoxAPBasic) {
std::vector<Detection> gt;
gt.push_back(Detection({false, 100, 1, {{0, 1}, {0, 1}}}));
gt.push_back(Detection({false, 200, 1, {{1, 2}, {1, 2}}}));
std::vector<Detection> pd;
pd.push_back(Detection({false, 100, 0.8, {{0.1, 1.1}, {0.1, 1.1}}}));
pd.push_back(Detection({false, 200, 0.8, {{0.9, 1.9}, {0.9, 1.9}}}));
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
AveragePrecision::Options opts;
opts.iou_threshold = 0.85;
EXPECT_NEAR(0.0, AveragePrecision(opts).FromBoxes(gt, pd), 1e-6);
}
TEST(ImageMetricsTest, Box2DOverlap) {
Box2D a({{0, 1}, {0, 1}});
Box2D b({{0.5, 2.5}, {0.5, 2.5}});
EXPECT_NEAR(0.25, a.Overlap(b), 1e-6);
EXPECT_NEAR(0.0625, b.Overlap(a), 1e-6);
}
TEST(ImageMetricsTest, BBoxAPwithIgnoredGroundTruth) {
std::vector<Detection> gt;
std::vector<Detection> pd;
gt.push_back(Detection({false, 100, 1, {{1, 2}, {1, 2}}, kIgnoreOneMatch}));
pd.push_back(Detection({false, 100, 0.8, {{0.1, 1.1}, {0.1, 1.1}}}));
EXPECT_TRUE(std::isnan(AveragePrecision().FromBoxes(gt, pd)));
gt.push_back({false, 100, 1, {{0, 1}, {0, 1}}});
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
pd.push_back({false, 100, 0.9, {{0.9, 1.9}, {0.9, 1.9}}});
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
pd.push_back({false, 100, 0.95, {{0.9, 1.9}, {0.9, 1.9}}});
EXPECT_NEAR(0.5, AveragePrecision().FromBoxes(gt, pd), 1e-6);
gt[0].ignore = kIgnoreAllMatches;
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
}
TEST(ImageMetricsTest, BBoxAPRandom) {
auto rand = [](int64_t id) {
auto xmin = GenerateRandomFraction();
auto xmax = xmin + GenerateRandomFraction();
auto ymin = GenerateRandomFraction();
auto ymax = ymin + GenerateRandomFraction();
return Detection(
{false, id, GenerateRandomFraction(), {{xmin, xmax}, {ymin, ymax}}});
};
std::vector<Detection> gt;
for (int i = 0; i < 100; ++i) {
gt.push_back(rand(i % 10));
}
std::vector<Detection> pd = gt;
for (int i = 0; i < 10000; ++i) {
pd.push_back(rand(i % 10));
}
std::vector<PR> pr;
AveragePrecision().FromBoxes(gt, pd, &pr);
EXPECT_EQ(101, pr.size());
}
}
}
} |
861 | cpp | tensorflow/tensorflow | writer_lib | tensorflow/lite/tools/serialization/writer_lib.cc | tensorflow/lite/tools/serialization/writer_lib_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_SERIALIZATION_WRITER_LIB_H_
#define TENSORFLOW_LITE_TOOLS_SERIALIZATION_WRITER_LIB_H_
#include <iostream>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#if 1
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#endif
#include "absl/container/flat_hash_map.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/tools/serialization/enum_mapping.h"
#include "tensorflow/lite/version.h"
namespace tflite {
struct OpCode {
int builtin;
std::string custom;
};
class SubgraphWriter;
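// Writes an Interpreter (all of its subgraphs) back into a TFLite flatbuffer,
// sharing one buffer table and one opcode table across subgraphs.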
class ModelWriter {
public:
typedef flatbuffers::Offset<Operator> (*CustomWriter)(
flatbuffers::FlatBufferBuilder* fbb, Subgraph* subgraph, int node_index,
flatbuffers::Offset<flatbuffers::Vector<uint8_t>>* output_options,
CustomOptionsFormat* custom_options_format);
explicit ModelWriter(Interpreter* interpreter,
bool serialize_dims_signature = true);
explicit ModelWriter(const std::vector<Subgraph*>& subgraphs,
bool serialize_dims_signature = true);
TfLiteStatus GetBuffer(std::unique_ptr<uint8_t[]>* out, size_t* size);
TfLiteStatus Write(const std::string& filename);
void SetUnusedTensors(int subgraph_index,
const std::set<int>& unused_tensors);
TfLiteStatus SetCustomInputOutput(int subgraph_index,
const std::vector<int>& inputs,
const std::vector<int>& outputs,
const std::vector<int>& execution_plan);
TfLiteStatus RegisterCustomWriter(const std::string& custom_name,
CustomWriter custom_writer);
private:
void Init(const std::vector<Subgraph*>& subgraphs,
bool serialize_dims_signature);
template <class T>
using Offset = flatbuffers::Offset<T>;
Offset<flatbuffers::Vector<Offset<OperatorCode>>> CreateOpCodeTable(
flatbuffers::FlatBufferBuilder* fbb);
Offset<flatbuffers::Vector<Offset<Buffer>>> ExportBuffers(
flatbuffers::FlatBufferBuilder* fbb);
TfLiteStatus UpdateSubgraphReferences(flatbuffers::FlatBufferBuilder* fbb);
std::vector<SubgraphWriter> subgraph_writers_;
std::vector<std::pair<const uint8_t*, size_t>> buffers_;
std::vector<OpCode> opcodes_;
absl::flat_hash_map<int, int> builtin_op_to_opcode_;
absl::flat_hash_map<int, int> subgraph_index_mapper_;
};
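// Serializes a single Subgraph to a flatbuffer; ModelWriter reuses it with
// shared buffer/opcode tables via the private constructor.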
class SubgraphWriter {
public:
friend class ModelWriter;
typedef flatbuffers::Offset<Operator> (*CustomWriter)(
flatbuffers::FlatBufferBuilder* fbb, Subgraph* subgraph, int node_index,
flatbuffers::Offset<flatbuffers::Vector<uint8_t>>* output_options,
CustomOptionsFormat* custom_options_format);
explicit SubgraphWriter(Subgraph* subgraph,
bool serialize_dims_signature = true)
: subgraph_(subgraph),
inputs_(subgraph->inputs()),
outputs_(subgraph->outputs()),
execution_plan_(subgraph->execution_plan()),
serialize_dims_signature_(serialize_dims_signature) {
buffers_ = &buffers_data_;
opcodes_ = &opcodes_data_;
builtin_op_to_opcode_ = &builtin_op_to_opcode_data_;
buffers_->push_back(std::make_pair(nullptr, 0));
}
TfLiteStatus GetBuffer(std::unique_ptr<uint8_t[]>* out, size_t* size);
TfLiteStatus Write(const std::string& filename);
TfLiteStatus RegisterCustomWriter(const std::string& custom_name,
CustomWriter custom_writer);
void SetUnusedTensors(const std::set<int>& unused_tensors) {
unused_tensors_ = unused_tensors;
}
TfLiteStatus SetCustomInputOutput(const std::vector<int>& inputs,
const std::vector<int>& outputs,
const std::vector<int>& execution_plan);
private:
explicit SubgraphWriter(
Subgraph* subgraph,
std::vector<std::pair<const uint8_t*, size_t>>* external_buffers,
std::vector<OpCode>* external_opcodes,
absl::flat_hash_map<int, int>* external_builtin_op_to_opcode,
bool serialize_dims_signature)
: subgraph_(subgraph),
inputs_(subgraph->inputs()),
outputs_(subgraph->outputs()),
execution_plan_(subgraph->execution_plan()),
serialize_dims_signature_(serialize_dims_signature) {
buffers_ = external_buffers;
opcodes_ = external_opcodes;
builtin_op_to_opcode_ = external_builtin_op_to_opcode;
buffers_->push_back(std::make_pair(nullptr, 0));
}
flatbuffers::Offset<SubGraph> PopulateAndGetOffset(
flatbuffers::FlatBufferBuilder* builder,
const std::string& subgraph_name);
template <class T>
using Offset = flatbuffers::Offset<T>;
template <class T_OUTPUT, class T_INPUT>
Offset<flatbuffers::Vector<T_OUTPUT>> ExportVector(
flatbuffers::FlatBufferBuilder* fbb, const T_INPUT& v);
Offset<flatbuffers::Vector<Offset<Tensor>>> ExportTensors(
flatbuffers::FlatBufferBuilder* fbb);
Offset<flatbuffers::Vector<Offset<Operator>>> ExportOperators(
flatbuffers::FlatBufferBuilder* fbb);
Offset<flatbuffers::Vector<Offset<OperatorCode>>> CreateOpCodeTable(
flatbuffers::FlatBufferBuilder* fbb);
Offset<flatbuffers::Vector<Offset<Buffer>>> ExportBuffers(
flatbuffers::FlatBufferBuilder* fbb);
template <class T>
std::vector<int> RemapTensorIndicesToWritten(const T& input);
TfLiteStatus CheckInputOutput(const std::vector<int>& inputs,
const std::vector<int>& outputs,
const std::vector<int>& execution_plan);
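  // Returns the opcode-table index for a builtin op, inserting a new entry on
  // first use.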
int GetOpCodeForBuiltin(int builtin_op_index) {
std::pair<decltype(builtin_op_to_opcode_data_)::iterator, bool> result =
builtin_op_to_opcode_->insert(
std::make_pair(builtin_op_index, opcodes_->size()));
if (result.second) {
opcodes_->push_back({builtin_op_index, ""});
}
return result.first->second;
}
int GetOpCodeForCustom(const std::string& custom_name) {
std::pair<decltype(custom_op_to_opcode_)::iterator, bool> result =
custom_op_to_opcode_.insert(
std::make_pair(custom_name, opcodes_->size()));
if (result.second) {
opcodes_->push_back({BuiltinOperator_CUSTOM, custom_name});
}
return result.first->second;
}
Subgraph* subgraph_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> execution_plan_;
std::set<int> unused_tensors_;
std::vector<int> tensor_to_written_tensor_;
std::unordered_map<std::string, int> custom_op_to_opcode_;
std::unordered_map<std::string, CustomWriter> custom_op_to_writer_;
std::vector<std::pair<const uint8_t*, size_t>>* buffers_;
std::vector<OpCode>* opcodes_;
absl::flat_hash_map<int, int>* builtin_op_to_opcode_;
std::vector<std::pair<const uint8_t*, size_t>> buffers_data_;
std::vector<OpCode> opcodes_data_;
absl::flat_hash_map<int, int> builtin_op_to_opcode_data_;
bool serialize_dims_signature_;
};
}
#endif
#include "tensorflow/lite/tools/serialization/writer_lib.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/tools/serialization/enum_mapping.h"
#include "tensorflow/lite/tools/versioning/op_version.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
CreateOpCodeTableImpl(flatbuffers::FlatBufferBuilder* fbb,
std::vector<OpCode>* opcodes) {
std::vector<flatbuffers::Offset<OperatorCode>> codes;
for (const auto& it : *opcodes) {
const char* custom_name = it.custom.empty() ? nullptr : it.custom.c_str();
int32_t op_version = it.builtin != tflite::BuiltinOperator_CUSTOM ? 0 : 1;
codes.push_back(
CreateOperatorCodeDirect(*fbb, static_cast<BuiltinOperator>(it.builtin),
custom_name, op_version));
}
return fbb->template CreateVector<flatbuffers::Offset<OperatorCode>>(codes);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
ExportBuffersImpl(flatbuffers::FlatBufferBuilder* fbb,
std::vector<std::pair<const uint8_t*, size_t>>* buffers) {
std::vector<flatbuffers::Offset<Buffer>> buffer_vector;
for (auto buffer : *buffers) {
auto data_offset = fbb->CreateVector(buffer.first, buffer.second);
buffer_vector.push_back(CreateBuffer(*fbb, data_offset));
}
return fbb->template CreateVector<flatbuffers::Offset<Buffer>>(buffer_vector);
}
TfLiteStatus WriteImpl(const std::string& filename, void* data, size_t size) {
FILE* fp = fopen(filename.c_str(), "wb");
if (!fp) return kTfLiteError;
#if FLATBUFFERS_LITTLEENDIAN == 0
const tflite::Model* input_model = tflite::GetModel(data);
tflite::FlatBufferModel::ByteSwapTFLiteModel(input_model);
#endif
const int result_size = fwrite(data, 1, size, fp);
fclose(fp);
if (result_size != size) return kTfLiteError;
return kTfLiteOk;
}
std::pair<BuiltinOptions, flatbuffers::Offset<void>> CreateBuiltinUnion(
flatbuffers::FlatBufferBuilder* fbb, enum BuiltinOperator op,
void* builtin_op_data, int node_inputs_size) {
switch (op) {
#include "tensorflow/lite/tools/serialization/option_writer_generated.h"
}
return std::make_pair(BuiltinOptions_NONE, flatbuffers::Offset<void>());
}
}
template <class T_OUTPUT, class T_INPUT>
flatbuffers::Offset<flatbuffers::Vector<T_OUTPUT>> SubgraphWriter::ExportVector(
flatbuffers::FlatBufferBuilder* fbb, const T_INPUT& v) {
std::vector<T_OUTPUT> inputs(v.begin(), v.end());
return fbb->template CreateVector<T_OUTPUT>(inputs);
}
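// Serializes each node in the execution plan as an Operator, mapping it to an
// opcode index and emitting builtin or custom options.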
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>>
SubgraphWriter::ExportOperators(flatbuffers::FlatBufferBuilder* fbb) {
std::vector<flatbuffers::Offset<Operator>> operators;
std::vector<int> operator_to_opcode;
operator_to_opcode.resize(subgraph_->nodes_size(), -1);
for (int op_index : execution_plan_) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
const TfLiteRegistration* registration = &node_and_registration->second;
if (!registration->custom_name) {
operator_to_opcode[op_index] =
GetOpCodeForBuiltin(registration->builtin_code);
} else {
operator_to_opcode[op_index] =
GetOpCodeForCustom(registration->custom_name);
}
}
for (int op_index : execution_plan_) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
const TfLiteNode& node = node_and_registration->first;
const TfLiteRegistration& registration = node_and_registration->second;
flatbuffers::Offset<void> builtin_options;
BuiltinOptions builtin_options_type = BuiltinOptions_NONE;
auto custom_options_format = CustomOptionsFormat_FLEXBUFFERS;
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0;
if (!registration.custom_name) {
auto builtin_options_and_type = CreateBuiltinUnion(
fbb, static_cast<enum BuiltinOperator>(registration.builtin_code),
node.builtin_data, node.inputs->size);
builtin_options = builtin_options_and_type.second;
builtin_options_type = builtin_options_and_type.first;
} else {
auto custom_writer = custom_op_to_writer_.find(registration.custom_name);
if (custom_writer != custom_op_to_writer_.end() &&
custom_writer->second) {
custom_writer->second(fbb, subgraph_, op_index, &custom_options,
&custom_options_format);
} else {
custom_options = fbb->CreateVector(
reinterpret_cast<const uint8_t*>(node.custom_initial_data),
node.custom_initial_data_size);
}
}
int opcode_index = operator_to_opcode[op_index];
std::vector<int> written_inputs =
RemapTensorIndicesToWritten(TfLiteIntArrayView(node.inputs));
std::vector<int> written_outputs =
RemapTensorIndicesToWritten(TfLiteIntArrayView(node.outputs));
auto inputs = ExportVector<int32_t>(fbb, written_inputs);
auto outputs = ExportVector<int32_t>(fbb, written_outputs);
operators.push_back(CreateOperator(*fbb, opcode_index, inputs, outputs,
builtin_options_type, builtin_options,
custom_options, custom_options_format));
}
return fbb->template CreateVector<flatbuffers::Offset<Operator>>(operators);
}
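// Serializes all tensors except temporaries and explicitly unused ones,
// assigning compacted indices and attaching constant data buffers and
// quantization parameters.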
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>>
SubgraphWriter::ExportTensors(flatbuffers::FlatBufferBuilder* fbb) {
tensor_to_written_tensor_.resize(subgraph_->tensors_size(), -1);
std::vector<flatbuffers::Offset<Tensor>> tensors;
std::vector<bool> tensor_is_temporary(subgraph_->tensors_size(), false);
for (int op_index = 0; op_index < subgraph_->nodes_size(); ++op_index) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
for (auto tensor_index :
TfLiteIntArrayView(node_and_registration->first.temporaries))
tensor_is_temporary[tensor_index] = true;
}
int curr_output_index = 0;
for (int tensor_index = 0; tensor_index < subgraph_->tensors_size();
tensor_index++) {
if (!tensor_is_temporary[tensor_index] &&
unused_tensors_.find(tensor_index) == unused_tensors_.end()) {
tensor_to_written_tensor_[tensor_index] = curr_output_index++;
}
}
for (int tensor_index = 0; tensor_index < subgraph_->tensors_size();
++tensor_index) {
if (tensor_to_written_tensor_[tensor_index] == -1) continue;
if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
int buffer_index = 0;
if (tensor->allocation_type == kTfLiteMmapRo) {
buffer_index = buffers_->size();
buffers_->push_back(std::make_pair(
reinterpret_cast<const uint8_t*>(tensor->data.raw), tensor->bytes));
}
TensorType type = TfLiteTypeToSchemaType(tensor->type);
flatbuffers::Offset<QuantizationParameters> quantization_params;
const flatbuffers::Offset<flatbuffers::Vector<float>> null_array;
flatbuffers::Offset<flatbuffers::Vector<float>> scale_array;
flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point_array;
if (tensor->quantization.type == kTfLiteAffineQuantization) {
if (tensor->params.scale != 0.f) {
scale_array = fbb->CreateVector<float>({tensor->params.scale});
zero_point_array =
fbb->CreateVector<int64_t>({tensor->params.zero_point});
quantization_params = CreateQuantizationParameters(
*fbb, null_array, null_array, scale_array, zero_point_array);
} else {
const TfLiteAffineQuantization* params =
reinterpret_cast<TfLiteAffineQuantization*>(
tensor->quantization.params);
const size_t num_scales = params->scale->size;
std::vector<float> scale_vector(params->scale->data,
params->scale->data + num_scales);
std::vector<int64_t> zero_point_vector(
params->zero_point->data, params->zero_point->data + num_scales);
scale_array = fbb->CreateVector<float>(scale_vector);
zero_point_array = fbb->CreateVector<int64_t>(zero_point_vector);
quantization_params = CreateQuantizationParameters(
*fbb, null_array, null_array, scale_array, zero_point_array,
QuantizationDetails_NONE, 0, params->quantized_dimension);
}
}
if (tensor->dims) {
TfLiteIntArrayView shape_view(tensor->dims);
std::vector<int> shape =
std::vector<int>(shape_view.begin(), shape_view.end());
Offset<flatbuffers::String> tensor_name_offset = 0;
if (tensor->name != nullptr) {
tensor_name_offset = fbb->CreateString(tensor->name);
}
flatbuffers::Offset<flatbuffers::Vector<int32_t>>
shape_signature_offset = 0;
if (serialize_dims_signature_ && tensor->dims_signature != nullptr) {
TfLiteIntArrayView shape_signature_view(tensor->dims_signature);
std::vector<int32_t> shape_signature(shape_signature_view.begin(),
shape_signature_view.end());
shape_signature_offset = ExportVector<int32_t>(fbb, shape_signature);
}
bool has_rank = true;
tensors.push_back(CreateTensor(
*fbb, ExportVector<int32_t>(fbb, shape), type, buffer_index,
tensor_name_offset, quantization_params, tensor->is_variable,
0, shape_signature_offset, has_rank));
}
}
}
return fbb->template CreateVector<flatbuffers::Offset<Tensor>>(tensors);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
SubgraphWriter::ExportBuffers(flatbuffers::FlatBufferBuilder* fbb) {
return ExportBuffersImpl(fbb, buffers_);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
SubgraphWriter::CreateOpCodeTable(flatbuffers::FlatBufferBuilder* fbb) {
return CreateOpCodeTableImpl(fbb, opcodes_);
}
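// Translates original tensor indices into the compacted indices assigned by
// ExportTensors; kTfLiteOptionalTensor (-1) passes through unchanged.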
template <class T>
std::vector<int> SubgraphWriter::RemapTensorIndicesToWritten(const T& input) {
std::vector<int> output;
output.reserve(input.size());
for (int x : input) {
if (x == -1) {
output.push_back(x);
continue;
}
if (tensor_to_written_tensor_[x] != -1) {
output.push_back(tensor_to_written_tensor_[x]);
}
}
return output;
}
TfLiteStatus SubgraphWriter::GetBuffer(std::unique_ptr<uint8_t[]>* out,
size_t* size) {
if (!out || !size) return kTfLiteError;
flatbuffers::FlatBufferBuilder builder(10240);
std::vector<flatbuffers::Offset<SubGraph>> subgraphs_as_vector;
subgraphs_as_vector.push_back(
PopulateAndGetOffset(&builder, subgraph_->GetName()));
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
buffers = ExportBuffers(&builder);
auto description = builder.CreateString("Exported from Subgraph.");
auto op_codes = CreateOpCodeTable(&builder);
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, op_codes,
builder.CreateVector(subgraphs_as_vector),
description, buffers);
::tflite::FinishModelBuffer(builder, model);
::tflite::UpdateOpVersion(builder.GetBufferPointer());
const uint8_t* buffer = builder.GetBufferPointer();
*size = builder.GetSize();
(*out).reset(new uint8_t[*size]);
memcpy(out->get(), buffer, *size);
return kTfLiteOk;
}
flatbuffers::Offset<SubGraph> SubgraphWriter::PopulateAndGetOffset(
flatbuffers::FlatBufferBuilder* builder, const std::string& subgraph_name) {
auto tensors = ExportTensors(builder);
std::vector<int> written_inputs = RemapTensorIndicesToWritten(inputs_);
std::vector<int> written_outputs = RemapTensorIndicesToWritten(outputs_);
auto inputs = ExportVector<int32_t>(builder, written_inputs);
auto outputs = ExportVector<int32_t>(builder, written_outputs);
auto ops = ExportOperators(builder);
auto name = builder->CreateString(subgraph_name);
return CreateSubGraph(*builder, tensors, inputs, outputs, ops, name);
}
TfLiteStatus SubgraphWriter::Write(const std::string& filename) {
std::unique_ptr<uint8_t[]> buffer;
size_t size;
TF_LITE_ENSURE_STATUS(GetBuffer(&buffer, &size));
return WriteImpl(filename, buffer.get(), size);
}
TfLiteStatus SubgraphWriter::RegisterCustomWriter(
const std::string& custom_name, CustomWriter custom_writer) {
if (custom_op_to_writer_.find(custom_name) != custom_op_to_writer_.end()) {
return kTfLiteError;
}
custom_op_to_writer_.insert(std::make_pair(custom_name, custom_writer));
return kTfLiteOk;
}
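// Validates a custom (inputs, outputs, execution_plan) selection: every tensor
// read by a node in the plan must be a declared input, a subgraph variable, a
// constant (kTfLiteMmapRo) tensor, or the output of an earlier node in the
// plan, and every requested output must likewise be a constant or be produced
// by the plan.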
TfLiteStatus SubgraphWriter::CheckInputOutput(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<int>& execution_plan) {
absl::flat_hash_set<int> known_tensors(inputs.begin(), inputs.end());
known_tensors.insert(subgraph_->variables().begin(),
subgraph_->variables().end());
for (int op_index : execution_plan) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
const TfLiteNode& node = node_and_registration->first;
for (int tensor_index : TfLiteIntArrayView(node.inputs)) {
if (tensor_index < 0) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
} else {
return kTfLiteError;
}
}
if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
if (tensor->allocation_type == kTfLiteMmapRo) {
continue;
}
}
if (known_tensors.find(tensor_index) == known_tensors.end()) {
subgraph_->context()->ReportError(
subgraph_->context(),
"Node (%d) uses an input (%d) that is not provided.", op_index,
tensor_index);
return kTfLiteError;
}
}
TfLiteIntArrayView outputs(node.outputs);
known_tensors.insert(outputs.begin(), outputs.end());
}
for (int tensor_index : outputs) {
if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
if (tensor->allocation_type == kTfLiteMmapRo) {
continue;
}
}
if (known_tensors.find(tensor_index) == known_tensors.end()) {
subgraph_->context()->ReportError(
subgraph_->context(),
"Output (%d) is not produced by the execution plan.", tensor_index);
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteStatus SubgraphWriter::SetCustomInputOutput(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<int>& execution_plan) {
TF_LITE_ENSURE_STATUS(CheckInputOutput(inputs, outputs, execution_plan));
inputs_ = inputs;
outputs_ = outputs;
execution_plan_ = execution_plan;
return kTfLiteOk;
}
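// Editor's note: the helper below is an illustrative sketch added for this
// write-up and is not part of the original source. It shows the typical call
// sequence on SubgraphWriter, mirroring the unit tests further below; the
// function and parameter names are placeholders.
TfLiteStatus WriteSubgraphExample(Subgraph* subgraph, const std::string& path) {
  SubgraphWriter writer(subgraph);
  // Serialize straight to disk...
  TF_LITE_ENSURE_STATUS(writer.Write(path));
  // ...or obtain the serialized flatbuffer in memory.
  std::unique_ptr<uint8_t[]> buffer;
  size_t size = 0;
  return writer.GetBuffer(&buffer, &size);
}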
ModelWriter::ModelWriter(Interpreter* interpreter,
bool serialize_dims_signature) {
std::vector<Subgraph*> subgraphs;
subgraphs.reserve(interpreter->subgraphs_size());
for (int i = 0; i < interpreter->subgraphs_size(); ++i) {
subgraphs.push_back(interpreter->subgraph(i));
}
Init(subgraphs, serialize_dims_signature);
}
ModelWriter::ModelWriter(const std::vector<Subgraph*>& subgraphs,
bool serialize_dims_signature) {
Init(subgraphs, serialize_dims_signature);
}
void ModelWriter::Init(const std::vector<Subgraph*>& subgraphs,
bool serialize_dims_signature) {
buffers_.push_back(std::make_pair(nullptr, 0));
subgraph_writers_.reserve(subgraphs.size());
for (auto* subgraph : subgraphs) {
SubgraphWriter writer(subgraph, &buffers_, &opcodes_,
&builtin_op_to_opcode_, serialize_dims_signature);
subgraph_writers_.push_back(writer);
}
if (!subgraphs.empty()) {
absl::flat_hash_map<Subgraph*, int> subgraph_to_new_subgraph_index;
for (int i = 0; i < subgraphs.size(); ++i) {
subgraph_to_new_subgraph_index[subgraphs[i]] = i;
}
auto* all_subgraphs = subgraphs[0]->GetSubgraphs();
for (int i = 0; i < all_subgraphs->size(); ++i) {
auto it = subgraph_to_new_subgraph_index.find(all_subgraphs->at(i));
if (it != subgraph_to_new_subgraph_index.end()) {
subgraph_index_mapper_[i] = it->second;
}
}
}
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
ModelWriter::ExportBuffers(flatbuffers::FlatBufferBuilder* fbb) {
return ExportBuffersImpl(fbb, &buffers_);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
ModelWriter::CreateOpCodeTable(flatbuffers::FlatBufferBuilder* fbb) {
return CreateOpCodeTableImpl(fbb, &opcodes_);
}
TfLiteStatus ModelWriter::GetBuffer(std::unique_ptr<uint8_t[]>* out,
size_t* size) {
if (!out || !size) return kTfLiteError;
flatbuffers::FlatBufferBuilder builder(10240);
std::vector<flatbuffers::Offset<SubGraph>> subgraphs_as_vector;
subgraphs_as_vector.reserve(subgraph_writers_.size());
for (auto& subgraph_writer : subgraph_writers_) | #include "tensorflow/lite/tools/serialization/writer_lib.h"
#include <cstdlib>
#include <fstream>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::FillIntTensor;
std::string CreateFilePath(const std::string& file_name) {
const char* tmp_dir = getenv("TEST_TMPDIR");
return std::string(tmp_dir ? tmp_dir : "./") + file_name;
}
class SingleSubgraphTest : public ::testing::TestWithParam<bool> {
protected:
void WriteToFile(Interpreter* interpreter, const std::string& filename,
bool use_subgraph_writer) {
if (use_subgraph_writer) {
SubgraphWriter writer(&interpreter->primary_subgraph());
CHECK_EQ(writer.Write(filename), kTfLiteOk);
} else {
ModelWriter writer(interpreter);
CHECK_EQ(writer.Write(filename), kTfLiteOk);
}
}
};
TEST_P(SingleSubgraphTest, InvalidDestinations) {
Interpreter interpreter;
interpreter.AddTensors(3);
float foo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<char*>(foo), sizeof(foo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
if (GetParam()) {
SubgraphWriter writer(&interpreter.primary_subgraph());
CHECK_EQ(writer.Write(""), kTfLiteError);
} else {
ModelWriter writer(&interpreter);
CHECK_EQ(writer.Write(""), kTfLiteError);
}
size_t size;
if (GetParam()) {
SubgraphWriter writer(&interpreter.primary_subgraph());
CHECK_EQ(writer.GetBuffer(nullptr, &size), kTfLiteError);
} else {
ModelWriter writer(&interpreter);
CHECK_EQ(writer.GetBuffer(nullptr, &size), kTfLiteError);
}
}
TEST_P(SingleSubgraphTest, FloatModelTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
float foo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<char*>(foo), sizeof(foo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const std::string test_file = CreateFilePath("test_float.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, CustomInputOutputTest) {
Interpreter interpreter;
interpreter.AddTensors(4);
constexpr float kFoo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<const char*>(kFoo), sizeof(kFoo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "d", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({3});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const TfLiteRegistration* reg2 = resolver.FindOp(BuiltinOperator_RELU, 1);
interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, reg2);
const std::string test_file = CreateFilePath("test_custom.tflite");
SubgraphWriter writer(&interpreter.primary_subgraph());
EXPECT_EQ(writer.SetCustomInputOutput({2}, {3},
{1}),
kTfLiteOk);
writer.SetUnusedTensors({0, 1});
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, CustomInputOutputErrorCasesTest) {
Interpreter interpreter;
interpreter.AddTensors(5);
constexpr float kFoo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<const char*>(kFoo), sizeof(kFoo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "d", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(4, kTfLiteFloat32, "e", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({4});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const TfLiteRegistration* reg2 = resolver.FindOp(BuiltinOperator_RELU, 1);
interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, reg2);
const TfLiteRegistration* reg3 = resolver.FindOp(BuiltinOperator_RELU6, 1);
interpreter.AddNodeWithParameters({3}, {4}, nullptr, 0, nullptr, reg3);
SubgraphWriter writer(&interpreter.primary_subgraph());
EXPECT_EQ(writer.SetCustomInputOutput({2}, {3},
{0, 1}),
kTfLiteError);
EXPECT_EQ(writer.SetCustomInputOutput({0, 1}, {4},
{0, 1}),
kTfLiteError);
EXPECT_EQ(writer.SetCustomInputOutput({0, 1}, {3},
{0, 1}),
kTfLiteOk);
}
TEST_P(SingleSubgraphTest, CustomInputOutputVariableTensorTest) {
Interpreter interpreter;
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
interpreter.AddTensors(3);
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "b", {3},
TfLiteQuantization(),
true);
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetInputs({0});
interpreter.SetOutputs({2});
interpreter.SetVariables({1});
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0,
reinterpret_cast<void*>(builtin_data),
resolver.FindOp(BuiltinOperator_ADD, 1));
const std::string test_file = CreateFilePath("test_variables.tflite");
SubgraphWriter writer(&interpreter.primary_subgraph());
EXPECT_EQ(writer.SetCustomInputOutput({0}, {2},
{0}),
kTfLiteOk);
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, PerTensorQuantizedModelTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
interpreter.SetTensorParametersReadWrite(
0, kTfLiteUInt8, "a", {3}, TfLiteQuantizationParams({1 / 256., 128}));
interpreter.SetTensorParametersReadWrite(
1, kTfLiteUInt8, "b", {3}, TfLiteQuantizationParams({1 / 256., 128}));
interpreter.SetTensorParametersReadWrite(
2, kTfLiteUInt8, "c", {3}, TfLiteQuantizationParams({1 / 256., 128}));
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const std::string test_file = CreateFilePath("test_uint8.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, OpVersioningTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {1, 4},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "b", {2},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {4, 4},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
const TfLiteRegistration* reg =
resolver.FindOp(BuiltinOperator_BROADCAST_TO, 2);
interpreter.AddNodeWithParameters({0, 1}, {2},
nullptr, 0,
nullptr, reg);
const std::string test_file = CreateFilePath("test_float.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(new_interpreter->nodes_size(), 1);
TfLiteRegistration output_reg =
new_interpreter->node_and_registration(0)->second;
ASSERT_EQ(output_reg.builtin_code, BuiltinOperator_BROADCAST_TO);
CHECK_EQ(output_reg.version, 2);
}
TEST_P(SingleSubgraphTest, DynamicShapeTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
std::vector<int> dims = {1, 3};
std::vector<int> dims_signature = {-1, 3};
interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "a", dims, TfLiteQuantizationParams{1.0, 0},
false, &dims_signature);
interpreter.SetTensorParametersReadWrite(
1, kTfLiteFloat32, "b", dims, TfLiteQuantizationParams{1.0, 0},
false, &dims_signature);
interpreter.SetTensorParametersReadWrite(
2, kTfLiteFloat32, "c", dims, TfLiteQuantizationParams{1.0, 0},
false, &dims_signature);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const std::string test_file = CreateFilePath("test_dynamic_shape.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
TfLiteTensor* tensor0 = new_interpreter->tensor(0);
CHECK_NOTNULL(tensor0->dims_signature);
TfLiteIntArrayView shape_view(tensor0->dims_signature);
CHECK_EQ(shape_view.size(), 2);
CHECK_EQ(shape_view[0], -1);
}
INSTANTIATE_TEST_SUITE_P(Writer, SingleSubgraphTest, ::testing::Bool());
struct ReshapeTestPattern {
int num_inputs;
bool is_param_valid;
bool has_buggy_non_flatten_shape;
};
class ReshapeLayerTest : public ::testing::TestWithParam<ReshapeTestPattern> {};
TEST_P(ReshapeLayerTest, ReshapeLayerTest) {
const auto param = GetParam();
Interpreter interpreter;
const int total_tensors = param.num_inputs + 1;
interpreter.AddTensors(total_tensors);
int output_shape[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32,
"a", {6},
TfLiteQuantization());
ASSERT_LE(param.num_inputs, 2);
if (param.num_inputs == 2) {
if (param.has_buggy_non_flatten_shape) {
interpreter.SetTensorParametersReadOnly(
1, kTfLiteInt32, "b", {3, 1},
TfLiteQuantization(), reinterpret_cast<char*>(output_shape),
sizeof(output_shape));
} else {
interpreter.SetTensorParametersReadOnly(
1, kTfLiteInt32, "b", {3},
TfLiteQuantization(), reinterpret_cast<char*>(output_shape),
sizeof(output_shape));
}
}
interpreter.SetTensorParametersReadWrite(total_tensors - 1,
kTfLiteFloat32, "c",
{3}, TfLiteQuantization());
std::vector<int> input_tensors(param.num_inputs);
std::iota(input_tensors.begin(), input_tensors.end(), 0);
interpreter.SetInputs(input_tensors);
interpreter.SetOutputs({total_tensors - 1});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteReshapeParams* builtin_data = reinterpret_cast<TfLiteReshapeParams*>(
malloc(sizeof(TfLiteReshapeParams)));
memset(builtin_data, 0, sizeof(TfLiteReshapeParams));
if (param.is_param_valid) {
builtin_data->num_dimensions = 3;
for (int dim = 0; dim < builtin_data->num_dimensions; ++dim) {
builtin_data->shape[dim] = output_shape[dim];
}
}
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_RESHAPE, 1);
interpreter.AddNodeWithParameters(input_tensors,
{total_tensors - 1},
initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
SubgraphWriter writer(&interpreter.primary_subgraph());
std::stringstream ss;
ss << CreateFilePath("test_reshape_") << param.num_inputs
<< param.is_param_valid << ".tflite";
std::string filename = ss.str();
writer.Write(filename);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(filename.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
INSTANTIATE_TEST_SUITE_P(
Writer, ReshapeLayerTest,
::testing::Values(ReshapeTestPattern{2,
true,
false},
ReshapeTestPattern{2,
false,
false},
ReshapeTestPattern{1,
true,
false},
ReshapeTestPattern{2,
true,
true}),
[](const ::testing::TestParamInfo<ReshapeLayerTest::ParamType>& info) {
std::stringstream ss;
ss << "num_inputs_" << info.param.num_inputs << "_valid_param_"
<< info.param.is_param_valid << "_buggy_shape_"
<< info.param.has_buggy_non_flatten_shape;
std::string name = ss.str();
return name;
});
class WhileTest : public subgraph_test_util::ControlFlowOpTest {
protected:
TfLiteCustomAllocation NewCustomAlloc(size_t num_bytes,
int required_alignment) {
char* new_alloc = new char[num_bytes + required_alignment];
char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
AlignTo(required_alignment, reinterpret_cast<intptr_t>(new_alloc)));
custom_alloc_buffers_.emplace_back(new_alloc);
return TfLiteCustomAllocation(
{new_underlying_buffer_aligned_ptr, num_bytes});
}
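  // Rounds `offset` up to the next multiple of `alignment`; e.g.
  // AlignTo(64, 100) == 128 (100 % 64 == 36, so 100 + (64 - 36)), while
  // AlignTo(64, 128) == 128 (already aligned).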
intptr_t AlignTo(size_t alignment, intptr_t offset) {
return offset % alignment == 0 ? offset
: offset + (alignment - offset % alignment);
}
std::vector<std::unique_ptr<char[]>> custom_alloc_buffers_;
};
TEST_F(WhileTest, TestTriangularNumberSequence) {
const int kSeqNumber = 4;
const int kExpectedValue = 15;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
auto alloc =
NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
kDefaultTensorAlignment);
auto* input_data = reinterpret_cast<int*>(alloc.data);
input_data[0] = 1;
interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {kSeqNumber + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue});
ModelWriter writer(interpreter_.get());
const std::string test_file = CreateFilePath("test_while.tflite");
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[0], {1});
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[1], {1});
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[0]), {1});
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[1]), {1});
ASSERT_EQ(new_interpreter->Invoke(), kTfLiteOk);
output1 = new_interpreter->tensor(new_interpreter->outputs()[0]);
CheckIntTensor(output1, {1}, {kSeqNumber + 1});
output2 = new_interpreter->tensor(new_interpreter->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue});
}
TEST_F(WhileTest, TestModelWriterFromSubgraphs) {
const int kSeqNumber = 4;
const int kExpectedValue = 15;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
auto alloc =
NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
kDefaultTensorAlignment);
auto* input_data = reinterpret_cast<int*>(alloc.data);
input_data[0] = 1;
interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {kSeqNumber + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue});
ModelWriter writer_1(interpreter_.get());
const std::string test_file_1 = CreateFilePath("test_while_1.tflite");
writer_1.Write(test_file_1);
std::vector<Subgraph*> subgraphs;
for (int i = 0; i < interpreter_->subgraphs_size(); ++i) {
subgraphs.push_back(interpreter_->subgraph(i));
}
ModelWriter writer_2(subgraphs);
const std::string test_file_2 = CreateFilePath("test_while_2.tflite");
writer_2.Write(test_file_2);
std::ifstream file_ifs_1(test_file_1, std::ios::in);
std::ostringstream model_content_1;
model_content_1 << file_ifs_1.rdbuf();
std::ifstream file_ifs_2(test_file_2, std::ios::in);
std::ostringstream model_content_2;
model_content_2 << file_ifs_2.rdbuf();
EXPECT_FALSE(model_content_1.str().empty());
EXPECT_EQ(model_content_1.str(), model_content_2.str());
}
TEST_F(WhileTest, TestUpdateSubgraphIndices) {
const int kSeqNumber1 = 4;
const int kSeqNumber2 = 5;
const int kExpectedValue1 = 15;
const int kExpectedValue2 = 21;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(4);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber1);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(3), kSeqNumber2);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(4));
Subgraph* primary_subgraph = &interpreter_->primary_subgraph();
const int kInput1 = 0;
const int kInput2 = 1;
const int kUnused1 = 2;
const int kUnused2 = 3;
const int kOutput1 = 4;
const int kOutput2 = 5;
const int kTensorCount = 6;
int first_new_tensor_index;
ASSERT_EQ(primary_subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(primary_subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(primary_subgraph->SetOutputs({kOutput1, kOutput2}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
ASSERT_EQ(primary_subgraph->SetTensorParametersReadWrite(
i, kTfLiteInt32, "", 0, nullptr, {}, false),
kTfLiteOk);
}
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
TfLiteWhileParams* params1 =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params1->cond_subgraph_index = 1;
params1->body_subgraph_index = 2;
TfLiteWhileParams* params2 =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params2->cond_subgraph_index = 3;
params2->body_subgraph_index = 4;
int while1_index, while2_index;
primary_subgraph->AddNodeWithParameters({kInput1, kInput2},
{kUnused1, kOutput1}, {}, nullptr, 0,
params1, while_reg, &while1_index);
primary_subgraph->AddNodeWithParameters({kInput1, kInput2},
{kUnused2, kOutput2}, {}, nullptr, 0,
params2, while_reg, &while2_index);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
auto alloc =
NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
kDefaultTensorAlignment);
auto* input_data = reinterpret_cast<int*>(alloc.data);
input_data[0] = 1;
interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {kExpectedValue1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue2});
ModelWriter writer({interpreter_->subgraph(0), interpreter_->subgraph(3),
interpreter_->subgraph(4)});
writer.SetCustomInputOutput(0, {kInput1, kInput2},
{kOutput2}, {while2_index});
const std::string test_file = CreateFilePath("test_while.tflite");
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[0], {1});
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[1], {1});
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[0]), {1});
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[1]), {1});
ASSERT_EQ(new_interpreter->Invoke(), kTfLiteOk);
ASSERT_EQ(new_interpreter->outputs().size(), 1);
TfLiteTensor* output |
862 | cpp | tensorflow/tensorflow | delegate_provider | tensorflow/lite/tools/delegates/delegate_provider.cc | tensorflow/lite/tools/delegates/delegate_provider_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_
#define TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
using TfLiteDelegatePtr =
std::unique_ptr<TfLiteOpaqueDelegate, void (*)(TfLiteOpaqueDelegate*)>;
class DelegateProvider {
public:
virtual ~DelegateProvider() {}
virtual std::vector<Flag> CreateFlags(ToolParams* params) const = 0;
virtual void LogParams(const ToolParams& params, bool verbose) const = 0;
virtual TfLiteDelegatePtr CreateTfLiteDelegate(
const ToolParams& params) const = 0;
virtual std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
const ToolParams& params) const = 0;
virtual std::string GetName() const = 0;
const ToolParams& DefaultParams() const { return default_params_; }
protected:
template <typename T>
Flag CreateFlag(const char* name, ToolParams* params,
const std::string& usage) const {
return Flag(
name,
[params, name](const T& val, int argv_position) {
params->Set<T>(name, val, argv_position);
},
default_params_.Get<T>(name), usage, Flag::kOptional);
}
ToolParams default_params_;
};
using DelegateProviderPtr = std::unique_ptr<DelegateProvider>;
using DelegateProviderList = std::vector<DelegateProviderPtr>;
class DelegateProviderRegistrar {
public:
template <typename T>
struct Register {
Register() {
auto* const instance = DelegateProviderRegistrar::GetSingleton();
instance->providers_.emplace_back(DelegateProviderPtr(new T()));
}
};
static const DelegateProviderList& GetProviders() {
return GetSingleton()->providers_;
}
private:
DelegateProviderRegistrar() {}
DelegateProviderRegistrar(const DelegateProviderRegistrar&) = delete;
DelegateProviderRegistrar& operator=(const DelegateProviderRegistrar&) =
delete;
static DelegateProviderRegistrar* GetSingleton() {
static auto* instance = new DelegateProviderRegistrar();
return instance;
}
DelegateProviderList providers_;
};
#define REGISTER_DELEGATE_PROVIDER_VNAME(T) gDelegateProvider_##T##_
#define REGISTER_DELEGATE_PROVIDER(T) \
static tflite::tools::DelegateProviderRegistrar::Register<T> \
REGISTER_DELEGATE_PROVIDER_VNAME(T);
TfLiteDelegatePtr CreateNullDelegate();
inline const DelegateProviderList& GetRegisteredDelegateProviders() {
return DelegateProviderRegistrar::GetProviders();
}
class ProvidedDelegateList {
public:
struct ProvidedDelegate {
ProvidedDelegate()
: provider(nullptr), delegate(CreateNullDelegate()), rank(0) {}
const DelegateProvider* provider;
TfLiteDelegatePtr delegate;
int rank;
};
  ProvidedDelegateList() : ProvidedDelegateList(nullptr) {}
explicit ProvidedDelegateList(ToolParams* params)
: providers_(GetRegisteredDelegateProviders()), params_(params) {}
const DelegateProviderList& providers() const { return providers_; }
void AddAllDelegateParams() const;
void AppendCmdlineFlags(std::vector<Flag>& flags) const;
void RemoveCmdlineFlag(std::vector<Flag>& flags,
const std::string& name) const;
std::vector<ProvidedDelegate> CreateAllRankedDelegates(
const ToolParams& params) const;
std::vector<ProvidedDelegate> CreateAllRankedDelegates() const {
return CreateAllRankedDelegates(*params_);
}
private:
const DelegateProviderList& providers_;
ToolParams* const params_;
};
}
}
#endif
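// Editor's note: an illustrative provider sketch, added for this write-up and
// not part of the original source. It shows how the interface above is
// typically implemented and registered; the class name and flag name are
// placeholders, the ToolParam::Create/AddParam calls assume the usual
// tool_params.h API (not shown here), and the provider simply returns the
// null delegate where a real provider would build one.
namespace tflite {
namespace tools {
class ExampleDelegateProvider : public DelegateProvider {
 public:
  ExampleDelegateProvider() {
    default_params_.AddParam("use_example_delegate",
                             ToolParam::Create<bool>(false));
  }
  std::vector<Flag> CreateFlags(ToolParams* params) const final {
    return {CreateFlag<bool>("use_example_delegate", params,
                             "apply the example delegate")};
  }
  void LogParams(const ToolParams& params, bool verbose) const final {
    // A real provider would log its parameter values here.
  }
  TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final {
    // Placeholder: a null delegate is skipped by CreateAllRankedDelegates().
    return CreateNullDelegate();
  }
  std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
      const ToolParams& params) const final {
    return {CreateTfLiteDelegate(params), 0};
  }
  std::string GetName() const final { return "ExampleDelegate"; }
};
REGISTER_DELEGATE_PROVIDER(ExampleDelegateProvider)
}  // namespace tools
}  // namespace tflite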
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace tflite {
namespace tools {
TfLiteDelegatePtr CreateNullDelegate() {
return TfLiteDelegatePtr(nullptr, [](TfLiteOpaqueDelegate*) {});
}
void ProvidedDelegateList::AddAllDelegateParams() const {
for (const auto& provider : providers_) {
params_->Merge(provider->DefaultParams());
}
}
void ProvidedDelegateList::AppendCmdlineFlags(std::vector<Flag>& flags) const {
for (const auto& provider : providers_) {
auto delegate_flags = provider->CreateFlags(params_);
flags.insert(flags.end(), delegate_flags.begin(), delegate_flags.end());
}
}
void ProvidedDelegateList::RemoveCmdlineFlag(std::vector<Flag>& flags,
const std::string& name) const {
decltype(flags.begin()) it;
for (it = flags.begin(); it < flags.end();) {
if (it->GetFlagName() == name) {
it = flags.erase(it);
} else {
++it;
}
}
}
std::vector<ProvidedDelegateList::ProvidedDelegate>
ProvidedDelegateList::CreateAllRankedDelegates(const ToolParams& params) const {
std::vector<ProvidedDelegateList::ProvidedDelegate> delegates;
for (const auto& provider : providers_) {
auto ptr_rank = provider->CreateRankedTfLiteDelegate(params);
if (ptr_rank.first == nullptr) continue;
static bool already_logged = false;
if (!already_logged) {
TFLITE_LOG(INFO) << provider->GetName() << " delegate created.";
#ifndef NDEBUG
provider->LogParams(params, false);
#endif
already_logged = true;
}
ProvidedDelegateList::ProvidedDelegate info;
info.provider = provider.get();
info.delegate = std::move(ptr_rank.first);
info.rank = ptr_rank.second;
delegates.emplace_back(std::move(info));
}
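  // Sort ascending by rank so that lower-ranked delegates come first in the
  // returned list.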
std::sort(delegates.begin(), delegates.end(),
[](const ProvidedDelegateList::ProvidedDelegate& a,
const ProvidedDelegateList::ProvidedDelegate& b) {
return a.rank < b.rank;
});
return delegates;
}
}
} | #include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/test_util.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
TEST(ProvidedDelegateListTest, AddAllDelegateParams) {
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
#if !TFLITE_WITH_STABLE_ABI
EXPECT_TRUE(params.HasParam("use_nnapi"));
#endif
}
TEST(ProvidedDelegateListTest, AppendCmdlineFlags) {
std::vector<Flag> flags;
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
providers.AppendCmdlineFlags(flags);
EXPECT_FALSE(flags.empty());
}
TEST(KernelTestDelegateProvidersTest, CreateAllRankedDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
#if TFLITE_WITH_STABLE_ABI
ASSERT_EQ(TfLiteInitializeShimsForTest(), 0);
params.Set<bool>("use_xnnpack", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("XNNPACK", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
#else
params.Set<bool>("use_xnnpack", true, 2);
params.Set<bool>("use_dummy_delegate", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(2, delegates.size());
EXPECT_EQ("DummyDelegate", delegates.front().provider->GetName());
EXPECT_EQ(1, delegates.front().rank);
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ("XNNPACK", delegates.back().provider->GetName());
EXPECT_NE(nullptr, delegates.back().delegate.get());
EXPECT_EQ(2, delegates.back().rank);
#endif
#endif
}
}
}
} |
863 | cpp | tensorflow/tensorflow | nnapi_compatibility_lib | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.cc | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_DELEGATES_COMPATIBILITY_NNAPI_NNAPI_COMPATIBILITY_LIB_H_
#define TENSORFLOW_LITE_TOOLS_DELEGATES_COMPATIBILITY_NNAPI_NNAPI_COMPATIBILITY_LIB_H_
#include <map>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
namespace tflite {
namespace tools {
TfLiteStatus CheckCompatibility(
TfLiteContext* context, int32_t runtime_feature_level,
std::vector<int>* supported_nodes,
std::map<int, std::vector<tflite::delegate::nnapi::NNAPIValidationFailure>>*
failures_by_node);
class CompatibilityCheckerDelegate : public TfLiteDelegate {
public:
explicit CompatibilityCheckerDelegate(int32_t runtime_feature_level)
: TfLiteDelegate(TfLiteDelegateCreate()),
runtime_feature_level_(runtime_feature_level),
supported_nodes_(),
failures_by_node_() {
Prepare = DoPrepare;
CopyFromBufferHandle = DoCopyFromBufferHandle;
CopyToBufferHandle = DoCopyToBufferHandle;
FreeBufferHandle = DoFreeBufferHandle;
data_ = &delegate_data_;
}
std::vector<int> GetSupportedNodes() { return supported_nodes_; }
std::map<int, std::vector<tflite::delegate::nnapi::NNAPIValidationFailure>>
GetFailuresByNode() {
return failures_by_node_;
}
protected:
static TfLiteStatus DoPrepare(TfLiteContext* context,
TfLiteDelegate* delegate) {
auto self = reinterpret_cast<CompatibilityCheckerDelegate*>(delegate);
TF_LITE_ENSURE_OK(context,
CheckCompatibility(context, self->runtime_feature_level_,
&(self->supported_nodes_),
&(self->failures_by_node_)));
return kTfLiteOk;
}
static TfLiteStatus DoCopyFromBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) {
return kTfLiteError;
}
static TfLiteStatus DoCopyToBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) {
return kTfLiteError;
}
static void DoFreeBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* handle) {}
private:
int delegate_data_;
int runtime_feature_level_;
std::vector<int> supported_nodes_;
std::map<int, std::vector<tflite::delegate::nnapi::NNAPIValidationFailure>>
failures_by_node_;
};
}
}
#endif
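// Editor's note: a minimal usage sketch added for illustration; it is not part
// of the original source. It assumes a loaded tflite::FlatBufferModel and the
// usual interpreter-builder / builtin-op-resolver headers, and shows how the
// checker delegate above is attached so that DoPrepare() runs during
// interpreter construction and records per-node validation results. The
// helper name is a placeholder.
std::vector<int> GetNnapiSupportedNodes(tflite::FlatBufferModel* model) {
  tflite::tools::CompatibilityCheckerDelegate checker_delegate(
      tflite::delegate::nnapi::kMinSdkVersionForNNAPI13);
  tflite::ops::builtin::BuiltinOpResolver resolver;
  tflite::InterpreterBuilder builder(*model, resolver);
  builder.AddDelegate(&checker_delegate);
  std::unique_ptr<tflite::Interpreter> interpreter;
  builder(&interpreter);  // Triggers Prepare(), i.e. CheckCompatibility().
  return checker_delegate.GetSupportedNodes();
}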
#include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.h"
#include <map>
#include <utility>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace tools {
using ::tflite::delegate::nnapi::NNAPIValidationFailure;
TfLiteStatus CheckCompatibility(
TfLiteContext* context, int32_t runtime_feature_level,
std::vector<int>* supported_nodes,
std::map<int, std::vector<NNAPIValidationFailure>>* failures_by_node) {
if (!context) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_ERROR, "Context is nullptr.");
return kTfLiteError;
}
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
for (int node_index : TfLiteIntArrayView(execution_plan)) {
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Node index: %d", node_index);
TfLiteNode* node;
TfLiteRegistration* registration;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_index, &node, ®istration));
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
if (NNAPIDelegateKernel::Validate(
context, registration, runtime_feature_level, node,
true,
nullptr, &map_failures)) {
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Built-in Code: %d",
registration->builtin_code);
if (supported_nodes) {
supported_nodes->push_back(node_index);
}
} else {
if (failures_by_node) {
(*failures_by_node)[node_index] = std::move(map_failures);
}
}
}
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.h"
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace tools {
namespace {
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type,
CompatibilityCheckerDelegate* checker_delegate) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
SetDelegate(checker_delegate);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
}
TEST(NnapiDelegateCompatibilityTest, InvalidInput) {
EXPECT_EQ(CheckCompatibility(nullptr, 0, nullptr, nullptr), kTfLiteError);
}
TEST(NnapiDelegateCompatibilityTest, CompatibleModel) {
CompatibilityCheckerDelegate checker_delegate(
tflite::delegate::nnapi::kMinSdkVersionForNNAPI13);
AddOpModel add_op_model(
{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE, &checker_delegate);
EXPECT_EQ(checker_delegate.GetSupportedNodes().size(), 1);
EXPECT_EQ(checker_delegate.GetFailuresByNode().size(), 0);
}
TEST(NnapiDelegateCompatibilityTest, IncompatibleModel) {
CompatibilityCheckerDelegate checker_delegate(
tflite::delegate::nnapi::kMinSdkVersionForNNAPI13);
AddOpModel add_op_model(
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_RELU_N1_TO_1,
&checker_delegate);
EXPECT_EQ(checker_delegate.GetSupportedNodes().size(), 0);
EXPECT_EQ(checker_delegate.GetFailuresByNode().size(), 1);
}
}
} |
864 | cpp | tensorflow/tensorflow | nnapi_delegate_compatibility_checker | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.cc | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_DELEGATES_COMPATIBILITY_NNAPI_NNAPI_DELEGATE_COMPATIBILITY_CHECKER_H_
#define TENSORFLOW_LITE_TOOLS_DELEGATES_COMPATIBILITY_NNAPI_NNAPI_DELEGATE_COMPATIBILITY_CHECKER_H_
#include <string>
#include <unordered_map>
#include "absl/status/status.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/delegate_compatibility_checker_base.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
inline constexpr int kDefaultRuntimeFeatureLevel = 8;
class NnapiDelegateCompatibilityChecker
: public DelegateCompatibilityCheckerBase {
public:
NnapiDelegateCompatibilityChecker() {
runtime_feature_level_ = kDefaultRuntimeFeatureLevel;
}
absl::Status checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) override;
static absl::Status checkOpCompatibilityOnline(
TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration,
std::unordered_map<std::string, std::string> dcc_configs,
tflite::proto::OpCompatibilityResult* op_result);
std::unordered_map<std::string, std::string> getDccConfigurations() override;
absl::Status setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) override;
private:
absl::Status checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) override;
int runtime_feature_level_;
};
}
}
#endif
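// Editor's note: a hedged usage sketch, not part of the original source; it
// mirrors the unit tests further below. The model path and the chosen runtime
// feature level are placeholders.
absl::Status CheckModelAgainstNnapi(const std::string& model_path) {
  auto fb_model = tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
  if (!fb_model) return absl::InvalidArgumentError("Could not load model.");
  tflite::tools::NnapiDelegateCompatibilityChecker checker;
  auto configs = checker.getDccConfigurations();
  configs["nnapi-runtime_feature_level"] = "5";  // Any single digit in 1-8.
  absl::Status status = checker.setDccConfigurations(configs);
  if (!status.ok()) return status;
  tflite::proto::CompatibilityResult result;
  status = checker.checkModelCompatibilityOnline(fb_model.get(), &result);
  if (!status.ok()) return status;
  for (const auto& op_result : result.compatibility_results()) {
    if (!op_result.is_supported()) {
      // Each failure carries a failure_type and a human-readable description.
    }
  }
  return absl::OkStatus();
}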
#include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.h"
#include <cctype>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/delegate_compatibility_checker_util.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/online_helper_delegate.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace tools {
namespace {
void getCanonicalFeatureLevel(int runtime_feature_level,
int& canonical_feature_level) {
switch (runtime_feature_level) {
case 1:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_1;
break;
case 2:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_2;
break;
case 3:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_3;
break;
case 4:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_4;
break;
case 5:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_5;
break;
case 6:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_6;
break;
case 7:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_7;
break;
case 8:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_8;
break;
default:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_8;
}
}
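// A valid runtime feature level string is a single digit in the range 1-8.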
absl::Status IsValidFeatureLevelInt(const std::string& s) {
if (s.size() == 1 && std::isdigit(s[0]) && s[0] > '0' && s[0] < '9') {
return absl::OkStatus();
}
return absl::InvalidArgumentError("Invalid runtime feature level.");
}
absl::Status extractRuntimeFeatureLevel(
const std::unordered_map<std::string, std::string>& dcc_configs,
int& runtime_feature_level) {
std::string str_runtime_feature_level;
if (dcc_configs.find("nnapi-runtime_feature_level") == dcc_configs.end()) {
for (const auto& dcc_config : dcc_configs) {
if (absl::StrContains(dcc_config.first, "nnapi")) {
return absl::InvalidArgumentError(
"The correct flag name is 'nnapi-runtime_feature_level");
}
}
str_runtime_feature_level =
std::to_string(tools::kDefaultRuntimeFeatureLevel);
} else {
str_runtime_feature_level = dcc_configs.at("nnapi-runtime_feature_level");
RETURN_IF_ERROR(IsValidFeatureLevelInt(str_runtime_feature_level));
}
runtime_feature_level = std::stoi(str_runtime_feature_level);
return absl::OkStatus();
}
absl::Status convertToCompatibilityFailureType(
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures,
proto::OpCompatibilityResult* op_result) {
for (const auto& status : map_failures) {
auto compatibility_failure = op_result->add_compatibility_failures();
compatibility_failure->set_description(status.message);
switch (status.type) {
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperator:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedAndroidVersion:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_VERSION);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperatorVersion:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR_VERSION);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedInputType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_INPUT_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kNotRestrictedScaleCompliant:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_NOT_RESTRICTED_SCALE_COMPLIANT);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOutputType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OUTPUT_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperandSize:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_SIZE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperandValue:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_VALUE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedHybridOperator:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_HYBRID_OPERATOR);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedQuantizationType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_QUANTIZATION_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::kMissingRequiredOperand:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_MISSING_REQUIRED_OPERAND);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperandRank:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_RANK);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kInputTensorShouldHaveConstantShape:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_INPUT_TENSOR_SHOULD_HAVE_CONSTANT_SHAPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperatorVariant:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR_VARIANT);
break;
case delegate::nnapi::NNAPIValidationFailureType::kNoActivationExpected:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_NO_ACTIVATION_EXPECTED);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedQuantizationParameters:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_UNSUPPORTED_QUANTIZATION_PARAMETERS);
break;
default:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
compatibility_failure->set_description(
"Unknown validation failure type.");
}
}
return absl::OkStatus();
}
}
absl::Status
tools::NnapiDelegateCompatibilityChecker::checkOpCompatibilityOnline(
TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration,
std::unordered_map<std::string, std::string> dcc_configs,
tflite::proto::OpCompatibilityResult* op_result) {
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
int runtime_feature_level;
RETURN_IF_ERROR(
extractRuntimeFeatureLevel(dcc_configs, runtime_feature_level));
getCanonicalFeatureLevel(runtime_feature_level, runtime_feature_level);
if (NNAPIDelegateKernel::Validate(
context, registration, runtime_feature_level, node,
true,
nullptr, &map_failures)) {
op_result->set_is_supported(true);
} else {
RETURN_IF_ERROR(convertToCompatibilityFailureType(map_failures, op_result));
op_result->set_is_supported(false);
}
return absl::OkStatus();
}
std::unordered_map<std::string, std::string>
tools::NnapiDelegateCompatibilityChecker::getDccConfigurations() {
std::unordered_map<std::string, std::string> dcc_configs;
dcc_configs["nnapi-runtime_feature_level"] =
std::to_string(runtime_feature_level_);
return dcc_configs;
}
absl::Status tools::NnapiDelegateCompatibilityChecker::setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) {
RETURN_IF_ERROR(
extractRuntimeFeatureLevel(dcc_configs, runtime_feature_level_));
return absl::OkStatus();
}
absl::Status
tools::NnapiDelegateCompatibilityChecker::checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) {
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder interpreter_builder(*model_buffer, resolver);
auto dcc_configs = getDccConfigurations();
std::function<absl::Status(TfLiteContext*, const TfLiteNode*,
const TfLiteRegistration*,
std::unordered_map<std::string, std::string>,
proto::OpCompatibilityResult*)>
check_op_func_ptr = &checkOpCompatibilityOnline;
OnlineHelperDelegate delegate(dcc_configs, check_op_func_ptr, result);
interpreter_builder.AddDelegate(&delegate);
interpreter_builder(&interpreter);
return absl::OkStatus();
}
absl::Status tools::NnapiDelegateCompatibilityChecker::checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) {
return absl::UnimplementedError(
"Offline mode is not yet supported on NNAPI delegate compatibility "
"checker.");
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.h"
#include <cstdint>
#include <limits>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace tools {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
namespace {
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
class NnapiDccTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override { compatibility_result_.Clear(); }
NnapiDelegateCompatibilityChecker nnapi_dcc_;
proto::CompatibilityResult compatibility_result_;
};
}
TEST_F(NnapiDccTest, ValidRuntimeFeatureLevel) {
std::unordered_map dcc_configs = nnapi_dcc_.getDccConfigurations();
EXPECT_EQ(dcc_configs["nnapi-runtime_feature_level"], "8");
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs["nnapi-runtime_feature_level"] = "1";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs["nnapi-runtime_feature_level"] = "8";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs.clear();
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
EXPECT_EQ(nnapi_dcc_.getDccConfigurations()["nnapi-runtime_feature_level"],
"8");
}
TEST_F(NnapiDccTest, InvalidRuntimeFeatureLevel) {
std::unordered_map dcc_configs = nnapi_dcc_.getDccConfigurations();
dcc_configs["nnapi-runtime_feature_level"] = "03";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "a";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "28123497123489123841212344516";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "30.0";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "-30";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "9";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs.clear();
dcc_configs["nnapi-runtim_feature_level"] = "8";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(NnapiDccTest, CompatibleModelOnlineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
auto model = fb_model->GetModel();
EXPECT_EQ(model->subgraphs()->size(), 1);
EXPECT_EQ(model->subgraphs()->Get(0)->operators()->size(), 2);
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
for (auto op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_TRUE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 2);
}
TEST_F(NnapiDccTest, IncompatibleModelOperation) {
AddOpModel add_op_model(
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_RELU_N1_TO_1);
auto fb_model = tflite::FlatBufferModel::BuildFromModel(
tflite::GetModel(add_op_model.GetModelBuffer()));
ASSERT_TRUE(fb_model);
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
for (auto op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 1);
}
TEST_F(NnapiDccTest, IncompatibleModelFeatureLevel) {
AddOpModel add_op_model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_NONE);
auto fb_model = tflite::FlatBufferModel::BuildFromModel(
tflite::GetModel(add_op_model.GetModelBuffer()));
ASSERT_TRUE(fb_model);
auto nnapi_configs = nnapi_dcc_.getDccConfigurations();
nnapi_configs["nnapi-runtime_feature_level"] = "2";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(nnapi_configs));
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
for (auto op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 1);
}
}
} |
865 | cpp | tensorflow/tensorflow | gpu_delegate_compatibility_checker | tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.cc | tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_DELEGATES_COMPATIBILITY_GPU_GPU_DELEGATE_COMPATIBILITY_CHECKER_H_
#define TENSORFLOW_LITE_TOOLS_DELEGATES_COMPATIBILITY_GPU_GPU_DELEGATE_COMPATIBILITY_CHECKER_H_
#include <string>
#include <unordered_map>
#include "absl/status/status.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/delegate_compatibility_checker_base.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
class GpuDelegateCompatibilityChecker
: public DelegateCompatibilityCheckerBase {
public:
GpuDelegateCompatibilityChecker() {}
absl::Status checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) override;
std::unordered_map<std::string, std::string> getDccConfigurations() override;
absl::Status setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) override;
private:
absl::Status checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) override;
};
}
}
#endif
#include "tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.h"
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "absl/status/status.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
namespace {
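// Maps an absl::Status returned by the GPU compatibility check to the
// corresponding CompatibilityFailureType entry in the result proto.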
void convertToValidationFailureType(absl::Status status,
proto::OpCompatibilityResult* op_result) {
auto compatibility_failure = op_result->add_compatibility_failures();
compatibility_failure->set_description(std::string(status.message()));
switch (status.code()) {
case absl::StatusCode::kInvalidArgument:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INVALID_ARGUMENT);
break;
case absl::StatusCode::kUnimplemented:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNIMPLEMENTED_ERROR);
break;
case absl::StatusCode::kInternal:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
break;
case absl::StatusCode::kOutOfRange:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_OUT_OF_RANGE);
break;
default:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
compatibility_failure->set_description(
"Unknown validation failure type.");
}
}
}
std::unordered_map<std::string, std::string>
tools::GpuDelegateCompatibilityChecker::getDccConfigurations() {
return {};
}
absl::Status tools::GpuDelegateCompatibilityChecker::setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) {
return absl::OkStatus();
}
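// Online (runtime-based) checking is not implemented for the GPU delegate;
// only offline checking via per-op signatures is supported.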
absl::Status
tools::GpuDelegateCompatibilityChecker::checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) {
return absl::UnimplementedError(
"Online mode is not supported on GPU delegate compatibility checker.");
}
absl::Status tools::GpuDelegateCompatibilityChecker::checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) {
auto status = CheckGpuDelegateCompatibility(op_sig);
if (!status.ok()) {
convertToValidationFailureType(status, op_result);
op_result->set_is_supported(false);
} else {
op_result->set_is_supported(true);
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
namespace tflite {
namespace tools {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
namespace {
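// Helper model wrapping a single ADD operator, available for building small
// in-memory test graphs.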
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
}
TEST(GpuDelegateCompatibilityCheckerTest, CheckOnlineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_EQ(
gpu_dcc
.checkModelCompatibilityOnline(fb_model.get(), &compatibility_result)
.code(),
absl::StatusCode::kUnimplemented);
}
TEST(GpuDelegateCompatibilityCheckerTest, CompatibleModelOfflineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_OK(gpu_dcc.checkModelCompatibilityOffline(fb_model.get(),
&compatibility_result));
for (auto op_compatibility_result :
compatibility_result.compatibility_results()) {
EXPECT_TRUE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result.compatibility_results_size(), 2);
}
TEST(GpuDelegateCompatibilityCheckerTest, IncompatibleModelOfflineMode) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv3d_huge_im2col.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_OK(gpu_dcc.checkModelCompatibilityOffline(fb_model.get(),
&compatibility_result));
for (auto op_compatibility_result :
compatibility_result.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result.compatibility_results_size(), 1);
}
}
} |
866 | cpp | tensorflow/tensorflow | flatbuffer_to_proto | tensorflow/lite/acceleration/configuration/flatbuffer_to_proto.cc | tensorflow/lite/acceleration/configuration/flatbuffer_to_proto_test.cc | #ifndef TENSORFLOW_LITE_ACCELERATION_CONFIGURATION_FLATBUFFER_TO_PROTO_H_
#define TENSORFLOW_LITE_ACCELERATION_CONFIGURATION_FLATBUFFER_TO_PROTO_H_
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
proto::ComputeSettings ConvertFromFlatbuffer(
const ComputeSettings& settings, bool skip_mini_benchmark_settings = false);
proto::ComputeSettings ConvertFromFlatbuffer(
const ComputeSettingsT& settings,
bool skip_mini_benchmark_settings = false);
proto::MiniBenchmarkEvent ConvertFromFlatbuffer(
const MiniBenchmarkEvent& event);
proto::MiniBenchmarkEvent ConvertFromFlatbuffer(
const MiniBenchmarkEventT& event);
}
#endif
#include "tensorflow/lite/acceleration/configuration/flatbuffer_to_proto.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
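// The helpers below translate each FlatBuffer configuration enum or table
// into its proto equivalent; unexpected enum values are logged and mapped to
// a safe default.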
proto::ExecutionPreference ConvertExecutionPreference(
ExecutionPreference preference) {
switch (preference) {
case ExecutionPreference_ANY:
return proto::ExecutionPreference::ANY;
case ExecutionPreference_LOW_LATENCY:
return proto::ExecutionPreference::LOW_LATENCY;
case ExecutionPreference_LOW_POWER:
return proto::ExecutionPreference::LOW_POWER;
case ExecutionPreference_FORCE_CPU:
return proto::ExecutionPreference::FORCE_CPU;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for ExecutionPreference: %d", preference);
return proto::ExecutionPreference::ANY;
}
proto::Delegate ConvertDelegate(Delegate delegate) {
switch (delegate) {
case Delegate_NONE:
return proto::Delegate::NONE;
case Delegate_NNAPI:
return proto::Delegate::NNAPI;
case Delegate_GPU:
return proto::Delegate::GPU;
case Delegate_HEXAGON:
return proto::Delegate::HEXAGON;
case Delegate_XNNPACK:
return proto::Delegate::XNNPACK;
case Delegate_EDGETPU:
return proto::Delegate::EDGETPU;
case Delegate_EDGETPU_CORAL:
return proto::Delegate::EDGETPU_CORAL;
case Delegate_CORE_ML:
return proto::Delegate::CORE_ML;
case Delegate_ARMNN:
return proto::Delegate::ARMNN;
case Delegate_MTK_NEURON:
return proto::Delegate::MTK_NEURON;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for Delegate: %d",
delegate);
return proto::Delegate::NONE;
}
proto::NNAPIExecutionPreference ConvertNNAPIExecutionPreference(
NNAPIExecutionPreference preference) {
switch (preference) {
case NNAPIExecutionPreference_UNDEFINED:
return proto::NNAPIExecutionPreference::UNDEFINED;
case NNAPIExecutionPreference_NNAPI_LOW_POWER:
return proto::NNAPIExecutionPreference::NNAPI_LOW_POWER;
case NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER:
return proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER;
case NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED:
return proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPreference: %d",
preference);
return proto::NNAPIExecutionPreference::UNDEFINED;
}
proto::NNAPIExecutionPriority ConvertNNAPIExecutionPriority(
NNAPIExecutionPriority priority) {
switch (priority) {
case NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED;
case NNAPIExecutionPriority_NNAPI_PRIORITY_LOW:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW;
case NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM;
case NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH:
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPriority: %d", priority);
return proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED;
}
proto::GPUBackend ConvertGPUBackend(GPUBackend backend) {
switch (backend) {
case GPUBackend_UNSET:
return proto::GPUBackend::UNSET;
case GPUBackend_OPENCL:
return proto::GPUBackend::OPENCL;
case GPUBackend_OPENGL:
return proto::GPUBackend::OPENGL;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for GPUBackend: %d",
backend);
return proto::GPUBackend::UNSET;
}
proto::GPUInferenceUsage ConvertGPUInferenceUsage(
GPUInferenceUsage preference) {
switch (preference) {
case GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER:
return proto::GPUInferenceUsage::
GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
case GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED:
return proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferenceUsage: %d", preference);
return proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
}
proto::GPUInferencePriority ConvertGPUInferencePriority(
GPUInferencePriority priority) {
switch (priority) {
case GPUInferencePriority_GPU_PRIORITY_AUTO:
return proto::GPUInferencePriority::GPU_PRIORITY_AUTO;
case GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION:
return proto::GPUInferencePriority::GPU_PRIORITY_MAX_PRECISION;
case GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY:
return proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY;
case GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE:
return proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferencePriority: %d", priority);
return proto::GPUInferencePriority::GPU_PRIORITY_AUTO;
}
proto::EdgeTpuPowerState ConvertEdgeTpuPowerState(EdgeTpuPowerState state) {
switch (state) {
case EdgeTpuPowerState_UNDEFINED_POWERSTATE:
return proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE;
case EdgeTpuPowerState_TPU_CORE_OFF:
return proto::EdgeTpuPowerState::TPU_CORE_OFF;
case EdgeTpuPowerState_READY:
return proto::EdgeTpuPowerState::READY;
case EdgeTpuPowerState_ACTIVE_MIN_POWER:
return proto::EdgeTpuPowerState::ACTIVE_MIN_POWER;
case EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER:
return proto::EdgeTpuPowerState::ACTIVE_VERY_LOW_POWER;
case EdgeTpuPowerState_ACTIVE_LOW_POWER:
return proto::EdgeTpuPowerState::ACTIVE_LOW_POWER;
case EdgeTpuPowerState_ACTIVE:
return proto::EdgeTpuPowerState::ACTIVE;
case EdgeTpuPowerState_OVER_DRIVE:
return proto::EdgeTpuPowerState::OVER_DRIVE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for EdgeTpuSettings::PowerState: %d",
state);
return proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE;
}
proto::FallbackSettings ConvertFallbackSettings(
const FallbackSettings& settings) {
proto::FallbackSettings proto_settings;
proto_settings.set_allow_automatic_fallback_on_compilation_error(
settings.allow_automatic_fallback_on_compilation_error());
proto_settings.set_allow_automatic_fallback_on_execution_error(
settings.allow_automatic_fallback_on_execution_error());
return proto_settings;
}
proto::NNAPISettings ConvertNNAPISettings(const NNAPISettings& settings) {
proto::NNAPISettings proto_settings;
if (settings.accelerator_name() != nullptr) {
proto_settings.set_accelerator_name(settings.accelerator_name()->str());
}
if (settings.cache_directory() != nullptr) {
proto_settings.set_cache_directory(settings.cache_directory()->str());
}
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
proto_settings.set_execution_preference(
ConvertNNAPIExecutionPreference(settings.execution_preference()));
proto_settings.set_no_of_nnapi_instances_to_cache(
settings.no_of_nnapi_instances_to_cache());
if (settings.fallback_settings() != nullptr) {
*(proto_settings.mutable_fallback_settings()) =
ConvertFallbackSettings(*settings.fallback_settings());
}
proto_settings.set_allow_nnapi_cpu_on_android_10_plus(
settings.allow_nnapi_cpu_on_android_10_plus());
proto_settings.set_execution_priority(
ConvertNNAPIExecutionPriority(settings.execution_priority()));
proto_settings.set_allow_dynamic_dimensions(
settings.allow_dynamic_dimensions());
proto_settings.set_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
proto_settings.set_use_burst_computation(settings.use_burst_computation());
proto_settings.set_support_library_handle(settings.support_library_handle());
return proto_settings;
}
proto::GPUSettings ConvertGPUSettings(const GPUSettings& settings) {
proto::GPUSettings proto_settings;
proto_settings.set_is_precision_loss_allowed(
settings.is_precision_loss_allowed());
proto_settings.set_enable_quantized_inference(
settings.enable_quantized_inference());
proto_settings.set_force_backend(ConvertGPUBackend(settings.force_backend()));
proto_settings.set_inference_priority1(
ConvertGPUInferencePriority(settings.inference_priority1()));
proto_settings.set_inference_priority2(
ConvertGPUInferencePriority(settings.inference_priority2()));
proto_settings.set_inference_priority3(
ConvertGPUInferencePriority(settings.inference_priority3()));
proto_settings.set_inference_preference(
ConvertGPUInferenceUsage(settings.inference_preference()));
if (settings.cache_directory() != nullptr) {
proto_settings.set_cache_directory(settings.cache_directory()->str());
}
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
return proto_settings;
}
proto::HexagonSettings ConvertHexagonSettings(const HexagonSettings& settings) {
proto::HexagonSettings proto_settings;
proto_settings.set_debug_level(settings.debug_level());
proto_settings.set_powersave_level(settings.powersave_level());
proto_settings.set_print_graph_profile(settings.print_graph_profile());
proto_settings.set_print_graph_debug(settings.print_graph_debug());
return proto_settings;
}
proto::XNNPackSettings ConvertXNNPackSettings(const XNNPackSettings& settings) {
proto::XNNPackSettings proto_settings;
proto_settings.set_num_threads(settings.num_threads());
proto_settings.set_flags(::tflite::proto::XNNPackFlags(settings.flags()));
return proto_settings;
}
proto::CoreMLSettings ConvertCoreMLSettings(const CoreMLSettings& settings) {
proto::CoreMLSettings proto_settings;
switch (settings.enabled_devices()) {
case CoreMLSettings_::EnabledDevices_DEVICES_ALL:
proto_settings.set_enabled_devices(proto::CoreMLSettings::DEVICES_ALL);
break;
case CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE:
proto_settings.set_enabled_devices(
proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE);
break;
default:
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid devices enum: %d",
settings.enabled_devices());
}
proto_settings.set_coreml_version(settings.coreml_version());
proto_settings.set_max_delegated_partitions(
settings.max_delegated_partitions());
proto_settings.set_min_nodes_per_partition(
settings.min_nodes_per_partition());
return proto_settings;
}
proto::CPUSettings ConvertCPUSettings(const CPUSettings& settings) {
proto::CPUSettings proto_settings;
proto_settings.set_num_threads(settings.num_threads());
return proto_settings;
}
proto::EdgeTpuDeviceSpec ConvertEdgeTpuDeviceSpec(
const EdgeTpuDeviceSpec& device_spec) {
proto::EdgeTpuDeviceSpec proto_settings;
if (device_spec.device_paths() != nullptr) {
for (int i = 0; i < device_spec.device_paths()->size(); ++i) {
auto device_path = device_spec.device_paths()->Get(i);
proto_settings.add_device_paths(device_path->str());
}
}
proto_settings.set_platform_type(
static_cast<proto::EdgeTpuDeviceSpec::PlatformType>(
device_spec.platform_type()));
proto_settings.set_num_chips(device_spec.num_chips());
proto_settings.set_chip_family(device_spec.chip_family());
return proto_settings;
}
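// Copies EdgeTPU settings, including the nested device spec and every
// inactive power config entry.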
proto::EdgeTpuSettings ConvertEdgeTpuSettings(const EdgeTpuSettings& settings) {
proto::EdgeTpuSettings proto_settings;
proto_settings.set_inference_power_state(
ConvertEdgeTpuPowerState(settings.inference_power_state()));
proto_settings.set_inference_priority(settings.inference_priority());
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
if (settings.edgetpu_device_spec() != nullptr) {
*(proto_settings.mutable_edgetpu_device_spec()) =
ConvertEdgeTpuDeviceSpec(*settings.edgetpu_device_spec());
}
proto_settings.set_float_truncation_type(
static_cast<proto::EdgeTpuSettings::FloatTruncationType>(
settings.float_truncation_type()));
  auto inactive_power_configs = settings.inactive_power_configs();
  if (inactive_power_configs != nullptr) {
    for (int i = 0; i < inactive_power_configs->size(); ++i) {
      auto config = inactive_power_configs->Get(i);
auto proto_config = proto_settings.add_inactive_power_configs();
proto_config->set_inactive_power_state(
ConvertEdgeTpuPowerState(config->inactive_power_state()));
proto_config->set_inactive_timeout_us(config->inactive_timeout_us());
}
}
return proto_settings;
}
proto::StableDelegateLoaderSettings ConvertStableDelegateLoaderSettings(
const StableDelegateLoaderSettings& settings) {
proto::StableDelegateLoaderSettings proto_settings;
if (settings.delegate_path() != nullptr) {
proto_settings.set_delegate_path(settings.delegate_path()->str());
}
if (settings.delegate_name() != nullptr) {
proto_settings.set_delegate_name(settings.delegate_name()->str());
}
return proto_settings;
}
proto::CoralSettings ConvertCoralSettings(const CoralSettings& settings) {
proto::CoralSettings proto_settings;
if (settings.device() != nullptr) {
proto_settings.set_device(settings.device()->str());
}
proto_settings.set_performance(
static_cast<proto::CoralSettings::Performance>(settings.performance()));
proto_settings.set_usb_always_dfu(settings.usb_always_dfu());
proto_settings.set_usb_max_bulk_in_queue_length(
settings.usb_max_bulk_in_queue_length());
return proto_settings;
}
proto::GoogleEdgeTpuSettings::Priority ConvertGoogleEdgeTpuPriority(
GoogleEdgeTpuSettings_::Priority priority) {
switch (priority) {
case GoogleEdgeTpuSettings_::Priority_PRIORITY_UNDEFINED:
return proto::GoogleEdgeTpuSettings::PRIORITY_UNDEFINED;
case GoogleEdgeTpuSettings_::Priority_PRIORITY_LOW:
return proto::GoogleEdgeTpuSettings::PRIORITY_LOW;
case GoogleEdgeTpuSettings_::Priority_PRIORITY_MEDIUM:
return proto::GoogleEdgeTpuSettings::PRIORITY_MEDIUM;
case GoogleEdgeTpuSettings_::Priority_PRIORITY_HIGH:
return proto::GoogleEdgeTpuSettings::PRIORITY_HIGH;
}
}
proto::GoogleEdgeTpuSettings::TriState ConvertGoogleEdgeTpuTriState(
GoogleEdgeTpuSettings_::TriState tri_state) {
switch (tri_state) {
case GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED:
return proto::GoogleEdgeTpuSettings::TRISTATE_UNDEFINED;
case GoogleEdgeTpuSettings_::TriState_TRISTATE_FALSE:
return proto::GoogleEdgeTpuSettings::TRISTATE_FALSE;
case GoogleEdgeTpuSettings_::TriState_TRISTATE_TRUE:
return proto::GoogleEdgeTpuSettings::TRISTATE_TRUE;
}
}
proto::GoogleEdgeTpuSettings ConvertGoogleEdgetpuSettings(
const GoogleEdgeTpuSettings& settings) {
proto::GoogleEdgeTpuSettings proto_settings;
proto_settings.set_log_verbosity(settings.log_verbosity());
proto_settings.set_enable_tracing(settings.enable_tracing());
proto_settings.set_priority(
ConvertGoogleEdgeTpuPriority(settings.priority()));
if (settings.extension_data()) {
proto_settings.set_extension_data(settings.extension_data()->data(),
settings.extension_data()->size());
}
if (settings.model_identifier()) {
proto_settings.set_model_identifier(settings.model_identifier()->str());
}
proto_settings.set_use_async_api(settings.use_async_api());
proto_settings.set_delegate_should_manage_cache_for_inputs(
settings.delegate_should_manage_cache_for_inputs());
proto_settings.set_delegate_should_manage_cache_for_outputs(
settings.delegate_should_manage_cache_for_outputs());
proto_settings.set_prefer_cache_coherency_for_inputs(
ConvertGoogleEdgeTpuTriState(
settings.prefer_cache_coherency_for_inputs()));
proto_settings.set_prefer_cache_coherency_for_outputs(
ConvertGoogleEdgeTpuTriState(
settings.prefer_cache_coherency_for_outputs()));
proto_settings.set_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
return proto_settings;
}
proto::CompilationCachingSettings ConvertCompilationCachingSettings(
const CompilationCachingSettings& settings) {
proto::CompilationCachingSettings proto_settings;
if (settings.cache_dir() != nullptr) {
proto_settings.set_cache_dir(settings.cache_dir()->str());
}
if (settings.model_token() != nullptr) {
proto_settings.set_model_token(settings.model_token()->str());
}
return proto_settings;
}
proto::MtkNeuronSettings ConvertMtkNeuronSettings(
const MtkNeuronSettings& settings) {
proto::MtkNeuronSettings proto_settings;
proto_settings.set_execution_preference(
static_cast<proto::MtkNeuronSettings_ExecutionPreference>(
settings.execution_preference()));
proto_settings.set_execution_priority(
static_cast<proto::MtkNeuronSettings_ExecutionPriority>(
settings.execution_priority()));
auto optimization_hints = settings.optimization_hints();
if (optimization_hints != nullptr) {
for (auto hint : *optimization_hints) {
proto_settings.add_optimization_hints(
static_cast<proto::MtkNeuronSettings_OptimizationHint>(hint));
}
}
proto_settings.set_operation_check_mode(
static_cast<proto::MtkNeuronSettings_OperationCheckMode>(
settings.operation_check_mode()));
proto_settings.set_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
proto_settings.set_use_ahwb(settings.use_ahwb());
proto_settings.set_use_cacheable_buffer(settings.use_cacheable_buffer());
auto compile_options = settings.compile_options();
if (compile_options != nullptr) {
for (auto option : *compile_options) {
proto_settings.add_compile_options(option->str());
}
}
auto accelerator_names = settings.accelerator_names();
if (accelerator_names != nullptr) {
for (auto name : *accelerator_names) {
proto_settings.add_accelerator_names(name->str());
}
}
if (settings.neuron_config_path()) {
proto_settings.set_neuron_config_path(settings.neuron_config_path()->str());
}
return proto_settings;
}
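// Top-level TFLiteSettings conversion: copies scalar fields and forwards each
// per-delegate sub-table to the dedicated converter above when present.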
proto::TFLiteSettings ConvertTfliteSettings(const TFLiteSettings& settings) {
proto::TFLiteSettings proto_settings;
proto_settings.set_delegate(ConvertDelegate(settings.delegate()));
if (settings.nnapi_settings() != nullptr) {
*proto_settings.mutable_nnapi_settings() =
ConvertNNAPISettings(*settings.nnapi_settings());
}
if (settings.gpu_settings() != nullptr) {
*proto_settings.mutable_gpu_settings() =
ConvertGPUSettings(*settings.gpu_settings());
}
if (settings.hexagon_settings() != nullptr) {
*proto_settings.mutable_hexagon_settings() =
ConvertHexagonSettings(*settings.hexagon_settings());
}
if (settings.xnnpack_settings() != nullptr) {
*proto_settings.mutable_xnnpack_settings() =
ConvertXNNPackSettings(*settings.xnnpack_settings());
}
if (settings.coreml_settings() != nullptr) {
*proto_settings.mutable_coreml_settings() =
ConvertCoreMLSettings(*settings.coreml_settings());
}
if (settings.cpu_settings() != nullptr) {
*proto_settings.mutable_cpu_settings() =
ConvertCPUSettings(*settings.cpu_settings());
}
proto_settings.set_max_delegated_partitions(
settings.max_delegated_partitions());
if (settings.edgetpu_settings() != nullptr) {
*proto_settings.mutable_edgetpu_settings() =
ConvertEdgeTpuSettings(*settings.edgetpu_settings());
}
if (settings.coral_settings() != nullptr) {
*proto_settings.mutable_coral_settings() =
ConvertCoralSettings(*settings.coral_settings());
}
if (settings.fallback_settings() != nullptr) {
*proto_settings.mutable_fallback_settings() =
ConvertFallbackSettings(*settings.fallback_settings());
}
proto_settings.set_disable_default_delegates(
settings.disable_default_delegates());
if (settings.stable_delegate_loader_settings() != nullptr) {
*proto_settings.mutable_stable_delegate_loader_settings() =
ConvertStableDelegateLoaderSettings(
*settings.stable_delegate_loader_settings());
}
if (settings.google_edgetpu_settings() != nullptr) {
*proto_settings.mutable_google_edgetpu_settings() =
ConvertGoogleEdgetpuSettings(*settings.google_edgetpu_settings());
}
if (settings.compilation_caching_settings() != nullptr) {
*proto_settings.mutable_compilation_caching_settings() =
ConvertCompilationCachingSettings(
*settings.compilation_caching_settings());
}
if (settings.mtk_neuron_settings() != nullptr) {
*proto_settings.mutable_mtk_neuron_settings() =
ConvertMtkNeuronSettings(*settings.mtk_neuron_settings());
}
return proto_settings;
}
proto::ModelFile ConvertModelFile(const ModelFile& model_file) {
proto::ModelFile proto_settings;
if (model_file.filename() != nullptr) {
proto_settings.set_filename(model_file.filename()->str());
}
proto_settings.set_fd(model_file.fd());
proto_settings.set_offset(model_file.offset());
proto_settings.set_length(model_file.length());
return proto_settings;
}
proto::BenchmarkStoragePaths ConvertBenchmarkStoragePaths(
const BenchmarkStoragePaths& storage_paths) {
proto::BenchmarkStoragePaths proto_settings;
if (storage_paths.storage_file_path() != nullptr) {
proto_settings.set_storage_file_path(
storage_paths.storage_file_path()->str());
}
if (storage_paths.data_directory_path() != nullptr) {
proto_settings.set_data_directory_path(
storage_paths.data_directory_path()->str());
}
return proto_settings;
}
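// Mini-benchmark settings: converts every candidate TFLiteSettings entry plus
// the optional model file and storage paths.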
proto::MinibenchmarkSettings ConvertMinibenchmarkSettings(
const MinibenchmarkSettings& settings) {
proto::MinibenchmarkSettings proto_settings;
if (settings.settings_to_test() != nullptr &&
settings.settings_to_test()->size() > 0) {
for (int i = 0; i < settings.settings_to_test()->size(); ++i) {
auto tflite_setting = settings.settings_to_test()->Get(i);
auto proto_tflite_setting = proto_settings.add_settings_to_test();
*proto_tflite_setting = ConvertTfliteSettings(*tflite_setting);
}
}
if (settings.model_file() != nullptr) {
*(proto_settings.mutable_model_file()) =
ConvertModelFile(*settings.model_file());
}
if (settings.storage_paths() != nullptr) {
*(proto_settings.mutable_storage_paths()) =
ConvertBenchmarkStoragePaths(*settings.storage_paths());
}
return proto_settings;
}
proto::BenchmarkEventType ConvertBenchmarkEventType(BenchmarkEventType type) {
switch (type) {
case BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE:
return proto::BenchmarkEventType::UNDEFINED_BENCHMARK_EVENT_TYPE;
case BenchmarkEventType_START:
return proto::BenchmarkEventType::START;
case BenchmarkEventType_END:
return proto::BenchmarkEventType::END;
case BenchmarkEventType_ERROR:
return proto::BenchmarkEventType::ERROR;
case BenchmarkEventType_LOGGED:
return proto::BenchmarkEventType::LOGGED;
case BenchmarkEventType_RECOVERED_ERROR:
return proto::BenchmarkEventType::RECOVERED_ERROR;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for BenchmarkEventType: %d", type);
return proto::BenchmarkEventType::UNDEFINED_BENCHMARK_EVENT_TYPE;
}
proto::BenchmarkMetric ConvertBenchmarkMetric(const BenchmarkMetric& metric) {
proto::BenchmarkMetric proto_metric;
if (metric.name() != nullptr) {
proto_metric.set_name(metric.name()->str());
}
auto values = metric.values();
if (values != nullptr) {
for (int i = 0; i < values->size(); ++i) {
proto_metric.add_values(values->Get(i));
}
}
return proto_metric;
}
proto::BenchmarkResult ConvertBenchmarkResult(const BenchmarkResult& result) {
proto::BenchmarkResult proto_result;
auto initialization_time_us = result.initialization_time_us();
if (initialization_time_us != nullptr) {
for (int i = 0; i < initialization_time_us->size(); ++i) {
proto_result.add_initialization_time_us(initialization_time_us->Get(i));
}
}
auto inference_time_us = result.inference_time_us();
if (inference_time_us != nullptr) {
for (int i = 0; i < inference_time_us->size(); ++i) {
proto_result.add_inference_time_us(inference_time_us->Get(i));
}
}
proto_result.set_max_memory_kb(result.max_memory_kb());
proto_result.set_ok(result.ok());
auto metrics = result.metrics();
if (metrics != nullptr) {
for (int i = 0; i < metrics->size(); ++i) {
*proto_result.add_metrics() = ConvertBenchmarkMetric(*metrics->Get(i));
}
}
return proto_result;
}
proto::BenchmarkStage ConvertBenchmarkStage(BenchmarkStage stage) {
switch (stage) {
case BenchmarkStage_UNKNOWN:
return proto::BenchmarkStage::UNKNOWN;
case BenchmarkStage_INITIALIZATION:
return proto::BenchmarkStage::INITIALIZATION;
case BenchmarkStage_INFERENCE:
return proto::BenchmarkStage::INFERENCE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for BenchmarkStage: %d",
stage);
return proto::BenchmarkStage::UNKNOWN;
}
proto::ErrorCode ConvertBenchmarkErrorCode(const ErrorCode& code) {
proto::ErrorCode proto_code;
proto_code.set_source(ConvertDelegate(code.source()));
proto_code.set_tflite_error(code.tflite_error());
proto_code.set_underlying_api_error(code.underlying_api_error());
return proto_code;
}
proto::BenchmarkError ConvertBenchmarkError(const BenchmarkError& error) {
proto::BenchmarkError proto_error;
proto_error.set_stage(ConvertBenchmarkStage(error.stage()));
proto_error.set_exit_code(error.exit_code());
proto_error.set_signal(error.signal());
auto error_codes = error.error_code();
if (error_codes != nullptr) {
for (int i = 0; i < error_codes->size(); ++i) {
*proto_error.add_error_code() =
ConvertBenchmarkErrorCode(*error_codes->Get(i));
}
}
proto_error.set_mini_benchmark_error_code(error.mini_benchmark_error_code());
return proto_error;
}
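// A benchmark event bundles the TFLiteSettings that were tested with either a
// result or an error; nested tables are converted only when present.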
proto::BenchmarkEvent ConvertBenchmarkEvent(const BenchmarkEvent& event) {
proto::BenchmarkEvent proto_event;
if (event.tflite_settings() != nullptr) {
*proto_event.mutable_tflite_settings() =
ConvertTfliteSettings(*event.tflite_settings());
}
proto_event.set_event_type(ConvertBenchmarkEventType(event.event_type()));
if (event.result() != nullptr) {
*proto_event.mutable_result() = ConvertBenchmarkResult(*event.result());
}
if (event.error() != nullptr) {
*proto_event.mutable_error() = ConvertBenchmarkError(*event.error());
}
proto_event.set_boottime_us(event.boottime_us());
proto_event.set_wallclock_us(event.wallclock_us());
return proto_event;
}
proto::BestAccelerationDecision ConvertBestAccelerationDecision(
const BestAccelerationDecision& decision) {
proto::BestAccelerationDecision proto_decision;
proto_decision.set_number_of_source_events(
decision.number_of_source_events());
if (decision.min_latency_event() != nullptr) {
*proto_decision.mutable_min_latency_event() =
ConvertBenchmarkEvent(*decision.min_latency_event());
}
proto_decision.set_min_inference_time_us(decision.min_inference_time_us());
return proto_decision;
}
proto::BenchmarkInitializationFailure ConvertBenchmarkInitializationFailure(
const BenchmarkInitializationFailure& init_failure) {
proto::BenchmarkInitializationFailure proto_init_failure;
proto_init_failure.set_initialization_status(
init_failure.initialization_status());
return proto_init_failure;
}
}
proto::ComputeSettings ConvertFromFlatbuffer(
const ComputeSettings& settings, bool skip_mini_benchmark_settings) {
proto::ComputeSettings proto_settings;
proto_settings.set_preference(
ConvertExecutionPreference(settings.preference()));
if (settings.tflite_settings() != nullptr) {
*(proto_settings.mutable_tflite_settings()) =
ConvertTfliteSettings(*settings.tflite_settings());
}
if (settings.model_namespace_for_statistics() != nullptr) {
proto_settings.set_model_namespace_for_statistics(
settings.model_namespace_for_statistics()->str());
}
if (settings.model_identifier_for_statistics() != nullptr) {
proto_settings.set_model_identifier_for_statistics(
settings.model_identifier_for_statistics()->str());
}
if (!skip_mini_benchmark_settings &&
settings.settings_to_test_locally() != nullptr) {
*(proto_settings.mutable_settings_to_test_locally()) =
ConvertMinibenchmarkSettings(*settings.settings_to_test_locally());
}
return proto_settings;
}
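// Object-API overload: packs the ComputeSettingsT into a temporary flatbuffer
// so the table-based conversion above can be reused.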
proto::ComputeSettings ConvertFromFlatbuffer(
const ComputeSettingsT& settings, bool skip_mini_benchmark_settings) {
flatbuffers::FlatBufferBuilder fbb;
fbb.Finish(ComputeSettings::Pack(fbb, &settings));
auto settings_fbb =
flatbuffers::GetRoot<ComputeSettings>(fbb.GetBufferPointer());
return ConvertFromFlatbuffer(*settings_fbb, skip_mini_benchmark_settings);
}
proto::MiniBenchmarkEvent ConvertFromFlatbuffer(
const MiniBenchmarkEvent& event) {
proto::MiniBenchmarkEvent proto_event;
proto_event.set_is_log_flushing_event(event.is_log_flushing_event());
if (event.best_acceleration_decision() != nullptr) {
*proto_event.mutable_best_acceleration_decision() =
ConvertBestAccelerationDecision(*event.best_acceleration_decision());
}
if (event.initialization_failure() != nullptr) {
*proto_event.mutable_initialization_failure() =
ConvertBenchmarkInitializationFailure(*event.initialization_failure());
}
if (event.benchmark_event() != nullptr) {
*proto_event.mutable_benchmark_event() =
ConvertBenchmarkEvent(*event.benchmark_event());
}
return proto_event;
}
proto::MiniBenchmarkEvent ConvertFromFlatbuffer(
    const MiniBenchmarkEventT& event) {
  // Mirrors the ComputeSettingsT overload above: pack the object-API type
  // into a temporary flatbuffer, then reuse the table-based conversion.
  flatbuffers::FlatBufferBuilder fbb;
  fbb.Finish(MiniBenchmarkEvent::Pack(fbb, &event));
  auto event_fbb =
      flatbuffers::GetRoot<MiniBenchmarkEvent>(fbb.GetBufferPointer());
  return ConvertFromFlatbuffer(*event_fbb);
}
} |
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
namespace acceleration {
namespace {
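// Each Check* helper fills a ComputeSettingsT, runs ConvertFromFlatbuffer, and
// verifies that the flatbuffer enum value maps to the expected proto value.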
class ConversionTest : public ::testing::Test {
protected:
void CheckDelegateEnum(Delegate input, proto::Delegate output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->delegate = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output, compute.tflite_settings().delegate());
}
void CheckExecutionPreference(ExecutionPreference input,
proto::ExecutionPreference output) {
settings_.preference = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output, compute.preference());
}
void CheckNNAPIExecutionPreference(NNAPIExecutionPreference input,
proto::NNAPIExecutionPreference output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
settings_.tflite_settings->nnapi_settings->execution_preference = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(
output,
compute.tflite_settings().nnapi_settings().execution_preference());
}
void CheckNNAPIExecutionPriority(NNAPIExecutionPriority input,
proto::NNAPIExecutionPriority output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
settings_.tflite_settings->nnapi_settings->execution_priority = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output,
compute.tflite_settings().nnapi_settings().execution_priority());
}
void CheckGPUBackend(GPUBackend input, proto::GPUBackend output) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
settings_.tflite_settings->gpu_settings->force_backend = input;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(output, compute.tflite_settings().gpu_settings().force_backend());
}
ComputeSettingsT settings_;
MiniBenchmarkEventT event_;
};
TEST_F(ConversionTest, Delegate) {
CheckDelegateEnum(Delegate_NONE, proto::Delegate::NONE);
CheckDelegateEnum(Delegate_NNAPI, proto::Delegate::NNAPI);
CheckDelegateEnum(Delegate_GPU, proto::Delegate::GPU);
CheckDelegateEnum(Delegate_HEXAGON, proto::Delegate::HEXAGON);
CheckDelegateEnum(Delegate_EDGETPU, proto::Delegate::EDGETPU);
CheckDelegateEnum(Delegate_EDGETPU_CORAL, proto::Delegate::EDGETPU_CORAL);
CheckDelegateEnum(Delegate_XNNPACK, proto::Delegate::XNNPACK);
CheckDelegateEnum(Delegate_CORE_ML, proto::Delegate::CORE_ML);
}
TEST_F(ConversionTest, ExecutionPreference) {
CheckExecutionPreference(ExecutionPreference_ANY,
proto::ExecutionPreference::ANY);
CheckExecutionPreference(ExecutionPreference_LOW_LATENCY,
proto::ExecutionPreference::LOW_LATENCY);
CheckExecutionPreference(ExecutionPreference_LOW_POWER,
proto::ExecutionPreference::LOW_POWER);
CheckExecutionPreference(ExecutionPreference_FORCE_CPU,
proto::ExecutionPreference::FORCE_CPU);
}
TEST_F(ConversionTest, ModelIdentifier) {
settings_.model_identifier_for_statistics = "id";
settings_.model_namespace_for_statistics = "ns";
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.model_namespace_for_statistics(), "ns");
EXPECT_EQ(compute.model_identifier_for_statistics(), "id");
}
TEST_F(ConversionTest, NNAPISettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
input_settings->accelerator_name = "a";
input_settings->cache_directory = "d";
input_settings->model_token = "t";
input_settings->allow_fp16_precision_for_fp32 = true;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_EQ(output_settings.accelerator_name(), "a");
EXPECT_EQ(output_settings.cache_directory(), "d");
EXPECT_EQ(output_settings.model_token(), "t");
EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
EXPECT_FALSE(output_settings.allow_nnapi_cpu_on_android_10_plus());
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_compilation_error());
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_execution_error());
input_settings->fallback_settings = std::make_unique<FallbackSettingsT>();
input_settings->fallback_settings
->allow_automatic_fallback_on_compilation_error = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_TRUE(output_settings.fallback_settings()
.allow_automatic_fallback_on_compilation_error());
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_execution_error());
input_settings->fallback_settings
->allow_automatic_fallback_on_compilation_error = false;
input_settings->fallback_settings
->allow_automatic_fallback_on_execution_error = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.fallback_settings()
.allow_automatic_fallback_on_compilation_error());
EXPECT_TRUE(output_settings.fallback_settings()
.allow_automatic_fallback_on_execution_error());
input_settings->allow_fp16_precision_for_fp32 = false;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.allow_fp16_precision_for_fp32());
}
TEST_F(ConversionTest, NNAPIAllowDynamicDimensions) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.allow_dynamic_dimensions());
input_settings->allow_dynamic_dimensions = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_TRUE(output_settings.allow_dynamic_dimensions());
}
TEST_F(ConversionTest, NNAPIBurstComputation) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_FALSE(output_settings.use_burst_computation());
input_settings->use_burst_computation = true;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_TRUE(output_settings.use_burst_computation());
}
TEST_F(ConversionTest, NNAPIExecutionPreference) {
CheckNNAPIExecutionPreference(
NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER);
CheckNNAPIExecutionPreference(
NNAPIExecutionPreference_NNAPI_LOW_POWER,
proto::NNAPIExecutionPreference::NNAPI_LOW_POWER);
CheckNNAPIExecutionPreference(
NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED,
proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED);
CheckNNAPIExecutionPreference(NNAPIExecutionPreference_UNDEFINED,
proto::NNAPIExecutionPreference::UNDEFINED);
}
TEST_F(ConversionTest, NNAPIExecutionPriority) {
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW);
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM);
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH);
CheckNNAPIExecutionPriority(
NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED);
}
TEST_F(ConversionTest, NNAPISupportLibraryHandle) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->nnapi_settings =
std::make_unique<NNAPISettingsT>();
NNAPISettingsT* input_settings =
settings_.tflite_settings->nnapi_settings.get();
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::NNAPISettings output_settings =
compute.tflite_settings().nnapi_settings();
EXPECT_EQ(output_settings.support_library_handle(), 0);
input_settings->support_library_handle = std::numeric_limits<int64_t>::max();
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().nnapi_settings();
EXPECT_EQ(output_settings.support_library_handle(),
std::numeric_limits<int64_t>::max());
}
TEST_F(ConversionTest, GPUSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();
input_settings->is_precision_loss_allowed = true;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
EXPECT_TRUE(output_settings.is_precision_loss_allowed());
input_settings->is_precision_loss_allowed = false;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().gpu_settings();
EXPECT_FALSE(output_settings.is_precision_loss_allowed());
EXPECT_TRUE(output_settings.enable_quantized_inference());
input_settings->enable_quantized_inference = false;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().gpu_settings();
EXPECT_FALSE(output_settings.enable_quantized_inference());
}
TEST_F(ConversionTest, GPUBacked) {
CheckGPUBackend(GPUBackend_UNSET, proto::GPUBackend::UNSET);
CheckGPUBackend(GPUBackend_OPENCL, proto::GPUBackend::OPENCL);
CheckGPUBackend(GPUBackend_OPENGL, proto::GPUBackend::OPENGL);
}
TEST_F(ConversionTest, GPUInferencePriority) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();
input_settings->inference_priority1 =
GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE;
input_settings->inference_priority2 =
GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE,
output_settings.inference_priority1());
EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY,
output_settings.inference_priority2());
EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_AUTO,
output_settings.inference_priority3());
}
TEST_F(ConversionTest, GPUInferencePreference) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();
input_settings->inference_preference =
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
EXPECT_EQ(
proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
output_settings.inference_preference());
input_settings->inference_preference =
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().gpu_settings();
EXPECT_EQ(proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED,
output_settings.inference_preference());
}
TEST_F(ConversionTest, HexagonSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->hexagon_settings =
std::make_unique<HexagonSettingsT>();
HexagonSettingsT* input_settings =
settings_.tflite_settings->hexagon_settings.get();
input_settings->debug_level = 1;
input_settings->powersave_level = 2;
input_settings->print_graph_profile = true;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
const proto::HexagonSettings& output_settings =
compute.tflite_settings().hexagon_settings();
EXPECT_EQ(1, output_settings.debug_level());
EXPECT_EQ(2, output_settings.powersave_level());
EXPECT_TRUE(output_settings.print_graph_profile());
EXPECT_FALSE(output_settings.print_graph_debug());
}
TEST_F(ConversionTest, EdgeTpuSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->edgetpu_settings =
std::make_unique<EdgeTpuSettingsT>();
EdgeTpuSettingsT* input_settings =
settings_.tflite_settings->edgetpu_settings.get();
constexpr EdgeTpuPowerState kInferencePowerState = EdgeTpuPowerState_ACTIVE;
constexpr EdgeTpuPowerState kInactivePowerState =
EdgeTpuPowerState_ACTIVE_MIN_POWER;
constexpr int64_t kInactiveTimeoutUs = 300000;
constexpr int kInferencePriority = 2;
const std::string kModelToken = "model_token";
constexpr EdgeTpuSettings_::FloatTruncationType kFloatTruncationType =
EdgeTpuSettings_::FloatTruncationType_HALF;
input_settings->inference_power_state = kInferencePowerState;
input_settings->inference_priority = kInferencePriority;
input_settings->model_token = kModelToken;
input_settings->float_truncation_type = kFloatTruncationType;
std::unique_ptr<EdgeTpuInactivePowerConfigT> inactive_power_config(
new EdgeTpuInactivePowerConfigT());
inactive_power_config->inactive_power_state = kInactivePowerState;
inactive_power_config->inactive_timeout_us = kInactiveTimeoutUs;
input_settings->inactive_power_configs.emplace_back(
std::move(inactive_power_config));
constexpr EdgeTpuDeviceSpec_::PlatformType kPlatformType =
EdgeTpuDeviceSpec_::PlatformType_MMIO;
constexpr int kNumChips = 1;
const std::string kDevicePath = "/dev/abrolhos";
constexpr int kChipFamily = 1;
input_settings->edgetpu_device_spec = std::make_unique<EdgeTpuDeviceSpecT>();
EdgeTpuDeviceSpecT* input_spec = input_settings->edgetpu_device_spec.get();
input_spec->platform_type = kPlatformType;
input_spec->num_chips = kNumChips;
input_spec->chip_family = kChipFamily;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::EdgeTpuSettings output_settings =
compute.tflite_settings().edgetpu_settings();
EXPECT_EQ(
static_cast<EdgeTpuPowerState>(output_settings.inference_power_state()),
kInferencePowerState);
EXPECT_EQ(output_settings.inactive_power_configs().size(), 1);
EXPECT_EQ(
static_cast<EdgeTpuPowerState>(output_settings.inactive_power_configs()
.at(0)
.inactive_power_state()),
kInactivePowerState);
EXPECT_EQ(
output_settings.inactive_power_configs().at(0).inactive_timeout_us(),
kInactiveTimeoutUs);
EXPECT_EQ(output_settings.inference_priority(), kInferencePriority);
EXPECT_EQ(output_settings.model_token(), kModelToken);
EXPECT_EQ(static_cast<EdgeTpuSettings_::FloatTruncationType>(
output_settings.float_truncation_type()),
kFloatTruncationType);
EXPECT_EQ(static_cast<EdgeTpuDeviceSpec_::PlatformType>(
output_settings.edgetpu_device_spec().platform_type()),
kPlatformType);
EXPECT_EQ(output_settings.edgetpu_device_spec().num_chips(), kNumChips);
EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths_size(), 0);
EXPECT_EQ(output_settings.edgetpu_device_spec().chip_family(), kChipFamily);
input_spec->device_paths.push_back(kDevicePath);
compute = ConvertFromFlatbuffer(settings_);
output_settings = compute.tflite_settings().edgetpu_settings();
EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths().size(), 1);
EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths()[0],
kDevicePath);
}
TEST_F(ConversionTest, XNNPackSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->xnnpack_settings =
std::make_unique<XNNPackSettingsT>();
XNNPackSettingsT* input_settings =
settings_.tflite_settings->xnnpack_settings.get();
input_settings->num_threads = 2;
input_settings->flags =
tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().xnnpack_settings().num_threads(), 2);
EXPECT_EQ(compute.tflite_settings().xnnpack_settings().flags(), 3);
}
TEST_F(ConversionTest, CoreMLSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->coreml_settings =
std::make_unique<CoreMLSettingsT>();
CoreMLSettingsT* input_settings =
settings_.tflite_settings->coreml_settings.get();
input_settings->enabled_devices =
CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
input_settings->coreml_version = 3;
input_settings->max_delegated_partitions = 10;
input_settings->min_nodes_per_partition = 4;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().coreml_settings().enabled_devices(),
proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE);
EXPECT_EQ(compute.tflite_settings().coreml_settings().coreml_version(), 3);
EXPECT_EQ(
compute.tflite_settings().coreml_settings().max_delegated_partitions(),
10);
EXPECT_EQ(
compute.tflite_settings().coreml_settings().min_nodes_per_partition(), 4);
}
TEST_F(ConversionTest, CoralSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->coral_settings =
std::make_unique<CoralSettingsT>();
CoralSettingsT* input_settings =
settings_.tflite_settings->coral_settings.get();
input_settings->device = "test";
input_settings->performance = CoralSettings_::Performance_HIGH;
input_settings->usb_always_dfu = true;
input_settings->usb_max_bulk_in_queue_length = 768;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
const proto::CoralSettings& output_settings =
compute.tflite_settings().coral_settings();
EXPECT_EQ("test", output_settings.device());
EXPECT_TRUE(output_settings.usb_always_dfu());
EXPECT_EQ(proto::CoralSettings::HIGH, output_settings.performance());
EXPECT_EQ(768, output_settings.usb_max_bulk_in_queue_length());
}
TEST_F(ConversionTest, StableDelegateLoaderSettings) {
const std::string kDelegatePath = "TEST_DELEGATE_PATH";
const std::string kDelegateName = "TEST_DELEGATE_NAME";
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->stable_delegate_loader_settings =
std::make_unique<StableDelegateLoaderSettingsT>();
settings_.tflite_settings->stable_delegate_loader_settings->delegate_path =
kDelegatePath;
settings_.tflite_settings->stable_delegate_loader_settings->delegate_name =
kDelegateName;
const proto::StableDelegateLoaderSettings output_settings =
ConvertFromFlatbuffer(settings_)
.tflite_settings()
.stable_delegate_loader_settings();
EXPECT_EQ(output_settings.delegate_path(), kDelegatePath);
EXPECT_EQ(output_settings.delegate_name(), kDelegateName);
}
TEST_F(ConversionTest, CPUSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
settings_.tflite_settings->cpu_settings->num_threads = 2;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().cpu_settings().num_threads(), 2);
}
TEST_F(ConversionTest, MaxDelegatedPartitions) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->max_delegated_partitions = 2;
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(compute.tflite_settings().max_delegated_partitions(), 2);
}
TEST_F(ConversionTest, GoogleEdgeTpuSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->google_edgetpu_settings =
std::make_unique<GoogleEdgeTpuSettingsT>();
GoogleEdgeTpuSettingsT* input_settings =
settings_.tflite_settings->google_edgetpu_settings.get();
input_settings->priority = GoogleEdgeTpuSettings_::Priority_PRIORITY_HIGH;
input_settings->allow_fp16_precision_for_fp32 = true;
std::vector<uint8_t> extension_data{1, 2, 3};
input_settings->extension_data = extension_data;
input_settings->model_identifier = "model";
input_settings->prefer_cache_coherency_for_inputs =
GoogleEdgeTpuSettings_::TriState_TRISTATE_TRUE;
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::GoogleEdgeTpuSettings output_settings =
compute.tflite_settings().google_edgetpu_settings();
EXPECT_EQ(output_settings.priority(),
proto::GoogleEdgeTpuSettings::PRIORITY_HIGH);
EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
EXPECT_EQ(output_settings.extension_data().size(), 3);
EXPECT_EQ(output_settings.model_identifier(), "model");
EXPECT_EQ(output_settings.prefer_cache_coherency_for_inputs(),
proto::GoogleEdgeTpuSettings::TRISTATE_TRUE);
}
TEST_F(ConversionTest, CompilationCachingSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->compilation_caching_settings =
std::make_unique<CompilationCachingSettingsT>();
CompilationCachingSettingsT* input_settings =
settings_.tflite_settings->compilation_caching_settings.get();
input_settings->cache_dir = "/tmp";
input_settings->model_token = "model";
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
proto::CompilationCachingSettings output_settings =
compute.tflite_settings().compilation_caching_settings();
EXPECT_EQ(output_settings.cache_dir(), "/tmp");
EXPECT_EQ(output_settings.model_token(), "model");
}
TEST_F(ConversionTest, MtkNeuronSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->mtk_neuron_settings =
std::make_unique<MtkNeuronSettingsT>();
MtkNeuronSettingsT* input_settings =
settings_.tflite_settings->mtk_neuron_settings.get();
input_settings->execution_preference =
MtkNeuronSettings_::ExecutionPreference_PREFERENCE_UNDEFINED;
input_settings->execution_priority =
MtkNeuronSettings_::ExecutionPriority_PRIORITY_MEDIUM;
input_settings->optimization_hints = {
MtkNeuronSettings_::OptimizationHint_OPTIMIZATION_LOW_LATENCY,
MtkNeuronSettings_::OptimizationHint_OPTIMIZATION_BATCH_PROCESSING};
input_settings->operation_check_mode =
MtkNeuronSettings_::OperationCheckMode_PER_NODE_OPERATION_CHECK;
input_settings->allow_fp16_precision_for_fp32 = true;
input_settings->use_ahwb = false;
input_settings->use_cacheable_buffer = true;
input_settings->compile_options = {"TEST_COMPILE_OPTIONS"};
input_settings->accelerator_names = {"TEST_ACCELERATOR_NAME"};
input_settings->neuron_config_path = "TEST_NEURON_CONFIG_PATH";
const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
const proto::MtkNeuronSettings& output_settings =
compute.tflite_settings().mtk_neuron_settings();
EXPECT_EQ(output_settings.execution_preference(),
proto::MtkNeuronSettings::PREFERENCE_UNDEFINED);
EXPECT_EQ(output_settings.execution_priority(),
proto::MtkNeuronSettings::PRIORITY_MEDIUM);
EXPECT_EQ(output_settings.optimization_hints().size(), 2);
EXPECT_EQ(output_settings.optimization_hints().at(0),
proto::MtkNeuronSettings::OPTIMIZATION_LOW_LATENCY);
EXPECT_EQ(output_settings.optimization_hints().at(1),
proto::MtkNeuronSettings::OPTIMIZATION_BATCH_PROCESSING);
EXPECT_EQ(output_settings.operation_check_mode(),
proto::MtkNeuronSettings::PER_NODE_OPERATION_CHECK);
EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
EXPECT_FALSE(output_settings.use_ahwb());
EXPECT_TRUE(output_settings.use_cacheable_buffer());
EXPECT_EQ(output_settings.compile_options().size(), 1);
EXPECT_EQ(output_settings.compile_options().at(0), "TEST_COMPILE_OPTIONS");
EXPECT_EQ(output_settings.accelerator_names().size(), 1);
EXPECT_EQ(output_settings.accelerator_names().at(0), "TEST_ACCELERATOR_NAME");
EXPECT_EQ(output_settings.neuron_config_path(), "TEST_NEURON_CONFIG_PATH");
}
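// Covers the full ComputeSettings conversion including the nested
// MinibenchmarkSettings; the second ConvertFromFlatbuffer call passes true as
// the extra argument and expects settings_to_test_locally to be dropped.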
TEST_F(ConversionTest, MiniBenchmarkSettings) {
settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
settings_.tflite_settings->cpu_settings->num_threads = 2;
settings_.model_identifier_for_statistics = "id";
settings_.model_namespace_for_statistics = "ns";
settings_.settings_to_test_locally =
std::make_unique<MinibenchmarkSettingsT>();
MinibenchmarkSettingsT* mini_settings =
settings_.settings_to_test_locally.get();
mini_settings->model_file = std::make_unique<ModelFileT>();
mini_settings->model_file->filename = "test_model";
mini_settings->storage_paths = std::make_unique<BenchmarkStoragePathsT>();
mini_settings->storage_paths->storage_file_path = "/data/local/tmp";
std::unique_ptr<TFLiteSettingsT> xnnpack(new TFLiteSettingsT());
xnnpack->xnnpack_settings = std::make_unique<XNNPackSettingsT>();
xnnpack->xnnpack_settings->num_threads = 2;
std::unique_ptr<TFLiteSettingsT> hexagon(new TFLiteSettingsT());
hexagon->hexagon_settings = std::make_unique<HexagonSettingsT>();
hexagon->hexagon_settings->powersave_level = 3;
std::unique_ptr<TFLiteSettingsT> coreml(new TFLiteSettingsT());
coreml->coreml_settings = std::make_unique<CoreMLSettingsT>();
coreml->coreml_settings->enabled_devices =
CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
coreml->coreml_settings->coreml_version = 3;
coreml->coreml_settings->max_delegated_partitions = 10;
coreml->coreml_settings->min_nodes_per_partition = 4;
mini_settings->settings_to_test.emplace_back(std::move(xnnpack));
mini_settings->settings_to_test.emplace_back(std::move(hexagon));
mini_settings->settings_to_test.emplace_back(std::move(coreml));
proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
EXPECT_EQ("id", compute.model_identifier_for_statistics());
EXPECT_EQ("ns", compute.model_namespace_for_statistics());
EXPECT_TRUE(compute.has_settings_to_test_locally());
const proto::MinibenchmarkSettings& mini_output =
compute.settings_to_test_locally();
EXPECT_EQ("test_model", mini_output.model_file().filename());
EXPECT_EQ("/data/local/tmp", mini_output.storage_paths().storage_file_path());
EXPECT_EQ(3, mini_output.settings_to_test_size());
EXPECT_EQ(
2, mini_output.settings_to_test().at(0).xnnpack_settings().num_threads());
EXPECT_EQ(3, mini_output.settings_to_test()
.at(1)
.hexagon_settings()
.powersave_level());
EXPECT_EQ(
proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE,
mini_output.settings_to_test().at(2).coreml_settings().enabled_devices());
EXPECT_EQ(
3,
mini_output.settings_to_test().at(2).coreml_settings().coreml_version());
EXPECT_EQ(10, mini_output.settings_to_test()
.at(2)
.coreml_settings()
.max_delegated_partitions());
EXPECT_EQ(4, mini_output.settings_to_test()
.at(2)
.coreml_settings()
.min_nodes_per_partition());
compute =
ConvertFromFlatbuffer(settings_, true);
EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
EXPECT_EQ("id", compute.model_identifier_for_statistics());
EXPECT_EQ("ns", compute.model_namespace_for_statistics());
EXPECT_FALSE(compute.has_settings_to_test_locally());
}
TEST_F(ConversionTest, BestAccelerationDecisionEvent) {
event_.is_log_flushing_event = true;
event_.best_acceleration_decision =
std::make_unique<BestAccelerationDecisionT>();
event_.best_acceleration_decision->number_of_source_events = 4;
event_.best_acceleration_decision->min_inference_time_us = 3000;
proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
EXPECT_TRUE(proto_event.is_log_flushing_event());
const auto& best_decision = proto_event.best_acceleration_decision();
EXPECT_EQ(4, best_decision.number_of_source_events());
EXPECT_EQ(3000, best_decision.min_inference_time_us());
EXPECT_FALSE(best_decision.has_min_latency_event());
event_.best_acceleration_decision->min_latency_event =
std::make_unique<BenchmarkEventT>();
auto* min_event = event_.best_acceleration_decision->min_latency_event.get();
min_event->event_type = BenchmarkEventType_END;
min_event->tflite_settings = std::make_unique<TFLiteSettingsT>();
min_event->tflite_settings->delegate = Delegate_XNNPACK;
min_event->tflite_settings->xnnpack_settings =
std::make_unique<XNNPackSettingsT>();
min_event->tflite_settings->xnnpack_settings->num_threads = 2;
min_event->result = std::make_unique<BenchmarkResultT>();
min_event->result->initialization_time_us.push_back(100);
min_event->result->initialization_time_us.push_back(110);
min_event->result->inference_time_us.push_back(3000);
min_event->result->inference_time_us.push_back(3500);
min_event->result->max_memory_kb = 1234;
min_event->result->ok = true;
min_event->boottime_us = 1111;
min_event->wallclock_us = 2222;
proto_event = ConvertFromFlatbuffer(event_);
EXPECT_TRUE(proto_event.best_acceleration_decision().has_min_latency_event());
const auto& proto_min_event =
proto_event.best_acceleration_decision().min_latency_event();
EXPECT_EQ(proto::BenchmarkEventType::END, proto_min_event.event_type());
EXPECT_EQ(proto::Delegate::XNNPACK,
proto_min_event.tflite_settings().delegate());
EXPECT_EQ(2, |
867 | cpp | tensorflow/tensorflow | stable_delegate_plugin | tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc | tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc | #ifndef TENSORFLOW_LITE_ACCELERATION_CONFIGURATION_STABLE_DELEGATE_PLUGIN_H_
#define TENSORFLOW_LITE_ACCELERATION_CONFIGURATION_STABLE_DELEGATE_PLUGIN_H_
#include <memory>
#include <string>
#include "tensorflow/lite/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
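// Delegate plugin that loads an ABI-stable delegate from the shared library
// named in StableDelegateLoaderSettings and forwards create/destroy/errno
// calls to the loaded TfLiteOpaqueDelegatePlugin.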
class StableDelegatePlugin : public DelegatePluginInterface {
public:
static std::unique_ptr<StableDelegatePlugin> New(
const TFLiteSettings& tflite_settings) {
return std::make_unique<StableDelegatePlugin>(tflite_settings);
}
explicit StableDelegatePlugin(const TFLiteSettings& tflite_settings) {
TFLiteSettingsT tflite_settings_t;
tflite_settings.UnPackTo(&tflite_settings_t);
tflite_settings_builder_.Finish(
CreateTFLiteSettings(tflite_settings_builder_, &tflite_settings_t));
const StableDelegateLoaderSettings* stable_delegate_loader_settings =
GetTFLiteSettings()->stable_delegate_loader_settings();
if (!stable_delegate_loader_settings ||
!stable_delegate_loader_settings->delegate_path() ||
stable_delegate_loader_settings->delegate_path()->Length() == 0) {
TFLITE_LOG(ERROR) << "The delegate path field is not available from the "
"provided stable delegate loader settings.";
return;
}
const auto* stable_delegate_ = utils::LoadDelegateFromSharedLibrary(
stable_delegate_loader_settings->delegate_path()->str());
if (!stable_delegate_) {
TFLITE_LOG(ERROR) << "Failed to load stable delegate plugin symbol from "
<< stable_delegate_loader_settings->delegate_path();
return;
}
stable_delegate_plugin_ = stable_delegate_->delegate_plugin;
TFLITE_LOG(INFO)
<< "The stable delegate plugin has loaded delegate plugin for "
<< stable_delegate_->delegate_name;
}
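// Creates the delegate through the dynamically loaded plugin and binds the
// plugin's destroy callback into the returned smart pointer.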
TfLiteDelegatePtr Create() override {
return TfLiteDelegatePtr(
stable_delegate_plugin_->create(GetTFLiteSettings()),
stable_delegate_plugin_->destroy);
}
int GetDelegateErrno(TfLiteOpaqueDelegate* from_delegate) override {
return stable_delegate_plugin_->get_delegate_errno(from_delegate);
}
private:
const TFLiteSettings* GetTFLiteSettings() {
return flatbuffers::GetRoot<TFLiteSettings>(
tflite_settings_builder_.GetBufferPointer());
}
const TfLiteOpaqueDelegatePlugin* stable_delegate_plugin_;
flatbuffers::FlatBufferBuilder tflite_settings_builder_;
};
}
}
#endif
#include "tensorflow/lite/acceleration/configuration/stable_delegate_plugin.h"
namespace tflite {
namespace delegates {
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(StableDelegatePlugin,
StableDelegatePlugin::New);
}
} | #include <memory>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
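// Loads the bundled stable XNNPACK delegate shared library through the
// StableDelegatePlugin and verifies delegate creation, errno reporting, and
// thread-pool sizing.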
class StableDelegatePluginTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
static constexpr tflite::XNNPackFlags kFlagsForTest =
tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;
static constexpr char kDelegateBinaryPath[] =
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/libtensorflowlite_stable_xnnpack_delegate.so";
void SetUp() override {
flatbuffers::Offset<flatbuffers::String> stable_delegate_path_offset =
flatbuffer_builder_.CreateString(kDelegateBinaryPath);
StableDelegateLoaderSettingsBuilder stable_delegate_loader_settings_builder(
flatbuffer_builder_);
stable_delegate_loader_settings_builder.add_delegate_path(
stable_delegate_path_offset);
flatbuffers::Offset<StableDelegateLoaderSettings>
stable_delegate_loader_settings =
stable_delegate_loader_settings_builder.Finish();
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
xnnpack_settings_builder.add_flags(kFlagsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_stable_delegate_loader_settings(
stable_delegate_loader_settings);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
tflite_settings_builder.add_delegate(Delegate_XNNPACK);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
tflite_settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
delegate_plugin_ = delegates::DelegatePluginRegistry::CreateByName(
"StableDelegatePlugin", *tflite_settings_);
ASSERT_NE(delegate_plugin_, nullptr);
}
void TearDown() override { delegate_plugin_.reset(); }
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *tflite_settings_;
std::unique_ptr<delegates::DelegatePluginInterface> delegate_plugin_;
};
TEST_F(StableDelegatePluginTest, CanCreateAndDestroyDelegate) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
EXPECT_NE(delegate, nullptr);
}
TEST_F(StableDelegatePluginTest, CanGetDelegateErrno) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
EXPECT_EQ(delegate_plugin_->GetDelegateErrno(delegate.get()), 0);
}
TEST_F(StableDelegatePluginTest, SetsCorrectThreadCount) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
pthreadpool_t threadpool = static_cast<pthreadpool_t>(
TfLiteXNNPackDelegateGetThreadPool(delegate.get()));
EXPECT_EQ(pthreadpool_get_threads_count(threadpool), kNumThreadsForTest);
}
} |
868 | cpp | tensorflow/tensorflow | proto_to_flatbuffer | tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.cc | tensorflow/lite/acceleration/configuration/proto_to_flatbuffer_test.cc | #ifndef TENSORFLOW_LITE_ACCELERATION_CONFIGURATION_PROTO_TO_FLATBUFFER_H_
#define TENSORFLOW_LITE_ACCELERATION_CONFIGURATION_PROTO_TO_FLATBUFFER_H_
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
const TFLiteSettings* ConvertFromProto(
const proto::TFLiteSettings& proto_settings,
flatbuffers::FlatBufferBuilder* builder);
const ComputeSettings* ConvertFromProto(
const proto::ComputeSettings& proto_settings,
flatbuffers::FlatBufferBuilder* builder);
const MinibenchmarkSettings* ConvertFromProto(
const proto::MinibenchmarkSettings& proto_settings,
flatbuffers::FlatBufferBuilder* builder);
}
#endif
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include <cstdint>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::Offset;
using ::flatbuffers::String;
using ::flatbuffers::Vector;
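// Each Convert* helper below maps one proto enum or message onto its
// flatbuffer counterpart; unexpected enum values are logged and mapped to the
// default member.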
ExecutionPreference ConvertExecutionPreference(
proto::ExecutionPreference preference) {
switch (preference) {
case proto::ExecutionPreference::ANY:
return ExecutionPreference_ANY;
case proto::ExecutionPreference::LOW_LATENCY:
return ExecutionPreference_LOW_LATENCY;
case proto::ExecutionPreference::LOW_POWER:
return ExecutionPreference_LOW_POWER;
case proto::ExecutionPreference::FORCE_CPU:
return ExecutionPreference_FORCE_CPU;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for ExecutionPreference: %d", preference);
return ExecutionPreference_ANY;
}
Delegate ConvertDelegate(proto::Delegate delegate) {
switch (delegate) {
case proto::Delegate::NONE:
return Delegate_NONE;
case proto::Delegate::NNAPI:
return Delegate_NNAPI;
case proto::Delegate::GPU:
return Delegate_GPU;
case proto::Delegate::HEXAGON:
return Delegate_HEXAGON;
case proto::Delegate::XNNPACK:
return Delegate_XNNPACK;
case proto::Delegate::EDGETPU:
return Delegate_EDGETPU;
case proto::Delegate::EDGETPU_CORAL:
return Delegate_EDGETPU_CORAL;
case proto::Delegate::CORE_ML:
return Delegate_CORE_ML;
case proto::Delegate::ARMNN:
return Delegate_ARMNN;
case proto::Delegate::MTK_NEURON:
return Delegate_MTK_NEURON;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for Delegate: %d",
delegate);
return Delegate_NONE;
}
NNAPIExecutionPreference ConvertNNAPIExecutionPreference(
proto::NNAPIExecutionPreference preference) {
switch (preference) {
case proto::NNAPIExecutionPreference::UNDEFINED:
return NNAPIExecutionPreference_UNDEFINED;
case proto::NNAPIExecutionPreference::NNAPI_LOW_POWER:
return NNAPIExecutionPreference_NNAPI_LOW_POWER;
case proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER:
return NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER;
case proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED:
return NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPreference: %d",
preference);
return NNAPIExecutionPreference_UNDEFINED;
}
NNAPIExecutionPriority ConvertNNAPIExecutionPriority(
proto::NNAPIExecutionPriority priority) {
switch (priority) {
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED:
return NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW:
return NNAPIExecutionPriority_NNAPI_PRIORITY_LOW;
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM:
return NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM;
case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH:
return NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for NNAPIExecutionPriority: %d", priority);
return NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
}
GPUBackend ConvertGPUBackend(proto::GPUBackend backend) {
switch (backend) {
case proto::GPUBackend::UNSET:
return GPUBackend_UNSET;
case proto::GPUBackend::OPENCL:
return GPUBackend_OPENCL;
case proto::GPUBackend::OPENGL:
return GPUBackend_OPENGL;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for GPUBackend: %d",
backend);
return GPUBackend_UNSET;
}
GPUInferenceUsage ConvertGPUInferenceUsage(
proto::GPUInferenceUsage preference) {
switch (preference) {
case proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER:
return GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
case proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED:
return GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferenceUsage: %d", preference);
return GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
}
GPUInferencePriority ConvertGPUInferencePriority(
proto::GPUInferencePriority priority) {
switch (priority) {
case proto::GPUInferencePriority::GPU_PRIORITY_AUTO:
return GPUInferencePriority_GPU_PRIORITY_AUTO;
case proto::GPUInferencePriority::GPU_PRIORITY_MAX_PRECISION:
return GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION;
case proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY:
return GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY;
case proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE:
return GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for GPUInferencePriority: %d", priority);
return GPUInferencePriority_GPU_PRIORITY_AUTO;
}
EdgeTpuPowerState ConvertEdgeTpuPowerState(proto::EdgeTpuPowerState state) {
switch (state) {
case proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE:
return EdgeTpuPowerState_UNDEFINED_POWERSTATE;
case proto::EdgeTpuPowerState::TPU_CORE_OFF:
return EdgeTpuPowerState_TPU_CORE_OFF;
case proto::EdgeTpuPowerState::READY:
return EdgeTpuPowerState_READY;
case proto::EdgeTpuPowerState::ACTIVE_MIN_POWER:
return EdgeTpuPowerState_ACTIVE_MIN_POWER;
case proto::EdgeTpuPowerState::ACTIVE_VERY_LOW_POWER:
return EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER;
case proto::EdgeTpuPowerState::ACTIVE_LOW_POWER:
return EdgeTpuPowerState_ACTIVE_LOW_POWER;
case proto::EdgeTpuPowerState::ACTIVE:
return EdgeTpuPowerState_ACTIVE;
case proto::EdgeTpuPowerState::OVER_DRIVE:
return EdgeTpuPowerState_OVER_DRIVE;
}
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Unexpected value for EdgeTpuSettings::PowerState: %d",
state);
return EdgeTpuPowerState_UNDEFINED_POWERSTATE;
}
Offset<FallbackSettings> ConvertFallbackSettings(
const proto::FallbackSettings& settings, FlatBufferBuilder& builder) {
return CreateFallbackSettings(
builder,
settings.allow_automatic_fallback_on_compilation_error(),
settings.allow_automatic_fallback_on_execution_error());
}
Offset<NNAPISettings> ConvertNNAPISettings(const proto::NNAPISettings& settings,
FlatBufferBuilder& builder) {
return CreateNNAPISettings(
builder,
builder.CreateString(settings.accelerator_name()),
builder.CreateString(settings.cache_directory()),
builder.CreateString(settings.model_token()),
ConvertNNAPIExecutionPreference(settings.execution_preference()),
settings.no_of_nnapi_instances_to_cache(),
ConvertFallbackSettings(settings.fallback_settings(), builder),
settings.allow_nnapi_cpu_on_android_10_plus(),
ConvertNNAPIExecutionPriority(settings.execution_priority()),
settings.allow_dynamic_dimensions(),
settings.allow_fp16_precision_for_fp32(),
settings.use_burst_computation(),
settings.support_library_handle());
}
Offset<GPUSettings> ConvertGPUSettings(const proto::GPUSettings& settings,
FlatBufferBuilder& builder) {
return CreateGPUSettings(
builder,
settings.is_precision_loss_allowed(),
settings.enable_quantized_inference(),
ConvertGPUBackend(settings.force_backend()),
ConvertGPUInferencePriority(settings.inference_priority1()),
ConvertGPUInferencePriority(settings.inference_priority2()),
ConvertGPUInferencePriority(settings.inference_priority3()),
ConvertGPUInferenceUsage(settings.inference_preference()),
builder.CreateString(settings.cache_directory()),
builder.CreateString(settings.model_token()));
}
Offset<HexagonSettings> ConvertHexagonSettings(
const proto::HexagonSettings& settings, FlatBufferBuilder& builder) {
return CreateHexagonSettings(
builder,
settings.debug_level(),
settings.powersave_level(),
settings.print_graph_profile(),
settings.print_graph_debug());
}
Offset<XNNPackSettings> ConvertXNNPackSettings(
const proto::XNNPackSettings& settings, FlatBufferBuilder& builder) {
return CreateXNNPackSettings(
builder,
settings.num_threads(),
tflite::XNNPackFlags(settings.flags()));
}
Offset<CoreMLSettings> ConvertCoreMLSettings(
const proto::CoreMLSettings& settings, FlatBufferBuilder& builder) {
tflite::CoreMLSettings_::EnabledDevices enabled_devices =
tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL;
switch (settings.enabled_devices()) {
case proto::CoreMLSettings::DEVICES_ALL:
enabled_devices = tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL;
break;
case proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE:
enabled_devices =
tflite::CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
break;
default:
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid devices enum: %d",
settings.enabled_devices());
}
return CreateCoreMLSettings(
builder, enabled_devices, settings.coreml_version(),
settings.max_delegated_partitions(), settings.min_nodes_per_partition());
}
Offset<StableDelegateLoaderSettings> ConvertStableDelegateLoaderSettings(
const proto::StableDelegateLoaderSettings& settings,
FlatBufferBuilder& builder) {
return CreateStableDelegateLoaderSettings(
builder, builder.CreateString(settings.delegate_path()),
builder.CreateString(settings.delegate_name()));
}
Offset<CPUSettings> ConvertCPUSettings(const proto::CPUSettings& settings,
FlatBufferBuilder& builder) {
return CreateCPUSettings(builder,
settings.num_threads());
}
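// Builds the EdgeTpuDeviceSpec table; the device-path vector is only created
// when the proto actually carries device paths.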
Offset<tflite::EdgeTpuDeviceSpec> ConvertEdgeTpuDeviceSpec(
FlatBufferBuilder& builder, const proto::EdgeTpuDeviceSpec& device_spec) {
Offset<Vector<Offset<String>>> device_paths_fb = 0;
if (device_spec.device_paths_size() > 0) {
std::vector<Offset<String>> device_paths;
for (const auto& device_path : device_spec.device_paths()) {
auto device_path_fb = builder.CreateString(device_path);
device_paths.push_back(device_path_fb);
}
device_paths_fb = builder.CreateVector(device_paths);
}
return tflite::CreateEdgeTpuDeviceSpec(
builder,
static_cast<tflite::EdgeTpuDeviceSpec_::PlatformType>(
device_spec.platform_type()),
device_spec.num_chips(), device_paths_fb, device_spec.chip_family());
}
Offset<GoogleEdgeTpuSettings> ConvertGoogleEdgeTpuSettings(
const proto::GoogleEdgeTpuSettings& settings, FlatBufferBuilder& builder) {
Offset<String> model_identifier = 0;
if (settings.has_model_identifier()) {
model_identifier = builder.CreateString(settings.model_identifier());
}
Offset<Vector<uint8_t>> extension_data = 0;
if (settings.has_extension_data()) {
extension_data = builder.CreateVector(
reinterpret_cast<const uint8_t*>(settings.extension_data().data()),
settings.extension_data().size());
}
GoogleEdgeTpuSettingsBuilder builder_(builder);
builder_.add_log_verbosity(settings.log_verbosity());
builder_.add_enable_tracing(settings.enable_tracing());
builder_.add_priority(static_cast<tflite::GoogleEdgeTpuSettings_::Priority>(
settings.priority()));
builder_.add_model_identifier(model_identifier);
builder_.add_use_async_api(settings.use_async_api());
builder_.add_delegate_should_manage_cache_for_inputs(
settings.delegate_should_manage_cache_for_inputs());
builder_.add_delegate_should_manage_cache_for_outputs(
settings.delegate_should_manage_cache_for_outputs());
builder_.add_prefer_cache_coherency_for_inputs(
static_cast<tflite::GoogleEdgeTpuSettings_::TriState>(
settings.prefer_cache_coherency_for_inputs()));
builder_.add_prefer_cache_coherency_for_outputs(
static_cast<tflite::GoogleEdgeTpuSettings_::TriState>(
settings.prefer_cache_coherency_for_outputs()));
builder_.add_allow_fp16_precision_for_fp32(
settings.allow_fp16_precision_for_fp32());
builder_.add_extension_data(extension_data);
return builder_.Finish();
}
Offset<EdgeTpuSettings> ConvertEdgeTpuSettings(
const proto::EdgeTpuSettings& settings, FlatBufferBuilder& builder) {
Offset<Vector<Offset<tflite::EdgeTpuInactivePowerConfig>>>
inactive_power_configs = 0;
std::vector<Offset<tflite::EdgeTpuInactivePowerConfig>>
inactive_power_configs_std;
if (settings.inactive_power_configs_size() > 0) {
for (const auto& config : settings.inactive_power_configs()) {
inactive_power_configs_std.push_back(
tflite::CreateEdgeTpuInactivePowerConfig(
builder,
static_cast<tflite::EdgeTpuPowerState>(
config.inactive_power_state()),
config.inactive_timeout_us()));
}
inactive_power_configs =
builder.CreateVector<Offset<tflite::EdgeTpuInactivePowerConfig>>(
inactive_power_configs_std);
}
Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0;
if (settings.has_edgetpu_device_spec()) {
edgetpu_device_spec =
ConvertEdgeTpuDeviceSpec(builder, settings.edgetpu_device_spec());
}
Offset<String> model_token = 0;
if (settings.has_model_token()) {
model_token = builder.CreateString(settings.model_token());
}
std::vector<int32_t> hardware_cluster_ids_std{
settings.hardware_cluster_ids().begin(),
settings.hardware_cluster_ids().end()};
auto hardware_cluster_ids_fb =
builder.CreateVector<int32_t>(hardware_cluster_ids_std);
Offset<String> public_model_id = 0;
if (settings.has_public_model_id()) {
public_model_id = builder.CreateString(settings.public_model_id());
}
return CreateEdgeTpuSettings(
builder, ConvertEdgeTpuPowerState(settings.inference_power_state()),
inactive_power_configs, settings.inference_priority(),
edgetpu_device_spec, model_token,
static_cast<tflite::EdgeTpuSettings_::FloatTruncationType>(
settings.float_truncation_type()),
static_cast<tflite::EdgeTpuSettings_::QosClass>(settings.qos_class()),
hardware_cluster_ids_fb, public_model_id,
static_cast<tflite::EdgeTpuSettings_::UseLayerIrTgcBackend>(
settings.use_layer_ir_tgc_backend()));
}
Offset<CompilationCachingSettings> ConvertCompilationCachingSettings(
const proto::CompilationCachingSettings& settings,
FlatBufferBuilder& builder) {
return CreateCompilationCachingSettings(
builder, builder.CreateString(settings.cache_dir()),
builder.CreateString(settings.model_token()));
}
Offset<ArmNNSettings> ConvertArmNNSettings(const proto::ArmNNSettings& settings,
FlatBufferBuilder& builder) {
return CreateArmNNSettings(
builder, builder.CreateString(settings.backends()), settings.fastmath(),
builder.CreateString(settings.additional_parameters()));
}
Offset<MtkNeuronSettings> ConvertMtkNeuronSettings(
const proto::MtkNeuronSettings& settings, FlatBufferBuilder& builder) {
return CreateMtkNeuronSettings(
builder,
static_cast<MtkNeuronSettings_::ExecutionPreference>(
settings.execution_preference()),
static_cast<MtkNeuronSettings_::ExecutionPriority>(
settings.execution_priority()),
builder.CreateVector(settings.optimization_hints().data(),
settings.optimization_hints().size()),
static_cast<MtkNeuronSettings_::OperationCheckMode>(
settings.operation_check_mode()),
settings.allow_fp16_precision_for_fp32(), settings.use_ahwb(),
settings.use_cacheable_buffer(),
builder.CreateVectorOfStrings(settings.compile_options().begin(),
settings.compile_options().end()),
builder.CreateVectorOfStrings(settings.accelerator_names().begin(),
settings.accelerator_names().end()),
builder.CreateString(settings.neuron_config_path()));
}
Offset<CoralSettings> ConvertCoralSettings(const proto::CoralSettings& settings,
FlatBufferBuilder& builder) {
return CreateCoralSettings(
builder, builder.CreateString(settings.device()),
static_cast<tflite::CoralSettings_::Performance>(settings.performance()),
settings.usb_always_dfu(), settings.usb_max_bulk_in_queue_length());
}
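// Converts a complete proto TFLiteSettings message, delegating every nested
// settings message to the helpers above.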
Offset<TFLiteSettings> ConvertTfliteSettings(
const proto::TFLiteSettings& settings, FlatBufferBuilder& builder) {
return CreateTFLiteSettings(
builder, ConvertDelegate(settings.delegate()),
ConvertNNAPISettings(settings.nnapi_settings(), builder),
ConvertGPUSettings(settings.gpu_settings(), builder),
ConvertHexagonSettings(settings.hexagon_settings(), builder),
ConvertXNNPackSettings(settings.xnnpack_settings(), builder),
ConvertCoreMLSettings(settings.coreml_settings(), builder),
ConvertCPUSettings(settings.cpu_settings(), builder),
settings.max_delegated_partitions(),
ConvertEdgeTpuSettings(settings.edgetpu_settings(), builder),
ConvertCoralSettings(settings.coral_settings(), builder),
ConvertFallbackSettings(settings.fallback_settings(), builder),
settings.disable_default_delegates(),
ConvertStableDelegateLoaderSettings(
settings.stable_delegate_loader_settings(), builder),
ConvertGoogleEdgeTpuSettings(settings.google_edgetpu_settings(), builder),
ConvertCompilationCachingSettings(settings.compilation_caching_settings(),
builder),
ConvertArmNNSettings(settings.armnn_settings(), builder),
ConvertMtkNeuronSettings(settings.mtk_neuron_settings(), builder));
}
Offset<ModelFile> ConvertModelFile(const proto::ModelFile& model_file,
FlatBufferBuilder& builder) {
return CreateModelFile(builder, builder.CreateString(model_file.filename()),
model_file.fd(), model_file.offset(),
model_file.length());
}
Offset<BenchmarkStoragePaths> ConvertBenchmarkStoragePaths(
const proto::BenchmarkStoragePaths& storage_paths,
FlatBufferBuilder& builder) {
return CreateBenchmarkStoragePaths(
builder, builder.CreateString(storage_paths.storage_file_path()),
builder.CreateString(storage_paths.data_directory_path()));
}
Offset<MinibenchmarkSettings> ConvertMinibenchmarkSettings(
const proto::MinibenchmarkSettings& settings, FlatBufferBuilder& builder) {
Offset<Vector<Offset<TFLiteSettings>>> settings_to_test = 0;
std::vector<Offset<TFLiteSettings>> settings_to_test_vec;
if (settings.settings_to_test_size() > 0) {
for (const auto& one : settings.settings_to_test()) {
settings_to_test_vec.push_back(ConvertTfliteSettings(one, builder));
}
settings_to_test =
builder.CreateVector<Offset<TFLiteSettings>>(settings_to_test_vec);
}
return CreateMinibenchmarkSettings(
builder, settings_to_test,
ConvertModelFile(settings.model_file(), builder),
ConvertBenchmarkStoragePaths(settings.storage_paths(), builder));
}
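// Public entry points. The returned pointers are temporary views into the
// caller-provided FlatBufferBuilder and stay valid only until the builder is
// written to again.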
const TFLiteSettings* ConvertFromProto(
const proto::TFLiteSettings& proto_settings, FlatBufferBuilder* builder) {
Offset<TFLiteSettings> settings =
ConvertTfliteSettings(proto_settings, *builder);
return flatbuffers::GetTemporaryPointer(*builder, settings);
}
const ComputeSettings* ConvertFromProto(
const proto::ComputeSettings& proto_settings, FlatBufferBuilder* builder) {
auto settings = CreateComputeSettings(
*builder, ConvertExecutionPreference(proto_settings.preference()),
ConvertTfliteSettings(proto_settings.tflite_settings(), *builder),
builder->CreateString(proto_settings.model_namespace_for_statistics()),
builder->CreateString(proto_settings.model_identifier_for_statistics()),
ConvertMinibenchmarkSettings(proto_settings.settings_to_test_locally(),
*builder));
return flatbuffers::GetTemporaryPointer(*builder, settings);
}
const MinibenchmarkSettings* ConvertFromProto(
const proto::MinibenchmarkSettings& proto_settings,
flatbuffers::FlatBufferBuilder* builder) {
auto settings = ConvertMinibenchmarkSettings(proto_settings, *builder);
return flatbuffers::GetTemporaryPointer(*builder, settings);
}
} | #include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(ConversionTest, EdgeTpuSettings) {
const std::vector<int32_t> kHardwareClusterIds{1};
const std::string kPublicModelId = "public_model_id";
const tflite::proto::EdgeTpuSettings_UseLayerIrTgcBackend
kUseLayerIrTgcBackend =
tflite::proto::EdgeTpuSettings::USE_LAYER_IR_TGC_BACKEND_YES;
proto::ComputeSettings input_settings;
auto* edgetpu_settings =
input_settings.mutable_tflite_settings()->mutable_edgetpu_settings();
edgetpu_settings->set_public_model_id(kPublicModelId);
edgetpu_settings->set_use_layer_ir_tgc_backend(kUseLayerIrTgcBackend);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
*edgetpu_settings->mutable_hardware_cluster_ids() = {
kHardwareClusterIds.begin(), kHardwareClusterIds.end()};
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder)
->tflite_settings()
->edgetpu_settings();
EXPECT_EQ(output_settings->hardware_cluster_ids()->size(), 1);
EXPECT_EQ(output_settings->hardware_cluster_ids()->Get(0),
kHardwareClusterIds[0]);
EXPECT_EQ(output_settings->public_model_id()->str(), kPublicModelId);
EXPECT_EQ(output_settings->use_layer_ir_tgc_backend(),
tflite::EdgeTpuSettings_::
UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_YES);
}
TEST(ConversionTest, TFLiteSettings) {
const std::vector<int32_t> kHardwareClusterIds{1};
const std::string kPublicModelId = "public_model_id";
const tflite::proto::EdgeTpuSettings_UseLayerIrTgcBackend
kUseLayerIrTgcBackend =
tflite::proto::EdgeTpuSettings::USE_LAYER_IR_TGC_BACKEND_YES;
proto::TFLiteSettings input_settings;
input_settings.set_delegate(::tflite::proto::EDGETPU);
auto* edgetpu_settings = input_settings.mutable_edgetpu_settings();
edgetpu_settings->set_public_model_id(kPublicModelId);
edgetpu_settings->set_use_layer_ir_tgc_backend(kUseLayerIrTgcBackend);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
*edgetpu_settings->mutable_hardware_cluster_ids() = {
kHardwareClusterIds.begin(), kHardwareClusterIds.end()};
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
EXPECT_EQ(output_settings->delegate(), ::tflite::Delegate_EDGETPU);
const auto* output_edgetpu_settings = output_settings->edgetpu_settings();
EXPECT_EQ(output_edgetpu_settings->hardware_cluster_ids()->size(), 1);
EXPECT_EQ(output_edgetpu_settings->hardware_cluster_ids()->Get(0),
kHardwareClusterIds[0]);
EXPECT_EQ(output_edgetpu_settings->public_model_id()->str(), kPublicModelId);
EXPECT_EQ(output_edgetpu_settings->use_layer_ir_tgc_backend(),
tflite::EdgeTpuSettings_::
UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_YES);
}
TEST(ConversionTest, StableDelegateLoaderSettings) {
const std::string kDelegatePath = "TEST_DELEGATE_PATH";
const std::string kDelegateName = "TEST_DELEGATE_NAME";
proto::TFLiteSettings input_settings;
auto* stable_delegate_loader_settings =
input_settings.mutable_stable_delegate_loader_settings();
stable_delegate_loader_settings->set_delegate_path(kDelegatePath);
stable_delegate_loader_settings->set_delegate_name(kDelegateName);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_stable_delegate_loader_settings =
output_settings->stable_delegate_loader_settings();
ASSERT_NE(output_stable_delegate_loader_settings, nullptr);
EXPECT_EQ(output_stable_delegate_loader_settings->delegate_path()->str(),
kDelegatePath);
EXPECT_EQ(output_stable_delegate_loader_settings->delegate_name()->str(),
kDelegateName);
}
TEST(ConversionTest, CompilationCachingSettings) {
const std::string kCacheDir = "TEST_CACHE_DIR";
const std::string kModelToken = "TEST_MODEL_TOKEN";
proto::TFLiteSettings input_settings;
auto* compilation_caching_settings =
input_settings.mutable_compilation_caching_settings();
compilation_caching_settings->set_cache_dir(kCacheDir);
compilation_caching_settings->set_model_token(kModelToken);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_compilation_caching_settings =
output_settings->compilation_caching_settings();
ASSERT_NE(output_compilation_caching_settings, nullptr);
EXPECT_EQ(output_compilation_caching_settings->cache_dir()->str(), kCacheDir);
EXPECT_EQ(output_compilation_caching_settings->model_token()->str(),
kModelToken);
}
TEST(ConversionTest, ArmNNSettings) {
const std::string kBackends = "TEST_BACKENDS";
const bool kFastmath = true;
const std::string kAdditionalParameters = "TEST_ADDITIONAL_PARAMETERS";
proto::TFLiteSettings input_settings;
auto* armnn_settings = input_settings.mutable_armnn_settings();
armnn_settings->set_backends(kBackends);
armnn_settings->set_fastmath(kFastmath);
armnn_settings->set_additional_parameters(kAdditionalParameters);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_armnn_settings = output_settings->armnn_settings();
ASSERT_NE(output_armnn_settings, nullptr);
EXPECT_EQ(output_armnn_settings->backends()->str(), kBackends);
EXPECT_EQ(output_armnn_settings->fastmath(), kFastmath);
EXPECT_EQ(output_armnn_settings->additional_parameters()->str(),
kAdditionalParameters);
}
TEST(ConversionTest, MtkNeuronSettings) {
const proto::MtkNeuronSettings_ExecutionPreference kExecutionPreference =
proto::MtkNeuronSettings::PREFERENCE_FAST_SINGLE_ANSWER;
const proto::MtkNeuronSettings_ExecutionPriority kExecutionPriority =
proto::MtkNeuronSettings::PRIORITY_MEDIUM;
const proto::MtkNeuronSettings_OptimizationHint kOptimizationHint =
proto::MtkNeuronSettings::OPTIMIZATION_LOW_LATENCY;
const proto::MtkNeuronSettings_OperationCheckMode kOperationCheckMode =
proto::MtkNeuronSettings::PER_NODE_OPERATION_CHECK;
const bool kAllowFp16 = true;
const bool kUseAhwb = false;
const bool kUseCacheableBuffer = true;
const std::string kCompileOptions = "TEST_COMPILE_OPTIONS";
const std::string kAcceleratorName = "TEST_ACCELERATOR_NAME";
const std::string kNeuronConfigPath = "TEST_NEURON_CONFIG_PATH";
proto::TFLiteSettings input_settings;
auto* mtk_neuron_settings = input_settings.mutable_mtk_neuron_settings();
mtk_neuron_settings->set_execution_preference(kExecutionPreference);
mtk_neuron_settings->set_execution_priority(kExecutionPriority);
mtk_neuron_settings->add_optimization_hints(kOptimizationHint);
mtk_neuron_settings->set_operation_check_mode(kOperationCheckMode);
mtk_neuron_settings->set_allow_fp16_precision_for_fp32(kAllowFp16);
mtk_neuron_settings->set_use_ahwb(kUseAhwb);
mtk_neuron_settings->set_use_cacheable_buffer(kUseCacheableBuffer);
mtk_neuron_settings->add_compile_options(kCompileOptions);
mtk_neuron_settings->add_accelerator_names(kAcceleratorName);
mtk_neuron_settings->set_neuron_config_path(kNeuronConfigPath);
flatbuffers::FlatBufferBuilder flatbuffers_builder;
auto output_settings = ConvertFromProto(input_settings, &flatbuffers_builder);
const auto* output_mtk_neuron_settings =
output_settings->mtk_neuron_settings();
ASSERT_NE(output_mtk_neuron_settings, nullptr);
EXPECT_EQ(
output_mtk_neuron_settings->execution_preference(),
MtkNeuronSettings_::ExecutionPreference_PREFERENCE_FAST_SINGLE_ANSWER);
EXPECT_EQ(output_mtk_neuron_settings->execution_priority(),
MtkNeuronSettings_::ExecutionPriority_PRIORITY_MEDIUM);
EXPECT_EQ(output_mtk_neuron_settings->optimization_hints()->size(), 1);
EXPECT_EQ(output_mtk_neuron_settings->optimization_hints()->Get(0),
kOptimizationHint);
EXPECT_EQ(output_mtk_neuron_settings->operation_check_mode(),
MtkNeuronSettings_::OperationCheckMode_PER_NODE_OPERATION_CHECK);
EXPECT_EQ(output_mtk_neuron_settings->allow_fp16_precision_for_fp32(),
kAllowFp16);
EXPECT_EQ(output_mtk_neuron_settings->use_ahwb(), kUseAhwb);
EXPECT_EQ(output_mtk_neuron_settings->use_cacheable_buffer(),
kUseCacheableBuffer);
EXPECT_EQ(output_mtk_neuron_settings->compile_options()->size(), 1);
EXPECT_EQ(output_mtk_neuron_settings->compile_options()->Get(0)->str(),
kCompileOptions);
EXPECT_EQ(output_mtk_neuron_settings->accelerator_names()->size(), 1);
EXPECT_EQ(output_mtk_neuron_settings->accelerator_names()->Get(0)->str(),
kAcceleratorName);
EXPECT_EQ(output_mtk_neuron_settings->neuron_config_path()->str(),
kNeuronConfigPath);
}
}
} |
869 | cpp | tensorflow/tensorflow | gpu_plugin | tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/gpu_plugin_test.cc | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_GPU_PLUGIN_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_GPU_PLUGIN_H_
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
const TfLiteDelegatePlugin* TfLiteGpuDelegatePluginCApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/gpu_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#if TFLITE_SUPPORTS_GPU_DELEGATE
#include "tensorflow/lite/delegates/gpu/delegate.h"
#elif defined(REAL_IPHONE_DEVICE)
#include "tensorflow/lite/delegates/gpu/metal_delegate.h"
#endif
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
tflite::delegates::GpuPlugin gpu_plugin(*tflite_settings);
#if TFLITE_SUPPORTS_GPU_DELEGATE
return TfLiteGpuDelegateV2Create(&gpu_plugin.Options());
#elif defined(REAL_IPHONE_DEVICE)
return TFLGpuDelegateCreate(&gpu_plugin.Options());
#else
return nullptr;
#endif
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
#if TFLITE_SUPPORTS_GPU_DELEGATE
TfLiteGpuDelegateV2Delete(delegate);
#elif defined(REAL_IPHONE_DEVICE)
TFLGpuDelegateDelete(delegate);
#endif
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteGpuDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
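// Builds a TFLiteSettings flatbuffer containing empty GPUSettings and
// exercises the C API plugin entry points against it.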
class GpuTest : public testing::Test {
public:
void SetUp() override {
GPUSettingsBuilder gpu_settings_builder(flatbuffer_builder_);
flatbuffers::Offset<GPUSettings> gpu_settings =
gpu_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_gpu_settings(gpu_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~GpuTest() override {}
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
TEST_F(GpuTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate = TfLiteGpuDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteGpuDelegatePluginCApi()->destroy(delegate);
}
TEST_F(GpuTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate = TfLiteGpuDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteGpuDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteGpuDelegatePluginCApi()->destroy(delegate);
}
} |
870 | cpp | tensorflow/tensorflow | neg | tensorflow/lite/kernels/neg.cc | tensorflow/lite/kernels/neg_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void Negate(const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = -input_data[i];
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/neg.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace neg {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
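// Checks that the node has exactly one input and one output, copies the input
// type to the output, and resizes the output to the input's shape.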
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
case kTfLiteFloat32:
reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
break;
default:
TF_LITE_KERNEL_LOG(
context,
"Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_NEG() {
static TfLiteRegistration r = {nullptr, nullptr,
neg::Prepare, neg::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class NegOpModel : public SingleOpModelWithHexagon {
public:
NegOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_NEG, BuiltinOptions_NegOptions,
CreateNegOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
template <typename integer_type>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<integer_type>(input_, data);
}
template <typename integer_type>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_type>(ExtractVector<integer_type>(output_),
GetScale(output_), GetZeroPoint(output_));
}
protected:
int input_;
int output_;
};
TEST(NegOpModel, NegTest_UInt8) {
NegOpModel m({TensorType_UINT8, {2, 3}, -4, 4},
{TensorType_UINT8, {2, 3}, -4, 4});
m.SetQuantizedInput<uint8_t>({-2.0f, -1.0f, 0.f, 1.0f, 2.0f, 3.0f});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({2.0f, 1.0f, 0.f, -1.0f, -2.0f, -3.0f},
0.05)));
}
TEST(NegOpModel, NegTest_Int8) {
NegOpModel m({TensorType_INT8, {2, 3}, -4, 4},
{TensorType_INT8, {2, 3}, -4, 4});
m.SetQuantizedInput<int8_t>({-2.0f, -1.0f, 0.f, 1.0f, 2.0f, 3.0f});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({2.0f, 1.0f, 0.f, -1.0f, -2.0f, -3.0f},
0.05)));
}
} |
871 | cpp | tensorflow/tensorflow | space_to_batch_nd | tensorflow/lite/kernels/space_to_batch_nd.cc | tensorflow/lite/kernels/space_to_batch_nd_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_
#include <cmath>
#include <cstring>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
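// Promotes a 3D [batch, spatial, channels] shape to 4D by inserting a width
// dimension of 1 so the 4D kernel below handles both ranks.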
inline RuntimeShape ExtendShapeSpaceToBatch(const RuntimeShape& shape) {
if (shape.DimensionsCount() == 4) {
return shape;
}
RuntimeShape new_shape(4, 1);
new_shape.SetDim(0, shape.Dims(0));
new_shape.SetDim(1, shape.Dims(1));
new_shape.SetDim(3, shape.Dims(2));
return new_shape;
}
template <typename T>
inline void SpaceToBatchND(const SpaceToBatchParams& params,
const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const int32_t* block_shape_data,
const RuntimeShape& unextended_input3_shape,
const int32_t* paddings_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
ruy::profiler::ScopeLabel label("SpaceToBatchND");
TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3);
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(),
unextended_output_shape.DimensionsCount());
const RuntimeShape input1_shape =
ExtendShapeSpaceToBatch(unextended_input1_shape);
const RuntimeShape output_shape =
ExtendShapeSpaceToBatch(unextended_output_shape);
const int depth = input1_shape.Dims(3);
const int input_width = input1_shape.Dims(2);
const int input_height = input1_shape.Dims(1);
const int input_batch_size = input1_shape.Dims(0);
const int output_width = output_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_batch_size = output_shape.Dims(0);
const int block_shape_height = block_shape_data[0];
const int block_shape_width =
unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1;
const int padding_top = paddings_data[0];
const int padding_left =
unextended_input1_shape.DimensionsCount() == 4 ? paddings_data[2] : 0;
const int32_t pad_value = params.output_offset;
for (int out_b = 0; out_b < output_batch_size; ++out_b) {
int input_batch = out_b % input_batch_size;
int shift_w = (out_b / input_batch_size) % block_shape_width;
int shift_h = (out_b / input_batch_size) / block_shape_width;
for (int out_h = 0; out_h < output_height; ++out_h) {
for (int out_w = 0; out_w < output_width; ++out_w) {
T* out = output_data + Offset(output_shape, out_b, out_h, out_w, 0);
if (out_h * block_shape_height + shift_h < padding_top ||
out_h * block_shape_height + shift_h >=
padding_top + input_height ||
out_w * block_shape_width + shift_w < padding_left ||
out_w * block_shape_width + shift_w >= padding_left + input_width) {
memset(out, pad_value, depth * sizeof(T));
} else {
const T* in =
input1_data +
Offset(input1_shape, input_batch,
(out_h * block_shape_height + shift_h) - padding_top,
(out_w * block_shape_width + shift_w) - padding_left, 0);
memcpy(out, in, depth * sizeof(T));
}
}
}
}
}
}
}
#endif
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace space_to_batch_nd {
enum KernelType {
kReference,
kGenericOptimized,
};
struct SpaceToBatchNDContext {
SpaceToBatchNDContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
block_shape = GetInput(context, node, 1);
paddings = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* block_shape;
const TfLiteTensor* paddings;
TfLiteTensor* output;
};
const int kInputMinDimensionNum = 3;
const int kInputMaxDimensionNum = 4;
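// Computes the output shape: every padded spatial dimension must divide evenly
// by its block size, and the batch dimension is multiplied by the product of
// all block sizes.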
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
SpaceToBatchNDContext* op_context) {
TfLiteIntArray* input_size = op_context->input->dims;
const int32_t* block_shape = GetTensorData<int32_t>(op_context->block_shape);
const int32_t* paddings_data = GetTensorData<int32_t>(op_context->paddings);
int spatial_dims_num = input_size->size - 2;
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape), 1);
TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->paddings), 2);
TF_LITE_ENSURE_EQ(context, op_context->paddings->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, op_context->paddings->dims->data[1], 2);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
int output_batch_size = input_size->data[0];
for (int dim = 0; dim < spatial_dims_num; ++dim) {
int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] +
paddings_data[dim * 2 + 1]);
TF_LITE_ENSURE(context, block_shape[dim] != 0);
TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0);
output_size->data[dim + 1] = final_dim_size / block_shape[dim];
output_batch_size *= block_shape[dim];
}
output_size->data[0] = output_batch_size;
output_size->data[input_size->size - 1] =
input_size->data[input_size->size - 1];
return context->ResizeTensor(context, op_context->output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
SpaceToBatchNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_TYPES_EQ(context, op_context.input->type,
op_context.output->type);
if (op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
}
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(op_context.block_shape) ||
!IsConstantOrPersistentTensor(op_context.paddings)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
SpaceToBatchNDContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
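// Dispatches to either the reference or the optimized SpaceToBatchND
// implementation for the given scalar type; `pad_value` is written into the
// padded regions of the output (the zero point for quantized types, 0
// otherwise).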
#define TF_LITE_SPACE_TO_BATCH_ND(type, scalar, pad_value) \
tflite::SpaceToBatchParams op_params; \
op_params.output_offset = pad_value; \
type::SpaceToBatchND(op_params, GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
GetTensorShape(op_context.block_shape), \
GetTensorData<int32_t>(op_context.block_shape), \
GetTensorShape(op_context.paddings), \
GetTensorData<int32_t>(op_context.paddings), \
GetTensorShape(op_context.output), \
GetTensorData<scalar>(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, float, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, float, 0);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, uint8_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, uint8_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int8_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int8_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int16_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int16_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int32_t, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int32_t, 0);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int64_t, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int64_t, 0);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by SpaceToBatch.",
op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_BATCH_ND
return kTfLiteOk;
}
}  // namespace space_to_batch_nd
TfLiteRegistration* Register_SPACE_TO_BATCH_ND_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, space_to_batch_nd::Prepare,
space_to_batch_nd::Eval<space_to_batch_nd::kReference>};
return &r;
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, space_to_batch_nd::Prepare,
space_to_batch_nd::Eval<space_to_batch_nd::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND() {
return Register_SPACE_TO_BATCH_ND_GENERIC_OPT();
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Matcher;
class SpaceToBatchNDOpModel : public SingleOpModel {
public:
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
template <typename T>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
void SetBlockShape(std::initializer_list<int> data) {
PopulateTensor<int>(block_shape_, data);
}
void SetPaddings(std::initializer_list<int> data) {
PopulateTensor<int>(paddings_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
protected:
int input_;
int block_shape_;
int paddings_;
int output_;
};
class SpaceToBatchNDOpConstModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpConstModel(
const TensorData& input, std::initializer_list<int> block_shape,
std::initializer_list<int> paddings, const TensorData& output,
std::initializer_list<int> paddings_dims = {2, 2}) {
input_ = AddInput(input);
block_shape_ = AddConstInput(TensorType_INT32, block_shape,
{static_cast<int>(block_shape.size())});
paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_dims);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreter({input.shape});
}
};
class SpaceToBatchNDOpDynamicModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpDynamicModel(
const TensorData& input, const TensorData& output,
std::initializer_list<int> block_shape_dims = {2},
std::initializer_list<int> paddings_dims = {2, 2}) {
input_ = AddInput(input);
block_shape_ = AddInput(TensorType_INT32);
paddings_ = AddInput(TensorType_INT32);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreter({input.shape, block_shape_dims, paddings_dims});
}
};
#if GTEST_HAS_DEATH_TEST
TEST(SpaceToBatchNDOpTest, InvalidShapeTest) {
EXPECT_DEATH(
SpaceToBatchNDOpConstModel({TensorType_FLOAT32, {1, 3, 3, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32}),
"Cannot allocate tensors");
}
#endif
TEST(SpaceToBatchNDOpTest, SimpleConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, SimpleDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2, 2});
m.SetPaddings({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, MultipleInputBatchesConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {2, 2, 4, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, MultipleInputBatchesDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2, 2});
m.SetPaddings({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, SimplePaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 5, 2, 1}}, {3, 2},
{1, 0, 2, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(SpaceToBatchNDOpTest, SimplePaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 5, 2, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 0, 2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(SpaceToBatchNDOpTest, ComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 2, 1}}, {3, 2},
{1, 1, 2, 4}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
TEST(SpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 2, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
template <typename integer_dtype = int8_t>
std::vector<Matcher<float>> DequantizedArrayNear(
const std::vector<float>& values, const float min, const float max) {
const float quantization_tolerance =
(max - min) / (std::numeric_limits<integer_dtype>::max() -
std::numeric_limits<integer_dtype>::min());
return ArrayFloatNear(values, quantization_tolerance);
}
#if GTEST_HAS_DEATH_TEST
TEST(QuantizedSpaceToBatchNDOpTest, ZeroNotInQuantizationRange) {
EXPECT_DEATH(SpaceToBatchNDOpConstModel m(
{TensorType_UINT8, {1, 2, 2, 1}, 1.0, 2.0}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, {TensorType_UINT8, {}, 1.0, 2.0}),
".*Check failed: f_min <= 0.*");
}
#endif
template <typename integer_dtype>
void SimplePaddingConstTestQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
SpaceToBatchNDOpConstModel m(
{GetTensorType<integer_dtype>(), {1, 5, 2, 1}, 1.0f * kMin, 1.0f * kMax},
{3, 2}, {1, 0, 2, 0},
{GetTensorType<integer_dtype>(), {}, 1.0f * kMin, 1.0f * kMax});
m.SetQuantizedInput<integer_dtype>(
{-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8, -0.9, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(DequantizedArrayNear<integer_dtype>(
{0, 0, 0, -0.5, 0, 0, 0, 0.6, 0, -0.1, 0, -0.7,
0, 0.2, 0, 0.8, 0, -0.3, 0, -0.9, 0, 0.4, 0, 0.1},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestUint8) {
SimplePaddingConstTestQuant<uint8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestInt8) {
SimplePaddingConstTestQuant<int8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestInt16) {
SimplePaddingConstTestQuant<int16_t>();
}
template <typename integer_dtype>
void SimplePaddingDynamicTestQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
SpaceToBatchNDOpDynamicModel m(
{GetTensorType<integer_dtype>(), {1, 5, 2, 1}, 1.0f * kMin, 1.0f * kMax},
{GetTensorType<integer_dtype>(), {}, 1.0f * kMin, 1.0f * kMax});
m.SetQuantizedInput<integer_dtype>(
{-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8, -0.9, 0.1});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 0, 2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(DequantizedArrayNear<integer_dtype>(
{0, 0, 0, -0.5, 0, 0, 0, 0.6, 0, -0.1, 0, -0.7,
0, 0.2, 0, 0.8, 0, -0.3, 0, -0.9, 0, 0.4, 0, 0.1},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestUint8) {
SimplePaddingDynamicTestQuant<uint8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestInt8) {
SimplePaddingDynamicTestQuant<int8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestInt16) {
SimplePaddingDynamicTestQuant<int16_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, ComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_UINT8, {1, 4, 2, 1}, -1.0, 1.0},
{3, 2}, {1, 1, 2, 4},
{TensorType_UINT8, {}, -1.0, 1.0});
m.SetQuantizedInput<uint8_t>({-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(DequantizedArrayNear(
{
0, 0, 0, 0, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.6, 0, 0,
0, -0.1, 0, 0, 0, -0.7, 0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0,
0, -0.3, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0,
},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_UINT8, {1, 4, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {}, -1.0, 1.0});
m.SetQuantizedInput<uint8_t>({-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(DequantizedArrayNear(
{
0, 0, 0, 0, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.6, 0, 0,
0, -0.1, 0, 0, 0, -0.7, 0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0,
0, -0.3, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0,
},
-1.0, 1.0)));
}
TEST(SpaceToBatchNDOpTest, Simple3DConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4}}, {2}, {0, 0},
{TensorType_FLOAT32}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 9, 10, 11, 12, 5, 6,
7, 8, 13, 14, 15, 16}));
}
TEST(SpaceToBatchNDOpTest, Simple3DPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4}}, {2}, {2, 2},
{TensorType_FLOAT32}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4, 4}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 1, 2, 3, 4, 9, 10, 11, 12, 0, 0, 0, 0,
0, 0, 0, 0, 5, 6, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0}));
}
TEST(SpaceToBatchNDOpTest, Simple3DDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4}},
{TensorType_FLOAT32}, {1}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2});
m.SetPaddings({0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 9, 10, 11, 12, 5, 6,
7, 8, 13, 14, 15, 16}));
}
TEST(SpaceToBatchNDOpTest, Simple3DPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4}},
{TensorType_FLOAT32}, {1}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2});
m.SetPaddings({2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4, 4}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 1, 2, 3, 4, 9, 10, 11, 12, 0, 0, 0, 0,
0, 0, 0, 0, 5, 6, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0}));
}
}
} |
872 | cpp | tensorflow/tensorflow | depth_to_space | tensorflow/lite/kernels/depth_to_space.cc | tensorflow/lite/kernels/depth_to_space_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
const RuntimeShape& unextended_input_shape,
const T* input_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int input_depth = input_shape.Dims(3);
const int input_width = input_shape.Dims(2);
const int input_height = input_shape.Dims(1);
const int input_batch = input_shape.Dims(0);
const int output_depth = output_shape.Dims(3);
const int output_width = output_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_batch = output_shape.Dims(0);
const int32_t block_size = op_params.block_size;
TFLITE_DCHECK_EQ(input_width * block_size, output_width);
TFLITE_DCHECK_EQ(input_height * block_size, output_height);
TFLITE_DCHECK_EQ(input_depth, output_depth * block_size * block_size);
TFLITE_DCHECK_EQ(input_batch, output_batch);
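  // Each output element at (b, h, w, d) is read from input position
  // (b, h / block_size, w / block_size, in_d), where in_d selects the
  // (h % block_size, w % block_size) cell of the block laid out along the
  // input depth:
  //   in_d = d + ((h % block_size) * block_size + w % block_size) * output_depth.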
for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_h = 0; out_h < output_height; ++out_h) {
for (int out_w = 0; out_w < output_width; ++out_w) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
const int in_d =
out_d + ((out_h % block_size) * block_size + out_w % block_size) *
output_depth;
const int in_w = out_w / block_size;
const int in_h = out_h / block_size;
const int in_b = out_b;
const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d);
const int output_index =
Offset(output_shape, out_b, out_h, out_w, out_d);
output_data[output_index] = input_data[input_index];
}
}
}
}
}
}  // namespace reference_ops
}  // namespace tflite
#endif
#include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace depth_to_space {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
auto data_type = output->type;
TF_LITE_ENSURE(context,
data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 ||
data_type == kTfLiteInt8 || data_type == kTfLiteInt32 ||
data_type == kTfLiteInt64);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const int block_size = params->block_size;
TF_LITE_ENSURE(context, block_size > 0);
const int input_height = input->dims->data[1];
const int input_width = input->dims->data[2];
const int input_channels = input->dims->data[3];
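  // The output expands each spatial dimension by block_size and shrinks the
  // channel count by block_size^2; the TF_LITE_ENSURE_EQ checks below verify
  // the dimensions are consistent, in particular that the input channel count
  // is an exact multiple of block_size * block_size.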
int output_height = input_height * block_size;
int output_width = input_width * block_size;
int output_channels = input_channels / block_size / block_size;
TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size);
TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size);
TF_LITE_ENSURE_EQ(context, input_channels,
output_channels * block_size * block_size);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = output_height;
output_size->data[2] = output_width;
output_size->data[3] = output_channels;
return context->ResizeTensor(context, output, output_size);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
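// Instantiates the reference or optimized DepthToSpace implementation for the
// scalar type selected in the switch below.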
#define TF_LITE_DEPTH_TO_SPACE(type, scalar) \
tflite::DepthToSpaceParams op_params; \
op_params.block_size = params->block_size; \
type::DepthToSpace(op_params, GetTensorShape(input), \
GetTensorData<scalar>(input), GetTensorShape(output), \
GetTensorData<scalar>(output))
switch (input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, float);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, float);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, uint8_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, uint8_t);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, int8_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, int8_t);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, int32_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, int32_t);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_DEPTH_TO_SPACE(reference_ops, int64_t);
} else {
TF_LITE_DEPTH_TO_SPACE(optimized_ops, int64_t);
}
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
#undef TF_LITE_DEPTH_TO_SPACE
return kTfLiteOk;
}
}  // namespace depth_to_space
TfLiteRegistration* Register_DEPTH_TO_SPACE_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, depth_to_space::Prepare,
depth_to_space::Eval<depth_to_space::kReference>};
return &r;
}
TfLiteRegistration* Register_DEPTH_TO_SPACE_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, depth_to_space::Prepare,
depth_to_space::Eval<depth_to_space::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_DEPTH_TO_SPACE() {
return Register_DEPTH_TO_SPACE_GENERIC_OPT();
}
}  // namespace builtin
}  // namespace ops
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/depth_to_space_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(DepthToSpace, SinglePixel) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(1)
.InputWidth(1)
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, SingleRow) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto width_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(1)
.InputWidth(width_rng())
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, SingleColumn) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto height_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(height_rng())
.InputWidth(1)
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, FullImage) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(DepthToSpace, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
DepthToSpaceTester()
.BatchSize(batch_rng())
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputChannels(channel_rng())
.BlockSize(block_rng())
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
} |
873 | cpp | tensorflow/tensorflow | acceleration_test_util_internal | tensorflow/lite/kernels/acceleration_test_util_internal.cc | tensorflow/lite/kernels/acceleration_test_util_internal_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_INTERNAL_H_
#define TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_INTERNAL_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <iterator>
#include <optional>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "re2/re2.h"
namespace tflite {
void ReadAccelerationConfig(
const char* config,
const std::function<void(std::string, std::string, bool)>& consumer);
template <typename T>
class ConfigurationEntry {
public:
ConfigurationEntry(const std::string& test_id_rex, T test_config,
bool is_denylist)
: test_id_rex_(test_id_rex),
test_config_(test_config),
is_denylist_(is_denylist) {}
bool Matches(const std::string& test_id) {
return RE2::FullMatch(test_id, test_id_rex_);
}
bool IsDenylistEntry() const { return is_denylist_; }
const T& TestConfig() const { return test_config_; }
const std::string& TestIdRex() const { return test_id_rex_; }
private:
std::string test_id_rex_;
T test_config_;
bool is_denylist_;
};
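// Returns the acceleration-test configuration matching `test_id`, or an empty
// optional if no entry matches or the first matching entry is a denylist
// entry. The configuration returned by T::AccelerationTestConfig() is parsed
// once and cached behind a function-local atomic pointer, so later lookups do
// not reparse it.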
template <typename T>
std::optional<T> GetAccelerationTestParam(std::string test_id) {
static std::atomic<std::vector<ConfigurationEntry<T>>*> test_config_ptr;
if (test_config_ptr.load() == nullptr) {
auto config = new std::vector<ConfigurationEntry<T>>();
auto consumer = [&config](std::string key, std::string value_str,
bool is_denylist) mutable {
T value = T::ParseConfigurationLine(value_str);
config->push_back(ConfigurationEntry<T>(key, value, is_denylist));
};
ReadAccelerationConfig(T::AccelerationTestConfig(), consumer);
auto* prev_val = test_config_ptr.exchange(config);
delete prev_val;
}
const std::vector<ConfigurationEntry<T>>* test_config =
test_config_ptr.load();
const auto test_config_iter = std::find_if(
test_config->begin(), test_config->end(),
[&test_id](ConfigurationEntry<T> elem) { return elem.Matches(test_id); });
if (test_config_iter != test_config->end() &&
!test_config_iter->IsDenylistEntry()) {
return std::optional<T>(test_config_iter->TestConfig());
} else {
return std::optional<T>();
}
}
}  // namespace tflite
#endif
#include "tensorflow/lite/kernels/acceleration_test_util_internal.h"
#include <ctype.h>
#include <algorithm>
#include <functional>
#include <iterator>
#include <sstream>
#include <string>
namespace tflite {
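// Parses the configuration line by line: leading whitespace is trimmed, empty
// lines and lines starting with '#' are skipped, each remaining line is split
// at the first ',' into key and value, and a leading '-' on the key marks the
// entry as a denylist entry.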
void ReadAccelerationConfig(
const char* config,
const std::function<void(std::string, std::string, bool)>& consumer) {
if (config) {
std::istringstream istream{config};
std::string curr_config_line;
while (std::getline(istream, curr_config_line)) {
curr_config_line.erase(
curr_config_line.begin(),
std::find_if_not(curr_config_line.begin(), curr_config_line.end(),
[](int ch) { return std::isspace(ch); }));
if (curr_config_line.empty() || curr_config_line.at(0) == '#') {
continue;
}
auto first_sep_pos =
std::find(curr_config_line.begin(), curr_config_line.end(), ',');
bool is_denylist = false;
std::string key = curr_config_line;
std::string value{};
if (first_sep_pos != curr_config_line.end()) {
key = std::string(curr_config_line.begin(), first_sep_pos);
value = std::string(first_sep_pos + 1, curr_config_line.end());
}
if (key[0] == '-') {
key = key.substr(1);
is_denylist = true;
}
consumer(key, value, is_denylist);
}
}
}
} | #include "tensorflow/lite/kernels/acceleration_test_util_internal.h"
#include <functional>
#include <optional>
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
using ::testing::Eq;
using ::testing::Not;
using ::testing::Test;
struct SimpleConfig {
public:
static constexpr char kAccelerationTestConfig[] =
R"(
#test-id,some-other-data
test-1,data-1
test-2,
test-3,data-3
test-4.*,data-4
-test-5
test-6
test-7,data-7
)";
static const char* AccelerationTestConfig() {
return kAccelerationTestConfig;
}
static SimpleConfig ParseConfigurationLine(const std::string& conf_line) {
return {conf_line};
}
std::string value;
};
class ReadAccelerationConfigTest : public ::testing::Test {
public:
std::unordered_map<std::string, SimpleConfig> allowlist_;
std::unordered_map<std::string, SimpleConfig> denylist_;
std::function<void(std::string, std::string, bool)> consumer_ =
[this](std::string key, std::string value, bool is_denylist) {
if (is_denylist) {
denylist_[key] = {value};
} else {
allowlist_[key] = {value};
}
};
};
TEST_F(ReadAccelerationConfigTest, ReadsAKeyOnlyLine) {
ReadAccelerationConfig("key", consumer_);
EXPECT_THAT(allowlist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsADenylistKeyOnlyLine) {
ReadAccelerationConfig("-key", consumer_);
EXPECT_THAT(denylist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsAKeyValueLine) {
ReadAccelerationConfig("key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsADenyListKeyValueLine) {
ReadAccelerationConfig("-key,value", consumer_);
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, KeysAreLeftTrimmed) {
ReadAccelerationConfig(" key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, BlKeysAreLeftTrimmed) {
ReadAccelerationConfig(" -key,value", consumer_);
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, IgnoresCommentedLines) {
ReadAccelerationConfig("#key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentCanHaveTrailingBlanks) {
ReadAccelerationConfig(" #key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentsAreOnlyForTheFullLine) {
ReadAccelerationConfig("key,value #comment", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value #comment"));
}
TEST_F(ReadAccelerationConfigTest, IgnoresEmptyLines) {
ReadAccelerationConfig("", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLines) {
ReadAccelerationConfig("key1,value1\nkey2,value2\n-key3,value3", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(denylist_["key3"].value, Eq("value3"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithCommentsAndSpaces) {
ReadAccelerationConfig("key1,value1\n#comment\n\nkey2,value2", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithMissingConfigValues) {
ReadAccelerationConfig("key1\nkey2,value2\nkey3\nkey4,value4", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq(""));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(allowlist_["key3"].value, Eq(""));
EXPECT_THAT(allowlist_["key4"].value, Eq("value4"));
}
TEST(GetAccelerationTestParam, LoadsTestConfig) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-3");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq("data-3"));
}
TEST(GetAccelerationTestParam, LoadsTestConfigWithEmptyValue) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-2");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq(""));
}
TEST(GetAccelerationTestParam, SupportsWildcards) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-41");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq("data-4"));
}
TEST(GetAccelerationTestParam, SupportDenylist) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-5");
ASSERT_FALSE(config_value_maybe.has_value());
}
struct UnmatchedSimpleConfig {
public:
static constexpr const char* kAccelerationTestConfig = nullptr;
static const char* AccelerationTestConfig() {
return kAccelerationTestConfig;
}
static UnmatchedSimpleConfig ParseConfigurationLine(
const std::string& conf_line) {
return {conf_line};
}
std::string value;
};
TEST(GetAccelerationTestParam, ReturnEmptyOptionalForNullConfig) {
ASSERT_FALSE(
GetAccelerationTestParam<UnmatchedSimpleConfig>("test-3").has_value());
}
} |
874 | cpp | tensorflow/tensorflow | pooling | third_party/xla/xla/client/lib/pooling.cc | third_party/xla/xla/client/lib/pooling_test.cc | #ifndef XLA_CLIENT_LIB_POOLING_H_
#define XLA_CLIENT_LIB_POOLING_H_
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "xla/client/xla_builder.h"
namespace xla {
class TensorFormat {
public:
TensorFormat(int batch_dimension, int feature_dimension,
absl::Span<const int64_t> spatial_dimensions)
: batch_dimension_(batch_dimension),
feature_dimension_(feature_dimension),
spatial_dimensions_(spatial_dimensions.begin(),
spatial_dimensions.end()) {}
int batch_dimension() const { return batch_dimension_; }
int feature_dimension() const { return feature_dimension_; }
int spatial_dimension(int dim) const { return spatial_dimensions_[dim]; }
int num_spatial_dims() const { return spatial_dimensions_.size(); }
private:
int batch_dimension_;
int feature_dimension_;
absl::InlinedVector<int, 4> spatial_dimensions_;
};
XlaOp MaxPool(XlaOp operand, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride, Padding padding,
const TensorFormat& data_format);
XlaOp AvgPool(XlaOp operand, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> padding,
const TensorFormat& data_format, bool counts_include_padding);
std::vector<std::pair<int64_t, int64_t>> MakeSpatialPadding(
absl::Span<const int64_t> input_size, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride, Padding padding,
const TensorFormat& data_format);
XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64_t> gradients_size,
absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> spatial_padding,
const TensorFormat& data_format, bool counts_include_padding);
}  // namespace xla
#endif
#include "xla/client/lib/pooling.h"
#include <numeric>
#include <utility>
#include <vector>
#include "xla/client/lib/arithmetic.h"
#include "xla/client/lib/constants.h"
#include "xla/client/lib/conv_grad_size_util.h"
namespace xla {
namespace {
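// Divides the window sums by the number of non-padding elements in each
// window. The counts are obtained by building a tensor of ones shaped like
// the input's spatial dimensions, padding it with zeros according to
// `spatial_padding`, and running the same sum reduce-window over it; the sums
// are then divided element-wise, broadcasting the counts over the non-spatial
// dimensions.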
XlaOp AvgPoolDivideByCountWithGeneralPadding(
XlaOp sums, PrimitiveType dtype, absl::Span<const int64_t> input_shape,
absl::Span<const std::pair<int64_t, int64_t>> spatial_padding,
absl::Span<const int64_t> ksize, absl::Span<const int64_t> stride,
const TensorFormat& data_format) {
const int num_spatial_dims = spatial_padding.size();
std::vector<int64_t> input_dim_sizes(num_spatial_dims);
std::vector<int64_t> window_dims(num_spatial_dims);
std::vector<int64_t> window_ksize(num_spatial_dims);
std::vector<int64_t> window_stride(num_spatial_dims);
CHECK_EQ(data_format.num_spatial_dims(), num_spatial_dims)
<< "Invalid number of spatial dimensions in data format specification";
for (int i = 0; i < num_spatial_dims; ++i) {
int dim = data_format.spatial_dimension(i);
input_dim_sizes[i] = input_shape[dim];
window_dims[i] = dim;
window_ksize[i] = ksize[dim];
window_stride[i] = stride[dim];
}
XlaBuilder* b = sums.builder();
auto ones = Broadcast(One(b, dtype), input_dim_sizes);
PaddingConfig padding_config;
for (int i = 0; i < num_spatial_dims; ++i) {
auto dims = padding_config.add_dimensions();
dims->set_edge_padding_low(spatial_padding[i].first);
dims->set_edge_padding_high(spatial_padding[i].second);
}
auto zero = Zero(b, dtype);
auto padded_ones = Pad(ones, zero, padding_config);
auto counts =
ReduceWindow(padded_ones, zero, CreateScalarAddComputation(dtype, b),
window_ksize, window_stride, Padding::kValid);
return Div(sums, counts, window_dims);
}
XlaOp ComputeSums(XlaOp operand, XlaOp init_value,
absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride,
const TensorFormat& data_format) {
XlaBuilder* b = operand.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape operand_shape, b->GetShape(operand));
TF_ASSIGN_OR_RETURN(Shape init_shape, b->GetShape(init_value));
PrimitiveType accumulation_type = init_shape.element_type();
auto add_computation = CreateScalarAddComputation(accumulation_type, b);
return ReduceWindow(operand, init_value, add_computation, kernel_size,
stride, Padding::kValid);
});
}
PaddingConfig MakeSpatialPaddingConfig(
absl::Span<const std::pair<int64_t, int64_t>> spatial_padding,
int num_spatial_dims, absl::Span<const int64_t> stride,
const TensorFormat& data_format) {
PaddingConfig padding_config;
padding_config.mutable_dimensions()->Reserve(2 + num_spatial_dims);
for (int i = 0; i < 2 + num_spatial_dims; ++i) {
padding_config.add_dimensions();
}
CHECK_EQ(data_format.num_spatial_dims(), num_spatial_dims)
<< "Invalid number of spatial dimensions in data format specification";
for (int i = 0; i < num_spatial_dims; ++i) {
int dim = data_format.spatial_dimension(i);
auto padding_dimension = padding_config.mutable_dimensions(dim);
padding_dimension->set_edge_padding_low(spatial_padding[i].first);
padding_dimension->set_edge_padding_high(spatial_padding[i].second);
}
return padding_config;
}
XlaOp AvgPoolDivideByCount(
XlaOp pooled, absl::Span<const int64_t> input_size,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, PrimitiveType dtype,
const TensorFormat& data_format, bool counts_include_padding) {
if (counts_include_padding) {
int64_t window_size =
std::accumulate(window_dimensions.begin(), window_dimensions.end(), 1,
[](int64_t a, int64_t b) { return a * b; });
auto divisor = ConstantR0WithType(pooled.builder(), dtype, window_size);
return pooled / divisor;
} else {
return AvgPoolDivideByCountWithGeneralPadding(pooled, dtype, input_size,
padding, window_dimensions,
window_strides, data_format);
}
}
}  // namespace
XlaOp MaxPool(XlaOp operand, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride, Padding padding,
const TensorFormat& data_format) {
XlaBuilder* b = operand.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape operand_shape, b->GetShape(operand));
PrimitiveType dtype = operand_shape.element_type();
auto max_computation = CreateScalarMaxComputation(dtype, b);
auto init_value = MinValue(b, dtype);
return ReduceWindow(operand, init_value, max_computation, kernel_size,
stride, padding);
});
}
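// AvgPool is implemented as: zero-pad the input according to `padding`, sum
// each window with a reduce-window, then divide by the window element count
// (the full kernel size, or only the non-padding elements when
// `counts_include_padding` is false).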
XlaOp AvgPool(XlaOp operand, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> padding,
const TensorFormat& data_format,
const bool counts_include_padding) {
XlaBuilder* b = operand.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape operand_shape, b->GetShape(operand));
PrimitiveType dtype = operand_shape.element_type();
auto init_value = Zero(b, dtype);
std::vector<int64_t> input_size(operand_shape.dimensions().begin(),
operand_shape.dimensions().end());
const int num_dims = kernel_size.size();
const int num_spatial_dims = num_dims - 2;
auto padding_config = MakeSpatialPaddingConfig(padding, num_spatial_dims,
stride, data_format);
auto padded_operand = Pad(operand, Zero(b, dtype), padding_config);
auto pooled = ComputeSums(padded_operand, init_value, kernel_size, stride,
data_format);
return AvgPoolDivideByCount(pooled, input_size, kernel_size, stride,
padding, dtype, data_format,
counts_include_padding);
});
}
std::vector<std::pair<int64_t, int64_t>> MakeSpatialPadding(
absl::Span<const int64_t> input_size, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride, Padding padding,
const TensorFormat& data_format) {
const int num_spatial_dims = kernel_size.size() - 2;
std::vector<int64_t> input_spatial_dimensions;
std::vector<int64_t> kernel_size_spatial_dimensions;
std::vector<int64_t> stride_spatial_dimensions;
CHECK_EQ(data_format.num_spatial_dims(), num_spatial_dims)
<< "Invalid number of spatial dimensions in data format specification";
for (int i = 0; i < num_spatial_dims; ++i) {
int dim = data_format.spatial_dimension(i);
input_spatial_dimensions.push_back(input_size[dim]);
kernel_size_spatial_dimensions.push_back(kernel_size[dim]);
stride_spatial_dimensions.push_back(stride[dim]);
}
return MakePadding(input_spatial_dimensions, kernel_size_spatial_dimensions,
stride_spatial_dimensions, padding);
}
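// Computes the input gradient of AvgPool. In effect, each output gradient is
// first divided by its window count, then scattered back to the input
// positions it covered: the divided gradients are dilated by the stride
// (interior padding of stride - 1), edge-padded so that a stride-1
// reduce-window with the original kernel accumulates every contribution, and
// finally the original spatial padding is stripped off again with negative
// edge padding.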
XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64_t> gradients_size,
absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> spatial_padding,
const TensorFormat& data_format,
const bool counts_include_padding) {
XlaBuilder* b = out_backprop.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
const int num_dims = kernel_size.size();
const int num_gradients = gradients_size.size();
if (num_gradients != num_dims) {
return tsl::errors::InvalidArgument("gradients must be ", num_dims,
"-dimensional");
}
TF_ASSIGN_OR_RETURN(Shape out_backprop_xla_shape,
b->GetShape(out_backprop));
const int backprop_xla_num_dims =
out_backprop_xla_shape.dimensions().size();
if (backprop_xla_num_dims != num_dims) {
return tsl::errors::InvalidArgument("out_backprop must be ", num_dims,
"-dimensional");
}
PrimitiveType dtype = out_backprop_xla_shape.element_type();
auto out_backprop_div = AvgPoolDivideByCount(
out_backprop, gradients_size, kernel_size, stride, spatial_padding,
dtype, data_format, counts_include_padding);
PaddingConfig padding_config = MakeNoPaddingConfig(num_dims);
std::vector<int64_t> padded_gradients_size(gradients_size.begin(),
gradients_size.end());
const int num_spatial_dims = num_dims - 2;
for (int i = 0; i < num_spatial_dims; ++i) {
int dim = data_format.spatial_dimension(i);
padded_gradients_size[dim] +=
(spatial_padding[i].first + spatial_padding[i].second);
}
for (int i = 0; i < num_spatial_dims; ++i) {
int dim = data_format.spatial_dimension(i);
TF_ASSIGN_OR_RETURN(
SpatialDimensionOutputSizeAndPadding conv_backprop_spatial_dim,
ConvGradExtractAndVerifyDimension(
padded_gradients_size[dim],
kernel_size[dim],
out_backprop_xla_shape.dimensions(dim),
1,
stride[dim], Padding::kValid));
auto* padding = padding_config.mutable_dimensions(dim);
padding->set_edge_padding_low(conv_backprop_spatial_dim.pad_before);
padding->set_edge_padding_high(conv_backprop_spatial_dim.pad_after);
padding->set_interior_padding(stride[dim] - 1);
}
auto zero = Zero(b, dtype);
auto padded_gradients = Pad(out_backprop_div, zero, padding_config);
std::vector<int64_t> ones(num_dims, 1LL);
auto in_backprop =
ReduceWindow(padded_gradients, Zero(b, dtype),
CreateScalarAddComputation(dtype, b), kernel_size,
ones, Padding::kValid);
std::vector<std::pair<int64_t, int64_t>> neg_spatial_padding;
neg_spatial_padding.reserve(spatial_padding.size());
for (const std::pair<int64_t, int64_t>& spatial_padding_dim :
spatial_padding) {
neg_spatial_padding.emplace_back(-spatial_padding_dim.first,
-spatial_padding_dim.second);
}
auto remove_padding_config = MakeSpatialPaddingConfig(
neg_spatial_padding, num_spatial_dims, stride, data_format);
return Pad(in_backprop, zero, remove_padding_config);
});
}
} | #include "xla/client/lib/pooling.h"
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
TensorFormat MakeNCHWFormat(int num_spatial_dims) {
absl::InlinedVector<int64_t, 4> spatial_dimensions;
for (int i = 0; i < num_spatial_dims; ++i) {
spatial_dimensions.push_back(i + 2);
}
return TensorFormat(0, 1,
spatial_dimensions);
}
std::vector<std::pair<int64_t, int64_t>> MakeGeneralPadding(
XlaOp input, absl::Span<const int64_t> kernel_size,
absl::Span<const int64_t> stride, Padding padding,
const xla::TensorFormat& data_format) {
XlaBuilder* b = input.builder();
Shape operand_shape = b->GetShape(input).value();
std::vector<int64_t> input_size(operand_shape.dimensions().begin(),
operand_shape.dimensions().end());
return MakeSpatialPadding(input_size, kernel_size, stride, padding,
data_format);
}
std::vector<int64_t> ExpandWithBatchAndFeatureDimensions(
absl::Span<const int64_t> spatial_dim_sizes,
const xla::TensorFormat& data_format) {
const int num_spatial_dims = spatial_dim_sizes.size();
std::vector<int64_t> tensor_sizes(num_spatial_dims + 2, 1);
for (int i = 0; i < num_spatial_dims; ++i) {
int dim = data_format.spatial_dimension(i);
tensor_sizes[dim] = spatial_dim_sizes[i];
}
return tensor_sizes;
}
class PoolingTest : public ClientLibraryTestBase {
public:
ErrorSpec error_spec_{0.0001};
};
XLA_TEST_F(PoolingTest, MaxPool2D) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = kernel_size;
MaxPool(input, kernel_size, stride, Padding::kValid, data_format);
ComputeAndCompareR4<float>(&builder, {{{{5, 4}}}}, {}, error_spec_);
}
XLA_TEST_F(PoolingTest, MaxPool2DWithPadding) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = kernel_size;
MaxPool(input, kernel_size, stride, Padding::kSame, data_format);
ComputeAndCompareR4<float>(&builder, {{{{5, 4, 5}}}}, {}, error_spec_);
}
XLA_TEST_F(PoolingTest, MaxPool2DWithPaddingAndStride) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({1, 1}, data_format);
MaxPool(input, kernel_size, stride, Padding::kSame, data_format);
ComputeAndCompareR4<float>(&builder, {{{{5, 4, 4, 5, 5}, {5, 4, 3, 2, 1}}}},
{}, error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2D) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = kernel_size;
auto padding = MakeGeneralPadding(input, kernel_size, stride, Padding::kValid,
data_format);
AvgPool(input, kernel_size, stride, padding, data_format,
true);
ComputeAndCompareR4<float>(&builder, {{{{3, 3}}}}, {}, error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2DWithPadding) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = kernel_size;
auto padding = MakeGeneralPadding(input, kernel_size, stride, Padding::kSame,
data_format);
AvgPool(input, kernel_size, stride, padding, data_format,
false);
ComputeAndCompareR4<float>(&builder, {{{{3, 3, 3}}}}, {}, error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2DWithPaddingAndStride) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({1, 1}, data_format);
auto padding = MakeGeneralPadding(input, kernel_size, stride, Padding::kSame,
data_format);
AvgPool(input, kernel_size, stride, padding, data_format,
false);
ComputeAndCompareR4<float>(&builder,
{{{{3, 3, 3, 3, 3}, {4.5, 3.5, 2.5, 1.5, 1}}}}, {},
error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2DWithGeneralPaddingCountNotIncludePadding) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({3, 3}, data_format);
auto stride = kernel_size;
AvgPool(input, kernel_size, stride, {{1, 1}, {2, 1}}, data_format,
false);
ComputeAndCompareR4<float>(&builder, {{{{3, 3}}}}, {}, error_spec_);
}
XLA_TEST_F(PoolingTest,
AvgPool2DWithGeneralPaddingCountNotIncludePaddingAndStride) {
XlaBuilder builder(TestName());
XlaOp input = ConstantR4FromArray4D<float>(
&builder, {{{{1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({3, 3}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
AvgPool(input, kernel_size, stride, {{2, 1}, {1, 1}}, data_format,
false);
ComputeAndCompareR4<float>(&builder, {{{{1.5, 3, 4.5}, {3, 3, 3}}}}, {},
error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2DGradNoPadding) {
XlaBuilder builder(TestName());
for (bool counts_include_padding : {false, true}) {
XlaOp out_backprop = ConstantR4FromArray4D<float>(&builder, {{{{1.}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride,
{{0, 0}, {0, 0}}, MakeNCHWFormat(2),
counts_include_padding);
ComputeAndCompareR4<float>(
&builder, {{{{0.25, 0.25, 0.}, {0.25, 0.25, 0.}, {0., 0., 0.}}}}, {},
error_spec_);
}
}
XLA_TEST_F(PoolingTest, AvgPool2DGradNoPaddingWithStride) {
XlaBuilder builder(TestName());
for (bool counts_include_padding : {false, true}) {
XlaOp out_backprop =
ConstantR4FromArray4D<float>(&builder, {{{{1., 1.}, {1., 1.}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({1, 1}, data_format);
AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride,
{{0, 0}, {0, 0}}, MakeNCHWFormat(2),
counts_include_padding);
ComputeAndCompareR4<float>(
&builder, {{{{0.25, 0.5, 0.25}, {0.5, 1., 0.5}, {0.25, 0.5, 0.25}}}},
{}, error_spec_);
}
}
XLA_TEST_F(PoolingTest, AvgPool2DGradWithPadding) {
XlaBuilder builder(TestName());
XlaOp out_backprop =
ConstantR4FromArray4D<float>(&builder, {{{{1., 1.}, {1., 1.}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},
MakeNCHWFormat(2),
true);
ComputeAndCompareR4<float>(
&builder,
{{{{0.25, 0.25, 0.25}, {0.25, 0.25, 0.25}, {0.25, 0.25, 0.25}}}}, {},
error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2DGradWithPaddingCountNotIncludePadding) {
XlaBuilder builder(TestName());
XlaOp out_backprop =
ConstantR4FromArray4D<float>(&builder, {{{{1., 1.}, {1., 1.}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},
MakeNCHWFormat(2), false);
ComputeAndCompareR4<float>(
&builder, {{{{1., 0.5, 0.5}, {0.5, 0.25, 0.25}, {0.5, 0.25, 0.25}}}}, {},
error_spec_);
}
XLA_TEST_F(PoolingTest, AvgPool2DGradWithPaddingCountWithStride) {
XlaBuilder builder(TestName());
XlaOp out_backprop =
ConstantR4FromArray4D<float>(&builder, {{{{1., 1., 1., 1.},
{1., 1., 1., 1.},
{1., 1., 1., 1.},
{1., 1., 1., 1.}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({1, 1}, data_format);
AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},
MakeNCHWFormat(2), true);
ComputeAndCompareR4<float>(&builder,
{{{{1., 1., 1.}, {1., 1., 1.}, {1., 1., 1.}}}}, {},
error_spec_);
}
XLA_TEST_F(PoolingTest,
AvgPool2DGradWithPaddingCountWithStrideNotIncludePadding) {
XlaBuilder builder(TestName());
XlaOp out_backprop =
ConstantR4FromArray4D<float>(&builder, {{{{1., 1., 1., 1.},
{1., 1., 1., 1.},
{1., 1., 1., 1.},
{1., 1., 1., 1.}}}});
auto data_format = MakeNCHWFormat(2);
auto kernel_size = ExpandWithBatchAndFeatureDimensions({2, 2}, data_format);
auto stride = ExpandWithBatchAndFeatureDimensions({1, 1}, data_format);
AvgPoolGrad(out_backprop, {1, 1, 3, 3}, kernel_size, stride, {{1, 1}, {1, 1}},
MakeNCHWFormat(2), false);
ComputeAndCompareR4<float>(
&builder, {{{{2.25, 1.5, 2.25}, {1.5, 1., 1.5}, {2.25, 1.5, 2.25}}}}, {},
error_spec_);
}
}
} |
875 | cpp | tensorflow/tensorflow | reduce | tensorflow/lite/kernels/reduce.cc | third_party/xla/xla/tests/reduce_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_LEGALIZE_HLO_CONVERSIONS_REDUCE_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_STABLEHLO_TRANSFORMS_LEGALIZE_HLO_CONVERSIONS_REDUCE_H_
#include <cstdint>
#include <optional>
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
LogicalResult MatchReduceToArgMinMaxType1(mhlo::ReduceOp reduce_op,
bool is_float, bool is_argmax);
LogicalResult MatchReduceToArgMinMaxType2(mhlo::ReduceOp reduce_op,
bool is_argmax);
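// Pattern that rewrites an mhlo.reduce implementing arg max/min -- two inputs
// (values and an iota of indices) reduced along a single dimension -- into a
// value reduction (Reduce, or BooleanReduce for i1 values) followed by an
// ArgReduce over the same axis.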
template <typename Reduce, typename ArgReduce, typename BooleanReduce,
bool is_argmax>
class ConvertReduceOpToArgMinMax : public OpConversionPattern<mhlo::ReduceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceOp reduce_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
if (reduce_op.getInputs().size() != 2) return failure();
if (reduce_op.getDimensions().getNumElements() != 1) return failure();
DenseElementsAttr operand_init;
if (!matchPattern(reduce_op.getInitValues().front(),
m_Constant(&operand_init)))
return failure();
if (!IsValueInitValue(operand_init)) return failure();
DenseElementsAttr iota_init;
if (!matchPattern(reduce_op.getInitValues().back(), m_Constant(&iota_init)))
return failure();
if (iota_init.getValues<APInt>()[0] != 0) return failure();
Value iota = reduce_op.getInputs().back();
if (!MatchIota(reduce_op.getDimensions(), iota)) return failure();
const bool is_float = mlir::isa<FloatType>(operand_init.getElementType());
if (failed(MatchReduceToArgMinMaxType1(reduce_op, is_float, is_argmax)) &&
failed(MatchReduceToArgMinMaxType2(reduce_op, is_argmax)))
return rewriter.notifyMatchFailure(
reduce_op, "Unsupported Reduce -> ArgMax/ArgMin pattern");
Value operand = reduce_op.getInputs().front();
int64_t axis = reduce_op.getDimensions().getValues<int64_t>()[0];
auto dim_type = RankedTensorType::get({1}, rewriter.getI32Type());
auto reduction_indices = rewriter.create<arith::ConstantOp>(
reduce_op.getLoc(), dim_type,
rewriter.getI32TensorAttr({static_cast<int32_t>(axis)}));
if (!mlir::isa<ShapedType>(operand.getType())) return failure();
auto operand_type = mlir::cast<ShapedType>(operand.getType());
if (operand_type.getElementType().isInteger(1)) {
auto tf_reduce_op = rewriter.create<BooleanReduce>(
reduce_op.getLoc(), reduce_op->getResult(0).getType(), operand,
reduction_indices,
rewriter.getBoolAttr(false));
auto tf_argreduce_op = rewriter.create<ArgReduce>(
reduce_op.getLoc(), reduce_op->getResult(1).getType(), operand,
reduction_indices);
rewriter.replaceOp(reduce_op, {tf_reduce_op, tf_argreduce_op});
} else {
auto tf_reduce_op = rewriter.create<Reduce>(
reduce_op.getLoc(), reduce_op->getResult(0).getType(), operand,
reduction_indices,
rewriter.getBoolAttr(false));
auto tf_argreduce_op = rewriter.create<ArgReduce>(
reduce_op.getLoc(), reduce_op->getResult(1).getType(), operand,
reduction_indices);
rewriter.replaceOp(reduce_op, {tf_reduce_op, tf_argreduce_op});
}
return success();
}
virtual bool IsValueInitValue(const DenseElementsAttr& attr) const = 0;
};
std::optional<bool> IsReduceOpLegal(mhlo::ReduceOp reduce_op);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h"
#include <optional>
#include "llvm/Support/Casting.h"
#include "mlir/IR/Block.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
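// Matches the reduce region variant where the value result uses a GE (argmax)
// or LE (argmin) comparison and the index result is
// select(values_equal, min(idx_a, idx_b), select(value_cmp, idx_a, idx_b)),
// so ties keep the smaller index.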
LogicalResult MatchReduceToArgMinMaxType2(mhlo::ReduceOp reduce_op,
bool is_argmax) {
Block& body = reduce_op.getBody().front();
if (body.getNumArguments() != 4) return failure();
mhlo::ReturnOp return_op = dyn_cast<mhlo::ReturnOp>(body.back());
if (!return_op || return_op.getNumOperands() != 2) return failure();
mhlo::SelectOp value_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(0).getDefiningOp());
if (!value_select || value_select.getOnTrue() != body.getArgument(0) ||
value_select.getOnFalse() != body.getArgument(2))
return failure();
auto compare_direction_included =
is_argmax ? mhlo::ComparisonDirection::GE : mhlo::ComparisonDirection::LE;
mhlo::CompareOp value_gt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_select.getOperand(0).getDefiningOp());
if (!value_gt ||
value_gt.getComparisonDirection() != compare_direction_included ||
value_gt.getLhs() != body.getArgument(0) ||
value_gt.getRhs() != body.getArgument(2))
return failure();
mhlo::SelectOp index_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(1).getDefiningOp());
if (!index_select) return failure();
mhlo::MinOp index_select_min = llvm::dyn_cast_or_null<mhlo::MinOp>(
index_select.getOnTrue().getDefiningOp());
if (!index_select_min || index_select_min.getLhs() != body.getArgument(1) ||
index_select_min.getRhs() != body.getArgument(3))
return failure();
mhlo::SelectOp index_select_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
index_select.getOnFalse().getDefiningOp());
if (!index_select_select ||
index_select_select.getOnTrue() != body.getArgument(1) ||
index_select_select.getOnFalse() != body.getArgument(3) ||
index_select_select.getOperand(0).getDefiningOp() != value_gt)
return failure();
mhlo::CompareOp value_eq = llvm::dyn_cast_or_null<mhlo::CompareOp>(
index_select.getOperand(0).getDefiningOp());
if (!value_eq ||
value_eq.getComparisonDirection() != mhlo::ComparisonDirection::EQ ||
value_eq.getLhs() != body.getArgument(0) ||
value_eq.getRhs() != body.getArgument(2))
return failure();
return success();
}
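// Matches the reduce region variant whose value comparison is GT (argmax) or
// LT (argmin); for floats it is OR-ed with a NE self-comparison so NaNs win,
// and the index result keeps the smaller index when the values tie.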
LogicalResult MatchReduceToArgMinMaxType1(mhlo::ReduceOp reduce_op,
bool is_float, bool is_argmax) {
Block& body = reduce_op.getBody().front();
if (body.getNumArguments() != 4) return failure();
mhlo::ReturnOp return_op = dyn_cast<mhlo::ReturnOp>(body.back());
if (!return_op || return_op.getNumOperands() != 2) return failure();
mhlo::SelectOp value_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(0).getDefiningOp());
if (!value_select || value_select.getOnTrue() != body.getArgument(0) ||
value_select.getOnFalse() != body.getArgument(2))
return failure();
auto compare_direction =
is_argmax ? mhlo::ComparisonDirection::GT : mhlo::ComparisonDirection::LT;
if (is_float) {
mhlo::OrOp value_or = llvm::dyn_cast_or_null<mhlo::OrOp>(
value_select.getOperand(0).getDefiningOp());
if (!value_or) return failure();
mhlo::CompareOp value_gt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_or.getLhs().getDefiningOp());
if (!value_gt || value_gt.getComparisonDirection() != compare_direction ||
value_gt.getLhs() != body.getArgument(0) ||
value_gt.getRhs() != body.getArgument(2))
return failure();
mhlo::CompareOp value_ne = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_or.getRhs().getDefiningOp());
if (!value_ne ||
value_ne.getComparisonDirection() != mhlo::ComparisonDirection::NE ||
value_ne.getLhs() != body.getArgument(0) ||
value_ne.getRhs() != body.getArgument(0))
return failure();
} else {
mhlo::CompareOp value_gt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_select.getOperand(0).getDefiningOp());
if (!value_gt || value_gt.getComparisonDirection() != compare_direction ||
value_gt.getLhs() != body.getArgument(0) ||
value_gt.getRhs() != body.getArgument(2))
return failure();
}
mhlo::SelectOp index_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(1).getDefiningOp());
if (!index_select || index_select.getOnTrue() != body.getArgument(1) ||
index_select.getOnFalse() != body.getArgument(3))
return failure();
mhlo::OrOp index_or = llvm::dyn_cast_or_null<mhlo::OrOp>(
index_select.getPred().getDefiningOp());
if (!index_or || index_or.getLhs() != value_select.getPred())
return failure();
mhlo::AndOp index_and =
llvm::dyn_cast_or_null<mhlo::AndOp>(index_or.getRhs().getDefiningOp());
if (!index_and) return failure();
mhlo::CompareOp value_eq = llvm::dyn_cast_or_null<mhlo::CompareOp>(
index_and.getLhs().getDefiningOp());
if (!value_eq ||
value_eq.getComparisonDirection() != mhlo::ComparisonDirection::EQ ||
value_eq.getLhs() != body.getArgument(0) ||
value_eq.getRhs() != body.getArgument(2))
return failure();
mhlo::CompareOp index_lt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
index_and.getRhs().getDefiningOp());
if (!index_lt ||
index_lt.getComparisonDirection() != mhlo::ComparisonDirection::LT ||
index_lt.getLhs() != body.getArgument(1) ||
index_lt.getRhs() != body.getArgument(3))
return failure();
return success();
}
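// Reports the reduce as illegal (returns false) when it matches one of the
// argmin/argmax patterns above, so the conversion patterns fire; otherwise it
// stays legal.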
std::optional<bool> IsReduceOpLegal(mhlo::ReduceOp reduce_op) {
if (succeeded(MatchReduceToArgMinMaxType1(reduce_op, true, true)) ||
succeeded(MatchReduceToArgMinMaxType1(reduce_op, false, true)) ||
succeeded(MatchReduceToArgMinMaxType1(reduce_op, true, false)) ||
succeeded(MatchReduceToArgMinMaxType1(reduce_op, false, false)) ||
succeeded(MatchReduceToArgMinMaxType2(reduce_op, false)) ||
succeeded(MatchReduceToArgMinMaxType2(reduce_op, true))) {
return false;
}
return true;
}
}
} | #include <stdlib.h>
#include <algorithm>
#include <array>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <ostream>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array4d.h"
#include "xla/client/global_data.h"
#include "xla/client/lib/arithmetic.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/reference_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using FuncGeneratorForType = XlaComputation (*)(PrimitiveType, XlaBuilder*);
using FuncGenerator = XlaComputation (*)(XlaBuilder*);
class ReduceTest : public ClientLibraryTestBase {
protected:
ReduceTest() {
literal_2d_ = LiteralUtil::CreateR2<float>({
{ 1.f, 2.f, 3.f},
{ 4.f, 5.f, 6.f},
});
literal_3d_ = LiteralUtil::CreateR3Projected<float>({
{ 1.f, 2.f, 3.f},
{ 4.f, 5.f, 6.f},
}, 4);
CHECK(ShapeUtil::Equal(
literal_3d_.shape(),
ShapeUtil::MakeShape(F32, {4, 2, 3})))
<< literal_3d_.shape().ShortDebugString();
}
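// Sums a rank-1 F32 array of `element_count` small pseudo-random values
// (integers in [-2, 2]) on the device and compares against a host-side
// accumulation.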
void RunR1ToR0Test(int64_t element_count) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {element_count});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<float>(&builder, 0.0);
Reduce(input, zero, add_f32, {0});
std::minstd_rand rng(seed_);
std::vector<float> input_data(element_count);
for (int64_t i = 0; i < element_count; ++i) {
input_data[i] = rng() % 3;
if (rng() % 2 == 0) {
input_data[i] *= -1;
}
}
Literal input_literal =
LiteralUtil::CreateR1(absl::MakeConstSpan(input_data));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
float expected = absl::c_accumulate(input_data, 0.0f);
ComputeAndCompareR0<float>(&builder, expected, {input_global_data.get()},
ErrorSpec(0.001));
}
void RunR1ToR0PredTest(bool and_reduce, absl::Span<const int> input_data) {
const int element_count = input_data.size();
XlaBuilder builder(TestName());
const Shape input_shape = ShapeUtil::MakeShape(S32, {element_count});
auto input_par = Parameter(&builder, 0, input_shape, "input");
auto pred_values =
Eq(input_par, ConstantR1<int>(&builder, element_count, 1));
XlaOp init_value;
XlaComputation reduce;
if (and_reduce) {
init_value = ConstantR0<bool>(&builder, true);
reduce = CreateScalarAndComputation(PRED, &builder);
} else {
init_value = ConstantR0<bool>(&builder, false);
reduce = CreateScalarOrComputation(PRED, &builder);
}
Reduce(pred_values, init_value, reduce,
{0});
Literal input_literal = LiteralUtil::CreateR1(input_data);
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
bool expected = and_reduce;
for (bool item : input_data) {
if (and_reduce) {
expected = expected && item;
} else {
expected = expected || item;
}
}
ComputeAndCompareR0<bool>(&builder, expected, {input_global_data.get()});
}
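// Column-reduces a `rows` x `cols` U8 matrix, interpreted as a predicate via
// Eq(input, 1), with AND or OR along dimension 0; `minor`/`major` select the
// input layout.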
template <int64_t cols>
void RunR2ToR1PredTest(bool and_reduce, int64_t rows, int64_t minor = 1,
int64_t major = 0) {
XlaBuilder builder(TestName());
const Shape input_shape = ShapeUtil::MakeShape(U8, {rows, cols});
auto input = Parameter(&builder, 0, input_shape, "input");
auto input_pred = Eq(input, ConstantR0<uint8_t>(&builder, 1));
XlaOp init_value;
XlaComputation reduce_op;
if (and_reduce) {
init_value = ConstantR0<bool>(&builder, true);
reduce_op = CreateScalarAndComputation(PRED, &builder);
} else {
init_value = ConstantR0<bool>(&builder, false);
reduce_op = CreateScalarOrComputation(PRED, &builder);
}
Reduce(input_pred, init_value, reduce_op,
{0});
Array2D<uint8_t> input_data(rows, cols);
input_data.FillRandom(0, 1);
Literal input_literal = LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal.Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
std::array<bool, cols> expected;
for (int64_t colno = 0; colno < cols; ++colno) {
bool column_sum = and_reduce ? true : false;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
if (and_reduce) {
column_sum = column_sum && input_data(rowno, colno);
} else {
column_sum = column_sum || input_data(rowno, colno);
}
}
expected[colno] = column_sum;
}
ComputeAndCompareR1<bool>(&builder, expected, {input_global_data.get()});
}
void RunR2ToR0Test(int64_t rows, int64_t cols, int64_t minor = 1,
int64_t major = 0) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<float>(&builder, 0.0);
Reduce(input, zero, add_f32, {0, 1});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
Literal input_literal = LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal.Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
float expected = 0.0;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
for (int64_t colno = 0; colno < cols; ++colno) {
expected += input_data(rowno, colno);
}
}
ComputeAndCompareR0<float>(&builder, expected, {input_global_data.get()},
ErrorSpec(0.01, 1e-4));
}
void RunR2ToR1Test(int64_t rows, int64_t cols, int64_t minor = 1,
int64_t major = 0) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<float>(&builder, 0.0);
Reduce(input, zero, add_f32, {0});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
Literal input_literal = LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal.Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
std::vector<float> expected;
expected.reserve(cols);
for (int64_t colno = 0; colno < cols; ++colno) {
float column_sum = 0;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
column_sum += input_data(rowno, colno);
}
expected.push_back(column_sum);
}
ComputeAndCompareR1<float>(&builder, expected, {input_global_data.get()},
ErrorSpec(0.01, 1e-4));
}
template <typename NativeT>
void ComputeAndCompareGeneric(
typename std::enable_if<std::is_floating_point<NativeT>::value,
XlaBuilder>::type* builder,
absl::Span<const NativeT> expected,
absl::Span<GlobalData* const> arguments) {
ComputeAndCompareR1<NativeT>(builder, expected, arguments,
ErrorSpec(0.01, 1e-4));
}
template <typename NativeT>
void ComputeAndCompareGeneric(
typename std::enable_if<std::is_integral<NativeT>::value,
XlaBuilder>::type* builder,
absl::Span<const NativeT> expected,
absl::Span<GlobalData* const> arguments) {
ComputeAndCompareR1<NativeT>(builder, expected, arguments);
}
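// Reduces a 64x128 matrix of unique values along dimension 0 with the
// generated reduction computation and checks every column against
// `reference_reduction_function` folded from `initial_value`.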
template <typename NativeT>
void RunVectorizedReduceTestForType(
const std::function<XlaComputation(XlaBuilder*)>&
reduction_function_generator,
const std::function<NativeT(NativeT, NativeT)>&
reference_reduction_function,
const NativeT& initial_value) {
const int rows = 64, cols = 128;
const int minor = 1, major = 0;
XlaBuilder builder(TestName());
XlaComputation reduction_function = reduction_function_generator(&builder);
const Shape input_shape = ShapeUtil::MakeShape(
xla::primitive_util::NativeToPrimitiveType<NativeT>(), {rows, cols});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<NativeT>(&builder, initial_value);
Reduce(input, zero, reduction_function,
{0});
Array2D<NativeT> input_data(rows, cols);
input_data.FillUnique(initial_value);
Literal input_literal = LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal.Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
std::unique_ptr<NativeT[]> expected(new NativeT[cols]);
for (int64_t colno = 0; colno < cols; ++colno) {
NativeT column_result = initial_value;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
column_result = reference_reduction_function(column_result,
input_data(rowno, colno));
}
expected[colno] = column_result;
}
ComputeAndCompareGeneric<NativeT>(
&builder, absl::Span<const NativeT>(expected.get(), cols),
{input_global_data.get()});
}
void RunVectorizedReduceTest(
const std::function<XlaComputation(PrimitiveType, XlaBuilder*)>&
reduction_function_generator_for_type,
const std::function<float(float, float)>&
reference_reduction_function_for_floats,
const std::function<int32_t(int32_t, int32_t)>&
reference_reduction_function_for_ints,
const std::function<uint32_t(uint32_t, uint32_t)>&
reference_reduction_function_for_uints,
float floating_point_identity, int32_t signed_int_identity,
uint32_t unsigned_int_identity) {
RunVectorizedReduceTestForType<float>(
[&](XlaBuilder* builder) {
return reduction_function_generator_for_type(F32, builder);
},
reference_reduction_function_for_floats, floating_point_identity);
RunVectorizedReduceTestForType<int32_t>(
[&](XlaBuilder* builder) {
return reduction_function_generator_for_type(S32, builder);
},
reference_reduction_function_for_ints, signed_int_identity);
RunVectorizedReduceTestForType<uint32_t>(
[&](XlaBuilder* builder) {
return reduction_function_generator_for_type(U32, builder);
},
reference_reduction_function_for_uints, unsigned_int_identity);
}
Literal literal_2d_;
Literal literal_3d_;
uint32_t seed_ = 0xdeadbeef;
};
XLA_TEST_F(ReduceTest, ReduceR1_0_F32_To_R0) { RunR1ToR0Test(0); }
XLA_TEST_F(ReduceTest, ReduceR1_1_F32_To_R0) { RunR1ToR0Test(1); }
XLA_TEST_F(ReduceTest, ReduceR1_2_F32_To_R0) { RunR1ToR0Test(2); }
XLA_TEST_F(ReduceTest, ReduceR1_16_F32_To_R0) { RunR1ToR0Test(16); }
XLA_TEST_F(ReduceTest, ReduceR1_128_F32_To_R0) { RunR1ToR0Test(128); }
XLA_TEST_F(ReduceTest, ReduceR1_129_F32_To_R0) { RunR1ToR0Test(129); }
XLA_TEST_F(ReduceTest, ReduceR1_240_F32_To_R0) { RunR1ToR0Test(240); }
XLA_TEST_F(ReduceTest, ReduceR1_256_F32_To_R0) { RunR1ToR0Test(256); }
XLA_TEST_F(ReduceTest, ReduceR1_1024_F32_To_R0) { RunR1ToR0Test(1024); }
XLA_TEST_F(ReduceTest, ReduceR1_2048_F32_To_R0) { RunR1ToR0Test(2048); }
XLA_TEST_F(ReduceTest, ReduceR1_16K_F32_To_R0) { RunR1ToR0Test(16 * 1024); }
XLA_TEST_F(ReduceTest, ReduceR1_16KP1_F32_To_R0) {
RunR1ToR0Test(16 * 1024 + 1);
}
XLA_TEST_F(ReduceTest, ReduceR1_64K_F32_To_R0) { RunR1ToR0Test(64 * 1024); }
XLA_TEST_F(ReduceTest, ReduceR1_1M_F32_To_R0) { RunR1ToR0Test(1024 * 1024); }
XLA_TEST_F(ReduceTest, ReduceR1_16M_F32_To_R0) { RunR1ToR0Test(4096 * 4096); }
XLA_TEST_F(ReduceTest, ReduceR2_0x0_To_R0) { RunR2ToR0Test(0, 0); }
XLA_TEST_F(ReduceTest, ReduceR2_0x2_To_R0) { RunR2ToR0Test(0, 2); }
XLA_TEST_F(ReduceTest, ReduceR2_1x1_To_R0) { RunR2ToR0Test(1, 1); }
XLA_TEST_F(ReduceTest, ReduceR2_2x0_To_R0) { RunR2ToR0Test(2, 0); }
XLA_TEST_F(ReduceTest, ReduceR2_2x2_To_R0) { RunR2ToR0Test(2, 2); }
XLA_TEST_F(ReduceTest, ReduceR2_8x8_To_R0) { RunR2ToR0Test(8, 8); }
XLA_TEST_F(ReduceTest, ReduceR2_9x9_To_R0) { RunR2ToR0Test(9, 9); }
XLA_TEST_F(ReduceTest, ReduceR2_50x111_To_R0) { RunR2ToR0Test(50, 111); }
XLA_TEST_F(ReduceTest, ReduceR2_111x50_To_R0) { RunR2ToR0Test(111, 50); }
XLA_TEST_F(ReduceTest, ReduceR2_111x50_01_To_R0) {
RunR2ToR0Test(111, 50, 0, 1);
}
XLA_TEST_F(ReduceTest, ReduceR2_1024x1024_To_R0) { RunR2ToR0Test(1024, 1024); }
XLA_TEST_F(ReduceTest, ReduceR2_1000x1500_To_R0) { RunR2ToR0Test(1000, 1500); }
XLA_TEST_F(ReduceTest, ReduceR2_0x2_To_R1) { RunR2ToR1Test(0, 2); }
XLA_TEST_F(ReduceTest, ReduceR2_1x1_To_R1) { RunR2ToR1Test(1, 1); }
XLA_TEST_F(ReduceTest, ReduceR2_2x2_To_R1) { RunR2ToR1Test(2, 2); }
XLA_TEST_F(ReduceTest, ReduceR2_8x8_To_R1) { RunR2ToR1Test(8, 8); }
XLA_TEST_F(ReduceTest, ReduceR2_9x9_To_R1) { RunR2ToR1Test(9, 9); }
XLA_TEST_F(ReduceTest, ReduceR2_50x111_To_R1) { RunR2ToR1Test(50, 111); }
XLA_TEST_F(ReduceTest, ReduceR2_111x50_To_R1) { RunR2ToR1Test(111, 50); }
XLA_TEST_F(ReduceTest, ReduceR2_111x50_01_To_R1) {
RunR2ToR1Test(111, 50, 0, 1);
}
XLA_TEST_F(ReduceTest, ReduceR2_1024x1024_To_R1) { RunR2ToR1Test(1024, 1024); }
XLA_TEST_F(ReduceTest, ReduceR2_1000x1500_To_R1) { RunR2ToR1Test(1000, 1500); }
XLA_TEST_F(ReduceTest, AndReduceAllOnesR1_10_Pred) {
constexpr int element_count = 10;
std::vector<int> input(element_count, 1);
RunR1ToR0PredTest(true, input);
}
XLA_TEST_F(ReduceTest, AndReduceOnesAndZerosR1_10_Pred) {
constexpr int element_count = 10;
std::vector<int> input(element_count);
for (int i = 0; i < element_count; ++i) {
input[i] = i % 2;
}
RunR1ToR0PredTest(true, input);
}
XLA_TEST_F(ReduceTest, OrReduceAllOnesR1_10_Pred) {
constexpr int element_count = 10;
std::vector<int> input(element_count, 1);
RunR1ToR0PredTest(false, input);
}
XLA_TEST_F(ReduceTest, OrReduceOnesAndZerosR1_10_Pred) {
constexpr int element_count = 10;
std::vector<int> input(element_count);
for (int i = 0; i < element_count; ++i) {
input[i] = i % 2;
}
RunR1ToR0PredTest(false, input);
}
XLA_TEST_F(ReduceTest, ReduceElementwiseR2_111x50_To_R1) {
const int64_t rows = 111, cols = 50;
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<float>(&builder, 0.0);
auto log_ = Log(input);
Reduce(log_, zero, add_f32, {0});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
Literal input_literal = LiteralUtil::CreateR2FromArray2D(input_data);
input_literal = input_literal.Relayout(LayoutUtil::MakeLayout({0, 1}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
std::vector<float> expected;
expected.reserve(cols);
for (int64_t colno = 0; colno < cols; ++colno) {
float column_sum = 0;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
column_sum += std::log(input_data(rowno, colno));
}
expected.push_back(column_sum);
}
ComputeAndCompareR1<float>(&builder, expected, {input_global_data.get()},
ErrorSpec(0.01, 1e-4));
}
XLA_TEST_F(ReduceTest, TransposeAndReduceElementwiseR2_111x50_To_R1) {
const int64_t rows = 111, cols = 50;
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<float>(&builder, 0.0);
auto log_ = Log(input);
auto transpose = Transpose(log_, {1, 0});
Reduce(transpose, zero, add_f32, {1});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
Literal input_literal = LiteralUtil::CreateR2FromArray2D(input_data);
input_literal = input_literal.Relayout(LayoutUtil::MakeLayout({0, 1}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
std::vector<float> expected;
expected.reserve(cols);
for (int64_t colno = 0; colno < cols; ++colno) {
float column_sum = 0;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
column_sum += std::log(input_data(rowno, colno));
}
expected.push_back(column_sum);
}
ComputeAndCompareR1<float>(&builder, expected, {input_global_data.get()},
ErrorSpec(0.01, 1e-4));
}
XLA_TEST_F(ReduceTest, TransposeAndReduceR3_12x111x50_To_R2) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {12, 111, 50});
XlaOp input = Parameter(&builder, 0, input_shape, "input");
XlaOp zero = ConstantR0<float>(&builder, 0.0);
XlaOp transpose = Transpose(input, {1, 0, 2});
Reduce(transpose, zero, add_f32, {0});
TF_ASSERT_OK_AND_ASSIGN(Literal input_data, MakeFakeLiteral(input_shape));
ComputeAndCompare(&builder, {std::move(input_data)}, ErrorSpec(0.01, 1e-4));
}
XLA_TEST_F(ReduceTest, Reshape_111x2x25Reduce_111x50_To_R1) {
const int64_t rows = 111, cols = 50;
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, 2, cols / 2});
auto input = Parameter(&builder, 0, input_shape, "input");
auto zero = ConstantR0<float>(&builder, 0.0);
auto tanh_ = Tanh(input);
auto reshape = Reshape(tanh_, {rows, cols});
Reduce(reshape, zero, add_f32, {0});
Array3D<float> input_data(rows, 2, cols / 2);
input_data.FillRandom(3.14f, 0.04);
Literal input_literal = LiteralUtil::CreateR3FromArray3D(input_data);
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(input_literal).value();
std::vector<float> expected;
expected.reserve(cols);
for (int64_t major = 0; major < 2; ++major) {
for (int64_t colno = 0; colno < cols / 2; ++colno) {
float column_sum = 0;
for (int64_t rowno = 0; rowno < rows; ++rowno) {
column_sum += std::tanh(input_data(rowno, major, colno));
}
expected.push_back(column_sum);
}
}
ComputeAndCompareR1<float>(&builder, expected, {input_global_data.get()},
ErrorSpec(0.01, 1e-4));
}
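// Parameterization for the R3->R2 reduction tests below: input bounds, input
// minor-to-major layout, and the dimensions to reduce.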
struct BoundsLayout {
std::vector<int64_t> bounds;
std::vector<int64_t> layout;
std::vector<int64_t> reduce_dims;
};
void PrintTo(const BoundsLayout& spec, std::ostream* os) {
*os << absl::StrFormat("R%uToR%u%s_%s_Reduce%s", spec.bounds.size(),
spec.bounds.size() - spec.reduce_dims.size(),
absl::StrJoin(spec.bounds, "x"),
absl::StrJoin(spec.layout, ""),
absl::StrJoin(spec.reduce_dims, ""));
}
XLA_TEST_F(ReduceTest, AddReduce2DScalarToR0) {
XlaBuilder builder(TestName());
auto add = CreateScalarAddComputation(F32, &builder);
auto scalar = ConstantR0<float>(&builder, 42.0);
auto broadcasted = Broadcast(scalar, {500, 500});
Reduce(broadcasted, ConstantR0<float>(&builder, 0.0f), add, {0, 1});
float expected = 42.0f * static_cast<float>(500 * 500);
ComputeAndCompareR0<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, MaxReduce2DScalarToR0) {
XlaBuilder builder(TestName());
auto max = CreateScalarMaxComputation(F32, &builder);
auto scalar = ConstantR0<float>(&builder, 42.0);
auto broadcasted = Broadcast(scalar, {500, 500});
Reduce(broadcasted, ConstantR0<float>(&builder, 0.0f), max, {0, 1});
float expected = 42.0f;
ComputeAndCompareR0<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, MaxReduce2DToR0) {
XlaBuilder builder(TestName());
auto max = CreateScalarMaxComputation(F32, &builder);
Array2D<float> input(300, 250);
input.FillRandom(214.0f);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
Reduce(ConstantLiteral(&builder, input_literal),
ConstantR0<float>(&builder, FLT_MIN), max, {0, 1});
auto input_max = FLT_MIN;
input.Each(
[&](int64_t, int64_t, float* v) { input_max = std::max(input_max, *v); });
ComputeAndCompareR0<float>(&builder, input_max, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, MinReduce2DToR0) {
XlaBuilder builder(TestName());
auto min = CreateScalarMinComputation(F32, &builder);
Array2D<float> input(150, 130);
input.FillRandom(214.0f);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
Reduce(ConstantLiteral(&builder, input_literal),
ConstantR0<float>(&builder, FLT_MAX), min, {0, 1});
auto input_min = FLT_MAX;
input.Each(
[&](int64_t, int64_t, float* v) { input_min = std::min(input_min, *v); });
ComputeAndCompareR0<float>(&builder, input_min, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, UnsignedInt_MinReduce) {
XlaBuilder builder(TestName());
Array2D<uint32_t> input({{1}, {2}});
auto min = CreateScalarMinComputation(U32, &builder);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
auto initial_value =
ConstantR0<uint32_t>(&builder, std::numeric_limits<uint32_t>::max());
Reduce(ConstantLiteral(&builder, input_literal), initial_value, min, {0, 1});
ComputeAndCompareR0<uint32_t>(&builder, 1, {});
}
XLA_TEST_F(ReduceTest, UnsignedInt_MaxReduce) {
XlaBuilder builder(TestName());
Array2D<uint32_t> input({{1}, {2}});
auto max = CreateScalarMaxComputation(U32, &builder);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
auto initial_value =
ConstantR0<uint32_t>(&builder, std::numeric_limits<uint32_t>::min());
Reduce(ConstantLiteral(&builder, input_literal), initial_value, max, {0, 1});
ComputeAndCompareR0<uint32_t>(&builder, 2, {});
}
XLA_TEST_F(ReduceTest, Reduce2DAmong1) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_2d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {1});
std::vector<float> expected = {6.f, 15.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, Reduce2DAmong0and1) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_2d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0, 1});
ComputeAndCompareR0<float>(&builder, 21.0f, {}, ErrorSpec(0.0001, 1e-4));
}
XLA_TEST_F(ReduceTest, Reduce2DAmongY) {
XlaBuilder builder("reduce_among_y");
auto m = ConstantLiteral(&builder, literal_2d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0});
std::vector<float> expected = {5.f, 7.f, 9.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, ReduceR3AmongDims_1_2) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {1, 2});
std::vector<float> expected = {21.f, 21.f, 21.f, 21.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, ReduceR3AmongDims_0_1) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0, 1});
std::vector<float> expected = {20.f, 28.f, 36.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, ReduceR3ToR0) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0, 1, 2});
float expected = 21.0f * 4.0;
ComputeAndCompareR0<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, ReduceR3AmongDim0) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0});
Array2D<float> expected({
{4.f, 8.f, 12.f},
{16.f, 20.f, 24.f},
});
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, ReduceR3AmongDim1) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {1});
Array2D<float> expected({
{5.f, 7.f, 9.f},
{5.f, 7.f, 9.f},
{5.f, 7.f, 9.f},
{5.f, 7.f, 9.f},
});
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, ReduceR3AmongDim2) {
XlaBuilder builder(TestName());
auto m = ConstantLiteral(&builder, literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {2});
Array2D<float> expected({
{6.f, 15.f},
{6.f, 15.f},
{6.f, 15.f},
{6.f, 15.f},
});
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ReduceTest, VectorizedReduce_Add) {
RunVectorizedReduceTest(
static_cast<FuncGeneratorForType>(CreateScalarAddComputation),
[](float a, float b) { return a + b; },
[](int32_t a, int32_t b) {
return static_cast<int32_t>(static_cast<uint32_t>(a) +
static_cast<uint32_t>(b));
},
[](uint32_t a, uint32_t b) { return a + b; }, 0.0, 0, 0);
}
XLA_TEST_F(ReduceTest, VectorizedReduce_Multiply) {
RunVectorizedReduceTest(
static_cast<FuncGeneratorForType>(CreateScalarMultiplyComputation),
[](float a, float b) { return a * b; },
[](int32_t a, int32_t b) {
return static_cast<int32_t>(static_cast<uint32_t>(a) *
static_cast<uint32_t>(b));
},
[](uint32_t a, uint32_t b) { return a * b; }, 1.0, 1, 1);
}
XLA_TEST_F(ReduceTest, VectorizedReduce_Max) {
RunVectorizedReduceTest(
static_cast<FuncGeneratorForType>(CreateScalarMaxComputation),
[](float a, float b) { return std::max(a, b); },
[](int32_t a, int32_t b) { return std::max(a, b); },
[](uint32_t a, uint32_t b) { return std::max(a, b); },
std::numeric_limits<float>::min(), std::numeric_limits<int32_t>::min(),
std::numeric_limits<uint32_t>::min());
}
XLA_TEST_F(ReduceTest, VectorizedReduce_Min) {
RunVectorizedReduceTest(
static_cast<FuncGeneratorForType>(CreateScalarMinComputation),
[](float a, float b) { return std::min(a, b); },
[](int32_t a, int32_t b) { return std::min(a, b); },
[](uint32_t a, uint32_t b) { return std::min(a, b); },
std::numeric_limits<float>::max(), std::numeric_limits<int32_t>::max(),
std::numeric_limits<uint32_t>::max());
}
XLA_TEST_F(ReduceTest, VectorizedReduce_BooleanAnd) {
RunVectorizedReduceTestForType<bool>(
static_cast<FuncGenerator>([](XlaBuilder* builder) {
return CreateScalarAndComputation(PRED, builder);
}),
[](bool a, bool b) { return a && b; }, true);
}
XLA_TEST_F(ReduceTest, VectorizedReduce_BooleanOr) {
RunVectorizedReduceTestForType<bool>(
static_cast<FuncGenerator>([](XlaBuilder* builder) {
return CreateScalarOrComputation(PRED, builder);
}),
[](bool a, bool b) { return a || b; }, false);
}
class ReduceR3ToR2Test : public ReduceTest,
public ::testing::WithParamInterface<BoundsLayout> {};
XLA_TEST_P(ReduceR3ToR2Test, ReduceR3ToR2) {
XlaBuilder builder(TestName());
const auto& bounds = GetParam().bounds;
Array3D<float> input_array(bounds[0], bounds[1], bounds[2]);
input_array.Fill(1.0f);
auto input_literal = LiteralUtil::CreateR3FromArray3D(input_array);
input_literal =
input_literal.Relayout(LayoutUtil::MakeLayout(GetParam().layout));
std::unique_ptr<GlobalData> input_data =
client_->TransferToServer(input_literal).value();
auto input_activations =
Parameter(&builder, 0, input_literal.shape(), "input");
XlaComputation add = CreateScalarAddComputation(F32, &builder);
Reduce(input_activations, ConstantR0<float>(&builder, 0.0f), add,
GetParam().reduce_dims);
auto expected =
ReferenceUtil::Reduce3DTo2D(input_array, 0.0f, GetParam().reduce_dims, |
876 | cpp | tensorflow/tensorflow | subgraph_test_util | tensorflow/lite/kernels/subgraph_test_util.cc | tensorflow/lite/kernels/subgraph_test_util_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SUBGRAPH_TEST_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_SUBGRAPH_TEST_UTIL_H_
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter_test_util.h"
namespace tflite {
namespace subgraph_test_util {
class SubgraphBuilder {
public:
~SubgraphBuilder();
void BuildInplaceOpSubgraph(Subgraph* subgraph);
void BuildBroadcastingSubgraph(Subgraph* subgraph);
void BuildOffsetAddSharing(Subgraph* subgraph);
void BuildInputDynamicUpdateSliceSubgraph(Subgraph& subgraph);
void BuildInplaceDynamicUpdateSliceSubgraph(Subgraph& subgraph,
bool multiple_consumers);
void BuildOutputNotConsumedSubgraph(Subgraph& subgraph);
void BuildFloatIfSubgraph(Subgraph* subgraph, int num_inputs);
void BuildFloatWhileSubgraph(Subgraph* subgraph, int num_inputs);
void BuildXNNPACKSubgraph(Subgraph* subgraph);
void BuildFloatLessCondSubgraph(Subgraph* subgraph, float rhs);
void BuildInputIsOutputSubgraph(Subgraph* subgraph);
void BuildInputIsDifferentOutputSubgraph(Subgraph* subgraph);
void BuildFlexOutputSubgraph(Subgraph* subgraph);
void BuildCounterOnlySubgraph(Subgraph* subgraph);
void BuildBinaryOpSubgraph(Subgraph* subgraph,
TfLiteRegistration* (*Register_OP)(),
TfLiteBuiltinOperator builtin_code, void* params,
TfLiteType input1_type, TfLiteType input2_type,
TfLiteType output_type);
void BuildAddSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildStablehloAddSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildMaximumSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildStablehloMaximumSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildMinimumSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildStablehloMinimumSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildLogicalOrSubgraph(Subgraph* subgraph);
void BuildLogicalAndSubgraph(Subgraph* subgraph);
void BuildOutputIsSecondInputSubgraph(Subgraph* subgraph);
void BuildMulSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildStablehloMulSubgraph(Subgraph* subgraph,
TfLiteType operand_type = kTfLiteInt32);
void BuildPadSubgraph(Subgraph* subgraph);
void BuildIfSubgraph(Subgraph* subgraph);
void BuildCompositeSubgraph(Subgraph* subgraph,
const Subgraph* decomposition);
void BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(Subgraph* subgraph);
void BuildAllInplaceScenariosSubgraph(Subgraph* subgraph);
void BuildLessEqualCondSubgraph(Subgraph* subgraph, int rhs);
void BuildOutputNotConsumedIfSubgraph(Subgraph* subgraph);
void BuildOutputNotConsumedWhileSubgraph(Subgraph* subgraph);
void BuildMultiInputIfSubgraph(Subgraph* subgraph, int num_inputs);
void BuildMultiInputWhileSubgraph(Subgraph* subgraph, int num_inputs);
void BuildMultiInputIfSubgraphWithUnconsumedOutput(Subgraph* subgraph,
int num_inputs);
void BuildMultiInputWhileSubgraphWithUnconsumedOutput(Subgraph* subgraph,
int num_inputs);
void BuildDynamicBodySubgraphWithAliases(Subgraph* subgraph);
void BuildLargeLessEqualCondSubgraph(Subgraph* subgraph, int rhs,
int num_inputs);
void BuildAccumulateLoopBodySubgraph(Subgraph* subgraph);
void BuildDeepBodySubgraph(Subgraph* subgraph);
void BuildLargeBodySubgraph(Subgraph* subgraph);
void BuildDynamicIncreasingSizeSubgraph(Subgraph* subgraph);
void BuildLargePadSubgraph(Subgraph* subgraph, std::vector<int> padding);
void BuildPadLoopBodySubgraph(Subgraph* subgraph,
const std::vector<int>& padding);
void BuildWhileSubgraph(Subgraph* subgraph);
void BuildAssignRandomValueToVariableSubgraph(Subgraph* graph);
void BuildCallOnceAndReadVariableSubgraph(Subgraph* graph);
void BuildCallOnceAndReadVariablePlusOneSubgraph(Subgraph* graph);
void BuildLessEqualCondSubgraphWithDynamicTensor(Subgraph* subgraph, int rhs);
void BuildBodySubgraphWithDynamicTensor(Subgraph* subgraph);
void BuildIfSubgraphWithDynamicTensor(Subgraph* subgraph);
void BuildWhileSubgraphWithDynamicTensor(Subgraph* subgraph);
private:
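// Copies `data` into an owned heap buffer (freed in ~SubgraphBuilder) and
// registers it as a read-only constant tensor with the given shape.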
template <typename T = int32_t>
void CreateConstantTensor(Subgraph* subgraph, int tensor_index,
const std::vector<int>& shape,
const std::vector<T>& data) {
ASSERT_GT(shape.size(), 0);
const int num_elements = absl::c_accumulate(shape, 1, std::multiplies<>());
ASSERT_EQ(data.size(), num_elements);
const size_t size_in_bytes = sizeof(T) * num_elements;
T* buffer = reinterpret_cast<T*>(malloc(size_in_bytes));
memcpy(buffer, data.data(), size_in_bytes);
buffers_.push_back(buffer);
ASSERT_EQ(subgraph->SetTensorParametersReadOnly(
tensor_index, typeToTfLiteType<T>(), "", shape, {},
reinterpret_cast<const char*>(buffer), size_in_bytes),
kTfLiteOk);
}
std::vector<void*> buffers_;
};
class ControlFlowOpTest : public InterpreterTest {
public:
ControlFlowOpTest() : builder_(new SubgraphBuilder) {}
~ControlFlowOpTest() override { builder_.reset(); }
protected:
std::unique_ptr<SubgraphBuilder> builder_;
};
void FillIntTensor(TfLiteTensor* tensor, const std::vector<int32_t>& data);
void FillScalarStringTensor(TfLiteTensor* tensor, const std::string& data);
void CheckScalarStringTensor(const TfLiteTensor* tensor,
const std::string& data);
void CheckStringTensor(const TfLiteTensor* tensor,
const std::vector<int>& shape,
const std::vector<std::string>& data);
void CheckIntTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
const std::vector<int32_t>& data);
void CheckBoolTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
const std::vector<bool>& data);
void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type);
}
}
#endif
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <random>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace random_int {
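// Test-only custom op with no inputs; each invocation writes a single random
// int32 in [1, 32768] to its one output.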
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 0);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
outputSize->data[0] = 1;
return context->ResizeTensor(context, output, outputSize);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
std::random_device rd;
std::uniform_int_distribution<int> dist(1, 32768);
output.data.i32[0] = dist(rd);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RANDOM_INT() {
static TfLiteRegistration r = {nullptr, nullptr, random_int::Prepare,
random_int::Eval};
return &r;
}
}
}
namespace subgraph_test_util {
namespace {
void AddTileNode(Subgraph* subgraph, int input0, int input1, int output) {
int node_index;
auto* tile_reg = ops::builtin::Register_TILE();
tile_reg->builtin_code = kTfLiteBuiltinTile;
subgraph->AddNodeWithParameters({input0, input1}, {output}, {}, nullptr, 0,
nullptr, tile_reg, &node_index);
}
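// Adds a custom op named "Flex" that resizes its output to the input's shape
// and copies the input bytes, standing in for a Flex (select TF ops) node in
// tests.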
void AddFlexNode(Subgraph* subgraph, int input_tensor, int output_tensor) {
auto prepare = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor& input = context->tensors[node->inputs->data[0]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
TfLiteArrayUniquePtr<int> shape =
BuildTfLiteArray(input.dims->size, input.dims->data);
return context->ResizeTensor(context, &output, shape.release());
};
auto eval = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor& input = context->tensors[node->inputs->data[0]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
memcpy(output.data.data, input.data.data, input.bytes);
return kTfLiteOk;
};
TfLiteRegistration reg = {nullptr, nullptr, prepare, eval};
reg.builtin_code = BuiltinOperator_CUSTOM;
reg.custom_name = "Flex";
int node_index;
ASSERT_EQ(
subgraph->AddNodeWithParameters({input_tensor}, {output_tensor}, {},
nullptr, 0, nullptr, ®, &node_index),
kTfLiteOk);
}
void AddReshapeNode(Subgraph* subgraph, int input0, int input1, int output) {
int node_index;
TfLiteReshapeParams* reshape_params = reinterpret_cast<TfLiteReshapeParams*>(
calloc(1, sizeof(TfLiteReshapeParams)));
auto* reshape_reg = ops::builtin::Register_RESHAPE();
reshape_reg->builtin_code = kTfLiteBuiltinReshape;
ASSERT_EQ(subgraph->AddNodeWithParameters({input0, input1}, {output}, {},
nullptr, 0, reshape_params,
reshape_reg, &node_index),
kTfLiteOk);
}
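// Adds a custom "OffsetAdd" op computing output[i] = input0[(i + 1) % n] + input1[i].
// It is registered with kTfLiteInplaceOpInput1Shared, so input1 may share its
// buffer with the output.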
void AddOffsetAddNode(Subgraph* subgraph, int input0, int input1, int output) {
auto prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor& input0 = context->tensors[node->inputs->data[0]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
TfLiteIntArray* shape = TfLiteIntArrayCopy(input0.dims);
return context->ResizeTensor(context, &output, shape);
};
auto invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor& input0 = context->tensors[node->inputs->data[0]];
const TfLiteTensor& input1 = context->tensors[node->inputs->data[1]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
int num_elements = input0.dims->data[0];
const int kOffset = 1;
const int* i0 = static_cast<int*>(input0.data.data);
const int* i1 = static_cast<int*>(input1.data.data);
int* o = static_cast<int*>(output.data.data);
for (int i = 0; i < num_elements; ++i) {
int input0_pos = (i + kOffset) % num_elements;
o[i] = i0[input0_pos] + i1[i];
}
return kTfLiteOk;
};
int node_index;
TfLiteRegistration offset_add_reg = {nullptr, nullptr,
prepare, invoke};
offset_add_reg.builtin_code = BuiltinOperator_CUSTOM;
offset_add_reg.custom_name = "OffsetAdd";
offset_add_reg.inplace_operator = kTfLiteInplaceOpInput1Shared;
subgraph->AddNodeWithParameters({input0, input1}, {output}, {}, nullptr, 0,
nullptr, &offset_add_reg, &node_index);
}
void AddAddNode(Subgraph* subgraph, int input0, int input1, int output) {
int node_index;
TfLiteAddParams* add_params =
reinterpret_cast<TfLiteAddParams*>(calloc(1, sizeof(TfLiteAddParams)));
auto* add_reg = ops::builtin::Register_ADD();
add_reg->builtin_code = kTfLiteBuiltinAdd;
subgraph->AddNodeWithParameters({input0, input1}, {output}, {}, nullptr, 0,
add_params, add_reg, &node_index);
}
void AddDynamicUpdateSliceNode(Subgraph* subgraph, int input0, int input1,
int input2, int output) {
int node_index;
auto* reg = ops::builtin::Register_DYNAMIC_UPDATE_SLICE();
reg->builtin_code = kTfLiteBuiltinDynamicUpdateSlice;
subgraph->AddNodeWithParameters({input0, input1, input2}, {output}, {},
nullptr, 0, nullptr, reg, &node_index);
}
}
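// Registers a read/write tensor with static shape {1}; SetupTensor below
// registers one with no dimensions, leaving the shape to be set later by
// resizing.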
void Setup1DTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
int dim = 1;
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 1,
&dim, {}, false),
kTfLiteOk);
}
void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 0,
nullptr, {}, false),
kTfLiteOk);
}
SubgraphBuilder::~SubgraphBuilder() {
for (auto buffer : buffers_) {
free(buffer);
}
}
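// Builds t0 = input0 + 1; t1 = DynamicUpdateSlice(t0, input1, input2);
// output = t1 + (multiple_consumers ? t0 : 1). The flag controls whether t0
// has a second consumer, which affects whether the DynamicUpdateSlice can run
// in place.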
void SubgraphBuilder::BuildInplaceDynamicUpdateSliceSubgraph(
Subgraph& subgraph, bool multiple_consumers) {
enum {
kInput0,
kInput1,
kInput2,
kConstRhs,
kOutput,
kIntermediate0,
kIntermediate1,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph.AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph.SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph.SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(&subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(&subgraph, kConstRhs, {1}, {1});
AddAddNode(&subgraph, kInput0, kConstRhs, kIntermediate0);
AddDynamicUpdateSliceNode(&subgraph, kIntermediate0, kInput1, kInput2,
kIntermediate1);
AddAddNode(&subgraph, kIntermediate1,
multiple_consumers ? kIntermediate0 : kConstRhs, kOutput);
}
void SubgraphBuilder::BuildInputDynamicUpdateSliceSubgraph(Subgraph& subgraph) {
enum {
kInput0,
kInput1,
kInput2,
kConstRhs,
kOutput,
kIntermediate0,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph.AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph.SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph.SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(&subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(&subgraph, kConstRhs, {1}, {1});
AddDynamicUpdateSliceNode(&subgraph, kInput0, kInput1, kInput2,
kIntermediate0);
AddAddNode(&subgraph, kIntermediate0, kConstRhs, kOutput);
}
void SubgraphBuilder::BuildOutputNotConsumedSubgraph(Subgraph& subgraph) {
enum {
kInput0,
kInput1,
kInput2,
kOutput0,
kOutput1,
kConstRhs,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph.AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph.SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph.SetOutputs({kOutput0, kOutput1, kConstRhs}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
Setup1DTensor(&subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(&subgraph, kConstRhs, {1}, {1});
AddAddNode(&subgraph, kInput0, kInput1, kOutput0);
AddTileNode(&subgraph, kInput0, kInput2, kOutput1);
}
void SubgraphBuilder::BuildXNNPACKSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kIntermediateTensor0,
kIntermediateTensor1,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteFloat32);
}
AddAddNode(subgraph, kInputCounter, kInputValue, kIntermediateTensor0);
AddAddNode(subgraph, kInputCounter, kInputValue, kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor1,
kOutputCounter);
AddAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor1,
kOutputValue);
}
void SubgraphBuilder::BuildInputIsOutputSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputOutput,
kOutputCounter,
kOutputValue0,
kConstRhs,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputOutput}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue0, kInputOutput}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
AddAddNode(subgraph, kInputValue0, kInputOutput, kOutputValue0);
}
void SubgraphBuilder::BuildInputIsDifferentOutputSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kInputValue, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
AddAddNode(subgraph, kInputCounter, kInputValue, kOutputValue);
}
void SubgraphBuilder::BuildFlexOutputSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kConstRhs,
kIntermediateTensor,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
AddAddNode(subgraph, kConstRhs, kInputValue, kIntermediateTensor);
AddFlexNode(subgraph, kIntermediateTensor, kOutputValue);
}
void SubgraphBuilder::BuildCounterOnlySubgraph(Subgraph* subgraph) {
enum { kInputCounter, kOutputCounter, kConstRhs, kTensorCount };
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
}
void SubgraphBuilder::BuildAddSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_ADD, kTfLiteBuiltinAdd,
params, operand_type, operand_type, operand_type);
}
void SubgraphBuilder::BuildStablehloAddSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_STABLEHLO_ADD,
kTfLiteBuiltinStablehloAdd, nullptr, operand_type,
operand_type, operand_type);
}
void SubgraphBuilder::BuildAllInplaceScenariosSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kInputValue2,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kOutputValue2,
kIntermediateTensor0,
kIntermediateTensor1,
kInputOutputTensor,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputValue1,
kInputValue2, kInputOutputTensor}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue0, kOutputValue1,
kOutputValue2, kInputOutputTensor}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kInputOutputTensor, {1}, {1});
AddAddNode(subgraph, kInputCounter, kInputOutputTensor, kOutputCounter);
AddAddNode(subgraph, kInputValue0, kInputOutputTensor, kIntermediateTensor0);
AddAddNode(subgraph, kIntermediateTensor0, kInputOutputTensor, kOutputValue0);
AddTileNode(subgraph, kInputValue1, kInputCounter, kOutputValue1);
AddTileNode(subgraph, kInputValue2, kInputCounter, kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor1, kInputOutputTensor, kOutputValue2);
}
void SubgraphBuilder::BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kInter | #include "tensorflow/lite/kernels/subgraph_test_util.h"
#include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace subgraph_test_util {
namespace {
class SubgraphBuilderTest : public ::testing::Test {
public:
SubgraphBuilderTest()
: interpreter_(new Interpreter), builder_(new SubgraphBuilder) {}
~SubgraphBuilderTest() override {
interpreter_.reset();
builder_.reset();
}
protected:
void TestAccumulateLoopBody(int input1, int input2, int output1,
int output2) {
interpreter_ = std::make_unique<Interpreter>();
builder_->BuildAccumulateLoopBodySubgraph(
&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {input1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {input2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output_tensor1 =
interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output_tensor1, {1}, {output1});
TfLiteTensor* output_tensor2 =
interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output_tensor2, {1}, {output2});
}
std::unique_ptr<Interpreter> interpreter_;
std::unique_ptr<SubgraphBuilder> builder_;
};
TEST_F(SubgraphBuilderTest, TestBuildAddSubgraph) {
builder_->BuildAddSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {6, 9});
}
TEST_F(SubgraphBuilderTest, TestBuildMulSubgraph) {
builder_->BuildMulSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {5, 14});
}
TEST_F(SubgraphBuilderTest, TestBuildPadSubgraph) {
builder_->BuildPadSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {5}, {0, 5, 7, 0, 0});
}
TEST_F(SubgraphBuilderTest, TestBuildDynamicPadSubgraph) {
builder_->BuildPadSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
EXPECT_TRUE(IsDynamicTensor(output));
CheckIntTensor(output, {5}, {0, 5, 7, 0, 0});
}
TEST_F(SubgraphBuilderTest, TestBuildLessEqualCondSubgraph) {
builder_->BuildLessEqualCondSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {5});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {10, 10});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]),
{1, 2, 3, 4, 5});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckBoolTensor(output, {5}, {true, true, true, false, false});
}
TEST_F(SubgraphBuilderTest, TestBuildAccumulateLoopBodySubgraph) {
TestAccumulateLoopBody(1, 1, 2, 3);
TestAccumulateLoopBody(2, 3, 3, 6);
TestAccumulateLoopBody(3, 6, 4, 10);
}
TEST_F(SubgraphBuilderTest, TestBuildPadLoopBodySubgraph) {
builder_->BuildPadLoopBodySubgraph(&interpreter_->primary_subgraph(), {1, 2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {5});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]),
{0, 5, 7, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {8}, {0, 0, 5, 7, 0, 0, 0, 0});
}
}
}
} |
877 | cpp | tensorflow/tensorflow | transpose | third_party/xla/xla/pjrt/transpose.cc | third_party/xla/xla/pjrt/transpose_test.cc | #ifndef XLA_PJRT_TRANSPOSE_H_
#define XLA_PJRT_TRANSPOSE_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "xla/pjrt/lru_cache.h"
namespace xla {
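// A reusable plan for permuting the dimensions of a dense in-memory array.
// Plans are built once with Create() and may be executed many times with
// Execute(); the input may be tiled or arbitrarily strided, the output may be
// tiled, and work can be spread across threads via a caller-supplied
// scheduler. Usage sketch (mirrors the tests in transpose_test.cc):
//
//   TransposePlan::Options options;
//   options.elem_size_in_bytes = sizeof(float);
//   options.dims = dims;
//   options.permutation = permutation;
//   absl::StatusOr<std::unique_ptr<TransposePlan>> plan =
//       TransposePlan::Create(options);
//   (*plan)->Execute(input, output);  // optionally pass a work scheduler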
class TransposePlan {
public:
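// Layout descriptors: Tiling lists tile sizes for the trailing dimensions
// (empty means untiled); Striding gives explicit per-dimension strides in
// bytes and is accepted only for the input layout.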
struct Tiling {
absl::Span<int64_t const> tiling;
};
struct Striding {
absl::Span<int64_t const> strides_in_bytes;
};
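// Optional elementwise transformation fused into the transpose: kF64ToEf57
// expands each f64 element into a pair of f32 values (see ConvertF64ToEf57).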
enum class Transformation {
kNone = 0,
kF64ToEf57 = 1,
};
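// Everything needed to build a plan: element size, input dimensions, the
// permutation to apply, input/output layouts, an optional transformation, and
// the number of threads to plan for.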
struct Options {
size_t elem_size_in_bytes;
absl::Span<int64_t const> dims;
absl::Span<int64_t const> permutation;
std::variant<Tiling, Striding> input_layout = Tiling{};
Tiling output_tiling;
Transformation transformation = Transformation::kNone;
int num_threads = 1;
};
static absl::StatusOr<std::unique_ptr<TransposePlan>> Create(
const Options& options);
TransposePlan();
~TransposePlan();
void Execute(const void* a, void* b,
const std::function<void(std::function<void(void)>)>&
schedule_work = {}) const;
std::string ToString() const;
size_t ElemSizeInBytes() const { return elem_size_in_bytes_; }
int64_t InputNumElems() const;
int64_t OutputNumElems() const;
absl::Span<int64_t const> InputDims() const { return original_a_dims_; }
absl::Span<int64_t const> OutputDims() const { return original_b_dims_; }
absl::Span<int64_t const> InputStrides() const { return original_a_strides_; }
int Parallelism() const { return nodes_.size(); }
struct Node;
protected:
static void RemoveTrivialDimensions(
absl::InlinedVector<int64_t, 4>& a_dims,
absl::InlinedVector<int64_t, 4>& permutation,
absl::InlinedVector<int64_t, 4>& lda,
absl::InlinedVector<int64_t, 4>& lda_tile,
absl::InlinedVector<int64_t, 4>& a_tiling,
absl::InlinedVector<int64_t, 4>& b_tiling);
static void CoalesceDimensions(absl::InlinedVector<int64_t, 4>& a_dims,
absl::InlinedVector<int64_t, 4>& permutation,
absl::InlinedVector<int64_t, 4>& lda,
absl::InlinedVector<int64_t, 4>& lda_tile,
absl::InlinedVector<int64_t, 4>& a_tiling,
absl::InlinedVector<int64_t, 4>& b_tiling);
private:
void Initialize();
void BuildPlanNodes(absl::Span<int64_t const> inverse_permutation,
int thread_id, std::vector<Node>& output_nodes);
std::vector<int> ChooseParallelizationStrategy(
absl::Span<int64_t const> inverse_permutation);
template <typename T, Transformation transformation>
void ExecuteTyped(const char* a, char* b, absl::Span<Node const> nodes) const;
int num_threads_requested_ = 1;
int64_t elem_size_in_bytes_;
int64_t num_elems_;
absl::InlinedVector<int64_t, 4> original_a_dims_;
absl::InlinedVector<int64_t, 4> original_a_strides_;
std::vector<int64_t> original_b_dims_;
absl::InlinedVector<int64_t, 4> a_dims_;
absl::InlinedVector<int64_t, 4> a_strides_;
std::vector<int64_t> b_dims_;
absl::InlinedVector<int64_t, 4> permutation_;
absl::InlinedVector<int64_t, 4> lda_;
absl::InlinedVector<int64_t, 4> lda_tile_;
absl::InlinedVector<int64_t, 4> ldb_;
absl::InlinedVector<int64_t, 4> ldb_tile_;
absl::InlinedVector<int64_t, 4> a_tiling_;
absl::InlinedVector<int64_t, 4> b_tiling_;
bool a_is_tiled_;
bool b_is_tiled_;
struct Loop {
int dim_in_a;
bool tile_interior;
};
std::vector<Loop> loop_order_;
std::vector<int> loop_parallelism_;
absl::InlinedVector<std::vector<Node>, 1> nodes_;
bool inner_kernel_is_memcpy_;
int inner_block_elems_ = 1;
int outer_block_elems_a_ = 4;
int outer_block_elems_b_ = 4;
Transformation transformation_;
int64_t scratch_size_ = 0;
};
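// Hash key for caching plans: mirrors the Options fields that determine how a
// plan is built.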
struct TransposePlanCacheKey {
template <typename H>
friend H AbslHashValue(H h, const TransposePlanCacheKey& key);
size_t elem_size_in_bytes;
absl::InlinedVector<int64_t, 4> dims;
absl::InlinedVector<int64_t, 4> permutation;
bool input_layout_is_tiling;
absl::InlinedVector<int64_t, 4> input_layout;
absl::InlinedVector<int64_t, 4> output_tiling;
TransposePlan::Transformation transformation;
int num_threads;
bool operator==(const TransposePlanCacheKey& other) const;
};
template <typename H>
H AbslHashValue(H h, const TransposePlanCacheKey& key);
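// A small LRU cache of TransposePlans keyed by TransposePlanCacheKey, so
// repeated requests with identical options reuse the same plan.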
class TransposePlanCache {
public:
explicit TransposePlanCache(int capacity);
~TransposePlanCache();
TransposePlanCache(const TransposePlanCache&) = delete;
TransposePlanCache(TransposePlanCache&&) = delete;
TransposePlanCache& operator=(const TransposePlanCache&) = delete;
TransposePlanCache& operator=(TransposePlanCache&&) = delete;
absl::StatusOr<std::shared_ptr<TransposePlan>> GetOrCreate(
const TransposePlan::Options& options);
private:
LRUCache<TransposePlanCacheKey,
absl::StatusOr<std::shared_ptr<TransposePlan>>>::LRUList lru_list_;
LRUCache<TransposePlanCacheKey,
absl::StatusOr<std::shared_ptr<TransposePlan>>>
cache_;
};
}
#endif
#include "xla/pjrt/transpose.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <memory>
#include <numeric>
#include <stack>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "xla/ef57.h"
#include "xla/permutation_util.h"
#include "xla/pjrt/transpose_kernels.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace {
#ifdef __AVX__
static constexpr int kMaxInnerBlockSizeBytes = sizeof(__m256i);
#elif defined(XLA_HAS_VEC128)
static constexpr int kMaxInnerBlockSizeBytes = sizeof(Vec128);
#else
static constexpr int kMaxInnerBlockSizeBytes = 16;
#endif
}
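// One level of the plan's loop nest. start/end/inc bound the loop and lda/ldb
// are the per-iteration byte strides in the input and output. A negative inc
// on the *next* node marks the innermost level, which is handled directly by
// MacroKernel; trailing_tile_next_node_inc points at the node handling a
// trailing partial tile, if any.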
struct TransposePlan::Node {
int64_t start;
int64_t end;
int64_t inc;
int64_t lda;
int64_t ldb;
int trailing_tile_next_node_inc = 0;
bool is_inner_dim_in_a = false;
bool is_inner_dim_in_b = false;
};
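// Transposes a block of outer_bs_a x outer_bs_b tiles of inner_bs x inner_bs
// elements each using the vectorized micro kernel. For the kF64ToEf57
// transformation the f64 source block is first expanded into f32 pairs in
// `scratch`.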
template <typename T, int inner_bs,
TransposePlan::Transformation transformation>
void MacroKernel(const char* __restrict a, int64_t lda, int outer_bs_a,
char* __restrict b, int64_t ldb, int outer_bs_b,
void* __restrict scratch) {
DVLOG(10) << "MacroKernel lda=" << lda << " ldb=" << ldb
<< " outer_bs_a=" << outer_bs_a << " outer_bs_b=" << outer_bs_b
<< " inner_bs=" << inner_bs;
if constexpr (transformation == TransposePlan::Transformation::kF64ToEf57) {
DCHECK_EQ(outer_bs_a * inner_bs % 2, 0);
float* p = reinterpret_cast<float*>(scratch);
if (ABSL_PREDICT_TRUE(lda == sizeof(double) &&
outer_bs_a * inner_bs == 2)) {
absl::Span<const double> input = absl::MakeConstSpan(
reinterpret_cast<const double*>(a), outer_bs_b * inner_bs);
absl::Span<float> output =
absl::MakeSpan(reinterpret_cast<float*>(p), input.size() * 2);
ConvertF64ToEf57(input, output);
} else {
for (int i = 0; i < outer_bs_b * inner_bs; ++i) {
absl::Span<const double> input =
absl::MakeConstSpan(reinterpret_cast<const double*>(a + lda * i),
outer_bs_a * inner_bs / 2);
absl::Span<float> output = absl::MakeSpan(
reinterpret_cast<float*>(p + outer_bs_a * inner_bs * i),
input.size() * 2);
ConvertF64ToEf57(input, output);
}
}
a = reinterpret_cast<const char*>(scratch);
lda = outer_bs_a * inner_bs * sizeof(float);
}
for (int i = 0; i < outer_bs_a; ++i) {
for (int j = 0; j < outer_bs_b; ++j) {
TransposeMicroKernel<T, inner_bs>::Apply(
a + inner_bs * j * lda + i * inner_bs * sizeof(T), lda,
b + inner_bs * i * ldb + j * inner_bs * sizeof(T), ldb);
}
}
}
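// Recursively walks the plan's loop nest, invoking MacroKernel at the leaves
// and falling back to smaller block sizes for trailing partial blocks along
// the inner dimensions.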
template <typename T, int inner_bs,
TransposePlan::Transformation transformation>
void Transpose(const char* __restrict a, int outer_bs_a, char* __restrict b,
int outer_bs_b, TransposePlan::Node const* __restrict node,
void* __restrict scratch) {
tsl::profiler::TraceMe traceme([&]() {
return tsl::profiler::TraceMeEncode("Transpose",
{{"inner_bs", inner_bs},
{"outer_bs_a", outer_bs_a},
{"outer_bs_b", outer_bs_b}});
});
DVLOG(10) << "Transpose " << outer_bs_a << " " << outer_bs_b;
DCHECK_GT(outer_bs_a, 0);
DCHECK_GT(outer_bs_b, 0);
const int64_t start = node->start;
const int64_t end = node->end;
const int64_t stop = node->end - (node->inc - 1);
const int64_t lda = node->lda;
const int64_t ldb = node->ldb;
const int64_t inc = node->inc;
TransposePlan::Node const* next_node = node + 1;
if (next_node->inc < 0) {
const int64_t lda_block = next_node->lda;
const int64_t ldb_block = next_node->ldb;
int64_t i;
for (i = start; i < stop; i += inc) {
MacroKernel<T, inner_bs, transformation>(a + i * lda, lda_block,
outer_bs_a, b + i * ldb,
ldb_block, outer_bs_b, scratch);
}
if (i < end) {
DCHECK_EQ(node->trailing_tile_next_node_inc, 0);
DCHECK(node->is_inner_dim_in_a || node->is_inner_dim_in_b);
if (node->is_inner_dim_in_a) {
outer_bs_a = (end - i) / inner_bs;
if (outer_bs_a > 0) {
MacroKernel<T, inner_bs, transformation>(
a + i * lda, lda_block, outer_bs_a, b + i * ldb, ldb_block,
outer_bs_b, scratch);
i += outer_bs_a * inner_bs;
}
if (i < end) {
MacroKernel<T, 1, transformation>(a + i * lda, lda_block, end - i,
b + i * ldb, ldb_block,
outer_bs_b * inner_bs, scratch);
}
} else if (node->is_inner_dim_in_b) {
outer_bs_b = (end - i) / inner_bs;
if (outer_bs_b > 0) {
MacroKernel<T, inner_bs, transformation>(
a + i * lda, lda_block, outer_bs_a, b + i * ldb, ldb_block,
outer_bs_b, scratch);
i += outer_bs_b * inner_bs;
}
if (i < end) {
MacroKernel<T, 1, transformation>(a + i * lda, lda_block,
outer_bs_a * inner_bs, b + i * ldb,
ldb_block, end - i, scratch);
}
}
} else if (node->trailing_tile_next_node_inc) {
DCHECK_EQ(inc, 1);
TransposePlan::Node const* trailing_next_node =
node + node->trailing_tile_next_node_inc;
if (trailing_next_node->inc < 0) {
const int64_t lda_block = trailing_next_node->lda;
const int64_t ldb_block = trailing_next_node->ldb;
MacroKernel<T, inner_bs, transformation>(
a + i * lda, lda_block, outer_bs_a, b + i * ldb, ldb_block,
outer_bs_b, scratch);
} else {
Transpose<T, inner_bs, transformation>(a + i * lda, outer_bs_a,
b + i * ldb, outer_bs_b,
trailing_next_node, scratch);
}
}
} else {
int64_t i;
for (i = start; i < stop; i += inc) {
Transpose<T, inner_bs, transformation>(
a + i * lda, outer_bs_a, b + i * ldb, outer_bs_b, next_node, scratch);
}
if (i < end) {
DCHECK_EQ(node->trailing_tile_next_node_inc, 0);
DCHECK(node->is_inner_dim_in_a || node->is_inner_dim_in_b);
if (node->is_inner_dim_in_a) {
outer_bs_a = (end - i) / inner_bs;
if (outer_bs_a > 0) {
Transpose<T, inner_bs, transformation>(a + i * lda, outer_bs_a,
b + i * ldb, outer_bs_b,
next_node, scratch);
i += outer_bs_a * inner_bs;
}
if (i < end) {
Transpose<T, 1, transformation>(a + i * lda, end - i, b + i * ldb,
outer_bs_b * inner_bs, next_node,
scratch);
}
} else if (node->is_inner_dim_in_b) {
outer_bs_b = (end - i) / inner_bs;
if (outer_bs_b > 0) {
Transpose<T, inner_bs, transformation>(a + i * lda, outer_bs_a,
b + i * ldb, outer_bs_b,
next_node, scratch);
i += outer_bs_b * inner_bs;
}
if (i < end) {
Transpose<T, 1, transformation>(a + i * lda, outer_bs_a * inner_bs,
b + i * ldb, end - i, next_node,
scratch);
}
}
} else if (node->trailing_tile_next_node_inc) {
TransposePlan::Node const* trailing_next_node =
node + node->trailing_tile_next_node_inc;
if (trailing_next_node->inc < 0) {
const int64_t lda_block = trailing_next_node->lda;
const int64_t ldb_block = trailing_next_node->ldb;
MacroKernel<T, inner_bs, transformation>(
a + i * lda, lda_block, outer_bs_a, b + i * ldb, ldb_block,
outer_bs_b, scratch);
} else {
Transpose<T, inner_bs, transformation>(a + i * lda, outer_bs_a,
b + i * ldb, outer_bs_b,
trailing_next_node, scratch);
}
}
}
}
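// Fast path used when the inner kernel is a plain memcpy: the innermost
// dimension is contiguous in both input and output, so each leaf copies a
// whole run of elements with std::memcpy.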
template <typename T>
void TransposeConstStride1(const char* __restrict a, char* __restrict b,
TransposePlan::Node const* __restrict node) {
a += node[0].start * node[0].lda;
b += node[0].start * node[0].ldb;
if (node[0].is_inner_dim_in_a) {
int64_t num_bytes = (node->end - node->start) * sizeof(T);
std::memcpy(b, a, num_bytes);
} else if (node[1].is_inner_dim_in_a) {
int64_t offset_a = node[1].start * node[1].lda;
int64_t offset_b = node[1].start * node[1].ldb;
int64_t num_bytes = (node[1].end - node[1].start) * sizeof(T);
a += offset_a;
b += offset_b;
for (int64_t i = node[0].start; i < node[0].end; ++i) {
std::memcpy(b, a, num_bytes);
a += node[0].lda;
b += node[0].ldb;
}
if (node[0].trailing_tile_next_node_inc) {
TransposeConstStride1<T>(a - offset_a, b - offset_b,
node + node[0].trailing_tile_next_node_inc);
}
} else if (node[2].is_inner_dim_in_a) {
int64_t num_bytes = (node[2].end - node[2].start) * sizeof(T);
int64_t offset_a1 = node[1].start * node[1].lda;
int64_t offset_b1 = node[1].start * node[1].ldb;
int64_t offset_a2 = node[2].start * node[2].lda;
int64_t offset_b2 = node[2].start * node[2].ldb;
a += offset_a1 + offset_a2;
b += offset_b1 + offset_b2;
for (int64_t i = node[0].start; i < node[0].end; ++i) {
const char* a1 = a;
char* b1 = b;
for (int64_t j = node[1].start; j < node[1].end; ++j) {
std::memcpy(b1, a1, num_bytes);
a1 += node[1].lda;
b1 += node[1].ldb;
}
if (node[1].trailing_tile_next_node_inc) {
TransposeConstStride1<T>(
a1 - offset_a2, b1 - offset_b2,
&node[1] + node[1].trailing_tile_next_node_inc);
}
a += node[0].lda;
b += node[0].ldb;
}
if (node[0].trailing_tile_next_node_inc) {
TransposeConstStride1<T>(a - offset_a1 - offset_a2,
b - offset_b1 - offset_b2,
node + node[0].trailing_tile_next_node_inc);
}
} else {
for (int64_t i = node[0].start; i < node[0].end; ++i) {
const char* a1 = a + node[1].start * node[1].lda;
char* b1 = b + node[1].start * node[1].ldb;
for (int64_t j = node[1].start; j < node[1].end; ++j) {
TransposeConstStride1<T>(a1, b1, node + 2);
a1 += node[1].lda;
b1 += node[1].ldb;
}
if (node[1].trailing_tile_next_node_inc) {
TransposeConstStride1<T>(
a1, b1, &node[1] + node[1].trailing_tile_next_node_inc);
}
a += node[0].lda;
b += node[0].ldb;
}
if (node[0].trailing_tile_next_node_inc) {
TransposeConstStride1<T>(a, b,
node + node[0].trailing_tile_next_node_inc);
}
}
}
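// Executes one thread's share of the plan, dispatching on whether the inner
// kernel is a memcpy and on the compile-time inner block size.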
template <typename T, TransposePlan::Transformation transformation>
void TransposePlan::ExecuteTyped(const char* a, char* b,
absl::Span<Node const> nodes) const {
tsl::profiler::TraceMe traceme([&]() {
return tsl::profiler::TraceMeEncode(
"TransposePlan::ExecuteTyped",
{{"inner_kernel_is_memcpy", inner_kernel_is_memcpy_},
{"inner_block_elems", inner_block_elems_}});
});
if (inner_kernel_is_memcpy_) {
DCHECK(transformation_ == Transformation::kNone);
TransposeConstStride1<T>(a, b, nodes.data());
} else {
std::unique_ptr<char[]> scratch;
if (scratch_size_ > 0) {
scratch.reset(new char[scratch_size_]);
}
DCHECK_LE(sizeof(T) * inner_block_elems_, kMaxInnerBlockSizeBytes);
auto handle_inner_block_elems = [&](auto const_inner_block_elems) {
if (nodes.size() > 1) {
Transpose<T, const_inner_block_elems, transformation>(
a, outer_block_elems_a_, b, outer_block_elems_b_, nodes.data(),
scratch.get());
} else {
MacroKernel<T, const_inner_block_elems, transformation>(
a, nodes.back().lda, outer_block_elems_a_, b, nodes.back().ldb,
outer_block_elems_b_, scratch.get());
}
};
switch (inner_block_elems_) {
case 1:
handle_inner_block_elems(std::integral_constant<int, 1>{});
break;
case 2:
handle_inner_block_elems(std::integral_constant<int, 2>{});
break;
case 4:
handle_inner_block_elems(std::integral_constant<int, 4>{});
break;
case 8:
handle_inner_block_elems(std::integral_constant<int, 8>{});
break;
case 16:
handle_inner_block_elems(std::integral_constant<int, 16>{});
break;
default:
LOG(FATAL) << "Invalid inner_block_elems_ " << inner_block_elems_;
}
}
}
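// 16-byte POD type used to move 128-bit elements with ordinary loads/stores.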
struct uint128 {
uint64_t lo;
uint64_t hi;
};
static_assert(sizeof(uint128) == 16, "uint128 should be 16 bytes in size");
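// Public entry point. Dispatches on element size and transformation; when a
// scheduler is supplied and the plan has multiple node lists, each extra list
// is handed to the scheduler while the calling thread runs the first one.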
void TransposePlan::Execute(
const void* a, void* b,
const std::function<void(std::function<void(void)>)>& schedule_work) const {
if (num_elems_ == 0) {
return;
}
tsl::profiler::TraceMe traceme("Transpose::Execute", 2);
const char* ac = static_cast<const char*>(a);
char* bc = static_cast<char*>(b);
auto execute_by_type = [&](absl::Span<Node const> nodes) {
switch (elem_size_in_bytes_) {
case 1:
ExecuteTyped<uint8_t, Transformation::kNone>(ac, bc, nodes);
break;
case 2:
ExecuteTyped<uint16_t, Transformation::kNone>(ac, bc, nodes);
break;
case 4:
if (transformation_ == Transformation::kNone) {
ExecuteTyped<uint32_t, Transformation::kNone>(ac, bc, nodes);
} else {
DCHECK(transformation_ == Transformation::kF64ToEf57);
ExecuteTyped<uint32_t, Transformation::kF64ToEf57>(ac, bc, nodes);
}
break;
case 8:
ExecuteTyped<uint64_t, Transformation::kNone>(ac, bc, nodes);
break;
case 16:
ExecuteTyped<uint128, Transformation::kNone>(ac, bc, nodes);
break;
default:
LOG(FATAL) << "Unimplemented element size " << elem_size_in_bytes_;
}
};
if (!schedule_work || nodes_.size() <= 1) {
for (const auto& nodes : nodes_) {
execute_by_type(nodes);
}
} else {
absl::BlockingCounter counter(nodes_.size() - 1);
for (size_t i = 1; i < nodes_.size(); ++i) {
absl::Span<Node const> nodes = nodes_[i];
schedule_work([&, nodes]() {
execute_by_type(nodes);
counter.DecrementCount();
});
}
execute_by_type(nodes_[0]);
counter.Wait();
}
}
TransposePlan::TransposePlan() = default;
TransposePlan::~TransposePlan() = default;
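// Computes row-major byte strides for the tile-grid (outer) and within-tile
// (inner) dimensions of a tiled layout.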
static void ComputeStrides(
int64_t elem_size_in_bytes, absl::Span<const int64_t> dims,
absl::Span<const int64_t> tiling,
absl::InlinedVector<int64_t, 4>& outer_tile_strides,
absl::InlinedVector<int64_t, 4>& inner_tile_strides) {
inner_tile_strides.resize(dims.size());
int64_t acc = elem_size_in_bytes;
for (int d = static_cast<int>(dims.size()) - 1; d >= 0; --d) {
inner_tile_strides[d] = acc;
acc *= tiling[d];
}
outer_tile_strides.resize(dims.size());
for (int d = static_cast<int>(dims.size()) - 1; d >= 0; --d) {
outer_tile_str | #include "xla/pjrt/transpose.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/numeric/int128.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/array.h"
#include "xla/permutation_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
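// Exposes TransposePlan's protected helpers for direct testing.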
class TestTransposePlan : public TransposePlan {
public:
using TransposePlan::CoalesceDimensions;
using TransposePlan::RemoveTrivialDimensions;
};
TEST(TransposeTest, RemoveTrivialDimensions) {
absl::InlinedVector<int64_t, 4> dims = {4, 5, 1, 3, 1, 2, 5};
absl::InlinedVector<int64_t, 4> perm = {0, 2, 1, 4, 3, 6, 5};
absl::InlinedVector<int64_t, 4> lda = {2, 5, 7, 100, 3, 0, 1};
absl::InlinedVector<int64_t, 4> lda_tile = {1, 1, 1, 1, 1, 1, 1};
absl::InlinedVector<int64_t, 4> input_tiling = {1, 1, 1, 1, 1, 1, 1};
absl::InlinedVector<int64_t, 4> output_tiling = {1, 1, 1, 1, 1, 1, 1};
TestTransposePlan::RemoveTrivialDimensions(dims, perm, lda, lda_tile,
input_tiling, output_tiling);
EXPECT_THAT(dims, testing::ElementsAre(4, 5, 3, 2, 5));
EXPECT_THAT(perm, testing::ElementsAre(0, 1, 2, 4, 3));
dims = {4, 5, 3, 2, 5};
perm = {4, 3, 2, 1, 0};
lda = {2, 5, 100, 0, 1};
lda_tile = {1, 1, 1, 1, 1};
input_tiling = {1, 1, 1, 1, 1};
output_tiling = {1, 1, 1, 1, 1};
TestTransposePlan::RemoveTrivialDimensions(dims, perm, lda, lda_tile,
input_tiling, output_tiling);
EXPECT_THAT(dims, testing::ElementsAre(4, 5, 3, 2, 5));
EXPECT_THAT(perm, testing::ElementsAre(4, 3, 2, 1, 0));
}
TEST(TransposeTest, CoalesceDimensions) {
absl::InlinedVector<int64_t, 4> dims = {4, 5, 1, 3, 1, 2, 5};
absl::InlinedVector<int64_t, 4> perm = {0, 2, 1, 4, 3, 6, 5};
absl::InlinedVector<int64_t, 4> lda = {50, 30, 30, 10, 10, 5, 1};
absl::InlinedVector<int64_t, 4> lda_tile = {1, 1, 1, 1, 1, 1, 1};
absl::InlinedVector<int64_t, 4> input_tiling = {1, 1, 1, 1, 1, 1, 1};
absl::InlinedVector<int64_t, 4> output_tiling = {1, 1, 1, 1, 1, 1, 1};
TestTransposePlan::CoalesceDimensions(dims, perm, lda, lda_tile, input_tiling,
output_tiling);
EXPECT_THAT(dims, testing::ElementsAre(4, 5, 1, 3, 1, 2, 5));
EXPECT_THAT(perm, testing::ElementsAre(0, 2, 1, 4, 3, 6, 5));
EXPECT_THAT(lda, testing::ElementsAre(50, 30, 30, 10, 10, 5, 1));
dims = {4, 5, 3, 2, 5};
perm = {4, 1, 2, 3, 0};
lda = {150, 30, 10, 5, 1};
lda_tile = {1, 1, 1, 1, 1};
input_tiling = {1, 1, 1, 1, 1};
output_tiling = {1, 1, 1, 1, 1};
TestTransposePlan::CoalesceDimensions(dims, perm, lda, lda_tile, input_tiling,
output_tiling);
EXPECT_THAT(dims, testing::ElementsAre(4, 30, 5));
EXPECT_THAT(perm, testing::ElementsAre(2, 1, 0));
EXPECT_THAT(lda, testing::ElementsAre(150, 5, 1));
dims = {4, 5, 3, 2, 5};
perm = {0, 1, 2, 3, 4};
lda = {150, 30, 10, 5, 1};
lda_tile = {1, 1, 1, 1, 1};
input_tiling = {1, 1, 1, 1, 1};
output_tiling = {1, 1, 1, 1, 1};
TestTransposePlan::CoalesceDimensions(dims, perm, lda, lda_tile, input_tiling,
output_tiling);
EXPECT_THAT(dims, testing::ElementsAre(600));
EXPECT_THAT(perm, testing::ElementsAre(0));
EXPECT_THAT(lda, testing::ElementsAre(1));
dims = {4, 5, 3, 2, 5};
perm = {4, 1, 2, 3, 0};
lda = {150, 30, 10, 7, 1};
lda_tile = {1, 1, 1, 1, 1};
input_tiling = {1, 1, 1, 1, 1};
output_tiling = {1, 1, 1, 1, 1};
TestTransposePlan::CoalesceDimensions(dims, perm, lda, lda_tile, input_tiling,
output_tiling);
EXPECT_THAT(dims, testing::ElementsAre(4, 15, 2, 5));
EXPECT_THAT(perm, testing::ElementsAre(3, 1, 2, 0));
EXPECT_THAT(lda, testing::ElementsAre(150, 10, 7, 1));
}
TEST(TransposeTest, InvalidTilings) {
TransposePlan::Options options;
std::vector<int64_t> dims = {3, 4, 5};
std::vector<int64_t> perm = {0, 1, 2};
options.elem_size_in_bytes = sizeof(float);
options.dims = dims;
options.permutation = perm;
std::vector<int64_t> input_tiling = {8, 128};
std::vector<int64_t> output_tiling = {4};
options.input_layout = TransposePlan::Tiling{input_tiling};
options.output_tiling = TransposePlan::Tiling{output_tiling};
auto plan = TransposePlan::Create(options);
EXPECT_EQ(plan.status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_THAT(
plan.status().message(),
testing::HasSubstr(
"Only one of the input and output may have a non-trivial tiling"));
}
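// Number of elements in the tiled representation of `shape`: trailing
// dimensions are rounded up to a multiple of their tile size.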
int64_t SizeOfTiledArray(absl::Span<int64_t const> shape,
absl::Span<int64_t const> tiling) {
int64_t size = 1;
for (size_t i = 0; i < shape.size(); ++i) {
if (i >= shape.size() - tiling.size()) {
size *= RoundUpTo(shape[i], tiling[i - (shape.size() - tiling.size())]);
} else {
size *= shape[i];
}
}
return size;
}
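// Advances a multi-dimensional index in row-major order; returns false once
// every index has wrapped around.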
bool BumpIndices(absl::Span<int64_t const> shape, absl::Span<int64_t> indices) {
CHECK_EQ(shape.size(), indices.size());
for (int dimno = indices.size() - 1; dimno >= 0; --dimno) {
if (indices[dimno] + 1 < shape[dimno]) {
indices[dimno]++;
std::fill(indices.begin() + dimno + 1, indices.end(), 0);
return true;
}
}
return false;
}
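// Maps a logical index to its linear offset in the tiled layout: tile-interior
// dimensions vary fastest, then the tile grid, then any untiled leading
// dimensions.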
int64_t IndexToLinearIndex(absl::Span<int64_t const> shape,
absl::Span<int64_t const> tiling,
absl::Span<int64_t const> indices) {
CHECK_LE(tiling.size(), shape.size());
CHECK_EQ(shape.size(), indices.size());
int64_t stride = 1;
int64_t offset = 0;
auto index_it = indices.rbegin();
auto tile_it = tiling.rbegin();
for (; tile_it != tiling.rend(); ++index_it, ++tile_it) {
offset += (*index_it % *tile_it) * stride;
stride *= *tile_it;
}
index_it = indices.rbegin();
tile_it = tiling.rbegin();
auto shape_it = shape.rbegin();
for (; tile_it != tiling.rend(); ++index_it, ++shape_it, ++tile_it) {
offset += (*index_it / *tile_it) * stride;
stride *= CeilOfRatio(*shape_it, *tile_it);
}
for (; shape_it != shape.rend(); ++index_it, ++shape_it) {
offset += *index_it * stride;
stride *= *shape_it;
}
return offset;
}
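// Copies an untiled Array into the tiled linear layout; padding cells keep the
// initial value of -1.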
template <typename T>
std::vector<T> TileArray(const Array<T>& in, absl::Span<int64_t const> tiling) {
std::vector<T> out(SizeOfTiledArray(in.dimensions(), tiling), -1);
if (in.num_elements() == 0) {
return out;
}
std::vector<int64_t> indices(in.num_dimensions(), 0);
do {
int64_t i = IndexToLinearIndex(in.dimensions(), tiling, indices);
out.at(i) = in(indices);
} while (BumpIndices(in.dimensions(), absl::MakeSpan(indices)));
return out;
}
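// Reference transpose implemented with Eigen's shuffle, used to compute the
// expected outputs.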
template <typename T, int NDIMS>
void TransposeUsingEigenNd(const T* input, T* output,
absl::Span<int64_t const> dims,
absl::Span<int64_t const> dims_out,
absl::Span<int64_t const> permutation) {
typedef Eigen::TensorMap<
Eigen::Tensor<T, NDIMS, Eigen::RowMajor, Eigen::DenseIndex>,
Eigen::Aligned>
Tensor;
typedef Eigen::TensorMap<
Eigen::Tensor<const T, NDIMS, Eigen::RowMajor, Eigen::DenseIndex>,
Eigen::Aligned>
ConstTensor;
Eigen::array<int, NDIMS> p;
Eigen::DSizes<Eigen::DenseIndex, NDIMS> dims_eigen;
Eigen::DSizes<Eigen::DenseIndex, NDIMS> dims_out_eigen;
for (int i = 0; i < NDIMS; ++i) {
p[i] = permutation[i];
dims_eigen[i] = dims[i];
dims_out_eigen[i] = dims_out[i];
}
auto x = ConstTensor(input, dims_eigen);
auto y = Tensor(output, dims_out_eigen);
y = x.shuffle(p);
}
template <typename T>
void TransposeUsingEigen(const T* input, T* output,
absl::Span<int64_t const> dims,
absl::Span<int64_t const> dims_out,
absl::Span<int64_t const> permutation) {
switch (dims.size()) {
case 0:
return;
case 1:
TransposeUsingEigenNd<T, 1>(input, output, dims, dims_out, permutation);
return;
case 2:
TransposeUsingEigenNd<T, 2>(input, output, dims, dims_out, permutation);
return;
case 3:
TransposeUsingEigenNd<T, 3>(input, output, dims, dims_out, permutation);
return;
case 4:
TransposeUsingEigenNd<T, 4>(input, output, dims, dims_out, permutation);
return;
default:
LOG(FATAL) << "Unimplemented Eigen transpose rank";
}
}
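// A single parameterized case: input dimensions, permutation, and optional
// input/output tilings.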
struct TransposeTestCase {
TransposeTestCase(std::vector<int64_t> dims, std::vector<int64_t> permutation,
std::vector<int64_t> input_tiling = {},
std::vector<int64_t> output_tiling = {})
: dims(std::move(dims)),
permutation(std::move(permutation)),
input_tiling(std::move(input_tiling)),
output_tiling(std::move(output_tiling)) {}
std::vector<int64_t> dims;
std::vector<int64_t> permutation;
std::vector<int64_t> input_tiling;
std::vector<int64_t> output_tiling;
std::string ToString() const {
return absl::StrFormat(
"[%s],perm=[%s],tiling=[%s]/[%s]", absl::StrJoin(dims, ","),
absl::StrJoin(permutation, ","), absl::StrJoin(input_tiling, ","),
absl::StrJoin(output_tiling, ","));
}
};
std::ostream& operator<<(std::ostream& os, const TransposeTestCase& test) {
os << test.ToString();
return os;
}
std::vector<TransposeTestCase> GetTransposeTestCases() {
std::vector<TransposeTestCase> cases = {
TransposeTestCase({1}, {0}),
TransposeTestCase({4}, {0}),
TransposeTestCase({27}, {0}),
TransposeTestCase({1, 1}, {0, 1}),
TransposeTestCase({1, 1}, {1, 0}),
TransposeTestCase({2, 2}, {0, 1}),
TransposeTestCase({4, 4}, {1, 0}),
TransposeTestCase({4, 4}, {0, 1}),
TransposeTestCase({4, 4}, {1, 0}),
TransposeTestCase({8, 8}, {0, 1}),
TransposeTestCase({8, 8}, {1, 0}),
TransposeTestCase({16, 16}, {0, 1}),
TransposeTestCase({16, 16}, {1, 0}),
TransposeTestCase({11, 15}, {0, 1}),
TransposeTestCase({11, 15}, {1, 0}),
TransposeTestCase({11, 15, 13}, {0, 1, 2}),
TransposeTestCase({11, 15, 13}, {0, 2, 1}),
TransposeTestCase({11, 15, 13}, {1, 2, 0}),
TransposeTestCase({11, 15, 13}, {1, 0, 2}),
TransposeTestCase({11, 15, 13}, {2, 0, 1}),
TransposeTestCase({64, 64, 64}, {2, 1, 0}),
TransposeTestCase({256, 256, 256}, {2, 1, 0}),
TransposeTestCase({4, 8, 16, 32}, {3, 1, 0, 2}),
TransposeTestCase({64, 224, 224, 3},
{3, 1, 2, 0}),
TransposeTestCase({3}, {0},
{3}),
TransposeTestCase({3}, {0},
{},
{3}),
TransposeTestCase({2, 4, 6}, {0, 1, 2},
{},
{2, 3}),
TransposeTestCase({4}, {0},
{3}),
TransposeTestCase({5}, {0},
{},
{3}),
TransposeTestCase({8}, {0},
{},
{3}),
TransposeTestCase({8}, {0},
{3},
{}),
TransposeTestCase({29}, {0},
{},
{3}),
TransposeTestCase({12, 7}, {1, 0},
{4}),
TransposeTestCase({12, 7}, {1, 0},
{}, {5}),
TransposeTestCase({12, 7}, {1, 0},
{2, 4}),
TransposeTestCase({12, 7}, {1, 0},
{}, {5, 2}),
TransposeTestCase({128, 224, 224, 3},
{3, 1, 2, 0},
{},
{8, 128}),
};
return cases;
}
class TransposeTest : public ::testing::TestWithParam<TransposeTestCase> {
protected:
template <typename T>
void TestTranspose(int parallelism) {
const TransposeTestCase test = GetParam();
tsl::thread::ThreadPool threadpool(tsl::Env::Default(), "Transpose",
parallelism);
std::vector<int64_t> output_dims = Permute(test.dims, test.permutation);
TransposePlan::Options options;
options.elem_size_in_bytes = sizeof(T);
options.dims = test.dims;
options.permutation = test.permutation;
options.input_layout = TransposePlan::Tiling{test.input_tiling};
options.output_tiling = TransposePlan::Tiling{test.output_tiling};
options.transformation = TransposePlan::Transformation::kNone;
options.num_threads = parallelism;
TF_ASSERT_OK_AND_ASSIGN(auto plan, TransposePlan::Create(options));
VLOG(1) << plan->ToString();
xla::Array<T> untiled_input(test.dims);
untiled_input.FillIota(0);
xla::Array<T> expected_untiled_output(output_dims);
TransposeUsingEigen(untiled_input.data(), expected_untiled_output.data(),
test.dims, output_dims, test.permutation);
auto tiled_input = TileArray(untiled_input, test.input_tiling);
auto expected_tiled_output =
TileArray(expected_untiled_output, test.output_tiling);
std::vector<T> output(
SizeOfTiledArray(plan->OutputDims(), test.output_tiling), -1);
plan->Execute(
tiled_input.data(), output.data(),
[&](std::function<void()> fn) { threadpool.Schedule(std::move(fn)); });
EXPECT_EQ(expected_tiled_output, output);
}
};
TEST_P(TransposeTest, TransposeInt8) { TestTranspose<int8_t>(1); }
TEST_P(TransposeTest, TransposeInt16) { TestTranspose<int16_t>(1); }
TEST_P(TransposeTest, TransposeInt32) { TestTranspose<int32_t>(1); }
TEST_P(TransposeTest, TransposeInt64) { TestTranspose<int64_t>(1); }
TEST_P(TransposeTest, TransposeInt128) { TestTranspose<absl::int128>(1); }
TEST_P(TransposeTest, ParallelTransposeInt8) { TestTranspose<int8_t>(16); }
TEST_P(TransposeTest, ParallelTransposeInt32) { TestTranspose<int32_t>(16); }
INSTANTIATE_TEST_SUITE_P(TransposeTestInstance, TransposeTest,
::testing::ValuesIn(GetTransposeTestCases()));
TEST(TransposeTest, NegativeStrides1D) {
int64_t n = 10;
std::vector<int32_t> input(n);
std::vector<int32_t> output(n);
std::vector<int32_t> expected(n);
absl::c_iota(input, int32_t{7});
std::iota(expected.rbegin(), expected.rend(), 7);
std::vector<int64_t> dims = {n};
std::vector<int64_t> permutation = {0};
TransposePlan::Options options;
options.elem_size_in_bytes = sizeof(int32_t);
options.dims = dims;
options.permutation = permutation;
std::vector<int64_t> strides = {-int64_t{sizeof(int32_t)}};
options.input_layout = TransposePlan::Striding{strides};
TF_ASSERT_OK_AND_ASSIGN(auto plan, TransposePlan::Create(options));
plan->Execute(input.data() + (n - 1), output.data());
EXPECT_EQ(expected, output);
}
TEST(TransposeTest, NegativeStrides2D) {
xla::Array<int16_t> input = {
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10, 11, 12},
};
xla::Array<int16_t> expected = {
{4, 8, 12},
{3, 7, 11},
{2, 6, 10},
{1, 5, 9},
};
xla::Array<int16_t> output({4, 3});
std::vector<int64_t> dims = {3, 4};
std::vector<int64_t> permutation = {1, 0};
TransposePlan::Options options;
options.elem_size_in_bytes = sizeof(int16_t);
options.dims = dims;
options.permutation = permutation;
std::vector<int64_t> strides = {4 * sizeof(int16_t),
-int64_t{sizeof(int16_t)}};
options.input_layout = TransposePlan::Striding{strides};
TF_ASSERT_OK_AND_ASSIGN(auto plan, TransposePlan::Create(options));
plan->Execute(input.data() + 3, output.data());
EXPECT_EQ(expected, output);
}
static std::vector<TransposeTestCase> BenchmarkCases() {
return std::vector<TransposeTestCase>{
TransposeTestCase({256, 256},
{1, 0}),
TransposeTestCase({512, 512},
{1, 0}),
TransposeTestCase({1024, 1024},
{1, 0}),
TransposeTestCase({256, 256, 256},
{0, 2, 1}),
TransposeTestCase({256, 256, 256},
{1, 0, 2}),
TransposeTestCase({256, 256, 256},
{1, 2, 0}),
TransposeTestCase({256, 256, 256},
{2, 0, 1}),
TransposeTestCase({256, 256, 256},
{2, 1, 0}),
TransposeTestCase({512, 512, 512},
{0, 2, 1}),
TransposeTestCase({512, 512, 512},
{1, 0, 2}),
TransposeTestCase({512, 512, 512},
{1, 2, 0}),
TransposeTestCase({512, 512, 512},
{2, 0, 1}),
TransposeTestCase({512, 512, 512},
{2, 1, 0}),
TransposeTestCase({64, 224, 224, 3},
{1, 2, 3, 0}),
TransposeTestCase({256, 64, 64, 3},
{1, 3, 2, 0}),
};
}
template <typename T>
void BM_Eigen(const TransposeTestCase& bm, int parallelism,
::testing::benchmark::State& state) {
CHECK_EQ(parallelism, 1);
Array<T> input(bm.dims);
input.FillIota(0);
std::vector<int64_t> output_dims = Permute(bm.dims, bm.permutation);
Array<T> output(output_dims);
for (auto s : state) {
TransposeUsingEigen(input.data(), output.data(), bm.dims, output_dims,
bm.permutation);
tsl::testing::DoNotOptimize(output);
}
}
static void BM_Eigen_uint8(const TransposeTestCase& bm, int parallelism,
::testing::benchmark::State& state) {
BM_Eigen<uint8_t>(bm, parallelism, state);
}
static void BM_Eigen_float(const TransposeTestCase& bm, int parallelism,
::testing::benchmark::State& state) {
BM_Eigen<float>(bm, parallelism, state);
}
template <typename T>
void BM_Transpose(const TransposeTestCase& bm, int parallelism,
::testing::benchmark::State& state) {
TransposePlan::Options options;
options.elem_size_in_bytes = sizeof(T);
options.dims = bm.dims;
options.permutation = bm.permutation;
options.input_layout = TransposePlan::Tiling{};
options.output_tiling = TransposePlan::Tiling{};
options.transformation = TransposePlan::Transformation::kNone;
options.num_threads = parallelism;
TF_ASSERT_OK_AND_ASSIGN(auto plan, TransposePlan::Create(options));
Array<T> input(bm.dims);
input.FillIota(0);
std::vector<int64_t> output_dims = Permute(bm.dims, bm.permutation);
Array<T> output(output_dims);
tsl::thread::ThreadPool threadpool(tsl::Env::Default(), "Transpose",
parallelism);
for (auto s : state) {
plan->Execute(input.data(), output.data(), [&](std::function<void()> fn) {
threadpool.Schedule(std::move(fn));
});
tsl::testing::DoNotOptimize(output);
}
}
static void BM_Transpose_uint8(const TransposeTestCase& bm, int parallelism,
::testing::benchmark::State& state) {
BM_Transpose<uint8_t>(bm, parallelism, state);
}
static void BM_Transpose_float(const TransposeTestCase& bm, int parallelism,
::testing::benchmark::State& state) {
BM_Transpose<float>(bm, parallelism, state);
}
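// Registers the benchmark matrix (variant x thread count x test case) at
// static-initialization time.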
static void* benchmarks = []() {
using BenchmarkFn =
void (*)(const TransposeTestCase&, int, testing::benchmark::State&);
std::vector<std::tuple<std::string, BenchmarkFn, std::vector<int>>> variants =
{
{"BM_Eigen_uint8", BM_Eigen_uint8, {1}},
{"BM_Transpose_uint8", BM_Transpose_uint8, {1, 4, 8}},
{"BM_Eigen_float", BM_Eigen_float, {1}},
{"BM_Transpose_float", BM_Transpose_float, {1, 4, 8}},
};
auto benchmark_cases = BenchmarkCases();
for (const auto& benchmark_case : benchmark_cases) {
for (const auto& variant : variants) {
for (int num_threads : std::get<2>(variant)) {
std::string name =
absl::StrCat(std::get<0>(variant), "_threads_", num_threads, "_",
absl::StrJoin(benchmark_case.dims, "_"), "_perm_",
absl::StrJoin(benchmark_case.permutation, "_"));
TransposeTestCase testcase = benchmark_case;
BenchmarkFn fn = std::get<1>(variant);
benchmark::RegisterBenchmark(
name.c_str(), [fn, num_threads, testcase](benchmark::State& state) {
fn(testcase, num_threads, state);
});
}
}
}
return nullptr;
}();
TEST(TransposePlanCache, Basics) {
std::vector<int64_t> dims = {1, 2, 3};
std::vector<int64_t> permutation_210 = {2, 1, 0};
std::vector<int64_t> permutation_120 = {1, 2, 0};
std::vector<int64_t> permutation_012 = {0, 1, 2};
TransposePlanCache cache(2);
TransposePlan::Options o;
o.elem_size_in_bytes = 4;
o.dims = dims;
o.permutation = permutation_210;
TF_ASSERT_OK_AND_ASSIGN(auto p1, cache.GetOrCreate(o));
TF_ASSERT_OK_AND_ASSIGN(auto p1a, cache.GetOrCreate(o));
EXPECT_TRUE(p1.get() == p1a.get());
TransposePlan::Options o2;
o2.elem_size_in_bytes = 4;
o2.dims = dims;
o2.permutation = permutation_120;
TF_ASSERT_OK_AND_ASSIGN(auto p2, cache.GetOrCreate(o2));
EXPECT_TRUE(p1.get() != p2.get());
TransposePlan::Options o3;
o3.elem_size_in_bytes = 4;
o3.dims = dims;
o3.permutation = permutation_012;
TF_ASSERT_OK_AND_ASSIGN(auto p3, cache.GetOrCreate(o3));
EXPECT_TRUE(p3.get() != p1.get());
TF_ASSERT_OK_AND_ASSIGN(auto p1b, cache.GetOrCreate(o));
EXPECT_TRUE(p1.get() != p1b.get());
}
} |
878 | cpp | tensorflow/tensorflow | svdf | tensorflow/lite/kernels/svdf.cc | tensorflow/lite/kernels/svdf_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SVDF_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SVDF_H_
#include <stdint.h>
#include <algorithm>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
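// Applies the time weights to the per-batch state (one dot product per
// filter), sums groups of `rank` filters into `num_units` outputs, adds the
// optional bias, and applies the fused activation.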
static inline void ApplyTimeWeightsBiasAndActivation(
int batch_size, int memory_size, int num_filters, int num_units, int rank,
const float* const __restrict__ weights_time_data,
const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation,
float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr,
float* const __restrict__ output_ptr) {
for (int b = 0; b < batch_size; ++b) {
float* state_ptr_batch = state_ptr + b * memory_size * num_filters;
float* scratch_ptr_batch = scratch_ptr + b * num_filters;
tensor_utils::BatchVectorBatchVectorDotProduct(
weights_time_data, state_ptr_batch, memory_size, num_filters,
scratch_ptr_batch);
}
tensor_utils::ReductionSumVector(scratch_ptr, output_ptr,
batch_size * num_units, rank);
if (bias_ptr) {
tensor_utils::VectorBatchVectorAdd(bias_ptr, num_units, batch_size,
output_ptr);
}
tensor_utils::ApplyActivationToVector(output_ptr, batch_size * num_units,
activation, output_ptr);
}
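// Fully-integer SVDF (int8 activations, int16 state): shift the state left by
// one time step, project the zero-point-adjusted input through the feature
// weights into the newest state column, then apply the time weights and bias,
// and requantize the result to int8.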
inline void EvalIntegerSVDF(
const TfLiteSVDFParams* params, const RuntimeShape& input_shape,
const int8_t* input_data, const RuntimeShape& weights_feature_shape,
const int8_t* weights_feature_data, const RuntimeShape& weights_time_shape,
const int16_t* weights_time_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, int16_t* state_data,
const RuntimeShape& output_shape, int8_t* output_data,
int32_t* scratch_data, int32_t* output_temp_data, int32_t scale_1_a,
int scale_1_b, int32_t scale_2_a, int scale_2_b, int32_t input_zp,
int32_t output_zp) {
const int n_rank = params->rank;
const int n_batch = input_shape.Dims(0);
const int n_input = input_shape.Dims(1);
const int n_filter = weights_feature_shape.Dims(0);
const int n_unit = n_filter / n_rank;
const int n_memory = weights_time_shape.Dims(1);
std::copy(state_data + 1, state_data + n_batch * n_memory * n_filter,
state_data);
{
const int32_t output_max = std::numeric_limits<int16_t>::max();
const int32_t output_min = std::numeric_limits<int16_t>::min();
int16_t* result_in_batch = state_data + (n_memory - 1);
for (int b = 0; b < n_batch; b++) {
const int8_t* matrix_data = weights_feature_data;
for (int r = 0; r < n_filter; r++) {
int32_t dot_prod = 0;
const int8_t* vector_in_batch = input_data + b * n_input;
for (int c = 0; c < n_input; c++) {
dot_prod += *matrix_data++ * (*vector_in_batch++ - input_zp);
}
dot_prod =
MultiplyByQuantizedMultiplier(dot_prod, scale_1_a, scale_1_b);
dot_prod = std::min(std::max(output_min, dot_prod), output_max);
*result_in_batch = dot_prod;
result_in_batch += n_memory;
}
}
}
{
for (int b = 0; b < n_batch; ++b) {
const int16_t* state_data_batch = state_data + b * n_memory * n_filter;
int32_t* scratch_data_batch = scratch_data + b * n_filter;
tensor_utils::BatchVectorBatchVectorDotProduct(
weights_time_data, state_data_batch, n_memory, n_filter,
scratch_data_batch);
}
}
{
tensor_utils::ReductionSumVector(scratch_data, output_temp_data,
n_batch * n_unit, n_rank);
if (bias_data) {
tensor_utils::VectorBatchVectorAdd(bias_data, n_unit, n_batch,
output_temp_data);
}
const int32_t output_max = std::numeric_limits<int8_t>::max();
const int32_t output_min = std::numeric_limits<int8_t>::min();
for (int i = 0; i < n_batch * n_unit; ++i) {
int32_t x1 = output_temp_data[i];
int32_t x2 = MultiplyByQuantizedMultiplier(x1, scale_2_a, scale_2_b);
int32_t x3 = x2 + output_zp;
int32_t x4 = std::min(std::max(output_min, x3), output_max);
output_data[i] = static_cast<int8_t>(x4);
}
}
}
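// Float SVDF: same structure as the integer path, entirely in float.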
inline void EvalFloatSVDF(
const TfLiteSVDFParams* params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& weights_feature_shape,
const float* weights_feature_data, const RuntimeShape& weights_time_shape,
const float* weights_time_data, const RuntimeShape& bias_shape,
const float* bias_data, float* scratch_data, float* state_data,
const RuntimeShape& output_shape, float* output_data) {
const int rank = params->rank;
const int batch_size = input_shape.Dims(0);
const int input_size = input_shape.Dims(1);
const int num_filters = weights_feature_shape.Dims(0);
const int num_units = num_filters / rank;
const int memory_size = weights_time_shape.Dims(1);
std::copy(state_data + 1, state_data + batch_size * memory_size * num_filters,
state_data);
std::fill_n(scratch_data, batch_size * num_filters, 0.0f);
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
weights_feature_data, num_filters, input_size, input_data, batch_size,
scratch_data);
for (int i = 0; i < batch_size * num_filters; ++i) {
state_data[i * memory_size + memory_size - 1] = scratch_data[i];
}
ApplyTimeWeightsBiasAndActivation(
batch_size, memory_size, num_filters, num_units, rank, weights_time_data,
bias_data, params->activation, state_data, scratch_data, output_data);
}
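// Hybrid SVDF: float activations with int8 feature weights. The input batch is
// quantized on the fly (optionally asymmetrically) unless it is all zeros; the
// time-weight stage runs in float.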
inline void EvalHybridSVDF(
const TfLiteSVDFParams* params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& weights_feature_shape,
const int8_t* weights_feature_data, const float weights_feature_scale,
const RuntimeShape& weights_time_shape, const float* weights_time_data,
const RuntimeShape& bias_shape, const float* bias_data, float* scratch,
float* scaling_factors, int8_t* quantized_input, float* state,
const RuntimeShape& output_shape, float* output_data, int32_t* zero_points,
int32_t* row_sums, bool* compute_row_sums) {
const int rank = params->rank;
const int batch_size = input_shape.Dims(0);
const int input_size = input_shape.Dims(1);
const int num_filters = weights_feature_shape.Dims(0);
const int num_units = num_filters / rank;
const int memory_size = weights_time_shape.Dims(1);
std::copy(state + 1, state + batch_size * memory_size * num_filters, state);
std::fill_n(scratch, batch_size * num_filters, 0.0f);
if (!tensor_utils::IsZeroVector(input_data, batch_size * input_size)) {
tensor_utils::BatchQuantizeFloats(
input_data, batch_size, input_size, quantized_input, scaling_factors,
zero_points, params->asymmetric_quantize_inputs);
for (int b = 0; b < batch_size; ++b) {
scaling_factors[b] *= weights_feature_scale;
}
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
weights_feature_data, num_filters, input_size, quantized_input,
scaling_factors, batch_size, scratch,
nullptr, zero_points,
reinterpret_cast<int32_t*>(scratch), row_sums, compute_row_sums,
nullptr);
}
for (int i = 0; i < batch_size * num_filters; ++i) {
state[i * memory_size + memory_size - 1] = scratch[i];
}
ApplyTimeWeightsBiasAndActivation(
batch_size, memory_size, num_filters, num_units, rank, weights_time_data,
bias_data, params->activation, state, scratch, output_data);
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/svdf.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace svdf {
namespace {
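// Per-node state: index of the first scratch tensor plus the quantized scales
// cached for the fully-integer path.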
struct OpData {
int scratch_tensor_index;
bool float_weights_time_initialized;
int32_t effective_scale_1_a;
int effective_scale_1_b;
int32_t effective_scale_2_a;
int effective_scale_2_b;
bool compute_row_sums = false;
};
}
constexpr int kInputTensor = 0;
constexpr int kWeightsFeatureTensor = 1;
constexpr int kWeightsTimeTensor = 2;
constexpr int kBiasTensor = 3;
constexpr int kStateTensor = 4;
constexpr int kOutputTensor = 0;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->float_weights_time_initialized = false;
context->AddTensors(context, 6,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
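// Validates shapes, resizes the output, and allocates the temporaries needed
// by the float (1), fully-integer (2) or hybrid (6) paths. For the integer
// path the two effective output scales are also precomputed here.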
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
int scratch_tensor_index = op_data->scratch_tensor_index;
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* weights_feature;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
&weights_feature));
const TfLiteTensor* weights_time;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
TF_LITE_ENSURE(context,
input->type == kTfLiteFloat32 || input->type == kTfLiteInt8);
const int rank = params->rank;
const int batch_size = input->dims->data[0];
const int num_filters = weights_feature->dims->data[0];
TF_LITE_ENSURE(context, rank != 0);
TF_LITE_ENSURE_EQ(context, num_filters % rank, 0);
const int num_units = num_filters / rank;
const int memory_size = weights_time->dims->data[1];
TF_LITE_ENSURE_EQ(context, input->dims->data[1],
weights_feature->dims->data[1]);
TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters);
const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
if (bias) {
TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);
}
const TfLiteTensor* state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStateTensor, &state));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(state), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(state, 0), batch_size);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(state, 1),
memory_size * num_filters);
TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
output_size_array->data[0] = batch_size;
output_size_array->data[1] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
const bool is_hybrid_op = IsHybridOp(input, weights_feature);
const bool is_full_integer = input->type == kTfLiteInt8;
TfLiteIntArrayFree(node->temporaries);
if (is_hybrid_op) {
node->temporaries = TfLiteIntArrayCreate(6);
} else if (is_full_integer) {
node->temporaries = TfLiteIntArrayCreate(2);
} else {
node->temporaries = TfLiteIntArrayCreate(1);
}
node->temporaries->data[0] = scratch_tensor_index;
TfLiteIntArray* scratch_size_array = TfLiteIntArrayCreate(2);
scratch_size_array->data[0] = batch_size;
scratch_size_array->data[1] = num_filters;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_tensor));
if (is_full_integer) {
scratch_tensor->type = kTfLiteInt32;
} else {
scratch_tensor->type = kTfLiteFloat32;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor,
scratch_size_array));
if (is_hybrid_op) {
op_data->compute_row_sums = true;
node->temporaries->data[1] = scratch_tensor_index + 1;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1,
&input_quantized));
input_quantized->type = weights_feature->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[2] = scratch_tensor_index + 2;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[3] = scratch_tensor_index + 3;
TfLiteTensor* float_weights_time;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&float_weights_time));
float_weights_time->type = kTfLiteFloat32;
float_weights_time->name = "Svdf_float_weights_time";
float_weights_time->allocation_type = kTfLiteArenaRwPersistent;
if (!TfLiteIntArrayEqual(float_weights_time->dims, weights_time->dims)) {
TfLiteIntArray* float_weights_time_size =
TfLiteIntArrayCopy(weights_time->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, float_weights_time,
float_weights_time_size));
}
node->temporaries->data[4] = scratch_tensor_index + 4;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &zero_points));
zero_points->type = kTfLiteFloat32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = zero_points_dims[0];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
node->temporaries->data[5] = scratch_tensor_index + 5;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &row_sums));
row_sums->type = kTfLiteFloat32;
float_weights_time->name = "Svdf_row_sums";
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_filters};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
if (is_full_integer) {
TfLiteIntArray* output_temp_size_array = TfLiteIntArrayCreate(2);
output_temp_size_array->data[0] = num_units;
output_temp_size_array->data[1] = batch_size;
node->temporaries->data[1] = scratch_tensor_index + 1;
TfLiteTensor* output_temp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &output_temp));
output_temp->type = kTfLiteInt32;
output_temp->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_temp,
output_temp_size_array));
TF_LITE_ENSURE(context, input->quantization.type != kTfLiteNoQuantization);
auto* input_params =
reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params);
TF_LITE_ENSURE(context,
weights_feature->quantization.type != kTfLiteNoQuantization);
auto* weights_feature_params = reinterpret_cast<TfLiteAffineQuantization*>(
weights_feature->quantization.params);
TF_LITE_ENSURE(context, state->quantization.type != kTfLiteNoQuantization);
auto* state_params =
reinterpret_cast<TfLiteAffineQuantization*>(state->quantization.params);
TF_LITE_ENSURE(context,
weights_time->quantization.type != kTfLiteNoQuantization);
auto* weight_time_params = reinterpret_cast<TfLiteAffineQuantization*>(
weights_time->quantization.params);
TF_LITE_ENSURE(context, output->quantization.type != kTfLiteNoQuantization);
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
output->quantization.params);
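    // effective_scale_1 rescales input x feature-weight products into the
    // state domain; effective_scale_2 rescales state x time-weight products
    // into the output domain.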
const double effective_scale_1 = input_params->scale->data[0] *
weights_feature_params->scale->data[0] /
state_params->scale->data[0];
const double effective_scale_2 = state_params->scale->data[0] *
weight_time_params->scale->data[0] /
output_params->scale->data[0];
QuantizeMultiplier(effective_scale_1, &op_data->effective_scale_1_a,
&op_data->effective_scale_1_b);
QuantizeMultiplier(effective_scale_2, &op_data->effective_scale_2_a,
&op_data->effective_scale_2_b);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* weights_feature;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
&weights_feature));
const TfLiteTensor* weights_time;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
TfLiteTensor* scratch;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch));
TfLiteTensor* state = GetVariableInput(context, node, kStateTensor);
TF_LITE_ENSURE(context, state != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (weights_feature->type) {
case kTfLiteFloat32: {
reference_ops::EvalFloatSVDF(
params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(weights_feature),
GetTensorData<float>(weights_feature), GetTensorShape(weights_time),
GetTensorData<float>(weights_time), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorData<float>(scratch),
GetTensorData<float>(state), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteUInt8:
case kTfLiteInt8: {
if (input->type == kTfLiteFloat32) {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1,
&input_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&scaling_factors));
TfLiteTensor* float_weights_time;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&float_weights_time));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 4,
&zero_points));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &row_sums));
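        // Dequantize the time weights once into the persistent float
        // temporary; later invocations reuse the cached copy.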
if (!op_data->float_weights_time_initialized) {
const float dequantization_scale = weights_time->params.scale;
const int8_t* weights_time_ptr = GetTensorData<int8_t>(weights_time);
float* float_weights_time_ptr =
GetTensorData<float>(float_weights_time);
for (int i = 0; i < NumElements(float_weights_time); ++i) {
float_weights_time_ptr[i] =
weights_time_ptr[i] * dequantization_scale;
}
op_data->float_weights_time_initialized = true;
}
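        // With asymmetric input quantization the kernel also needs per-batch
        // zero points and per-filter row sums.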
int32_t* zero_points_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs && row_sums != nullptr) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
reference_ops::EvalHybridSVDF(
params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(weights_feature),
GetTensorData<int8_t>(weights_feature),
weights_feature->params.scale, GetTensorShape(float_weights_time),
GetTensorData<float>(float_weights_time), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorData<float>(scratch),
GetTensorData<float>(scaling_factors),
GetTensorData<int8_t>(input_quantized), GetTensorData<float>(state),
GetTensorShape(output), GetTensorData<float>(output),
zero_points_ptr, row_sums_ptr, &op_data->compute_row_sums);
return kTfLiteOk;
}
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
input->quantization.params);
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
output->quantization.params);
TfLiteTensor* output_temp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &output_temp));
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActRelu);
reference_ops::EvalIntegerSVDF(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(weights_feature),
GetTensorData<int8_t>(weights_feature), GetTensorShape(weights_time),
GetTensorData<int16_t>(weights_time), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorData<int16_t>(state),
GetTensorShape(output), GetTensorData<int8_t>(output),
GetTensorData<int32_t>(scratch), GetTensorData<int32_t>(output_temp),
op_data->effective_scale_1_a, op_data->effective_scale_1_b,
op_data->effective_scale_2_a, op_data->effective_scale_2_b,
input_params->zero_point->data[0],
output_params->zero_point->data[0]);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(weights_feature->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_SVDF() {
static TfLiteRegistration r = {svdf::Init, svdf::Free, svdf::Prepare,
svdf::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
static float svdf_input[] = {
0.12609188, -0.46347019, -0.89598465,
0.35867718, 0.36897406, 0.73463392,
0.14278367, -1.64410412, -0.75222826,
-0.57290924, 0.12729003, 0.7567004,
0.49837467, 0.19278903, 0.26584083,
0.17660543, 0.52949083, -0.77931279,
-0.11186574, 0.13164264, -0.05349274,
-0.72674477, -0.5683046, 0.55900657,
-0.68892461, 0.37783599, 0.18263303,
-0.63690937, 0.44483393, -0.71817774,
-0.81299269, -0.86831826, 1.43940818,
-0.95760226, 1.82078898, 0.71135032,
-1.45006323, -0.82251364, -1.69082689,
-1.65087092, -1.89238167, 1.54172635,
0.03966608, -0.24936394, -0.77526885,
2.06740379, -1.51439476, 1.43768692,
0.11771342, -0.23761693, -0.65898693,
0.31088525, -1.55601168, -0.87661445,
-0.89477462, 1.67204106, -0.53235275,
-0.6230064, 0.29819036, 1.06939757,
};
static float svdf_golden_output_rank_1[] = {
0.014899, -0.0517661, -0.143725, -0.00271883,
-0.03004015, 0.09565311, 0.1587342, 0.00784263,
0.068281, -0.162217, -0.152268, 0.00323521,
0.01582633, 0.03858774, -0.03001583, -0.02671271,
-0.0317821, -0.0333089, 0.0609602, 0.0333759,
-0.01432795, 0.05524484, 0.1101355, -0.02382665,
-0.00623099, -0.077701, -0.391193, -0.0136691,
-0.02333033, 0.02293761, 0.12338032, 0.04326871,
0.201551, -0.164607, -0.179462, -0.0592739,
0.01064911, -0.17503069, 0.07821996, -0.00224009,
0.0886511, -0.0875401, -0.269283, 0.0281379,
-0.02282338, 0.09741908, 0.32973239, 0.12281385,
-0.201174, -0.586145, -0.628624, -0.0330412,
0.24780814, -0.39304617, -0.22473189, 0.02589256,
-0.0839096, -0.299329, 0.108746, 0.109808,
0.10084175, -0.06416984, 0.28936723, 0.0026358,
0.419114, -0.237824, -0.422627, 0.175115,
-0.2314795, -0.18584411, -0.4228974, -0.12928449,
0.36726, -0.522303, -0.456502, -0.175475,
0.17012937, -0.34447709, 0.38505614, -0.28158101,
};
static float svdf_golden_output_rank_2[] = {
-0.09623547, -0.10193135, 0.11083051, -0.0347917,
0.1141196, 0.12965347, -0.12652366, 0.01007236,
-0.16396809, -0.21247184, 0.11259045, -0.04156673,
0.10132131, -0.06143532, -0.00924693, 0.10084561,
0.01257364, 0.0506071, -0.19287863, -0.07162561,
-0.02033747, 0.22673416, 0.15487903, 0.02525555,
-0.1411963, -0.37054959, 0.01774767, 0.05867489,
0.09607603, -0.0141301, -0.08995658, 0.12867066,
-0.27142537, -0.16955489, 0.18521598, -0.12528358,
0.00331409, 0.11167502, 0.02218599, -0.07309391,
0.09593632, -0.28361851, -0.0773851, 0.17199151,
-0.00075242, 0.33691186, -0.1536046, 0.16572715,
-0.27916506, -0.27626723, 0.42615682, 0.3225764,
-0.37472126, -0.55655634, -0.05013514, 0.289112,
-0.24418658, 0.07540751, -0.1940318, -0.08911639,
0.00732617, 0.46737891, 0.26449674, 0.24888524,
-0.17225097, -0.54660404, -0.38795233, 0.08389944,
0.07736043, -0.28260678, 0.15666828, 1.14949894,
-0.57454878, -0.64704704, 0.73235172, -0.34616736,
0.21120001, -0.22927976, 0.02455296, -0.35906726,
};
class BaseSVDFOpModel : public SingleOpModel {
public:
BaseSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank,
TensorType weights_feature_type = TensorType_FLOAT32,
TensorType weights_time_type = TensorType_FLOAT32,
bool asymmetric_quantize_inputs = false)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
input_ = AddInput(TensorType_FLOAT32);
weights_feature_ = AddInput(weights_feature_type);
weights_time_ = AddInput(weights_time_type);
bias_ = AddNullInput();
const int num_filters = units * rank;
activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_NONE,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({
{batches_, input_size_},
{units_ * rank, input_size_},
{units_ * rank, memory_size_},
{units_},
{batches, memory_size * num_filters}
});
}
void SetWeightsFeature(std::initializer_list<float> f) {
PopulateTensor(weights_feature_, f);
}
void SetWeightsTime(std::initializer_list<float> f) {
PopulateTensor(weights_time_, f);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_feature_;
int weights_time_;
int bias_;
int activation_state_;
int output_;
int batches_;
int units_;
int input_size_;
int memory_size_;
int rank_;
};
class SVDFOpModel : public BaseSVDFOpModel {
public:
using BaseSVDFOpModel::BaseSVDFOpModel;
};
class HybridSVDFOpModel : public BaseSVDFOpModel {
public:
HybridSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank, TensorType tensor_type,
bool asymmetric_quantize_inputs)
: BaseSVDFOpModel(batches, units, input_size, memory_size, rank,
tensor_type, tensor_type, asymmetric_quantize_inputs) {
tensor_type_ = tensor_type;
}
void SetWeights(int weights_idx, const std::vector<float>& f) {
if (tensor_type_ == TensorType_UINT8) {
SymmetricQuantizeAndPopulate(weights_idx, f);
} else {
SignedSymmetricQuantizeAndPopulate(weights_idx, f);
}
}
void SetWeightsFeature(std::initializer_list<float> f) {
SetWeights(weights_feature_, f);
}
void SetWeightsTime(std::initializer_list<float> f) {
SetWeights(weights_time_, f);
}
protected:
TensorType tensor_type_;
};
class SVDFOpTest : public ::testing::TestWithParam<bool> {
protected:
void VerifyGoldens(float golden_input[], float golden_output[],
int golden_size, BaseSVDFOpModel* svdf,
float tolerance = 1e-5) {
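    // Feed the golden input one time step at a time and compare each step's
    // output against the matching slice of the golden output.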
const int svdf_num_batches = svdf->num_batches();
const int svdf_input_size = svdf->input_size();
const int svdf_num_units = svdf->num_units();
const int input_sequence_size =
golden_size / sizeof(float) / (svdf_input_size * svdf_num_batches);
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start =
golden_input + i * svdf_input_size * svdf_num_batches;
float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
svdf->SetInput(0, batch_start, batch_end);
ASSERT_EQ(svdf->Invoke(), kTfLiteOk);
const float* golden_start =
golden_output + i * svdf_num_units * svdf_num_batches;
const float* golden_end =
golden_start + svdf_num_units * svdf_num_batches;
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(svdf->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
}
};
INSTANTIATE_TEST_SUITE_P(SVDFOpTest, SVDFOpTest,
::testing::ValuesIn({false, true}));
TEST_F(SVDFOpTest, BlackBoxTestRank1) {
SVDFOpModel svdf(2, 4, 3,
10, 1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf);
}
TEST_F(SVDFOpTest, BlackBoxTestRank2) {
SVDFOpModel svdf(2, 4, 3,
10, 2);
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank1Uint8) {
HybridSVDFOpModel svdf(2, 4, 3,
10, 1, TensorType_UINT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf,
0.004285);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank2Uint8) {
HybridSVDFOpModel svdf(2, 4, 3,
10, 2, TensorType_UINT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf,
0.007175);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank1Int8) {
HybridSVDFOpModel svdf(2, 4, 3,
10, 1, TensorType_INT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf,
0.004285);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank2Int8) {
HybridSVDFOpModel svdf(2, 4, 3,
10, 2, TensorType_INT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf,
0.007175);
}
class IntegerSVDFOpModel : public SingleOpModel {
public:
IntegerSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
const int num_filters = units * rank;
input_ = AddInput({TensorType_INT8, {batches, input_size}, -1, 1});
weights_feature_ =
AddInput({TensorType_INT8, {num_filters, input_size}, -0.5, 0.5});
weights_time_ =
AddInput({TensorType_INT16, {num_filters, memory_size}, -1, 1});
bias_ = AddInput({TensorType_INT32, {units}, -512, 512});
activation_state_ = AddVariableInput(
{TensorType_INT16, {batches, memory_size * num_filters}, -16, 16});
output_ = AddOutput({TensorType_INT8, {batches, units}, -0.5, 0.5});
SetBuiltinOp(
BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_RELU).Union());
BuildInterpreter({
{batches, input_size},
{num_filters, input_size},
{num_filters, memory_size},
{units},
{batches, memory_size * num_filters}
});
}
void SetWeightsFeature(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(weights_feature_, f);
}
void SetWeightsTime(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(weights_time_, f);
}
void SetBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(bias_, f);
}
void SetInput(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_, f);
}
std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); }
protected:
int input_;
int weights_feature_;
int weights_time_;
int bias_;
int activation_state_;
int output_;
int batches_;
int units_;
int input_size_;
int memory_size_;
int rank_;
};
TEST_F(SVDFOpTest, BlackBoxTestInteger) {
IntegerSVDFOpModel svdf(2, 4, 3,
10, 1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
svdf.SetBias({-0.0976817, 0.15294972, 0.39635518, -0.02702999});
const std::vector<std::vector<float>> input_sequences = {
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004},
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004},
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004},
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004}};
const std::vector<std::vector<int8_t>> expected_output = {
{-9, 24, 31, 1, -10, 10, -3, 0},
{2, 4, -44, -7, -10, 32, 52, 1},
{12, -17, 9, -8, 7, 16, -11, -8},
{-26, 29, 28, 16, -23, 26, 30, -6},
{-8, -25, -86, -5, -44, 59, 81, 15},
{62, -16, -37, 3, 27, 14, 34, -10},
{1, 24, -25, 23, 31, 61, 67, 11},
{-64, -65, -128, -25, -53, 59, 127, 20},
{20, -29, -20, -15, -28, 0, 8, -27},
{54, 61, -67, 38, 38, 64, 115, 0},
{-44, -75, -128, -20, -19, 93, 101, 35},
{-5, -56, 30, -18, -40, -9, -8, -31},
};
for (int sequence_index = 0; sequence_index < 12; ++sequence_index) {
svdf.SetInput(input_sequences[sequence_index]);
ASSERT_EQ(svdf.Invoke(), kTfLiteOk);
const std::vector<int8_t> res = svdf.GetOutput();
EXPECT_THAT(res, ElementsAreArray(expected_output[sequence_index]));
}
}
}
} |
879 | cpp | tensorflow/tensorflow | table | third_party/xla/third_party/tsl/tsl/lib/io/table.cc | third_party/xla/third_party/tsl/tsl/lib/io/table_test.cc | #ifndef TENSORFLOW_TSL_LIB_IO_TABLE_H_
#define TENSORFLOW_TSL_LIB_IO_TABLE_H_
#include <stdint.h>
#include "tsl/lib/io/iterator.h"
namespace tsl {
class RandomAccessFile;
namespace table {
struct Options;
class Table {
public:
static absl::Status Open(const Options& options, tsl::RandomAccessFile* file,
uint64 file_size, Table** table);
~Table();
Iterator* NewIterator() const;
uint64 ApproximateOffsetOf(const StringPiece& key) const;
private:
struct Rep;
Rep* rep_;
explicit Table(Rep* rep) { rep_ = rep; }
static Iterator* BlockReader(void*, const StringPiece&);
absl::Status InternalGet(const StringPiece& key, void* arg,
void (*handle_result)(void* arg,
const StringPiece& k,
const StringPiece& v));
Table(const Table&);
void operator=(const Table&);
};
}
}
#endif
#include "tsl/lib/io/table.h"
#include "tsl/lib/io/block.h"
#include "tsl/lib/io/cache.h"
#include "tsl/lib/io/format.h"
#include "tsl/lib/io/table_options.h"
#include "tsl/lib/io/two_level_iterator.h"
#include "tsl/platform/coding.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
namespace tsl {
namespace table {
struct Table::Rep {
~Rep() { delete index_block; }
Options options;
absl::Status status;
RandomAccessFile* file;
uint64 cache_id;
BlockHandle metaindex_handle;
Block* index_block;
};
absl::Status Table::Open(const Options& options, RandomAccessFile* file,
uint64 size, Table** table) {
*table = nullptr;
if (size < Footer::kEncodedLength) {
return errors::DataLoss("file is too short to be an sstable");
}
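  // Read and decode the fixed-size footer at the end of the file; it locates
  // the metaindex and index blocks.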
char footer_space[Footer::kEncodedLength];
StringPiece footer_input;
absl::Status s =
file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength,
&footer_input, footer_space);
if (!s.ok()) return s;
Footer footer;
s = footer.DecodeFrom(&footer_input);
if (!s.ok()) return s;
BlockContents contents;
Block* index_block = nullptr;
if (s.ok()) {
s = ReadBlock(file, footer.index_handle(), &contents);
}
if (s.ok()) {
index_block = new Block(contents);
Rep* rep = new Table::Rep;
rep->options = options;
rep->file = file;
rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block;
rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
*table = new Table(rep);
} else {
if (index_block) delete index_block;
}
return s;
}
Table::~Table() { delete rep_; }
static void DeleteBlock(void* arg, void* ignored) {
delete reinterpret_cast<Block*>(arg);
}
static void DeleteCachedBlock(const absl::string_view&, void* value) {
Block* block = reinterpret_cast<Block*>(value);
delete block;
}
static void ReleaseBlock(void* arg, void* h) {
Cache* cache = reinterpret_cast<Cache*>(arg);
Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
cache->Release(handle);
}
Iterator* Table::BlockReader(void* arg, const StringPiece& index_value) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
Block* block = nullptr;
Cache::Handle* cache_handle = nullptr;
BlockHandle handle;
StringPiece input = index_value;
absl::Status s = handle.DecodeFrom(&input);
if (s.ok()) {
BlockContents contents;
if (block_cache != nullptr) {
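      // Cache key: 8-byte table cache id followed by the 8-byte block offset.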
char cache_key_buffer[16];
core::EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
core::EncodeFixed64(cache_key_buffer + 8, handle.offset());
absl::string_view key(cache_key_buffer, sizeof(cache_key_buffer));
cache_handle = block_cache->Lookup(key);
if (cache_handle != nullptr) {
block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
} else {
s = ReadBlock(table->rep_->file, handle, &contents);
if (s.ok()) {
block = new Block(contents);
cache_handle = block_cache->Insert(key, block, block->size(),
&DeleteCachedBlock);
}
}
} else {
s = ReadBlock(table->rep_->file, handle, &contents);
if (s.ok()) {
block = new Block(contents);
}
}
}
Iterator* iter;
if (block != nullptr) {
iter = block->NewIterator();
if (cache_handle == nullptr) {
iter->RegisterCleanup(&DeleteBlock, block, nullptr);
} else {
iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
}
} else {
iter = NewErrorIterator(s);
}
return iter;
}
Iterator* Table::NewIterator() const {
return NewTwoLevelIterator(rep_->index_block->NewIterator(),
&Table::BlockReader, const_cast<Table*>(this));
}
absl::Status Table::InternalGet(const StringPiece& k, void* arg,
void (*saver)(void*, const StringPiece&,
const StringPiece&)) {
absl::Status s;
Iterator* iiter = rep_->index_block->NewIterator();
iiter->Seek(k);
if (iiter->Valid()) {
Iterator* block_iter = BlockReader(this, iiter->value());
block_iter->Seek(k);
if (block_iter->Valid()) {
(*saver)(arg, block_iter->key(), block_iter->value());
}
s = block_iter->status();
delete block_iter;
}
if (s.ok()) {
s = iiter->status();
}
delete iiter;
return s;
}
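// Approximates a key's file offset using the block handle stored in the index
// entry that covers it; keys past the last entry map to the metaindex offset.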
uint64 Table::ApproximateOffsetOf(const StringPiece& key) const {
Iterator* index_iter = rep_->index_block->NewIterator();
index_iter->Seek(key);
uint64 result;
if (index_iter->Valid()) {
BlockHandle handle;
StringPiece input = index_iter->value();
absl::Status s = handle.DecodeFrom(&input);
if (s.ok()) {
result = handle.offset();
} else {
result = rep_->metaindex_handle.offset();
}
} else {
result = rep_->metaindex_handle.offset();
}
delete index_iter;
return result;
}
}
} | #include "tsl/lib/io/table.h"
#include <algorithm>
#include <map>
#include <string>
#include <vector>
#include "absl/strings/escaping.h"
#include "tsl/lib/io/block.h"
#include "tsl/lib/io/block_builder.h"
#include "tsl/lib/io/format.h"
#include "tsl/lib/io/iterator.h"
#include "tsl/lib/io/table_builder.h"
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/snappy.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace table {
namespace {
typedef std::pair<StringPiece, StringPiece> StringPiecePair;
}
namespace test {
static StringPiece RandomString(random::SimplePhilox* rnd, int len,
string* dst) {
dst->resize(len);
for (int i = 0; i < len; i++) {
(*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));
}
return StringPiece(*dst);
}
static string RandomKey(random::SimplePhilox* rnd, int len) {
static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c',
'd', 'e', '\xfd', '\xfe', '\xff'};
string result;
for (int i = 0; i < len; i++) {
result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
}
return result;
}
static StringPiece CompressibleString(random::SimplePhilox* rnd,
double compressed_fraction, size_t len,
string* dst) {
int raw = static_cast<int>(len * compressed_fraction);
if (raw < 1) raw = 1;
string raw_data;
RandomString(rnd, raw, &raw_data);
dst->clear();
while (dst->size() < len) {
dst->append(raw_data);
}
dst->resize(len);
return StringPiece(*dst);
}
}
static void Increment(string* key) { key->push_back('\0'); }
namespace {
struct STLLessThan {
STLLessThan() {}
bool operator()(const string& a, const string& b) const {
return StringPiece(a).compare(StringPiece(b)) < 0;
}
};
}
class StringSink : public WritableFile {
public:
~StringSink() override {}
const string& contents() const { return contents_; }
absl::Status Close() override { return absl::OkStatus(); }
absl::Status Flush() override { return absl::OkStatus(); }
absl::Status Name(StringPiece* result) const override {
return errors::Unimplemented("StringSink does not support Name()");
}
absl::Status Sync() override { return absl::OkStatus(); }
absl::Status Tell(int64_t* pos) override {
*pos = contents_.size();
return absl::OkStatus();
}
absl::Status Append(StringPiece data) override {
contents_.append(data.data(), data.size());
return absl::OkStatus();
}
private:
string contents_;
};
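// In-memory RandomAccessFile that counts bytes read, letting tests assert how
// much of the table a lookup actually touched.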
class StringSource : public RandomAccessFile {
public:
explicit StringSource(const StringPiece& contents)
: contents_(contents.data(), contents.size()), bytes_read_(0) {}
~StringSource() override {}
uint64 Size() const { return contents_.size(); }
absl::Status Name(StringPiece* result) const override {
return errors::Unimplemented("StringSource does not support Name()");
}
absl::Status Read(uint64 offset, size_t n, StringPiece* result,
char* scratch) const override {
if (offset > contents_.size()) {
return errors::InvalidArgument("invalid Read offset");
}
if (offset + n > contents_.size()) {
n = contents_.size() - offset;
}
memcpy(scratch, &contents_[offset], n);
*result = StringPiece(scratch, n);
bytes_read_ += n;
return absl::OkStatus();
}
uint64 BytesRead() const { return bytes_read_; }
private:
string contents_;
mutable uint64 bytes_read_;
};
typedef std::map<string, string, STLLessThan> KVMap;
class Constructor {
public:
explicit Constructor() : data_(STLLessThan()) {}
virtual ~Constructor() {}
void Add(const string& key, const StringPiece& value) {
data_[key] = string(value);
}
void Finish(const Options& options, std::vector<string>* keys, KVMap* kvmap) {
*kvmap = data_;
keys->clear();
for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
keys->push_back(it->first);
}
data_.clear();
absl::Status s = FinishImpl(options, *kvmap);
ASSERT_TRUE(s.ok()) << s.ToString();
}
virtual absl::Status FinishImpl(const Options& options,
const KVMap& data) = 0;
virtual Iterator* NewIterator() const = 0;
virtual const KVMap& data() { return data_; }
private:
KVMap data_;
};
class BlockConstructor : public Constructor {
public:
BlockConstructor() : block_(nullptr) {}
~BlockConstructor() override { delete block_; }
absl::Status FinishImpl(const Options& options, const KVMap& data) override {
delete block_;
block_ = nullptr;
BlockBuilder builder(&options);
for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
builder.Add(it->first, it->second);
}
data_ = string(builder.Finish());
BlockContents contents;
contents.data = data_;
contents.cacheable = false;
contents.heap_allocated = false;
block_ = new Block(contents);
return absl::OkStatus();
}
Iterator* NewIterator() const override { return block_->NewIterator(); }
private:
string data_;
Block* block_;
};
class TableConstructor : public Constructor {
public:
TableConstructor() : source_(nullptr), table_(nullptr) {}
~TableConstructor() override { Reset(); }
absl::Status FinishImpl(const Options& options, const KVMap& data) override {
Reset();
StringSink sink;
TableBuilder builder(options, &sink);
for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
builder.Add(it->first, it->second);
TF_CHECK_OK(builder.status());
}
absl::Status s = builder.Finish();
TF_CHECK_OK(s) << s.ToString();
CHECK_EQ(sink.contents().size(), builder.FileSize());
source_ = new StringSource(sink.contents());
Options table_options;
return Table::Open(table_options, source_, sink.contents().size(), &table_);
}
Iterator* NewIterator() const override { return table_->NewIterator(); }
uint64 ApproximateOffsetOf(const StringPiece& key) const {
return table_->ApproximateOffsetOf(key);
}
uint64 BytesRead() const { return source_->BytesRead(); }
private:
void Reset() {
delete table_;
delete source_;
table_ = nullptr;
source_ = nullptr;
}
StringSource* source_;
Table* table_;
};
enum TestType { TABLE_TEST, BLOCK_TEST };
struct TestArgs {
TestType type;
int restart_interval;
};
static const TestArgs kTestArgList[] = {
{TABLE_TEST, 16}, {TABLE_TEST, 1}, {TABLE_TEST, 1024},
{BLOCK_TEST, 16}, {BLOCK_TEST, 1}, {BLOCK_TEST, 1024},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
class Harness : public ::testing::Test {
public:
Harness() : constructor_(nullptr) {}
void Init(const TestArgs& args) {
delete constructor_;
constructor_ = nullptr;
options_ = Options();
options_.block_restart_interval = args.restart_interval;
options_.block_size = 256;
switch (args.type) {
case TABLE_TEST:
constructor_ = new TableConstructor();
break;
case BLOCK_TEST:
constructor_ = new BlockConstructor();
break;
}
}
~Harness() override { delete constructor_; }
void Add(const string& key, const string& value) {
constructor_->Add(key, value);
}
void Test(random::SimplePhilox* rnd, int num_random_access_iters = 200) {
std::vector<string> keys;
KVMap data;
constructor_->Finish(options_, &keys, &data);
TestForwardScan(keys, data);
TestRandomAccess(rnd, keys, data, num_random_access_iters);
}
void TestForwardScan(const std::vector<string>& keys, const KVMap& data) {
Iterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
iter->SeekToFirst();
for (KVMap::const_iterator model_iter = data.begin();
model_iter != data.end(); ++model_iter) {
ASSERT_EQ(ToStringPiecePair(data, model_iter), ToStringPiecePair(iter));
iter->Next();
}
ASSERT_TRUE(!iter->Valid());
delete iter;
}
void TestRandomAccess(random::SimplePhilox* rnd,
const std::vector<string>& keys, const KVMap& data,
int num_random_access_iters) {
static const bool kVerbose = false;
Iterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
KVMap::const_iterator model_iter = data.begin();
if (kVerbose) fprintf(stderr, "---\n");
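    // Randomly interleave Next / SeekToFirst / Seek and keep a model iterator
    // over the std::map in lockstep with the iterator under test.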
for (int i = 0; i < num_random_access_iters; i++) {
const int toss = rnd->Uniform(3);
switch (toss) {
case 0: {
if (iter->Valid()) {
if (kVerbose) fprintf(stderr, "Next\n");
iter->Next();
++model_iter;
ASSERT_EQ(ToStringPiecePair(data, model_iter),
ToStringPiecePair(iter));
}
break;
}
case 1: {
if (kVerbose) fprintf(stderr, "SeekToFirst\n");
iter->SeekToFirst();
model_iter = data.begin();
ASSERT_EQ(ToStringPiecePair(data, model_iter),
ToStringPiecePair(iter));
break;
}
case 2: {
string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
if (kVerbose)
fprintf(stderr, "Seek '%s'\n", absl::CEscape(key).c_str());
iter->Seek(StringPiece(key));
ASSERT_EQ(ToStringPiecePair(data, model_iter),
ToStringPiecePair(iter));
break;
}
}
}
delete iter;
}
StringPiecePair ToStringPiecePair(const KVMap& data,
const KVMap::const_iterator& it) {
if (it == data.end()) {
return StringPiecePair("END", "");
} else {
return StringPiecePair(it->first, it->second);
}
}
StringPiecePair ToStringPiecePair(const KVMap& data,
const KVMap::const_reverse_iterator& it) {
if (it == data.rend()) {
return StringPiecePair("END", "");
} else {
return StringPiecePair(it->first, it->second);
}
}
StringPiecePair ToStringPiecePair(const Iterator* it) {
if (!it->Valid()) {
return StringPiecePair("END", "");
} else {
return StringPiecePair(it->key(), it->value());
}
}
string PickRandomKey(random::SimplePhilox* rnd,
const std::vector<string>& keys) {
if (keys.empty()) {
return "foo";
} else {
const int index = rnd->Uniform(keys.size());
string result = keys[index];
switch (rnd->Uniform(3)) {
case 0:
break;
case 1: {
if (!result.empty() && result[result.size() - 1] > '\0') {
result[result.size() - 1]--;
}
break;
}
case 2: {
Increment(&result);
break;
}
}
return result;
}
}
private:
Options options_;
Constructor* constructor_;
};
TEST_F(Harness, Empty) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 1, 17);
random::SimplePhilox rnd(&philox);
Test(&rnd);
}
}
TEST_F(Harness, ZeroRestartPointsInBlock) {
char data[sizeof(uint32)];
memset(data, 0, sizeof(data));
BlockContents contents;
contents.data = StringPiece(data, sizeof(data));
contents.cacheable = false;
contents.heap_allocated = false;
Block block(contents);
Iterator* iter = block.NewIterator();
iter->SeekToFirst();
ASSERT_TRUE(!iter->Valid());
iter->Seek("foo");
ASSERT_TRUE(!iter->Valid());
delete iter;
}
TEST_F(Harness, SimpleEmptyKey) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 1, 17);
random::SimplePhilox rnd(&philox);
Add("", "v");
Test(&rnd);
}
}
TEST_F(Harness, SimpleSingle) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 2, 17);
random::SimplePhilox rnd(&philox);
Add("abc", "v");
Test(&rnd);
}
}
TEST_F(Harness, SimpleMulti) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 3, 17);
random::SimplePhilox rnd(&philox);
Add("abc", "v");
Add("abcd", "v");
Add("ac", "v2");
Test(&rnd);
}
}
TEST_F(Harness, SimpleMultiBigValues) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 3, 17);
random::SimplePhilox rnd(&philox);
Add("ainitial", "tiny");
Add("anext", string(10000000, 'a'));
Add("anext2", string(10000000, 'b'));
Add("azz", "tiny");
Test(&rnd, 100 );
}
}
TEST_F(Harness, SimpleSpecialKey) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 4, 17);
random::SimplePhilox rnd(&philox);
Add("\xff\xff", "v3");
Test(&rnd);
}
}
TEST_F(Harness, Randomized) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
random::PhiloxRandom philox(testing::RandomSeed() + 5, 17);
random::SimplePhilox rnd(&philox);
for (int num_entries = 0; num_entries < 2000;
num_entries += (num_entries < 50 ? 1 : 200)) {
if ((num_entries % 10) == 0) {
fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
int(kNumTestArgs), num_entries);
}
for (int e = 0; e < num_entries; e++) {
string v;
Add(test::RandomKey(&rnd, rnd.Skewed(4)),
string(test::RandomString(&rnd, rnd.Skewed(5), &v)));
}
Test(&rnd);
}
}
}
static bool Between(uint64 val, uint64 low, uint64 high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
static_cast<unsigned long long>(val),
static_cast<unsigned long long>(low),
static_cast<unsigned long long>(high));
}
return result;
}
class TableTest {};
TEST(TableTest, ApproximateOffsetOfPlain) {
TableConstructor c;
c.Add("k01", "hello");
c.Add("k02", "hello2");
c.Add("k03", string(10000, 'x'));
c.Add("k04", string(200000, 'x'));
c.Add("k05", string(300000, 'x'));
c.Add("k06", "hello3");
c.Add("k07", string(100000, 'x'));
std::vector<string> keys;
KVMap kvmap;
Options options;
options.block_size = 1024;
options.compression = kNoCompression;
c.Finish(options, &keys, &kvmap);
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 10, 500));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
static bool SnappyCompressionSupported() {
string out;
StringPiece in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
return port::Snappy_Compress(in.data(), in.size(), &out);
}
TEST(TableTest, ApproximateOffsetOfCompressed) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
TableConstructor c;
string tmp;
c.Add("k01", "hello");
c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
c.Add("k03", "hello3");
c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
std::vector<string> keys;
KVMap kvmap;
Options options;
options.block_size = 1024;
options.compression = kSnappyCompression;
c.Finish(options, &keys, &kvmap);
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 10, 100));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 4000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 4000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 7000));
}
TEST(TableTest, SeekToFirstKeyDoesNotReadTooMuch) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
string tmp;
TableConstructor c;
c.Add("k01", "firstvalue");
c.Add("k03", test::CompressibleString(&rnd, 0.25, 1000000, &tmp));
c.Add("k04", "abc");
std::vector<string> keys;
KVMap kvmap;
Options options;
options.block_size = 1024;
options.compression = kNoCompression;
c.Finish(options, &keys, &kvmap);
Iterator* iter = c.NewIterator();
iter->Seek("k01");
delete iter;
EXPECT_LT(c.BytesRead(), 200);
}
}
} |
880 | cpp | tensorflow/tensorflow | sub | tensorflow/lite/kernels/sub.cc | tensorflow/lite/kernels/sub_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_
#include <stdint.h>
#include <algorithm>
#include <cstddef>
#include <limits>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <class T>
struct SubImpl {
template <class F>
static void BroadcastInput1(const ArithmeticParams& params,
const T* input1_data, const T* input2_data,
T* output_data, size_t size, F binary_func) {
for (size_t c = 0; c < size; ++c) {
output_data[c] = binary_func(input1_data[0], input2_data[c], params);
}
}
template <class F>
static void BroadcastInput2(const ArithmeticParams& params,
const T* input1_data, const T* input2_data,
T* output_data, size_t size, F binary_func) {
for (size_t c = 0; c < size; ++c) {
output_data[c] = binary_func(input1_data[c], input2_data[0], params);
}
}
template <class F>
static void ElementWise(const ArithmeticParams& params, const T* input1_data,
const T* input2_data, T* output_data, size_t size,
F binary_func) {
for (size_t c = 0; c < size; ++c) {
output_data[c] = binary_func(input1_data[c], input2_data[c], params);
}
}
};
template <>
struct SubImpl<int32_t> {
template <class F>
static void BroadcastInput1(const ArithmeticParams& params,
const int32_t* input1_data,
const int32_t* input2_data, int32_t* output_data,
size_t size, F binary_func) {
size_t c = 0;
int32_t activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
#ifdef USE_NEON
const int32x4_t vmax = vdupq_n_s32(activation_max);
const int32x4_t vmin = vdupq_n_s32(activation_min);
const int32x4_t va = vdupq_n_s32(input1_data[0]);
for (; c + 4 <= size; c += 4) {
const int32x4_t vb = vld1q_s32(&input2_data[c]);
int32x4_t vres = vsubq_s32(va, vb);
vres = vmaxq_s32(vmin, vres);
vres = vminq_s32(vmax, vres);
vst1q_s32(&output_data[c], vres);
}
#endif
for (; c < size; ++c) {
output_data[c] = binary_func(input1_data[0], input2_data[c], params);
}
}
template <class F>
static void BroadcastInput2(const ArithmeticParams& params,
const int32_t* input1_data,
const int32_t* input2_data, int32_t* output_data,
size_t size, F binary_func) {
size_t c = 0;
int32_t activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
#ifdef USE_NEON
const int32x4_t vmax = vdupq_n_s32(activation_max);
const int32x4_t vmin = vdupq_n_s32(activation_min);
const int32x4_t vb = vdupq_n_s32(input2_data[0]);
for (; c + 4 <= size; c += 4) {
const int32x4_t va = vld1q_s32(&input1_data[c]);
int32x4_t vres = vsubq_s32(va, vb);
vres = vmaxq_s32(vmin, vres);
vres = vminq_s32(vmax, vres);
vst1q_s32(&output_data[c], vres);
}
#endif
for (; c < size; ++c) {
output_data[c] = binary_func(input1_data[c], input2_data[0], params);
}
}
template <class F>
static void ElementWise(const ArithmeticParams& params,
const int32_t* input1_data,
const int32_t* input2_data, int32_t* output_data,
size_t size, F binary_func) {
size_t c = 0;
int32_t activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
#ifdef USE_NEON
int32x4_t vmax = vdupq_n_s32(activation_max);
int32x4_t vmin = vdupq_n_s32(activation_min);
for (; c + 4 <= size; c += 4) {
const int32x4_t va = vld1q_s32(&input1_data[c]);
const int32x4_t vb = vld1q_s32(&input2_data[c]);
int32x4_t vres = vsubq_s32(va, vb);
vres = vmaxq_s32(vmin, vres);
vres = vminq_s32(vmax, vres);
vst1q_s32(&output_data[c], vres);
}
#endif
for (; c < size; ++c) {
output_data[c] = binary_func(input1_data[c], input2_data[c], params);
}
}
};
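// Recursively walks the broadcast-compressed dimensions from outermost to
// innermost; at dimension 0 it dispatches to the scalar-broadcast or
// elementwise inner loops above.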
template <typename T, typename F>
inline void BroadcastSubRecursiveDimensions(
int dimension, const ArithmeticParams& params, const T* input1_data,
const T* input2_data, T* output_data, size_t* input1_offset_p,
size_t* input2_offset_p, size_t* output_offset,
size_t* compressed_input1_stride, size_t* compressed_input2_stride,
size_t* compressed_output_shape, F binary_func) {
if (dimension > 0) {
for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) {
size_t input1_offset_c = *input1_offset_p;
size_t input2_offset_c = *input2_offset_p;
BroadcastSubRecursiveDimensions(
dimension - 1, params, input1_data, input2_data, output_data,
&input1_offset_c, &input2_offset_c, output_offset,
compressed_input1_stride, compressed_input2_stride,
compressed_output_shape, binary_func);
*input1_offset_p += compressed_input1_stride[dimension];
*input2_offset_p += compressed_input2_stride[dimension];
}
} else {
TFLITE_DCHECK(dimension == 0);
bool input1_is_broadcast = compressed_input1_stride[dimension] == 0;
bool input2_is_broadcast = compressed_input2_stride[dimension] == 0;
TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast));
const T* input1_data_ptr = input1_data + *input1_offset_p;
const T* input2_data_ptr = input2_data + *input2_offset_p;
T* output_data_ptr = output_data + *output_offset;
if (input1_is_broadcast) {
SubImpl<T>::BroadcastInput1(
params, input1_data_ptr, input2_data_ptr, output_data_ptr,
compressed_output_shape[dimension], binary_func);
*input2_offset_p += compressed_output_shape[dimension];
} else if (input2_is_broadcast) {
SubImpl<T>::BroadcastInput2(
params, input1_data_ptr, input2_data_ptr, output_data_ptr,
compressed_output_shape[dimension], binary_func);
*input1_offset_p += compressed_output_shape[dimension];
} else {
SubImpl<T>::ElementWise(params, input1_data_ptr, input2_data_ptr,
output_data_ptr,
compressed_output_shape[dimension], binary_func);
*input1_offset_p += compressed_output_shape[dimension];
*input2_offset_p += compressed_output_shape[dimension];
}
*output_offset += compressed_output_shape[dimension];
}
}
template <typename T, typename F>
inline void BroadcastSubCommon(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const T* input1_data,
const RuntimeShape& input2_shape,
const T* input2_data,
const RuntimeShape& output_shape, T* output_data,
F binary_func) {
constexpr int kMaxBroadcastDim = 6;
TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), kMaxBroadcastDim);
TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), kMaxBroadcastDim);
TFLITE_DCHECK_LE(output_shape.DimensionsCount(), kMaxBroadcastDim);
size_t compressed_input1_stride[kMaxBroadcastDim];
size_t compressed_input2_stride[kMaxBroadcastDim];
size_t compressed_output_shape[kMaxBroadcastDim];
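  // Collapse the shapes into per-dimension strides and an output shape; a
  // compressed stride of 0 marks an input dimension that is broadcast.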
bool broadcastable_shape = ReduceDimensionsForBroadcast<kMaxBroadcastDim>(
input1_shape, input2_shape, compressed_input1_stride,
compressed_input2_stride, compressed_output_shape);
if (!broadcastable_shape) {
return;
}
size_t input1_offset = 0;
size_t input2_offset = 0;
size_t output_offset = 0;
BroadcastSubRecursiveDimensions(
kMaxBroadcastDim - 1, params, input1_data, input2_data, output_data,
&input1_offset, &input2_offset, &output_offset, compressed_input1_stride,
compressed_input2_stride, compressed_output_shape, binary_func);
}
template <typename T>
void BroadcastSubSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("BroadcastSubSlow/T");
BroadcastSubCommon<T>(
params, input1_shape, input1_data, input2_shape, input2_data,
output_shape, output_data,
[](T input1_val, T input2_val, const ArithmeticParams& params) {
T activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
return ActivationFunctionWithMinMax(input1_val - input2_val,
activation_min, activation_max);
});
}
inline void BroadcastSub16POTSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const int16_t* input1_data,
const RuntimeShape& input2_shape,
const int16_t* input2_data,
const RuntimeShape& output_shape,
int16_t* output_data) {
ruy::profiler::ScopeLabel label("BroadcastSub16POTSlow/int16_t");
BroadcastSubCommon<int16_t>(
params, input1_shape, input1_data, input2_shape, input2_data,
output_shape, output_data,
[](int16_t input1_val, int16_t input2_val,
const ArithmeticParams& params) {
const int32_t scaled_input1_val =
gemmlowp::RoundingDivideByPOT(input1_val, -params.input1_shift);
const int32_t scaled_input2_val =
gemmlowp::RoundingDivideByPOT(input2_val, -params.input2_shift);
const int32_t raw_output = scaled_input1_val - scaled_input2_val;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
return static_cast<int16_t>(clamped_output);
});
}
template <typename T>
void BroadcastQuantSubSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const T* input1_data,
const RuntimeShape& input2_shape,
const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("BroadcastQuantSubSlow/T");
BroadcastSubCommon<T>(
params, input1_shape, input1_data, input2_shape, input2_data,
output_shape, output_data,
[](T input1_val, T input2_val, const ArithmeticParams& params) {
const int32_t shifted_input1_val =
(params.input1_offset + input1_val) * (1 << params.left_shift);
const int32_t shifted_input2_val =
(params.input2_offset + input2_val) * (1 << params.left_shift);
const int32_t scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input1_val, params.input1_multiplier,
params.input1_shift);
const int32_t scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input2_val, params.input2_multiplier,
params.input2_shift);
const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
raw_sub, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
return static_cast<T>(clamped_output);
});
}
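// Quantized elementwise subtraction: both inputs are shifted into a common
// higher-precision domain via left_shift and the per-input multipliers, then
// the difference is rescaled by the output multiplier, offset, and clamped.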
template <typename T>
inline void SubElementwise(int size, const ArithmeticParams& params,
const T* input1_data, const T* input2_data,
T* output_data) {
for (int i = 0; i < size; ++i) {
const int32_t input1_val = params.input1_offset + input1_data[i];
const int32_t input2_val = params.input2_offset + input2_data[i];
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
const int32_t scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input1_val, params.input1_multiplier, params.input1_shift);
const int32_t scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input2_val, params.input2_multiplier, params.input2_shift);
const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
raw_sub, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<T>(clamped_output);
}
}
inline void Sub(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const uint8_t* input1_data,
const RuntimeShape& input2_shape, const uint8_t* input2_data,
const RuntimeShape& output_shape, uint8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
TFLITE_DCHECK_GT(params.input1_offset, -256);
TFLITE_DCHECK_GT(params.input2_offset, -256);
TFLITE_DCHECK_LT(params.input1_offset, 256);
TFLITE_DCHECK_LT(params.input2_offset, 256);
SubElementwise(flat_size, params, input1_data, input2_data, output_data);
}
inline void Sub(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int8_t* input1_data,
const RuntimeShape& input2_shape, const int8_t* input2_data,
const RuntimeShape& output_shape, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
TFLITE_DCHECK_GE(params.input1_offset, -128);
TFLITE_DCHECK_GE(params.input2_offset, -128);
TFLITE_DCHECK_LE(params.input1_offset, 128);
TFLITE_DCHECK_LE(params.input2_offset, 128);
SubElementwise(flat_size, params, input1_data, input2_data, output_data);
}
inline void Sub(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int16_t* input1_data,
const RuntimeShape& input2_shape, const int16_t* input2_data,
const RuntimeShape& output_shape, int16_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
TFLITE_DCHECK_EQ(params.input1_offset, 0);
TFLITE_DCHECK_EQ(params.input2_offset, 0);
SubElementwise(flat_size, params, input1_data, input2_data, output_data);
}
template <typename T>
void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape,
T* output_data) {
BroadcastSubCommon<T>(
params, input1_shape, input1_data, input2_shape, input2_data,
output_shape, output_data,
[](T input1_val, T input2_val, const ArithmeticParams& params) {
return input1_val - input2_val;
});
}
inline void SetActivationMinMax(const ArithmeticParams& params,
int32_t* activation_min,
int32_t* activation_max) {
*activation_min = params.quantized_activation_min;
*activation_max = params.quantized_activation_max;
}
inline void SetActivationMinMax(const ArithmeticParams& params,
float* activation_min, float* activation_max) {
*activation_min = params.float_activation_min;
*activation_max = params.float_activation_max;
}
inline void SetActivationMinMax(const ArithmeticParams& params,
int64_t* activation_min,
int64_t* activation_max) {
*activation_min = params.int64_activation_min;
*activation_max = params.int64_activation_max;
}
template <typename T>
inline void SubWithActivation(
const ArithmeticParams& params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("SubWithActivation");
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
T activation_min, activation_max;
SetActivationMinMax(params, &activation_min, &activation_max);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
input1_data[i] - input2_data[i], activation_min, activation_max);
}
}
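// Minimal usage sketch (editorial comment only; shapes and bounds are
// hypothetical): a float subtraction with a ReLU clamp could be invoked as
//   ArithmeticParams p;
//   p.float_activation_min = 0.0f;                           // ReLU lower bound
//   p.float_activation_max = std::numeric_limits<float>::max();
//   RuntimeShape shape({1, 2, 2, 1});
//   SubWithActivation<float>(p, shape, in1, shape, in2, shape, out);
// SetActivationMinMax selects the float_activation_* fields because T = float.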
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/sub.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/sub.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/add.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace sub {
enum KernelType {
kReference,
kGenericOptimized,
kNeonOptimized,
};
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
int input1_shift;
int input2_shift;
int32 output_activation_min;
int32 output_activation_max;
int32 input1_multiplier;
int32 input2_multiplier;
int32 output_multiplier;
int output_shift;
int left_shift;
int32 input1_offset;
int32 input2_offset;
int32 output_offset;
bool pot_scale_int16;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus PrepareGeneralSubOp(TfLiteContext* context,
const TfLiteTensor* input_1,
const TfLiteTensor* input_2,
TfLiteTensor* output, TfLiteSubParams* params,
OpData* op_params) {
TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16);
const auto& input1_quantization_params = input_1->params;
const auto& input2_quantization_params = input_2->params;
const auto& output_quantization_params = output->params;
int32_t integer_type_min = 0;
int32_t integer_type_max = 0;
if (output->type == kTfLiteUInt8) {
integer_type_min = std::numeric_limits<uint8_t>::min();
integer_type_max = std::numeric_limits<uint8_t>::max();
} else if (output->type == kTfLiteInt16) {
integer_type_min = std::numeric_limits<int16_t>::min();
integer_type_max = std::numeric_limits<int16_t>::max();
} else {
integer_type_min = std::numeric_limits<int8_t>::min();
integer_type_max = std::numeric_limits<int8_t>::max();
}
TF_LITE_ENSURE(context,
input1_quantization_params.zero_point >= integer_type_min);
TF_LITE_ENSURE(context,
input1_quantization_params.zero_point <= integer_type_max);
TF_LITE_ENSURE(context,
input2_quantization_params.zero_point >= integer_type_min);
TF_LITE_ENSURE(context,
input2_quantization_params.zero_point <= integer_type_max);
TF_LITE_ENSURE(context,
output_quantization_params.zero_point >= integer_type_min);
TF_LITE_ENSURE(context,
output_quantization_params.zero_point <= integer_type_max);
op_params->input1_offset = -input1_quantization_params.zero_point;
op_params->input2_offset = -input2_quantization_params.zero_point;
op_params->output_offset = output_quantization_params.zero_point;
op_params->left_shift = output->type == kTfLiteInt16 ? 15 : 20;
const double twice_max_input_scale =
2 * std::max(input1_quantization_params.scale,
input2_quantization_params.scale);
const double real_input1_multiplier =
input1_quantization_params.scale / twice_max_input_scale;
const double real_input2_multiplier =
input2_quantization_params.scale / twice_max_input_scale;
const double real_output_multiplier =
twice_max_input_scale /
((1 << op_params->left_shift) * output_quantization_params.scale);
tflite::QuantizeMultiplierSmallerThanOneExp(real_input1_multiplier,
&op_params->input1_multiplier,
&op_params->input1_shift);
tflite::QuantizeMultiplierSmallerThanOneExp(real_input2_multiplier,
&op_params->input2_multiplier,
&op_params->input2_shift);
if (real_output_multiplier > 1) {
tflite::QuantizeMultiplierGreaterThanOne(real_output_multiplier,
&op_params->output_multiplier,
&op_params->output_shift);
} else {
tflite::QuantizeMultiplierSmallerThanOneExp(real_output_multiplier,
&op_params->output_multiplier,
&op_params->output_shift);
}
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &op_params->output_activation_min,
&op_params->output_activation_max));
return kTfLiteOk;
}
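// Illustrative derivation (editorial comment, assumed numbers): with int8
// input scales s1 = 0.02 and s2 = 0.01 and output scale 0.02, the code above
// computes
//   twice_max_input_scale  = 2 * max(s1, s2)             = 0.04
//   real_input1_multiplier = s1 / twice_max_input_scale  = 0.5
//   real_input2_multiplier = s2 / twice_max_input_scale  = 0.25
//   real_output_multiplier = 0.04 / ((1 << 20) * 0.02)   ~= 1.9e-6
// Each real multiplier is then encoded as a 32-bit fixed-point multiplier
// plus a shift, so the runtime path in reference_ops::SubElementwise uses
// only integer multiplies and shifts.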
TfLiteStatus PrepareInt16SubOpPOT(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
TfLiteTensor* output, TfLiteSubParams* params,
OpData* data) {
TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
int input1_scale_log2_rounded;
bool input1_scale_is_pot =
CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
TF_LITE_ENSURE(context, input1_scale_is_pot);
int input2_scale_log2_rounded;
bool input2_scale_is_pot =
CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
TF_LITE_ENSURE(context, input2_scale_is_pot);
int output_scale_log2_rounded;
bool output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);
TF_LITE_ENSURE(context, output_scale_is_pot);
data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded;
data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded;
TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0);
TF_LITE_ENSURE(context, data->input1_shift <= 0);
TF_LITE_ENSURE(context, data->input2_shift <= 0);
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
return kTfLiteOk;
}
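// Example of the power-of-two path (editorial comment, hypothetical scales):
// if input1 scale = 2^-12, input2 scale = 2^-10 and output scale = 2^-10,
// CheckedLog2 returns -12, -10 and -10, so input1_shift = -2 and
// input2_shift = 0: input1 is effectively shifted right by 2 to land on the
// output scale while input2 passes through unchanged. Scales that are not
// powers of two make CheckedLog2 fail, and Prepare falls back to the general
// int16 path handled by PrepareGeneralSubOp.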
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
bool general_scale_int16 = false;
bool input1_scale_is_pot = false;
bool input2_scale_is_pot = false;
bool output_scale_is_pot = false;
int input1_scale_log2_rounded{0};
int input2_scale_log2_rounded{0};
int output_scale_log2_rounded{0};
if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 &&
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
general_scale_int16 = !params || !params->pot_scale_int16;
if (!general_scale_int16) {
input1_scale_is_pot =
CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
input2_scale_is_pot =
CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);
general_scale_int16 =
!input1_scale_ | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Sub, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
TEST(Sub, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SUB, xnnpack_delegate.get());
}
}
} |
881 | cpp | tensorflow/tensorflow | space_to_depth | tensorflow/lite/kernels/space_to_depth.cc | tensorflow/lite/kernels/space_to_depth_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_SPACE_TO_DEPTH_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_SPACE_TO_DEPTH_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewSpaceToDepthNodeShader();
std::unique_ptr<NodeShader> NewDepthToSpaceNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class SpaceToDepth : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const SpaceToDepthAttributes&>(ctx.op_attr);
std::string code = R"(
for (int i = 0; i < 4; ++i) {
int dst_c = 4 * gid.z + i;
int block_id = dst_c / $input_data_0_c$;
int src_x = gid.x * $block_size$ + block_id % $block_size$;
int src_y = gid.y * $block_size$ + block_id / $block_size$;
int src_c = dst_c % $input_data_0_c$;
value_0[i] = $input_data_0[src_x, src_y, src_c / 4]$[src_c % 4];
}
)";
*generated_code = {
{
{"block_size", attr.block_size},
{"input_data_0_c", static_cast<int>(ctx.input_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
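// Index-mapping example for the shader above (editorial comment, block_size =
// 2, input HWC = 2x2x1, output HWC = 1x1x4): at gid = (0, 0, 0) the loop
// visits dst_c = 0..3 with block_id = dst_c, giving
//   dst_c = 0 -> (src_x, src_y, src_c) = (0, 0, 0)
//   dst_c = 1 -> (1, 0, 0)
//   dst_c = 2 -> (0, 1, 0)
//   dst_c = 3 -> (1, 1, 0)
// i.e. the 2x2 spatial block is packed row-major into the channel axis, which
// is what the TensorShape1x2x2x1BlockSize2 unit test below verifies.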
class DepthToSpace : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const SpaceToDepthAttributes&>(ctx.op_attr);
std::string code = R"(
for (int i = 0; i < 4; ++i) {
int dst_c = 4 * gid.z + i;
int block_x = gid.x % $block_size$;
int src_x = gid.x / $block_size$;
int block_y = gid.y % $block_size$;
int src_y = gid.y / $block_size$;
int block_id = block_y * $block_size$ + block_x;
int src_c = block_id * $output_channels$ + dst_c;
value_0[i] = $input_data_0[src_x, src_y, src_c / 4]$[src_c % 4];
}
)";
*generated_code = {
{
{"block_size", attr.block_size},
{"output_channels", static_cast<int>(ctx.output_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
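// Inverse mapping example (editorial comment, block_size = 2, single output
// channel): for output position gid = (1, 1, 0) and i = 0, dst_c = 0,
// block_x = block_y = 1, src_x = src_y = 0, block_id = 3 and
// src_c = 3 * $output_channels$ + 0 = 3, so the value is read from channel 3
// of the single input pixel, exactly undoing the SpaceToDepth packing above.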
}
std::unique_ptr<NodeShader> NewSpaceToDepthNodeShader() {
return std::make_unique<SpaceToDepth>();
}
std::unique_ptr<NodeShader> NewDepthToSpaceNodeShader() {
return std::make_unique<DepthToSpace>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(SpaceToDepthTest, TensorShape1x2x2x1BlockSize2) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 2, 1), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 1, 4), .ref = 1};
const SpaceToDepthAttributes attr = {.block_size = 2};
SingleOpModel model({ToString(OperationType::SPACE_TO_DEPTH), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f}));
ASSERT_OK(model.Invoke(*NewSpaceToDepthNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 4.0f}));
}
TEST(SpaceToDepthTest, TensorShape1x2x2x2BlockSize2) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 2, 2), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 1, 8), .ref = 1};
const SpaceToDepthAttributes attr = {.block_size = 2};
SingleOpModel model({ToString(OperationType::SPACE_TO_DEPTH), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(
0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}));
ASSERT_OK(model.Invoke(*NewSpaceToDepthNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}));
}
TEST(SpaceToDepthTest, TensorShape1x2x2x3BlockSize2) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 2, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 1, 12), .ref = 1};
const SpaceToDepthAttributes attr = {.block_size = 2};
SingleOpModel model({ToString(OperationType::SPACE_TO_DEPTH), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f,
4.0f, 5.0f, 6.0f,
7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f}));
ASSERT_OK(model.Invoke(*NewSpaceToDepthNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}));
}
TEST(SpaceToDepthTest, TensorShape1x4x4x1BlockSize2) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 4, 4, 1), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 2, 4), .ref = 1};
const SpaceToDepthAttributes attr = {.block_size = 2};
SingleOpModel model({ToString(OperationType::SPACE_TO_DEPTH), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 5.0, 6.0,
3.0, 4.0, 7.0, 8.0,
9.0, 10.0, 13.0, 14.0,
11.0, 12.0, 15.0, 16.0}));
ASSERT_OK(model.Invoke(*NewSpaceToDepthNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0}));
}
}
}
}
} |
882 | cpp | tensorflow/tensorflow | resize_bilinear | tensorflow/lite/kernels/resize_bilinear.cc | tensorflow/lite/kernels/internal/resize_bilinear_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void ComputeInterpolationValues(const float value, const float scale,
const bool half_pixel_centers,
int32_t input_size, float* scaled_value,
int32_t* lower_bound,
int32_t* upper_bound) {
if (half_pixel_centers) {
*scaled_value = (value + 0.5f) * scale - 0.5f;
} else {
*scaled_value = value * scale;
}
float scaled_value_floor = std::floor(*scaled_value);
*lower_bound = std::max(static_cast<int32_t>(scaled_value_floor),
static_cast<int32_t>(0));
*upper_bound =
std::min(static_cast<int32_t>(std::ceil(*scaled_value)), input_size - 1);
}
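// Worked example (editorial comment, assumed sizes): resizing a width-4 input
// to width 8 gives scale = 4 / 8 = 0.5. For output x = 3 with
// half_pixel_centers the code computes
//   scaled_value = (3 + 0.5) * 0.5 - 0.5 = 1.25
//   lower_bound  = max(floor(1.25), 0)   = 1
//   upper_bound  = min(ceil(1.25), 3)    = 2
// so output pixel 3 interpolates between input pixels 1 and 2 with a 0.25
// fractional weight toward pixel 2; without half_pixel_centers the same pixel
// maps to scaled_value = 1.5.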
template <typename T>
inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
const RuntimeShape& unextended_input_shape,
const T* input_data,
const RuntimeShape& unextended_output_size_shape,
const int32_t* output_size_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_size_shape =
RuntimeShape::ExtendedShape(4, unextended_output_size_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
int32_t input_height = input_shape.Dims(1);
int32_t input_width = input_shape.Dims(2);
int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1);
TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1);
TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1);
TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2);
int32_t output_height =
output_size_data[Offset(output_size_shape, 0, 0, 0, 0)];
int32_t output_width =
output_size_data[Offset(output_size_shape, 0, 0, 0, 1)];
float height_scale = static_cast<float>(input_height) / output_height;
float width_scale = static_cast<float>(input_width) / output_width;
if (op_params.align_corners && output_height > 1) {
height_scale = static_cast<float>(input_height - 1) / (output_height - 1);
}
if (op_params.align_corners && output_width > 1) {
width_scale = static_cast<float>(input_width - 1) / (output_width - 1);
}
const float rounding_offset = std::numeric_limits<T>::is_integer ? .5f : .0f;
for (int b = 0; b < batches; ++b) {
for (int y = 0; y < output_height; ++y) {
float input_y;
int32_t y0, y1;
ComputeInterpolationValues(y, height_scale, op_params.half_pixel_centers,
input_height, &input_y, &y0, &y1);
for (int x = 0; x < output_width; ++x) {
float input_x;
int32_t x0, x1;
ComputeInterpolationValues(x, width_scale, op_params.half_pixel_centers,
input_width, &input_x, &x0, &x1);
for (int c = 0; c < depth; ++c) {
T interpolation =
static_cast<T>(input_data[Offset(input_shape, b, y0, x0, c)] *
(1 - (input_y - y0)) * (1 - (input_x - x0)) +
input_data[Offset(input_shape, b, y1, x0, c)] *
(input_y - y0) * (1 - (input_x - x0)) +
input_data[Offset(input_shape, b, y0, x1, c)] *
(1 - (input_y - y0)) * (input_x - x0) +
input_data[Offset(input_shape, b, y1, x1, c)] *
(input_y - y0) * (input_x - x0) +
rounding_offset);
output_data[Offset(output_shape, b, y, x, c)] = interpolation;
}
}
}
}
}
inline void ComputeInterpolationValuesInteger(
const int32_t value, const int32_t scale_10, const bool half_pixel_centers,
int32_t input_size, int32_t* scaled_value, int32_t* lower_bound,
int32_t* upper_bound) {
if (half_pixel_centers) {
*scaled_value = value * scale_10 + scale_10 / 2 - (1 << 9);
} else {
*scaled_value = value * scale_10;
}
constexpr int32_t zero = 0;
*lower_bound = std::max(*scaled_value / (1 << 10), zero);
*upper_bound =
std::min((*scaled_value + (1 << 10) - 1) / (1 << 10), input_size - 1);
}
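// Fixed-point counterpart (editorial comment, same assumed sizes as the float
// example above): scale_10 is the resize scale in Q10, so for input_size = 4
// and an output size of 8 the caller passes scale_10 = (1024 * 4 + 4) / 8 = 512.
// For value = 3 with half_pixel_centers:
//   scaled_value = 3 * 512 + 256 - 512          = 1280   (1.25 in Q10)
//   lower_bound  = max(1280 / 1024, 0)          = 1
//   upper_bound  = min((1280 + 1023) / 1024, 3) = 2
// matching the floating-point path while using only integer arithmetic.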
template <typename T>
inline void ResizeBilinearInteger(
const tflite::ResizeBilinearParams& op_params,
const RuntimeShape& unextended_input_shape, const T* input_data,
const RuntimeShape& unextended_output_size_shape,
const int32_t* output_size_data,
const RuntimeShape& unextended_output_shape, T* output_data) {
TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_size_shape =
RuntimeShape::ExtendedShape(4, unextended_output_size_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
const int32_t input_height = input_shape.Dims(1);
const int32_t input_width = input_shape.Dims(2);
const int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1);
TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1);
TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1);
TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2);
const int32_t output_height =
output_size_data[Offset(output_size_shape, 0, 0, 0, 0)];
const int32_t output_width =
output_size_data[Offset(output_size_shape, 0, 0, 0, 1)];
int32_t height_scale_10 =
((1 << 10) * input_height + output_height / 2) / output_height;
int32_t width_scale_10 =
((1 << 10) * input_width + output_width / 2) / output_width;
if (op_params.align_corners && output_height > 1) {
height_scale_10 =
((1 << 10) * (input_height - 1) + (output_height - 1) / 2) /
(output_height - 1);
}
if (op_params.align_corners && output_width > 1) {
width_scale_10 = ((1 << 10) * (input_width - 1) + (output_width - 1) / 2) /
(output_width - 1);
}
for (int b = 0; b < batches; ++b) {
for (int y = 0; y < output_height; ++y) {
int32_t input_y, y0, y1;
ComputeInterpolationValuesInteger(y, height_scale_10,
op_params.half_pixel_centers,
input_height, &input_y, &y0, &y1);
for (int x = 0; x < output_width; ++x) {
int32_t input_x, x0, x1;
ComputeInterpolationValuesInteger(x, width_scale_10,
op_params.half_pixel_centers,
input_width, &input_x, &x0, &x1);
for (int c = 0; c < depth; ++c) {
const int64_t output_20_ll =
static_cast<int64_t>(
input_data[Offset(input_shape, b, y0, x0, c)]) *
((1 << 10) - (input_y - (1 << 10) * y0)) *
((1 << 10) - (input_x - (1 << 10) * x0));
const int64_t output_20_lu =
static_cast<int64_t>(
input_data[Offset(input_shape, b, y1, x0, c)]) *
(input_y - (1 << 10) * y0) *
((1 << 10) - (input_x - (1 << 10) * x0));
const int64_t output_20_rl =
static_cast<int64_t>(
input_data[Offset(input_shape, b, y0, x1, c)]) *
((1 << 10) - (input_y - (1 << 10) * y0)) *
(input_x - (1 << 10) * x0);
const int64_t output_20_ru =
static_cast<int64_t>(
input_data[Offset(input_shape, b, y1, x1, c)]) *
(input_y - (1 << 10) * y0) * (input_x - (1 << 10) * x0);
const int64_t output_20 =
output_20_ll + output_20_lu + output_20_rl + output_20_ru;
#if TFLITE_SINGLE_ROUNDING
const int64_t round = 1 << 19;
const T interpolation = static_cast<T>((output_20 + round) >> 20);
#else
const int64_t round = (output_20 > 0) ? (1 << 19) : -(1 << 19);
const T interpolation =
static_cast<T>((output_20 + round) / (1 << 20));
#endif
output_data[Offset(output_shape, b, y, x, c)] = interpolation;
}
}
}
}
}
}
}
#endif
#include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/resize_bilinear.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace resize_bilinear {
enum KernelType {
kReference,
kOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kSizeTensor = 1;
constexpr int kOutputTensor = 0;
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* size,
TfLiteTensor* output) {
const int32* size_data = GetTensorData<int32>(size);
TF_LITE_ENSURE(context, size_data[0] > 0);
TF_LITE_ENSURE(context, size_data[1] > 0);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = size_data[0];
output_size->data[2] = size_data[1];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
output->type = input->type;
if (!IsConstantOrPersistentTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (params->half_pixel_centers && params->align_corners) {
TF_LITE_KERNEL_LOG(
context, "If half_pixel_centers is True, align_corners must be False.");
return kTfLiteError;
}
return ResizeOutputTensor(context, input, size, output);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, input, size, output));
}
if (output->type == kTfLiteFloat32) {
#define TF_LITE_RESIZE_BILINEAR(type, opname, datatype) \
tflite::ResizeBilinearParams op_params; \
op_params.align_corners = params->align_corners; \
op_params.half_pixel_centers = params->half_pixel_centers; \
type::opname(op_params, GetTensorShape(input), \
GetTensorData<datatype>(input), GetTensorShape(size), \
GetTensorData<int32>(size), GetTensorShape(output), \
GetTensorData<datatype>(output))
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, float);
} else if (kernel_type == kOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, float);
}
} else if (output->type == kTfLiteUInt8) {
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, uint8_t);
} else if (kernel_type == kOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, uint8_t);
}
} else if (output->type == kTfLiteInt8) {
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int8_t);
} else if (kernel_type == kOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, int8_t);
}
} else if (output->type == kTfLiteInt16) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
#undef TF_LITE_RESIZE_BILINEAR
} else {
    TF_LITE_KERNEL_LOG(context,
                       "Output type is %d, requires float, uint8, int8 or "
                       "int16.",
                       output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RESIZE_BILINEAR_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_bilinear::Prepare,
resize_bilinear::Eval<resize_bilinear::kReference>};
return &r;
}
TfLiteRegistration* Register_RESIZE_BILINEAR() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_bilinear::Prepare,
resize_bilinear::Eval<resize_bilinear::kOptimized>};
return &r;
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
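// Each test below creates an XNNPack delegate, draws random spatial sizes and
// channel counts, and runs ResizeBilinearTester with the chosen resize
// options against that delegate.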
TEST(ResizeBilinear, AlignCenters) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.HalfPixelCenters(true)
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, AlignCentersTF1X) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, AlignCorners) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.AlignCorners(true)
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, TransientIndirectionBuffer) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
delegate_options.flags |=
TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
}
} |
883 | cpp | tensorflow/tensorflow | elementwise | tensorflow/lite/kernels/elementwise.cc | tensorflow/lite/kernels/elementwise_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_ELEMENTWISE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_ELEMENTWISE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewElementwiseNodeShader(
OperationType operation_type);
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/elementwise.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
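// Generates the GLSL snippet for unary elementwise operations. Each case
// rewrites value_0 in place; with IOStructure::AUTO the GL code generator
// wires value_0 to the node's input and output tensors.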
class ElementwiseOneArgument : public NodeShader {
public:
explicit ElementwiseOneArgument(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::string source;
switch (operation_type_) {
case OperationType::ABS:
source = "value_0 = abs(value_0);";
break;
case OperationType::COS:
source = "value_0 = cos(value_0);";
break;
case OperationType::COPY:
source = "value_0 = value_0;";
break;
case OperationType::ELU:
source = R"(
value_0.x = value_0.x < 0.0 ? exp(value_0.x) - 1.0 : value_0.x;
value_0.y = value_0.y < 0.0 ? exp(value_0.y) - 1.0 : value_0.y;
value_0.z = value_0.z < 0.0 ? exp(value_0.z) - 1.0 : value_0.z;
value_0.w = value_0.w < 0.0 ? exp(value_0.w) - 1.0 : value_0.w;
)";
break;
case OperationType::EXP:
source = "value_0 = exp(value_0);";
break;
case tflite::gpu::OperationType::FLOOR:
source = "value_0 = floor(value_0);";
break;
case tflite::gpu::OperationType::GELU:
source =
"value_0 = 0.5 * value_0 * (1.0 + tanh(0.7978845608 * (value_0 + "
"0.044715 * value_0 * value_0 * value_0)));";
break;
case OperationType::HARD_SWISH:
source =
"value_0 *= clamp(value_0 / 6.0 + vec4(0.5), vec4(0.0), "
"vec4(1.0));";
break;
case OperationType::LOG:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x > 0.0 ? log(value_0.x) : nan;
value_0.y = value_0.y > 0.0 ? log(value_0.y) : nan;
value_0.z = value_0.z > 0.0 ? log(value_0.z) : nan;
value_0.w = value_0.w > 0.0 ? log(value_0.w) : nan;
)";
break;
case OperationType::NEG:
source = "value_0 = -(value_0);";
break;
case OperationType::RSQRT:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x > 0.0 ? 1.0 / sqrt(value_0.x) : nan;
value_0.y = value_0.y > 0.0 ? 1.0 / sqrt(value_0.y) : nan;
value_0.z = value_0.z > 0.0 ? 1.0 / sqrt(value_0.z) : nan;
value_0.w = value_0.w > 0.0 ? 1.0 / sqrt(value_0.w) : nan;
)";
break;
case OperationType::SIGMOID:
source = "value_0 = 1.0 / (1.0 + exp(-1.0 * value_0));";
break;
case OperationType::SIN:
source = "value_0 = sin(value_0);";
break;
case OperationType::SQRT:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x >= 0.0 ? sqrt(value_0.x) : nan;
value_0.y = value_0.y >= 0.0 ? sqrt(value_0.y) : nan;
value_0.z = value_0.z >= 0.0 ? sqrt(value_0.z) : nan;
value_0.w = value_0.w >= 0.0 ? sqrt(value_0.w) : nan;
)";
break;
case OperationType::SQUARE:
source = "value_0 = value_0 * value_0;";
break;
case OperationType::TANH:
source = "value_0 = tanh(value_0);";
break;
default:
return absl::InvalidArgumentError(
"Incorrect elementwise operation type.");
}
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
source,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
private:
OperationType operation_type_;
};
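// Handles binary elementwise operations. The second operand may be a runtime
// tensor of the same shape, a 1x1 spatial tensor broadcast over the first
// input, or a constant scalar / per-channel vector taken from the attributes
// (optionally swapped in via runtime_tensor_is_second).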
class ElementwiseTwoArguments : public NodeShader {
public:
explicit ElementwiseTwoArguments(OperationType operation_type)
: operation_type_(operation_type) {}
inline bool IsElementwiseSupported(const GenerationContext& ctx) const {
return ctx.input_shapes.size() == 2 &&
ctx.input_shapes[0] == ctx.input_shapes[1];
}
inline bool IsBroadcastSupported(const GenerationContext& ctx) const {
return ctx.input_shapes.size() == 2 && ctx.input_shapes[1][1] == 1 &&
ctx.input_shapes[1][2] == 1 &&
ctx.input_shapes[0][3] == ctx.input_shapes[1][3];
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::vector<Variable> parameters;
std::vector<std::pair<std::string, Object>> objects;
std::string argument0, argument1;
if (IsElementwiseSupported(ctx)) {
argument0 = "value_0";
argument1 = "value_1";
} else if (IsBroadcastSupported(ctx)) {
argument0 = "$input_data_0[gid.x, gid.y, gid.z]$";
argument1 = "$input_data_1[0, 0, gid.z]$";
} else {
const auto& attr =
std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
const auto* tensor =
std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
const auto* scalar = std::get_if<float>(&attr.param);
if (!tensor && !scalar) {
return absl::InvalidArgumentError(
    "Couldn't read scalar or const vector data from the attributes.");
}
argument0 = "value_0";
if (tensor) {
argument1 = "$const_data[gid.z]$";
objects.push_back({"const_data", MakeReadonlyObject(tensor->data)});
} else {
argument1 = "vec4($const_data$)";
parameters.push_back({"const_data", *scalar});
}
if (attr.runtime_tensor_is_second) {
argument0 = argument1;
argument1 = "value_0";
}
}
std::string source;
switch (operation_type_) {
case OperationType::DIV: {
source = "value_0 = $0/$1;";
break;
}
case tflite::gpu::OperationType::FLOOR_DIV:
source = "value_0 = floor($0 / $1);";
break;
case tflite::gpu::OperationType::FLOOR_MOD:
source = "value_0 = $0 - floor($0 / $1) * $1;";
break;
case OperationType::MAXIMUM: {
source = "value_0 = max($0, $1);";
break;
}
case OperationType::MINIMUM: {
source = "value_0 = min($0, $1);";
break;
}
case OperationType::SQUARED_DIFF: {
source = "value_0 = ($0 - $1) * ($0 - $1);";
break;
}
case OperationType::SUB: {
source = "value_0 = $0 - $1;";
break;
}
case OperationType::POW: {
source = "value_0 = pow($0, $1);";
break;
}
default:
return absl::InvalidArgumentError(
"Incorrect elementwise with scalar operation type.");
}
source = absl::Substitute(source, argument0, argument1);
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
uint3(),
source,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
private:
OperationType operation_type_;
};
}
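// Factory: unary operation types map to ElementwiseOneArgument, binary ones
// to ElementwiseTwoArguments; unsupported types yield nullptr.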
std::unique_ptr<NodeShader> NewElementwiseNodeShader(
OperationType operation_type) {
switch (operation_type) {
case OperationType::ABS:
case OperationType::COS:
case OperationType::COPY:
case OperationType::ELU:
case OperationType::EXP:
case OperationType::FLOOR:
case OperationType::GELU:
case OperationType::HARD_SWISH:
case OperationType::LOG:
case OperationType::NEG:
case OperationType::RSQRT:
case OperationType::SIGMOID:
case OperationType::SIN:
case OperationType::SQRT:
case OperationType::SQUARE:
case OperationType::TANH:
return std::make_unique<ElementwiseOneArgument>(operation_type);
case OperationType::DIV:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return std::make_unique<ElementwiseTwoArguments>(operation_type);
default:
return nullptr;
}
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/elementwise.h"
#include <cmath>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatEq;
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
TensorRef<BHWC> tensor_ref;
tensor_ref.type = DataType::FLOAT32;
tensor_ref.ref = ref;
tensor_ref.shape = shape;
return tensor_ref;
}
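// The tests below follow one pattern: build a SingleOpModel with the op type
// and attributes, populate the input tensors, invoke the generated shader,
// and compare the output element-wise within a small tolerance.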
TEST(ElementwiseOneArgumentTest, Abs) {
OperationType op_type = OperationType::ABS;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 6.2, 2.0, 4.0}));
}
TEST(ElementwiseOneArgumentTest, Cos) {
OperationType op_type = OperationType::COS;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 3.1415926, -3.1415926, 1}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, -1.0, -1.0, 0.540302}));
}
TEST(ElementwiseOneArgumentTest, Copy) {
OperationType op_type = OperationType::COPY;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatEq(), {0.0, -6.2, 2.0, 4.0}));
}
TEST(ElementwiseOneArgumentTest, Elu) {
OperationType op_type = OperationType::ELU;
const BHWC shape(1, 1, 1, 7);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(
0, {0.0f, 1.0f, -1.0f, 100.0f, -100.0f, 0.01f, -0.01f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0f, 1.0f, std::exp(-1.0f) - 1.0f,
100.0f, std::exp(-100.0f) - 1.0f,
0.01f, std::exp(-0.01f) - 1.0f}));
}
TEST(ElementwiseOneArgumentTest, Exp) {
OperationType op_type = OperationType::EXP;
const BHWC shape(1, 1, 1, 7);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(
0, {0.0f, 1.0f, -1.0f, 100.0f, -100.0f, 0.01f, -0.01f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{std::exp(0.0f), std::exp(1.0f), std::exp(-1.0f),
std::exp(100.0f), std::exp(-100.0f), std::exp(0.01f),
std::exp(-0.01f)}));
}
TEST(ElementwiseOneArgumentTest, Floor) {
OperationType op_type = OperationType::FLOOR;
const BHWC shape(1, 1, 1, 7);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(
model.PopulateTensor(0, {-4.5f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.5f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{-5.0f, -3.0f, -2.0f, 0.0f, 1.0f, 3.0f, 4.0f}));
}
TEST(ElementwiseOneArgumentTest, Gelu) {
OperationType op_type = OperationType::GELU;
const BHWC shape(1, 1, 1, 6);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0f, 1.0f, 3.0f, 1.0f, -1.0f, -2.0f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-5), {0.0f, 0.841192f, 2.99636f, 0.841192f,
-0.158808f, -0.0454023f}));
}
TEST(ElementwiseOneArgumentTest, HardSwish) {
OperationType op_type = OperationType::HARD_SWISH;
const BHWC shape(1, 1, 1, 7);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(
model.PopulateTensor(0, {-4.5f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.5f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6f),
{0.0f, 0.0f, -0.375f, 0.0f, 1.125f, 3.f, 4.5f}));
}
TEST(ElementwiseOneArgumentTest, Log) {
OperationType op_type = OperationType::LOG;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 3.1415926, 1.0, 1.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.14473, 0.0, 0.0}));
}
TEST(ElementwiseOneArgumentTest, Neg) {
OperationType op_type = OperationType::NEG;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, -3.1415926, 0.0, 1.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.0, 3.1415926, 0.0, -1.0}));
}
TEST(ElementwiseOneArgumentTest, Rsqrt) {
OperationType op_type = OperationType::RSQRT;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 4.0, 9.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 0.707106, 0.5, 0.333333}));
}
TEST(ElementwiseOneArgumentTest, Sigmoid) {
OperationType op_type = OperationType::SIGMOID;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.0, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.5, 0.002473, 0.880797, 0.982014}));
}
TEST(ElementwiseOneArgumentTest, Sin) {
OperationType op_type = OperationType::SIN;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 3.1415926, -3.1415926, 1.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 0.0, 0.0, 0.841471}));
}
TEST(ElementwiseOneArgumentTest, Sqrt) {
OperationType op_type = OperationType::SQRT;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 1.414213, 2.0}));
}
TEST(ElementwiseOneArgumentTest, Square) {
OperationType op_type = OperationType::SQUARE;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 0.5, -3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 4.0, 0.25, 9.0}));
}
TEST(ElementwiseOneArgumentTest, Tanh) {
OperationType op_type = OperationType::TANH;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), {}},
{GetTensorRef(0, shape)},
{GetTensorRef(1, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.0, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, -0.999987, 0.964027, 0.999329}));
}
TEST(ElementwiseTwoArgumentsTest, DivElementwise) {
OperationType op_type = OperationType::DIV;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape), GetTensorRef(1, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, -0.5, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, -3.1, -4.0, 1.0}));
}
TEST(ElementwiseTwoArgumentsTest, DivBroadcast) {
OperationType op_type = OperationType::DIV;
const BHWC shape0(1, 2, 1, 2);
const BHWC shape1(1, 1, 1, 2);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape0), GetTensorRef(1, shape1)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {0.5, 0.2}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 5.0, 4.0, 15.0}));
}
TEST(ElementwiseTwoArgumentsTest, DivScalar) {
OperationType op_type = OperationType::DIV;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
attr.param = static_cast<float>(0.5);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 2.0, 4.0, 6.0}));
}
TEST(ElementwiseTwoArgumentsTest, DivConstVector) {
OperationType op_type = OperationType::DIV;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> param;
param.shape = Linear(2);
param.id = 1;
param.data = {0.4, 0.5};
attr.param = std::move(param);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 2.0, 5.0, 6.0}));
}
TEST(ElementwiseTwoArgumentsTest, FloorDiv) {
OperationType op_type = OperationType::FLOOR_DIV;
const BHWC shape0(1, 1, 1, 7);
float scalar = 2.7f;
ElementwiseAttributes attr;
attr.param = scalar;
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(
model.PopulateTensor(0, {-4.5f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.5f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{std::floor(-4.5f / scalar), std::floor(-3.0f / scalar),
std::floor(-1.5f / scalar), std::floor(0.0f / scalar),
std::floor(1.5f / scalar), std::floor(3.0f / scalar),
std::floor(4.5f / scalar)}));
}
TEST(ElementwiseTwoArgumentsTest, FloorMod) {
OperationType op_type = OperationType::FLOOR_MOD;
const BHWC shape0(1, 1, 1, 7);
float scalar = 2.7f;
ElementwiseAttributes attr;
attr.param = scalar;
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(
model.PopulateTensor(0, {-4.5f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.5f}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-4.5f - std::floor(-4.5f / scalar) * scalar,
-3.0f - std::floor(-3.0f / scalar) * scalar,
-1.5f - std::floor(-1.5f / scalar) * scalar,
0.0f - std::floor(0.0f / scalar) * scalar,
1.5f - std::floor(1.5f / scalar) * scalar,
3.0f - std::floor(3.0f / scalar) * scalar,
4.5f - std::floor(4.5f / scalar) * scalar}));
}
TEST(ElementwiseTwoArgumentsTest, MaximumElementwise) {
OperationType op_type = OperationType::MAXIMUM;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape), GetTensorRef(1, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, 3.0, -2.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.0, 3.0, -2.0}));
}
TEST(ElementwiseTwoArgumentsTest, MaximumBroadcast) {
OperationType op_type = OperationType::MAXIMUM;
const BHWC shape0(1, 2, 1, 2);
const BHWC shape1(1, 1, 1, 2);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape0), GetTensorRef(1, shape1)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {0.5, 0.2}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.5, 1.0, 2.0, 3.0}));
}
TEST(ElementwiseTwoArgumentsTest, MaximumScalar) {
OperationType op_type = OperationType::MAXIMUM;
const BHWC shape(1, 2, 2, 1);
ElementwiseAttributes attr;
attr.param = -1.0f;
SingleOpModel model(
{ToString(op_type), std::move(attr)},
{GetTensorRef(0, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, -1.0, 2.0, -1.0}));
}
TEST(ElementwiseTwoArgumentsTest, MaximumConstVector) {
OperationType op_type = OperationType::MAXIMUM;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> param;
param.shape = Linear(2);
param.id = 1;
param.data = {0.4, 0.5};
attr.param = std::move(param);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.4, 1.0, 2.0, 3.0}));
}
TEST(ElementwiseTwoArgumentsTest, MinimumElementwise) {
OperationType op_type = OperationType::MINIMUM;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape), GetTensorRef(1, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, 3.0, -2.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, -6.2, 2.0, -3.0}));
}
TEST(ElementwiseTwoArgumentsTest, MinimumBroadcast) {
OperationType op_type = OperationType::MINIMUM;
const BHWC shape0(1, 2, 1, 2);
const BHWC shape1(1, 1, 1, 2);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape0), GetTensorRef(1, shape1)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {0.5, 0.2}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 0.2, 0.5, 0.2}));
}
TEST(ElementwiseTwoArgumentsTest, MinimumScalar) {
OperationType op_type = OperationType::MINIMUM;
const BHWC shape(1, 2, 2, 1);
ElementwiseAttributes attr;
attr.param = -1.0f;
SingleOpModel model(
{ToString(op_type), std::move(attr)},
{GetTensorRef(0, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.0, -6.2, -1.0, -3.0}));
}
TEST(ElementwiseTwoArgumentsTest, MinimumConstVector) {
OperationType op_type = OperationType::MINIMUM;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> param;
param.shape = Linear(2);
param.id = 1;
param.data = {0.5, 0.2};
attr.param = std::move(param);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 0.2, 0.5, 0.2}));
}
TEST(ElementwiseTwoArgumentsTest, PowElementwise) {
OperationType op_type = OperationType::POW;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape), GetTensorRef(1, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, 3.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 8.0, 256.0}));
}
TEST(ElementwiseTwoArgumentsTest, PowBroadcast) {
OperationType op_type = OperationType::POW;
const BHWC shape0(1, 2, 1, 2);
const BHWC shape1(1, 1, 1, 2);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape0), GetTensorRef(1, shape1)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {2.0, 0.5}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 4.0, 2.0}));
}
TEST(ElementwiseTwoArgumentsTest, PowScalar) {
OperationType op_type = OperationType::POW;
const BHWC shape(1, 2, 2, 1);
ElementwiseAttributes attr;
attr.param = 2.0f;
SingleOpModel model(
{ToString(op_type), std::move(attr)},
{GetTensorRef(0, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 4.0, 16.0}));
}
TEST(ElementwiseTwoArgumentsTest, PowConstVector) {
OperationType op_type = OperationType::POW;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> param;
param.shape = Linear(2);
param.id = 1;
param.data = {2.0, 0.5};
attr.param = std::move(param);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 4.0, 2.0}));
}
TEST(ElementwiseTwoArgumentsTest, SquaredDiffElementwise) {
OperationType op_type = OperationType::SQUARED_DIFF;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape), GetTensorRef(1, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 2.0, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 1.0, 5.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 1.0, 9.0, 0.0}));
}
TEST(ElementwiseTwoArgumentsTest, SquaredDiffBroadcast) {
OperationType op_type = OperationType::SQUARED_DIFF;
const BHWC shape0(1, 2, 1, 2);
const BHWC shape1(1, 1, 1, 2);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape0), GetTensorRef(1, shape1)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {-1.0, 5.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 16.0, 9.0, 4.0}));
}
TEST(ElementwiseTwoArgumentsTest, SquaredDiffScalar) {
OperationType op_type = OperationType::SQUARED_DIFF;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
attr.param = static_cast<float>(5.0);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {25.0, 16.0, 9.0, 4.0}));
}
TEST(ElementwiseTwoArgumentsTest, SquaredDiffConstVector) {
OperationType op_type = OperationType::SQUARED_DIFF;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> param;
param.shape = Linear(2);
param.id = 1;
param.data = {-1.0, 5.0};
attr.param = std::move(param);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 16.0, 9.0, 4.0}));
}
TEST(ElementwiseTwoArgumentsTest, SubElementwise) {
OperationType op_type = OperationType::SUB;
const BHWC shape(1, 2, 2, 1);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape), GetTensorRef(1, shape)},
{GetTensorRef(2, shape)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, 3.0, 4.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.0, -8.2, -1.0, 0.0}));
}
TEST(ElementwiseTwoArgumentsTest, SubBroadcast) {
OperationType op_type = OperationType::SUB;
const BHWC shape0(1, 2, 1, 2);
const BHWC shape1(1, 1, 1, 2);
SingleOpModel model(
{ToString(op_type), {}},
{GetTensorRef(0, shape0), GetTensorRef(1, shape1)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_TRUE(model.PopulateTensor(1, {0.3, 0.2}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-0.3, 0.8, 1.7, 2.8}));
}
TEST(ElementwiseTwoArgumentsTest, SubScalar) {
OperationType op_type = OperationType::SUB;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
attr.param = static_cast<float>(0.5);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-0.5, 0.5, 1.5, 2.5}));
}
TEST(ElementwiseTwoArgumentsTest, SubScalarRuntimeTensorSecond) {
OperationType op_type = OperationType::SUB;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
attr.param = static_cast<float>(0.5);
attr.runtime_tensor_is_second = true;
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.5, -0.5, -1.5, -2.5}));
}
TEST(ElementwiseTwoArgumentsTest, SubConstVector) {
OperationType op_type = OperationType::SUB;
const BHWC shape0(1, 2, 1, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> param;
param.shape = Linear(2);
param.id = 1;
param.data = {0.3, 0.2};
attr.param = std::move(param);
SingleOpModel model({ToString(op_type), attr},
{GetTensorRef(0, shape0)},
{GetTensorRef(2, shape0)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 3.0}));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-0.3, 0.8, 1.7, 2.8}));
}
}
}
}
} |
884 | cpp | tensorflow/tensorflow | one_hot | tensorflow/lite/kernels/one_hot.cc | tensorflow/lite/kernels/one_hot_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_ONE_HOT_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_ONE_HOT_H_
#include <string>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
GPUOperation CreateOneHot(const OperationDef& definition,
const OneHotAttributes& attr);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/one_hot.h"
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
namespace tflite {
namespace gpu {
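// Emits a shader that reads the single index stored in src_tensor and writes
// on_value at that channel of dst_tensor and off_value everywhere else,
// handling four output channels per Z slice.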
std::string GetOneHotCode(const OperationDef& op_def,
const OneHotAttributes& attr, GPUOperation* op) {
op->AddSrcTensor("src_tensor", op_def.src_tensors[0]);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " int idx = Z * 4;\n";
c += " int hot_idx = args.src_tensor.Read(0, 0, 0).x;\n";
c += " FLT4 res = INIT_FLT4(args.off_value);\n";
c += " if ((hot_idx >= idx) && (hot_idx < (idx + 4))) {\n";
c += " res.x = (idx + 0) == hot_idx ? args.on_value : args.off_value;\n";
c += " res.y = (idx + 1) == hot_idx ? args.on_value : args.off_value;\n";
c += " res.z = (idx + 2) == hot_idx ? args.on_value : args.off_value;\n";
c += " res.w = (idx + 3) == hot_idx ? args.on_value : args.off_value;\n";
c += " }\n";
c += " args.dst_tensor.Write(res, X, Y, Z);\n";
c += "}\n";
return c;
}
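// Builds the GPUOperation for OneHot and registers the on/off values as float
// or half arguments depending on the calculation precision.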
GPUOperation CreateOneHot(const OperationDef& definition,
const OneHotAttributes& attr) {
GPUOperation op(definition);
op.code_ = GetOneHotCode(definition, attr, &op);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
if (definition.precision == CalculationsPrecision::F32) {
op.args_.AddFloat("on_value", attr.on_value);
op.args_.AddFloat("off_value", attr.off_value);
} else {
op.args_.AddHalf("on_value", half(attr.on_value));
op.args_.AddHalf("off_value", half(attr.off_value));
}
return op;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/one_hot_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, OneHot) {
auto status = OneHotTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, OneHotBatch) {
auto status = OneHotBatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
885 | cpp | tensorflow/tensorflow | pad | tensorflow/lite/kernels/pad.cc | third_party/xla/xla/tests/pad_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_PAD_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_PAD_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewPadNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/pad.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
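// Pad node shader: supports ZEROS and REFLECT padding content, rejects
// negative and batch padding, and maps each output coordinate back to a
// source coordinate in the generated GLSL.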
class Pad : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PadAttributes&>(ctx.op_attr);
if (attr.type != PaddingContentType::ZEROS &&
attr.type != PaddingContentType::REFLECT) {
return absl::UnimplementedError(
    "Only ZEROS and REFLECT padding types are supported.");
}
if (attr.appended.h < 0 || attr.appended.w < 0 || attr.appended.c < 0 ||
attr.prepended.h < 0 || attr.prepended.w < 0 || attr.prepended.c < 0) {
return absl::UnimplementedError("Negative padding is not supported.");
}
if (attr.appended.b != 0 || attr.prepended.b != 0) {
return absl::UnimplementedError("Padding for BATCH is not supported.");
}
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"input_data_0_c", static_cast<int>(ctx.input_shapes[0][3])},
{"prepended",
int4(attr.prepended.w, attr.prepended.h, attr.prepended.c, 0)},
};
std::string source;
if (attr.type == PaddingContentType::REFLECT) {
source = R"(
int src_x = gid.x - $prepended.x$;
src_x = abs(src_x);
src_x = $input_data_0_w$ - 1 - abs(src_x - $input_data_0_w$ + 1);
int src_y = gid.y - $prepended.y$;
src_y = abs(src_y);
src_y = $input_data_0_h$ - 1 - abs(src_y - $input_data_0_h$ + 1);
)";
if (attr.prepended.c == 0 && attr.appended.c == 0) {
source += " value_0 = $input_data_0[src_x, src_y, gid.z]$;\n";
} else {
source += R"(
int start_channel = gid.z * 4;
for (int i = 0; i < 4; ++i) {
int channel = start_channel + i;
int src_z = channel - $prepended.z$;
src_z = abs(src_z);
src_z = $input_data_0_c$ - 1 - abs(src_z - $input_data_0_c$ + 1);
src_z = clamp(src_z, 0, $input_data_0_c$ - 1);
value_0[i] = $input_data_0[src_x, src_y, src_z / 4]$[src_z % 4];
}
)";
}
} else {
source = R"(
int src_x = gid.x - $prepended.x$;
int src_y = gid.y - $prepended.y$;
if (src_x >= 0 && src_x < $input_data_0_w$ && src_y >= 0 && src_y < $input_data_0_h$) {
)";
if (attr.prepended.c == 0 && attr.appended.c == 0) {
source += " value_0 = $input_data_0[src_x, src_y, gid.z]$;\n";
} else if (attr.prepended.c % 4 == 0) {
parameters.push_back(
{"src_slices",
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)});
source += R"(
int src_z = gid.z - $prepended.z$ / 4;
if (src_z >= 0 && src_z < $src_slices$) {
value_0 = $input_data_0[src_x, src_y, src_z]$;
}
)";
} else {
source += R"(
int start_channel = gid.z * 4;
for (int i = 0; i < 4; ++i) {
int channel = start_channel + i;
int src_z = channel - $prepended.z$;
if (src_z >= 0 && src_z < $input_data_0_c$) {
value_0[i] = $input_data_0[src_x, src_y, src_z / 4]$[src_z % 4];
}
}
)";
}
source += " }\n";
}
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewPadNodeShader() {
return std::make_unique<Pad>();
}
}
}
} | #include <memory>
#include <vector>
#include "xla/array2d.h"
#include "xla/array4d.h"
#include "xla/client/lib/arithmetic.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/reference_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
#ifdef XLA_BACKEND_SUPPORTS_BFLOAT16
static std::array<bool, 2> use_bfloat16_params{false, true};
#else
static std::array<bool, 1> use_bfloat16_params{false};
#endif
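// PadTest prepares a shared R4 padding config that pads dimensions 0 and 1
// with a mix of low/high edge padding and interior padding; PadTestFloat
// additionally runs each test with and without bfloat16 where the backend
// supports it.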
class PadTest : public ClientLibraryTestBase {
protected:
PadTest() {
auto dimension0 = r4_padding_on_dim0_dim1_.add_dimensions();
dimension0->set_edge_padding_low(1);
dimension0->set_edge_padding_high(0);
dimension0->set_interior_padding(2);
auto dimension1 = r4_padding_on_dim0_dim1_.add_dimensions();
dimension1->set_edge_padding_low(0);
dimension1->set_edge_padding_high(2);
dimension1->set_interior_padding(1);
auto dimension2 = r4_padding_on_dim0_dim1_.add_dimensions();
dimension2->set_edge_padding_low(0);
dimension2->set_edge_padding_high(0);
dimension2->set_interior_padding(0);
auto dimension3 = r4_padding_on_dim0_dim1_.add_dimensions();
dimension3->set_edge_padding_low(0);
dimension3->set_edge_padding_high(0);
dimension3->set_interior_padding(0);
}
PaddingConfig r4_padding_on_dim0_dim1_;
};
class PadTestFloat : public PadTest,
public ::testing::WithParamInterface<bool> {
protected:
PadTestFloat() { set_use_bfloat16(GetParam()); }
ErrorSpec DefaultErrorSpec() const {
if (use_bfloat16()) {
return ErrorSpec(1e-3, 1e-3);
} else {
return ErrorSpec(1e-5, 1e-5);
}
}
};
XLA_TEST_P(PadTestFloat, Pad1DS0ToS0Array) {
XlaBuilder b(TestName());
PaddingConfig padding_config;
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(0);
dimension->set_edge_padding_high(0);
dimension->set_interior_padding(0);
Pad(AddParam(LiteralUtil::CreateR1<float>({}), &b),
AddParam(LiteralUtil::CreateR0<float>(0.1), &b), padding_config);
ComputeAndCompareR1<float>(&b, {}, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, Pad1DS0ToS5Array) {
XlaBuilder b(TestName());
PaddingConfig padding_config;
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(1);
dimension->set_edge_padding_high(4);
dimension->set_interior_padding(7);
Pad(AddParam(LiteralUtil::CreateR1<float>({}), &b),
AddParam(LiteralUtil::CreateR0<float>(0.1), &b), padding_config);
ComputeAndCompareR1<float>(&b, std::vector<float>(5, 0.1), {},
DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, Pad1DS3Array) {
XlaBuilder b(TestName());
PaddingConfig padding_config;
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(3);
dimension->set_edge_padding_high(0);
dimension->set_interior_padding(1);
Pad(AddParam(LiteralUtil::CreateR1<float>({1, 2, 3}), &b),
AddParam(LiteralUtil::CreateR0<float>(0.1), &b), padding_config);
std::vector<float> expected({0.1, 0.1, 0.1, 1, 0.1, 2, 0.1, 3});
ComputeAndCompareR1<float>(&b, expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, Pad4D_2x0x3x2_FloatArray) {
XlaBuilder b(TestName());
Pad(AddParam(Array4D<float>(2, 0, 3, 2), &b),
AddParam(LiteralUtil::CreateR0<float>(1.5), &b),
r4_padding_on_dim0_dim1_);
ComputeAndCompareR4<float>(&b, Array4D<float>(5, 2, 3, 2, 1.5f), {},
DefaultErrorSpec());
}
TEST_P(PadTestFloat, Pad4DFloat_1x1x3x2_Array) {
XlaBuilder b(TestName());
auto input = std::make_unique<Array4D<float>>(1, 1, 3, 2);
Array2D<float> input_xy({
{1.0f, 2.0f},
{3.0f, 4.0f},
{5.0f, 6.0f},
});
input->FillWithYX(input_xy);
Pad(AddParam(*input, &b), AddParam(LiteralUtil::CreateR0<float>(1.5), &b),
r4_padding_on_dim0_dim1_);
auto expected = std::make_unique<Array4D<float>>(2, 3, 3, 2);
expected->Fill(1.5);
(*expected)(1, 0, 0, 0) = 1.0f;
(*expected)(1, 0, 0, 1) = 2.0f;
(*expected)(1, 0, 1, 0) = 3.0f;
(*expected)(1, 0, 1, 1) = 4.0f;
(*expected)(1, 0, 2, 0) = 5.0f;
(*expected)(1, 0, 2, 1) = 6.0f;
ComputeAndCompareR4<float>(&b, *expected, {}, DefaultErrorSpec());
}
TEST_P(PadTestFloat, Pad4DFloatArrayWithInteriorPadding) {
XlaBuilder b(TestName());
const float pad_value = 1.5f;
Array4D<float> input(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
Pad(AddParam(input, &b),
AddParam(LiteralUtil::CreateR0<float>(pad_value), &b),
r4_padding_on_dim0_dim1_);
auto expected = std::make_unique<Array4D<float>>(8, 5, 1, 1);
expected->Fill(pad_value);
(*expected)(1, 0, 0, 0) = 1.0f;
(*expected)(1, 2, 0, 0) = 2.0f;
(*expected)(4, 0, 0, 0) = 3.0f;
(*expected)(4, 2, 0, 0) = 4.0f;
(*expected)(7, 0, 0, 0) = 5.0f;
(*expected)(7, 2, 0, 0) = 6.0f;
ComputeAndCompareR4<float>(&b, *expected, {}, ErrorSpec(0.0001));
}
TEST_P(PadTestFloat, Pad4DFloatArrayMinorFirstSmall) {
XlaBuilder b(TestName());
PaddingConfig padding_config;
auto dimension0 = padding_config.add_dimensions();
dimension0->set_edge_padding_low(0);
dimension0->set_edge_padding_high(0);
dimension0->set_interior_padding(0);
auto dimension1 = padding_config.add_dimensions();
dimension1->set_edge_padding_low(0);
dimension1->set_edge_padding_high(0);
dimension1->set_interior_padding(0);
auto dimension2 = padding_config.add_dimensions();
dimension2->set_edge_padding_low(2);
dimension2->set_edge_padding_high(1);
dimension2->set_interior_padding(0);
auto dimension3 = padding_config.add_dimensions();
dimension3->set_edge_padding_low(2);
dimension3->set_edge_padding_high(3);
dimension3->set_interior_padding(0);
const Layout layout = LayoutUtil::MakeLayout({0, 1, 2, 3});
const float pad_value = -5.123f;
Array4D<float> input_array(1, 1, 2, 3, {1, 2, 3, 4, 5, 6});
auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
input = input.Relayout(layout);
Pad(AddParam(input, &b),
AddParam(LiteralUtil::CreateR0<float>(pad_value), &b), padding_config);
Array4D<float> expected_array(1, 1, 5, 8);
expected_array.Fill(pad_value);
expected_array(0, 0, 2, 2) = 1.0f;
expected_array(0, 0, 2, 3) = 2.0f;
expected_array(0, 0, 2, 4) = 3.0f;
expected_array(0, 0, 3, 2) = 4.0f;
expected_array(0, 0, 3, 3) = 5.0f;
expected_array(0, 0, 3, 4) = 6.0f;
ComputeAndCompareR4<float>(&b, expected_array, {}, ErrorSpec(0.0001));
}
XLA_TEST_P(PadTestFloat, Pad4DFloatArrayMinorFirstNonTrivialMinorDimensions) {
XlaBuilder b(TestName());
PaddingConfig padding_config;
auto dimension0 = padding_config.add_dimensions();
dimension0->set_edge_padding_low(0);
dimension0->set_edge_padding_high(0);
dimension0->set_interior_padding(0);
auto dimension1 = padding_config.add_dimensions();
dimension1->set_edge_padding_low(0);
dimension1->set_edge_padding_high(0);
dimension1->set_interior_padding(0);
auto dimension2 = padding_config.add_dimensions();
dimension2->set_edge_padding_low(2);
dimension2->set_edge_padding_high(2);
dimension2->set_interior_padding(1);
auto dimension3 = padding_config.add_dimensions();
dimension3->set_edge_padding_low(2);
dimension3->set_edge_padding_high(2);
dimension3->set_interior_padding(0);
const Layout layout = LayoutUtil::MakeLayout({0, 1, 2, 3});
const float pad_value = -5.123f;
Array4D<float> input_array(1, 25, 7, 7);
input_array.Fill(pad_value);
input_array(0, 0, 0, 0) = 1.0f;
input_array(0, 24, 6, 6) = 2.0f;
input_array(0, 17, 2, 5) = 3.0f;
auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
input = input.Relayout(layout);
Pad(AddParam(input, &b),
AddParam(LiteralUtil::CreateR0<float>(pad_value), &b), padding_config);
Array4D<float> expected_array(1, 25, 17, 11);
expected_array.Fill(pad_value);
expected_array(0, 0, 2, 2) = 1.0f;
expected_array(0, 24, 14, 8) = 2.0f;
expected_array(0, 17, 6, 7) = 3.0f;
ComputeAndCompareR4<float>(&b, expected_array, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(PadTest, Pad4DU8Array) {
XlaBuilder b(TestName());
auto input = std::make_unique<Array4D<uint8_t>>(1, 1, 3, 2);
Array2D<uint8_t> input_xy({
{1, 2},
{3, 4},
{5, 6},
});
input->FillWithYX(input_xy);
Pad(AddParam(*input, &b), ConstantR0<uint8_t>(&b, 35),
r4_padding_on_dim0_dim1_);
auto expected = std::make_unique<Array4D<uint8_t>>(2, 3, 3, 2);
expected->Fill(35);
(*expected)(1, 0, 0, 0) = 1;
(*expected)(1, 0, 0, 1) = 2;
(*expected)(1, 0, 1, 0) = 3;
(*expected)(1, 0, 1, 1) = 4;
(*expected)(1, 0, 2, 0) = 5;
(*expected)(1, 0, 2, 1) = 6;
ComputeAndCompareR4<uint8_t>(&b, *expected, {});
}
XLA_TEST_F(PadTest, Pad4DPredArray) {
XlaBuilder b(TestName());
auto input = Broadcast(ConstantR0<bool>(&b, true), {1, 1, 3, 2});
auto padded =
Pad(input, ConstantR0<bool>(&b, false), r4_padding_on_dim0_dim1_);
auto zeros = std::make_unique<Array4D<int32_t>>(2, 3, 3, 2);
auto ones = std::make_unique<Array4D<int32_t>>(2, 3, 3, 2);
zeros->Fill(0);
ones->Fill(1);
Select(padded, AddParam(*ones, &b), AddParam(*zeros, &b));
auto expected = std::make_unique<Array4D<int32_t>>(2, 3, 3, 2);
expected->Fill(0);
(*expected)(1, 0, 0, 0) = 1;
(*expected)(1, 0, 0, 1) = 1;
(*expected)(1, 0, 1, 0) = 1;
(*expected)(1, 0, 1, 1) = 1;
(*expected)(1, 0, 2, 0) = 1;
(*expected)(1, 0, 2, 1) = 1;
ComputeAndCompareR4<int32_t>(&b, *expected, {});
}
XLA_TEST_P(PadTestFloat, Large2DPad) {
XlaBuilder b(TestName());
auto ones = std::make_unique<Array2D<float>>(4, 4);
ones->Fill(1.0f);
auto input = AddParam(*ones, &b);
PaddingConfig padding_config = MakeNoPaddingConfig(2);
for (int dim : {0, 1}) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
98 + 100 * (1 - dim));
padding_config.mutable_dimensions(dim)->set_edge_padding_high(58 +
100 * dim);
}
Pad(input, AddParam(LiteralUtil::CreateR0<float>(0.0f), &b), padding_config);
auto expected = ReferenceUtil::PadArray2D(*ones, padding_config, 0.0f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, AllTypes2DPad) {
XlaBuilder b(TestName());
constexpr int64_t in_rows = 35;
constexpr int64_t in_cols = 35;
auto operand = std::make_unique<Array2D<float>>(in_rows, in_cols);
operand->FillUnique(0.0f);
auto input = AddParam(*operand, &b);
PaddingConfig padding_config = MakeNoPaddingConfig(2);
padding_config.mutable_dimensions(0)->set_edge_padding_low(7);
padding_config.mutable_dimensions(0)->set_edge_padding_high(5);
padding_config.mutable_dimensions(0)->set_interior_padding(3);
padding_config.mutable_dimensions(1)->set_edge_padding_low(6);
padding_config.mutable_dimensions(1)->set_edge_padding_high(4);
padding_config.mutable_dimensions(1)->set_interior_padding(2);
Pad(input, AddParam(LiteralUtil::CreateR0<float>(3.14f), &b), padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 3.14f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, High2DPad) {
XlaBuilder b(TestName());
constexpr int64_t in_rows = 129;
constexpr int64_t in_cols = 129;
constexpr int64_t low_padding = 0;
int64_t high_padding[2] = {5, 7};
constexpr int64_t interior_padding = 0;
auto operand = std::make_unique<Array2D<float>>(in_rows, in_cols);
operand->FillUnique(1.0f);
auto input = AddParam(*operand, &b);
PaddingConfig padding_config = MakeNoPaddingConfig(2);
for (int dim : {0, 1}) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(low_padding);
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
high_padding[dim]);
padding_config.mutable_dimensions(dim)->set_interior_padding(
interior_padding);
}
Pad(input, AddParam(LiteralUtil::CreateR0<float>(2.718f), &b),
padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, NegativePadding2D) {
XlaBuilder b(TestName());
constexpr int64_t in_rows = 129;
constexpr int64_t in_cols = 129;
int64_t low_padding[2] = {-1, -2};
int64_t high_padding[2] = {-3, 4};
constexpr int64_t interior_padding = 0;
auto operand = std::make_unique<Array2D<float>>(in_rows, in_cols);
operand->FillUnique(1.0f);
auto input = AddParam(*operand, &b);
PaddingConfig padding_config = MakeNoPaddingConfig(2);
for (int dim : {0, 1}) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
low_padding[dim]);
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
high_padding[dim]);
padding_config.mutable_dimensions(dim)->set_interior_padding(
interior_padding);
}
Pad(input, AddParam(LiteralUtil::CreateR0<float>(2.718f), &b),
padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, NegativeAndInteriorPadding2D) {
XlaBuilder b(TestName());
constexpr int64_t in_rows = 8;
constexpr int64_t in_cols = 11;
int64_t low_padding[2] = {4, -1};
int64_t high_padding[2] = {-2, -4};
int64_t interior_padding[2] = {1, 2};
auto operand = std::make_unique<Array2D<float>>(in_rows, in_cols);
operand->FillUnique(1.0f);
auto input = AddParam(*operand, &b);
PaddingConfig padding_config = MakeNoPaddingConfig(2);
for (int dim : {0, 1}) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
low_padding[dim]);
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
high_padding[dim]);
padding_config.mutable_dimensions(dim)->set_interior_padding(
interior_padding[dim]);
}
Pad(input, AddParam(LiteralUtil::CreateR0<float>(2.718f), &b),
padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, ReducePad) {
XlaBuilder b(TestName());
auto ones = std::make_unique<Array4D<float>>(2, 2, 2, 2);
ones->Fill(1.0);
auto input = AddParam(*ones, &b);
XlaComputation add = CreateScalarAddComputation(FloatType(), &b);
auto reduce =
Reduce(input, AddParam(LiteralUtil::CreateR0<float>(0.0), &b), add, {0});
PaddingConfig padding_config = MakeNoPaddingConfig(3);
padding_config.mutable_dimensions(0)->set_edge_padding_low(1);
padding_config.mutable_dimensions(0)->set_edge_padding_high(1);
Pad(reduce, AddParam(LiteralUtil::CreateR0<float>(0.0f), &b), padding_config);
Array3D<float> expected({{{0.0, 0.0}, {0.0, 0.0}},
{{2.0, 2.0}, {2.0, 2.0}},
{{2.0, 2.0}, {2.0, 2.0}},
{{0.0, 0.0}, {0.0, 0.0}}});
ComputeAndCompareR3<float>(&b, expected, {}, DefaultErrorSpec());
}
INSTANTIATE_TEST_CASE_P(PadTestFloatInstantiation, PadTestFloat,
::testing::ValuesIn(use_bfloat16_params));
}
} |
886 | cpp | tensorflow/tensorflow | eigen_support | tensorflow/lite/kernels/eigen_support.cc | tensorflow/lite/kernels/eigen_support_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
#define TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
#include "tensorflow/lite/core/c/common.h"
namespace EigenForTFLite {
struct ThreadPoolDevice;
}
namespace tflite {
namespace eigen_support {
void IncrementUsageCounter(TfLiteContext* context);
void DecrementUsageCounter(TfLiteContext* context);
const EigenForTFLite::ThreadPoolDevice* GetThreadPoolDevice(
TfLiteContext* context);
}
}
#endif
#include "tensorflow/lite/kernels/eigen_support.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/lite/kernels/op_macros.h"
#ifndef EIGEN_DONT_ALIGN
#include "tensorflow/lite/util.h"
#endif
namespace tflite {
namespace eigen_support {
namespace {
const int kDefaultNumThreadpoolThreads = 4;
bool IsValidNumThreads(int num_threads) { return num_threads >= -1; }
int GetNumThreads(int num_threads) {
return num_threads > -1 ? num_threads : kDefaultNumThreadpoolThreads;
}
#ifndef EIGEN_DONT_ALIGN
static_assert(
kDefaultTensorAlignment % EIGEN_MAX_ALIGN_BYTES == 0,
"kDefaultTensorAlignment doesn't comply with Eigen alignment requirement.");
#endif
void SetEigenNbThreads(int threads) {
#if defined(EIGEN_HAS_OPENMP)
Eigen::setNbThreads(threads);
#endif
}
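// Adapts an (optional) Eigen::ThreadPool to the Eigen::ThreadPoolInterface
// expected by Eigen::ThreadPoolDevice. When at most one thread is requested no
// pool is created and Schedule() runs the task inline on the calling thread.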
class EigenThreadPoolWrapper : public Eigen::ThreadPoolInterface {
public:
explicit EigenThreadPoolWrapper(int num_threads) {
if (num_threads > 1) {
pool_ = std::make_unique<Eigen::ThreadPool>(num_threads);
}
}
~EigenThreadPoolWrapper() override {}
void Schedule(std::function<void()> fn) override {
if (pool_) {
pool_->Schedule(std::move(fn));
} else {
fn();
}
}
int NumThreads() const override { return pool_ ? pool_->NumThreads() : 1; }
int CurrentThreadId() const override {
return pool_ ? pool_->CurrentThreadId() : 0;
}
private:
std::unique_ptr<Eigen::ThreadPool> pool_;
};
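// Creates the Eigen::ThreadPoolDevice and its backing thread pool lazily, on
// the first GetThreadPoolDevice() call. Changing the thread count just drops
// the cached objects so they are rebuilt with the new size on the next use.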
class LazyEigenThreadPoolHolder {
public:
explicit LazyEigenThreadPoolHolder(int num_threads) {
SetNumThreads(num_threads);
}
const Eigen::ThreadPoolDevice* GetThreadPoolDevice() {
if (!device_) {
thread_pool_wrapper_ =
std::make_unique<EigenThreadPoolWrapper>(target_num_threads_);
device_ = std::make_unique<Eigen::ThreadPoolDevice>(
thread_pool_wrapper_.get(), target_num_threads_);
}
return device_.get();
}
void SetNumThreads(int num_threads) {
const int target_num_threads = GetNumThreads(num_threads);
if (target_num_threads_ != target_num_threads) {
target_num_threads_ = target_num_threads;
device_.reset();
thread_pool_wrapper_.reset();
}
}
private:
int target_num_threads_ = kDefaultNumThreadpoolThreads;
std::unique_ptr<Eigen::ThreadPoolDevice> device_;
std::unique_ptr<Eigen::ThreadPoolInterface> thread_pool_wrapper_;
};
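// Reference-counted Eigen state registered as a TfLiteExternalContext, so all
// kernels running under the same TfLiteContext share one thread pool device.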
struct RefCountedEigenContext : public TfLiteExternalContext {
std::unique_ptr<LazyEigenThreadPoolHolder> thread_pool_holder;
int num_references = 0;
};
RefCountedEigenContext* GetEigenContext(TfLiteContext* context) {
return reinterpret_cast<RefCountedEigenContext*>(
context->GetExternalContext(context, kTfLiteEigenContext));
}
TfLiteStatus Refresh(TfLiteContext* context) {
if (IsValidNumThreads(context->recommended_num_threads)) {
SetEigenNbThreads(GetNumThreads(context->recommended_num_threads));
}
auto* ptr = GetEigenContext(context);
if (ptr != nullptr) {
ptr->thread_pool_holder->SetNumThreads(context->recommended_num_threads);
}
return kTfLiteOk;
}
}
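// Creates the shared Eigen context on first use and bumps its reference count;
// DecrementUsageCounter() destroys it again once the last user releases it.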
void IncrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
if (IsValidNumThreads(context->recommended_num_threads)) {
SetEigenNbThreads(context->recommended_num_threads);
}
ptr = new RefCountedEigenContext;
ptr->type = kTfLiteEigenContext;
ptr->Refresh = Refresh;
ptr->thread_pool_holder = std::make_unique<LazyEigenThreadPoolHolder>(
context->recommended_num_threads);
ptr->num_references = 0;
context->SetExternalContext(context, kTfLiteEigenContext, ptr);
}
ptr->num_references++;
}
void DecrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to DecrementUsageCounter() not preceded by "
"IncrementUsageCounter()");
}
if (--ptr->num_references == 0) {
delete ptr;
context->SetExternalContext(context, kTfLiteEigenContext, nullptr);
}
}
const Eigen::ThreadPoolDevice* GetThreadPoolDevice(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to GetFromContext() not preceded by IncrementUsageCounter()");
}
return ptr->thread_pool_holder->GetThreadPoolDevice();
}
}
} | #include "tensorflow/lite/kernels/eigen_support.h"
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
namespace tflite {
namespace eigen_support {
struct TestTfLiteContext : public TfLiteContext {
TestTfLiteContext() {
recommended_num_threads = -1;
external_context = nullptr;
GetExternalContext = GetExternalContextImpl;
SetExternalContext = SetExternalContextImpl;
}
static void SetExternalContextImpl(TfLiteContext* context,
TfLiteExternalContextType type,
TfLiteExternalContext* external_context) {
static_cast<TestTfLiteContext*>(context)->external_context =
external_context;
}
static TfLiteExternalContext* GetExternalContextImpl(
TfLiteContext* context, TfLiteExternalContextType type) {
return static_cast<TestTfLiteContext*>(context)->external_context;
}
TfLiteExternalContext* external_context;
};
TEST(EigenSupport, Default) {
TestTfLiteContext context;
IncrementUsageCounter(&context);
ASSERT_NE(context.external_context, nullptr);
EXPECT_EQ(context.external_context->type, kTfLiteEigenContext);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, SingleThreaded) {
TestTfLiteContext context;
context.recommended_num_threads = 1;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 1);
EXPECT_EQ(thread_pool_device->numThreadsInPool(), 1);
bool executed = false;
auto notification =
thread_pool_device->enqueue([&executed]() { executed = true; });
ASSERT_NE(notification, nullptr);
notification->Wait();
delete notification;
EXPECT_TRUE(executed);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, MultiThreaded) {
TestTfLiteContext context;
context.recommended_num_threads = 2;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 2);
bool executed = false;
auto notification =
thread_pool_device->enqueue([&executed]() { executed = true; });
ASSERT_NE(notification, nullptr);
notification->Wait();
delete notification;
EXPECT_TRUE(executed);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, NumThreadsChanged) {
TestTfLiteContext context;
context.recommended_num_threads = 1;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 1);
context.recommended_num_threads = 3;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 3);
context.recommended_num_threads = -1;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
context.recommended_num_threads = 0;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 0);
context.recommended_num_threads = 3;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 3);
context.recommended_num_threads = -5;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, RefCounting) {
TestTfLiteContext context;
EXPECT_EQ(context.external_context, nullptr);
IncrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
IncrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
DecrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
DecrementUsageCounter(&context);
EXPECT_EQ(context.external_context, nullptr);
}
}
} |
887 | cpp | tensorflow/tensorflow | densify | tensorflow/lite/kernels/densify.cc | tensorflow/lite/kernels/densify_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DENSIFY_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DENSIFY_H_
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
namespace tflite {
namespace reference_ops {
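// Expands a sparse tensor (described by TfLiteSparsity) into a fully dense
// buffer of output_shape.FlatSize() elements using the sparsity
// FormatConverter.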
template <typename T>
inline void Densify(const TfLiteSparsity* sparsity,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data,
TfLiteContext* context) {
const int dims_count = output_shape.DimensionsCount();
std::vector<int> vector_shape(dims_count);
for (int i = 0; i < dims_count; i++) {
vector_shape[i] = output_shape.Dims(i);
}
tflite::internal::sparsity::FormatConverter<T> converter(vector_shape,
*sparsity);
converter.SparseToDense(input_data, output_shape.FlatSize(), output_data,
context);
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/densify.h"
#include <stddef.h>
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace densify {
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
bool dense_weights_initialized;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->dense_weights_initialized = false;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type != kTfLiteString);
TF_LITE_ENSURE(context, IsConstantTensor(op_context.input));
TF_LITE_ENSURE(context, op_context.input->sparsity != nullptr);
op_context.output->type = op_context.input->type;
op_context.output->name = "Densify_output";
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
}
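// The sparse input is a constant tensor, so the conversion only has to run
// once; the result lives in the persistent arena and reuse is signalled via
// dense_weights_initialized.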
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
if (op_data->dense_weights_initialized) {
return kTfLiteOk;
}
switch (op_context.input->type) {
case kTfLiteFloat32:
reference_ops::Densify(op_context.input->sparsity,
GetTensorShape(op_context.input),
GetTensorData<float>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<float>(op_context.output), context);
break;
case kTfLiteFloat16:
reference_ops::Densify(
op_context.input->sparsity, GetTensorShape(op_context.input),
GetTensorData<Eigen::half>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<Eigen::half>(op_context.output), context);
break;
case kTfLiteInt8:
reference_ops::Densify(op_context.input->sparsity,
GetTensorShape(op_context.input),
GetTensorData<int8_t>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<int8_t>(op_context.output), context);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %d not supported.",
op_context.input->type);
return kTfLiteError;
}
op_data->dense_weights_initialized = true;
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DENSIFY() {
static TfLiteRegistration r = {densify::Init, densify::Free, densify::Prepare,
densify::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DENSIFY();
}
}
namespace {
using ::testing::ElementsAreArray;
template <typename T>
class DensifyOpModel : public SingleOpModel {
public:
DensifyOpModel(const TensorData& input, const std::vector<T>& input_data,
int version = 1) {
input_ = AddConstSparseInput(input, input_data);
output_ = AddOutput({input.type, input.shape});
SetBuiltinOp(BuiltinOperator_DENSIFY, BuiltinOptions_DensifyOptions,
CreateDensifyOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DENSIFY, ops::builtin::Register_DENSIFY(), version);
    BuildInterpreter({input.shape}, /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/false, /*allocate_and_delegate=*/true);
}
std::vector<T> GetInput() { return ExtractVector<T>(input_); }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
private:
int input_;
int output_;
};
TEST(DensifyOpTest, Float) {
std::vector<float> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<float> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_FLOAT32;
input.shape = {3, 4};
input.traversal_order = {0, 1};
input.format = {kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<float> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
TEST(DensifyOpTest, Float3D) {
std::vector<float> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<float> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_FLOAT32;
input.shape = {3, 2, 2};
input.traversal_order = {0, 1, 2};
input.format = {kTfLiteDimDense, kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<float> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
TEST(DensifyOpTest, Int8) {
std::vector<int8_t> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<int8_t> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_INT8;
input.shape = {3, 4};
input.traversal_order = {0, 1};
input.format = {kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<int8_t> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
}
} |
888 | cpp | tensorflow/tensorflow | add_n | tensorflow/lite/kernels/add_n.cc | tensorflow/lite/kernels/add_n_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs,
const T* const* input_data, T* output_data) {
const size_t size = input_shape.FlatSize();
for (size_t i = 0; i < size; ++i) {
T x = 0;
for (size_t j = 0; j < num_inputs; ++j) {
x += input_data[j][i];
}
output_data[i] = x;
}
}
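// Quantized (int8) variant of AddN. All inputs share one scale/zero point
// (params.input1_*): each value is shifted into a higher-precision fixed-point
// accumulator, summed in int32, then rescaled to the output scale, offset and
// clamped to the activation range.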
inline void AddN(const ArithmeticParams& params,
const RuntimeShape& input_shape, const size_t num_inputs,
const int8_t* const* input_data, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
const size_t size = input_shape.FlatSize();
for (size_t i = 0; i < size; ++i) {
const int32_t x = params.input1_offset;
const int32_t shifted_x = x * (1 << params.left_shift);
int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_x, params.input1_multiplier, params.input1_shift);
for (size_t j = 0; j < num_inputs; ++j) {
const int32_t y = params.input1_offset + input_data[j][i];
const int32_t shifted_y = y * (1 << params.left_shift);
int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_y, params.input1_multiplier, params.input1_shift);
scaled_x += scaled_y;
}
const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
scaled_x, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<int8_t>(clamped_output);
}
}
}
}
#endif
#include <stdint.h>
#include <algorithm>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace add_n {
constexpr int kInputTensor1 = 0;
constexpr int kOutputTensor = 0;
struct OpData {
int scratch_tensor_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, 1, &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int num_inputs = NumInputs(node);
TF_LITE_ENSURE(context, num_inputs >= 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input1->type;
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
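  // Reserve one scratch tensor; the optimized AddN uses it for per-thread
  // partial sums, so it is sized thread_count * NumElements(input1) below.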
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_tensor));
scratch_tensor->type = input1->type;
scratch_tensor->allocation_type = kTfLiteArenaRw;
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
const int thread_count =
std::min(std::max(1, static_cast<int>(num_inputs) / 2),
cpu_backend_context->max_num_threads());
TfLiteIntArray* scratch_shape = TfLiteIntArrayCreate(1);
scratch_shape->data[0] = thread_count * NumElements(input1);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, scratch_tensor, scratch_shape));
for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE(context, HaveSameShapes(input1, input));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type);
}
TfLiteIntArray* input1_dims = input1->dims;
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims);
return context->ResizeTensor(context, output, output_dims);
}
template <typename T>
TfLiteStatus EvalAddN(TfLiteContext* context, TfLiteNode* node) {
VectorOfTensors<T> all_inputs(*context, *node->inputs);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
int num_inputs = NumInputs(node);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch_tensor));
optimized_ops::AddN<T>(GetTensorShape(input1), num_inputs, all_inputs.data(),
GetTensorData<T>(output),
GetTensorData<T>(scratch_tensor), cpu_backend_context);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32) {
TF_LITE_ENSURE_OK(context, EvalAddN<float>(context, node));
} else if (output->type == kTfLiteInt32) {
TF_LITE_ENSURE_OK(context, EvalAddN<int32_t>(context, node));
} else {
TF_LITE_KERNEL_LOG(context, "AddN only supports FLOAT32|INT32 now, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ADD_N() {
static TfLiteRegistration r = {add_n::Init, add_n::Free, add_n::Prepare,
add_n::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/add_n_test_common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
TEST(FloatAddNOpModel, AddMultipleTensors) {
FloatAddNOpModel m({{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<float>(m.input(0), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input(1), {0.1, 0.2, 0.3, 0.5});
m.PopulateTensor<float>(m.input(2), {0.5, 0.1, 0.1, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.4, 0.5, 1.1, 1.5}));
}
TEST(FloatAddNOpModel, Add2Tensors) {
FloatAddNOpModel m(
{{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<float>(m.input(0), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input(1), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(IntegerAddNOpModel, AddMultipleTensors) {
IntegerAddNOpModel m({{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}}},
{TensorType_INT32, {}});
m.PopulateTensor<int32_t>(m.input(0), {-20, 2, 7, 8});
m.PopulateTensor<int32_t>(m.input(1), {1, 2, 3, 5});
m.PopulateTensor<int32_t>(m.input(2), {10, -5, 1, -2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-9, -1, 11, 11}));
}
}
} |
889 | cpp | tensorflow/tensorflow | arg_min_max | tensorflow/lite/kernels/arg_min_max.cc | tensorflow/lite/kernels/arg_min_max_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
#include <functional>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
std::function<bool(T, T)> GetComparefunction(bool is_arg_max) {
if (is_arg_max) {
return std::greater<T>();
} else {
return std::less<T>();
}
}
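// Views the input as [outer_size, axis_size, inner_size] around the reduction
// axis and, for every (outer, inner) pair, writes the index of the first
// min/max element along that axis to the output.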
template <typename T1, typename T2, typename T3, typename Cmp>
void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
const T3* input2_data, const RuntimeShape& output_shape,
T2* output_data, const Cmp& cmp) {
TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0);
TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1,
output_shape.DimensionsCount());
int axis = input2_data[0];
if (axis < 0) {
axis += input1_shape.DimensionsCount();
}
const int axis_size = input1_shape.Dims(axis);
int outer_size = 1;
for (int i = 0; i < axis; ++i) {
TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i));
outer_size *= input1_shape.Dims(i);
}
int inner_size = 1;
const int dims_count = input1_shape.DimensionsCount();
for (int i = axis + 1; i < dims_count; ++i) {
TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1));
inner_size *= input1_shape.Dims(i);
}
for (int outer = 0; outer < outer_size; ++outer) {
for (int inner = 0; inner < inner_size; ++inner) {
auto min_max_value = input1_data[outer * axis_size * inner_size + inner];
T2 min_max_index = 0;
for (int i = 1; i < axis_size; ++i) {
const auto& curr_value =
input1_data[(outer * axis_size + i) * inner_size + inner];
if (cmp(curr_value, min_max_value)) {
min_max_value = curr_value;
min_max_index = static_cast<T2>(i);
}
}
output_data[outer * inner_size + inner] = min_max_index;
}
}
}
template <typename T1, typename T2, typename T3>
void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
const T3* input2_data, const RuntimeShape& output_shape,
T2* output_data, const bool is_arg_max) {
ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data,
GetComparefunction<T1>(is_arg_max));
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/arg_min_max.h"
#include <stdint.h>
#include <functional>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace arg_min_max {
constexpr int kInputTensor = 0;
constexpr int kAxis = 1;
constexpr int kOutputTensor = 0;
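// The output shape is the input shape with the reduced axis removed; the axis
// may be given as a negative (wrap-around) index.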
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* axis, TfLiteTensor* output) {
int axis_value;
if (axis->type == kTfLiteInt64) {
axis_value = static_cast<int>(*GetTensorData<int64_t>(axis));
} else {
axis_value = *GetTensorData<int>(axis);
}
if (axis_value < 0) {
axis_value += NumDimensions(input);
}
TF_LITE_ENSURE(context, axis_value >= 0);
TF_LITE_ENSURE(context, axis_value < NumDimensions(input));
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1);
int j = 0;
for (int i = 0; i < NumDimensions(input); ++i) {
if (i != axis_value) {
output_dims->data[j] = SizeOfDimension(input, i);
++j;
}
}
return context->ResizeTensor(context, output, output_dims);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
TF_LITE_ENSURE(context,
axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
switch (params->output_type) {
case kTfLiteInt32:
output->type = kTfLiteInt32;
break;
case kTfLiteInt64:
output->type = kTfLiteInt64;
break;
default:
TF_LITE_KERNEL_LOG(context, "Unknown index output data type: %d",
params->output_type);
return kTfLiteError;
}
switch (input->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
case kTfLiteBool:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unknown input type: %d, only float32, int types "
"and bool are supported",
input->type);
return kTfLiteError;
}
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
if (IsConstantOrPersistentTensor(axis)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
}
#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \
optimized_ops::ArgMinMax( \
GetTensorShape(input), GetTensorData<data_type>(input), \
GetTensorData<axis_type>(axis), GetTensorShape(output), \
GetTensorData<output_type>(output), is_arg_max)
if (axis->type == kTfLiteInt32) {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int32_t, int32_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int32_t, int64_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only int32 and int64 are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
} else {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int64_t, int32_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int64_t, int64_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only int32 and int64 are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
#undef TF_LITE_ARG_MIN_MAX
return kTfLiteOk;
}
TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) {
return Eval(context, node, false);
}
TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) {
return Eval(context, node, true);
}
}
TfLiteRegistration* Register_ARG_MAX() {
static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
arg_min_max::ArgMaxEval};
return &r;
}
TfLiteRegistration* Register_ARG_MIN() {
static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
arg_min_max::ArgMinEval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class ArgBaseOpModel : public SingleOpModelWithHexagon {
public:
explicit ArgBaseOpModel(TensorType input_type) {
input_ = AddInput(input_type);
output_ = AddOutput(TensorType_INT32);
}
int input() const { return input_; }
std::vector<int> GetInt32Output() const {
return ExtractVector<int>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
using SingleOpModelWithHexagon::builder_;
int input_;
int output_;
};
class ArgMinOpModel : public ArgBaseOpModel {
public:
ArgMinOpModel(std::initializer_list<int> input_shape, TensorType input_type)
      : ArgBaseOpModel(input_type), input_shape_(input_shape) {}
void Build() {
    SetBuiltinOp(BuiltinOperator_ARG_MIN, BuiltinOptions_ArgMinOptions,
                 CreateArgMinOptions(builder_, TensorType_INT32).Union());
BuildInterpreter({input_shape_, {1}});
}
private:
std::vector<int> input_shape_;
};
class ArgMaxOpModel : public ArgBaseOpModel {
public:
ArgMaxOpModel(std::initializer_list<int> input_shape, TensorType input_type)
      : ArgBaseOpModel(input_type), input_shape_(input_shape) {}
void Build() {
    SetBuiltinOp(BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
                 CreateArgMaxOptions(builder_, TensorType_INT32).Union());
BuildInterpreter({input_shape_, {1}});
}
private:
std::vector<int> input_shape_;
};
template <typename integer_type, TensorType tensor_dtype>
void ArgMinTestImpl() {
ArgMinOpModel model({1, 1, 1, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {3}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1}));
}
template <typename integer_type, TensorType tensor_dtype>
void ArgMinNegativeTestImpl() {
ArgMinOpModel model({1, 1, 2, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {-2}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 2, 7, 8, 1, 9, 7, 3});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(),
{1, 2, 7, 8, 1, 9, 7, 3});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({0, 0, 0, 1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 4}));
}
template <typename integer_type, TensorType tensor_dtype>
void ArgMaxTestImpl() {
ArgMaxOpModel model({1, 1, 1, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {3}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({3}));
}
TEST(ArgMinTest, GetArgMin_UInt8) {
ArgMinTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMinTest, GetArgMin_Int8) { ArgMinTestImpl<int8_t, TensorType_INT8>(); }
TEST(ArgMinTest, GetArgMinNegative_UInt8) {
ArgMinNegativeTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMinTest, GetArgMinNegative_Int8) {
ArgMinNegativeTestImpl<int8_t, TensorType_INT8>();
}
TEST(ArgMaxTest, GetArgMax_UInt8) {
ArgMaxTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMaxTest, GetArgMax_Int8) { ArgMaxTestImpl<int8_t, TensorType_INT8>(); }
} |
890 | cpp | tensorflow/tensorflow | tensor_slice_util | tensorflow/lite/kernels/tensor_slice_util.cc | tensorflow/lite/kernels/tensor_slice_util_test.cc | #ifndef TENSORFLOW_CORE_UTIL_TENSOR_SLICE_UTIL_H_
#define TENSORFLOW_CORE_UTIL_TENSOR_SLICE_UTIL_H_
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
static const int kTensorSliceMaxRank = 8;
template <typename T>
Eigen::TensorMap<Eigen::Tensor<T, kTensorSliceMaxRank, Eigen::RowMajor>>
GetEigenTensorMapFromTensorShape(const TensorShape& shape, T* data) {
Eigen::DSizes<Eigen::DenseIndex, kTensorSliceMaxRank> dsizes =
shape.AsEigenDSizesWithPadding<kTensorSliceMaxRank>();
Eigen::TensorMap<Eigen::Tensor<T, kTensorSliceMaxRank, Eigen::RowMajor>> eig(
data, dsizes);
return eig;
}
template <typename DstT>
struct CopyThatWorksWithStringPointer {
template <typename SrcTensor, typename DstTensor, typename Shape>
static void Copy(const SrcTensor& s, Shape s_start, Shape len, DstTensor& d,
Shape d_start) {
d.slice(d_start, len) = s.slice(s_start, len).template cast<DstT>();
}
};
template <>
struct CopyThatWorksWithStringPointer<tstring> {
template <typename SrcTensor, typename DstTensor, typename Shape>
static void Copy(const SrcTensor& s, Shape s_start, Shape len, DstTensor& d,
Shape d_start) {
typedef typename SrcTensor::Index Index;
static_assert(kTensorSliceMaxRank == 8,
"If kTensorSliceMaxRank changes, modify the loop below.");
for (Index i0 = 0; i0 < len[0]; i0++) {
for (Index i1 = 0; i1 < len[1]; i1++) {
for (Index i2 = 0; i2 < len[2]; i2++) {
for (Index i3 = 0; i3 < len[3]; i3++) {
for (Index i4 = 0; i4 < len[4]; i4++) {
for (Index i5 = 0; i5 < len[5]; i5++) {
for (Index i6 = 0; i6 < len[6]; i6++) {
for (Index i7 = 0; i7 < len[7]; i7++) {
d(d_start[0] + i0, d_start[1] + i1, d_start[2] + i2,
d_start[3] + i3, d_start[4] + i4, d_start[5] + i5,
d_start[6] + i6, d_start[7] + i7) =
*s(s_start[0] + i0, s_start[1] + i1, s_start[2] + i2,
s_start[3] + i3, s_start[4] + i4, s_start[5] + i5,
s_start[6] + i6, s_start[7] + i7);
}
}
}
}
}
}
}
}
}
};
template <>
struct CopyThatWorksWithStringPointer<Eigen::half> {
template <typename SrcTensor, typename DstTensor, typename Shape>
static void Copy(const SrcTensor& s, Shape s_start, Shape len, DstTensor& d,
Shape d_start) {
typedef typename SrcTensor::Index Index;
static_assert(kTensorSliceMaxRank == 8,
"If kTensorSliceMaxRank changes, modify the loop below.");
for (Index i0 = 0; i0 < len[0]; i0++) {
for (Index i1 = 0; i1 < len[1]; i1++) {
for (Index i2 = 0; i2 < len[2]; i2++) {
for (Index i3 = 0; i3 < len[3]; i3++) {
for (Index i4 = 0; i4 < len[4]; i4++) {
for (Index i5 = 0; i5 < len[5]; i5++) {
for (Index i6 = 0; i6 < len[6]; i6++) {
for (Index i7 = 0; i7 < len[7]; i7++) {
d(d_start[0] + i0, d_start[1] + i1, d_start[2] + i2,
d_start[3] + i3, d_start[4] + i4, d_start[5] + i5,
d_start[6] + i6, d_start[7] + i7) =
Eigen::numext::bit_cast<Eigen::half, uint16_t>(
s(s_start[0] + i0, s_start[1] + i1, s_start[2] + i2,
s_start[3] + i3, s_start[4] + i4, s_start[5] + i5,
s_start[6] + i6, s_start[7] + i7));
}
}
}
}
}
}
}
}
}
};
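// Copies the region where slice_s and slice_d overlap within a tensor of the
// given shape: the slices are intersected, the intersection is re-expressed in
// each slice's local coordinates, and that sub-block is copied from ptr_s to
// ptr_d. Returns false if the slices do not intersect or do not fit the shape.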
template <typename SrcT, typename DstT>
static bool CopyDataFromTensorSliceToTensorSlice(const TensorShape& shape,
const TensorSlice& slice_s,
const TensorSlice& slice_d,
const SrcT* ptr_s,
DstT* ptr_d) {
CHECK_LE(shape.dims(), kTensorSliceMaxRank)
<< "Only tensors of size up to " << kTensorSliceMaxRank
<< " are supported";
TensorSlice inter;
if (!slice_s.Intersect(slice_d, &inter)) {
return false;
} else {
TensorShape shp_s, shp_d;
Status s;
s = slice_s.SliceTensorShape(shape, &shp_s);
if (!s.ok()) {
LOG(WARNING) << s;
return false;
}
s = slice_d.SliceTensorShape(shape, &shp_d);
if (!s.ok()) {
LOG(WARNING) << s;
return false;
}
TensorSlice rel_s, rel_d;
slice_s.ComputeRelative(inter, &rel_s);
slice_d.ComputeRelative(inter, &rel_d);
auto t_s = GetEigenTensorMapFromTensorShape(shp_s, ptr_s);
auto t_d = GetEigenTensorMapFromTensorShape(shp_d, ptr_d);
Eigen::DSizes<Eigen::DenseIndex, kTensorSliceMaxRank> s_start, s_len,
d_start, d_len;
rel_s.FillIndicesAndSizes<kTensorSliceMaxRank>(shp_s, &s_start, &s_len);
rel_d.FillIndicesAndSizes<kTensorSliceMaxRank>(shp_d, &d_start, &d_len);
CopyThatWorksWithStringPointer<DstT>::Copy(t_s, s_start, s_len, t_d,
d_start);
return true;
}
}
}
}
#endif
#include "tensorflow/lite/kernels/tensor_slice_util.h"
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
namespace tflite {
namespace ops {
namespace builtin {
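// Reads the whole index vector stored along `dim_to_read` of indices_tensor at
// the position selected by `other_indices` in the remaining dimensions.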
template <typename IndexType>
Index<IndexType> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<IndexType>& other_indices,
int64_t dim_to_read) {
Index<IndexType> index;
index.reserve(tensor_shape.DimensionsCount());
int shift = 0;
for (int64_t dim = 0; dim < tensor_shape.DimensionsCount(); ++dim) {
if (dim == dim_to_read) {
index.push_back(0);
shift = 1;
} else {
index.push_back(other_indices[dim - shift]);
}
}
int64_t index_vector_size = tensor_shape.Dims(dim_to_read);
Index<IndexType> result;
result.reserve(index_vector_size);
for (IndexType index_vector_idx = 0; index_vector_idx < index_vector_size;
++index_vector_idx) {
index[dim_to_read] = index_vector_idx;
IndexType flat_index = TensorIndexToFlat(
index.data(), tensor_shape.DimensionsCount(), tensor_shape);
const IndexType* tensor_data = GetTensorData<IndexType>(indices_tensor);
result.push_back(tensor_data[flat_index]);
}
return result;
}
template Index<int32_t> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<int32_t>& other_indices,
int64_t dim_to_read);
template Index<int64_t> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<int64_t>& other_indices,
int64_t dim_to_read);
}
}
} | #include "tensorflow/core/util/tensor_slice_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(TensorSliceUtilTest, CopyTensorSliceToTensorSlice) {
TensorShape shape({4, 5});
{
TensorSlice slice_s = TensorSlice::ParseOrDie("1,2:1,3");
TensorSlice slice_d = TensorSlice::ParseOrDie("1,2:1,3");
const float ptr_s[] = {6, 7, 8, 11, 12, 13};
float ptr_d[6];
for (int i = 0; i < 6; ++i) {
ptr_d[i] = 0;
}
EXPECT_TRUE(CopyDataFromTensorSliceToTensorSlice(shape, slice_s, slice_d,
ptr_s, ptr_d));
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(ptr_s[i], ptr_d[i]);
}
}
{
TensorSlice slice_s = TensorSlice::ParseOrDie("1,2:1,3");
TensorSlice slice_d = TensorSlice::ParseOrDie("3,1:2,3");
const float ptr_s[] = {6, 7, 8, 11, 12, 13};
float ptr_d[6];
EXPECT_FALSE(CopyDataFromTensorSliceToTensorSlice(shape, slice_s, slice_d,
ptr_s, ptr_d));
}
{
TensorSlice slice_s = TensorSlice::ParseOrDie("0,3:0,3");
TensorSlice slice_d = TensorSlice::ParseOrDie("1,2:1,4");
const float ptr_s[] = {0, 1, 2, 5, 6, 7, 10, 11, 12};
float ptr_d[8];
for (int i = 0; i < 8; ++i) {
ptr_d[i] = 0;
}
EXPECT_TRUE(CopyDataFromTensorSliceToTensorSlice(shape, slice_s, slice_d,
ptr_s, ptr_d));
const float expected[] = {6, 7, 0, 0, 11, 12, 0, 0};
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(expected[i], ptr_d[i]);
}
}
}
}
} |
891 | cpp | tensorflow/tensorflow | exp | tensorflow/lite/kernels/exp.cc | tensorflow/lite/kernels/exp_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
#include <cmath>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void Exp(const T* input_data, const size_t num_elements,
T* output_data) {
ruy::profiler::ScopeLabel label("Exp");
for (size_t idx = 0; idx < num_elements; ++idx) {
output_data[idx] = std::exp(input_data[idx]);
}
}
}
}
#endif
#include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace exp {
enum KernelType {
kReference,
};
struct ExpContext {
ExpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
union {
int8_t lut_int8[LUTSize<int8_t>()];
int16_t lut_int16[LUTSize<int16_t>()];
};
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
ExpContext op_context(context, node);
const TfLiteTensor* input = op_context.input;
TfLiteTensor* output = op_context.output;
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims);
output->type = input->type;
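  // For quantized types the op is evaluated with a lookup table: every
  // possible quantized input value is mapped through exp() here at prepare
  // time, so Eval becomes a plain table lookup.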
if (input->type == kTfLiteInt8) {
LUTPopulate<int8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point, [](float value) { return std::exp(value); },
data->lut_int8);
} else if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
LUTPopulate<int16_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point, [](float value) { return std::exp(value); },
data->lut_int16);
}
return context->ResizeTensor(context, op_context.output, output_dims);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
ExpContext op_context(context, node);
if (kernel_type == kReference) {
switch (op_context.input->type) {
case kTfLiteFloat32:
reference_ops::Exp(GetTensorData<float>(op_context.input),
NumElements(op_context.input),
GetTensorData<float>(op_context.output));
break;
case kTfLiteInt8:
reference_integer_ops::LookupTable(
GetTensorData<int8_t>(op_context.input),
NumElements(op_context.input), data->lut_int8,
GetTensorData<int8_t>(op_context.output));
break;
case kTfLiteInt16:
reference_integer_ops::LookupTable(
GetTensorData<int16_t>(op_context.input),
NumElements(op_context.input), data->lut_int16,
GetTensorData<int16_t>(op_context.output));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by Exp.",
op_context.input->type);
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXP_REF() {
static TfLiteRegistration r = {exp::Init, exp::Free, exp::Prepare,
exp::Eval<exp::kReference>};
return &r;
}
TfLiteRegistration* Register_EXP() { return Register_EXP_REF(); }
}
}
} | #include <math.h>
#include <initializer_list>
#include <limits>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class BaseExpOpModel : public SingleOpModel {
public:
BaseExpOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_EXP, BuiltinOptions_ExpOptions,
CreateExpOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int output_;
};
class FloatExpOpModel : public BaseExpOpModel {
public:
using BaseExpOpModel::BaseExpOpModel;
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
class QuantizedExpOpModel : public BaseExpOpModel {
public:
using BaseExpOpModel::BaseExpOpModel;
template <class T>
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename integer_dtype>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
template <typename T>
inline float GetTolerance(float min, float max) {
float kQuantizedTolerance = (max - min) / (std::numeric_limits<T>::max() -
std::numeric_limits<T>::min());
if (std::is_same<T, int8_t>::value) {
kQuantizedTolerance += (max - min) / 256.0f;
} else if (std::is_same<T, int16_t>::value) {
kQuantizedTolerance += (max - min) / 512.0f;
}
return kQuantizedTolerance;
}
TEST(ExpOpTest, ExpFloat) {
std::initializer_list<float> data = {0.0f, 1.0f, -1.0f, 100.0f,
-100.0f, 0.01f, -0.01f};
FloatExpOpModel m({TensorType_FLOAT32, {1, 1, 7}}, {TensorType_FLOAT32, {}});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 7}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{std::exp(0.0f), std::exp(1.0f), std::exp(-1.0f), std::exp(100.0f),
std::exp(-100.0f), std::exp(0.01f), std::exp(-0.01f)})));
}
template <TensorType tensor_type, typename integer_dtype>
void QuantizedExpSymmetricTest() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
const float kQuantizedTolerance = GetTolerance<integer_dtype>(-3.1, 3.1);
QuantizedExpOpModel m({tensor_type, {1, 2, 2, 2}, 1.3f * kMin, 1.3f * kMax},
{tensor_type, {}, 3.01f * kMin, 3.01f * kMax});
m.SetInput<integer_dtype>({-1.3, -1.0, -0.3, 0, 0.1, 0.5, 1.0, 1.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{0.2725, 0.3679, 0.7408, 1.0, 1.1052, 1.6487, 2.7183, 3.0042},
kQuantizedTolerance)));
}
TEST(ExpOpTest, ExpSymmetricInt8) {
QuantizedExpSymmetricTest<TensorType_INT8, int8_t>();
}
TEST(ExpOpTest, ExpSymmetricInt16) {
QuantizedExpSymmetricTest<TensorType_INT16, int16_t>();
}
template <TensorType tensor_type, typename integer_dtype>
void QuantizedExpAsymmetricTest() {
const float kQuantizedTolerance = GetTolerance<integer_dtype>(-1.3, 3.01);
QuantizedExpOpModel m({tensor_type, {1, 2, 2, 2}, -1.3, 1.1},
{tensor_type, {}, 0.0, 3.01});
m.SetInput<integer_dtype>({-1.3, -1.0, -0.3, 0, 0.1, 0.5, 1.0, 1.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{0.2725, 0.3679, 0.7408, 1.0, 1.1052, 1.6487, 2.7183, 3.0042},
kQuantizedTolerance)));
}
TEST(ExpOpTest, ExpAsymmetricInt8) {
QuantizedExpAsymmetricTest<TensorType_INT8, int8_t>();
}
}
} |
892 | cpp | tensorflow/tensorflow | depthwise_conv | tensorflow/lite/kernels/depthwise_conv.cc | tensorflow/lite/kernels/depthwise_conv_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_DEPTHWISE_CONV_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_DEPTHWISE_CONV_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewDepthwiseConvolutionNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
#include "tensorflow/lite/delegates/gpu/gl/workgroups/ideal_workgroup_picker.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class DepthwiseConvolution : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"DepthWise Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const DepthwiseConvolution2DAttributes&>(ctx.op_attr);
auto weights = attr.weights.shape;
const int offsets_count = weights.h * weights.w;
const bool offsets_count_too_large = offsets_count > kMaxConstArraySize;
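    // For small kernels the per-tap input offsets are precomputed on the host
    // and handed to the shader as a small constant array; larger kernels fall
    // back to recomputing the offsets inside the shader's kernel loops.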
std::vector<Variable> parameters;
if (offsets_count_too_large) {
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"padding_w", attr.padding.prepended.w},
{"padding_h", attr.padding.prepended.h},
{"dilation_w", attr.dilations.w},
{"dilation_h", attr.dilations.h},
{"kernel_w", weights.w},
{"kernel_h", weights.h},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"channel_multiplier", weights.o},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
} else {
std::vector<int2> offsets;
for (int h = 0; h < weights.h; ++h) {
for (int w = 0; w < weights.w; ++w) {
offsets.emplace_back(w * attr.dilations.w - attr.padding.prepended.w,
h * attr.dilations.h - attr.padding.prepended.h);
}
}
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"offsets_count", offsets_count},
{"offsets", offsets},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"channel_multiplier", weights.o},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
}
bool non_empty_padding =
attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0;
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(ConvertToPIOHW4(attr.weights))}};
std::string source;
if (offsets_count_too_large) {
source = R"(
int offsets_count = $kernel_w$ * $kernel_h$;
int src_layer_offset = (gid.z % $channel_multiplier$) * 4;
int i = 0;
for (int ky = 0; ky < $kernel_h$; ky++) {
for (int kx = 0; kx < $kernel_w$; kx++, i++) {
ivec2 coord = gid.xy * $stride$ + ivec2(kx * $dilation_w$ - $padding_w$, ky * $dilation_h$ - $padding_h$);)";
} else {
source = R"(
int offsets_count = $offsets_count$;
int src_layer_offset = (gid.z % $channel_multiplier$) * 4;
for (int i = 0; i < offsets_count; ++i) {
ivec2 coord = gid.xy * $stride$ + $offsets[i]$;)";
}
if (non_empty_padding) {
source += R"(
if (coord.x < 0 || coord.y < 0 ||
coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
})";
}
source += R"(
int src_layer = gid.z / $channel_multiplier$;
vec4 input_ = $input_data_0[coord.x, coord.y, src_layer]$;
vec4 input_shifted = vec4(
input_[(src_layer_offset + 0) / $channel_multiplier$],
input_[(src_layer_offset + 1) / $channel_multiplier$],
input_[(src_layer_offset + 2) / $channel_multiplier$],
input_[(src_layer_offset + 3) / $channel_multiplier$]
);
value_0 += input_shifted * $weights[gid.z * offsets_count + i]$;
}
)";
if (offsets_count_too_large) {
source += R"(
}
)";
}
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::DEPTHWISE_CONVOLUTION,
HW(attr.weights.shape.h, attr.weights.shape.w), attr.strides,
OHWI(attr.weights.shape.o, ctx.input_shapes[0][1],
ctx.input_shapes[0][2], ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewDepthwiseConvolutionNodeShader() {
return std::make_unique<DepthwiseConvolution>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(DepthwiseConvTest, O4H1W1I2Strides1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 1, 2);
DepthwiseConvolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 4;
bias.id = 1;
bias.data = {1, 2, 3, 4};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 1, 1, 2);
weights.id = 2;
weights.data = {1, 3, 2, 4};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 1, 1, 4);
SingleOpModel model(
{ToString(OperationType::DEPTHWISE_CONVOLUTION), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 3}));
ASSERT_OK(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 12, 16}));
}
TEST(DepthwiseConvTest, O2H1W1I1Strides2x2Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
DepthwiseConvolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 4;
bias.id = 1;
bias.data = {0, 0};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 1, 1, 1);
weights.id = 1;
weights.data = {1, 3};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model(
{ToString(OperationType::DEPTHWISE_CONVOLUTION), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 1, 1, 0, 1, 1, 0, 1}));
ASSERT_OK(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 3, 1, 3, 1, 3, 1, 3}));
}
TEST(DepthwiseConvTest, O2H2W2I1Strides1x1Dilation2x2) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
DepthwiseConvolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 4;
bias.id = 1;
bias.data = {0, 0};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 2, 2, 1);
weights.id = 1;
weights.data = {1, 2, 3, 4, 5, 6, 7, 8};
attr.weights = std::move(weights);
attr.dilations = HW(2, 2);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 1, 1, 2);
SingleOpModel model(
{ToString(OperationType::DEPTHWISE_CONVOLUTION), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 1, 1, 0, 1, 1, 0, 1}));
ASSERT_OK(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {10, 26}));
}
}
}
}
} |
893 | cpp | tensorflow/tensorflow | fully_connected | tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc | tensorflow/lite/delegates/gpu/gl/kernels/fully_connected_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_FULLY_CONNECTED_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewFullyConnectedNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class FullyConnectedBuffers : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const FullyConnectedAttributes&>(ctx.op_attr);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
constexpr int kWorkgroupHintX = 4;
constexpr int kWorkgroupHintY = 4;
std::vector<Variable> parameters = {
{"src_depth", src_depth},
{"dst_depth", dst_depth},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(ConvertToPHWO4I4(attr.weights))}};
std::string source = R"(
const int threads = int(gl_WorkGroupSize.y);
const int workers = int(gl_WorkGroupSize.x);
ivec3 tid = ivec3(gl_LocalInvocationID);
if (gid.x < $dst_depth$) {
int offset = 4 * gid.x * $src_depth$ + 4 * tid.y;
for (int d = tid.y; d < $src_depth$; d += threads, offset += 4 * threads) {
vec4 src = $input_data_0[0, 0, d]$;
value_0.x += dot(src, $weights[offset + 0]$);
value_0.y += dot(src, $weights[offset + 1]$);
value_0.z += dot(src, $weights[offset + 2]$);
value_0.w += dot(src, $weights[offset + 3]$);
}
sh_mem[workers * tid.y + tid.x] = value_0;
}
memoryBarrierShared();
barrier();
if (tid.y > 0 || gid.x >= $dst_depth$) {
return;
}
for (int t = 1; t < threads; t++) {
value_0 += sh_mem[workers * t + tid.x];
}
)";
if (!attr.bias.data.empty()) {
source += " value_0 += $bias[gid.x]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
source += " $output_data_0[0, 0, gid.x] = value_0$;";
std::vector<Variable> shared_variables = {
#ifdef __APPLE__
{"sh_mem", std::vector<float4>(32)},
#else
{"sh_mem", std::vector<float4>(0)},
#endif
};
*generated_code = {
std::move(parameters),
std::move(objects),
std::move(shared_variables),
uint3(dst_depth, kWorkgroupHintY, 1),
uint3(kWorkgroupHintX, kWorkgroupHintY, 1),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::ONLY_DEFINITIONS,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewFullyConnectedNodeShader() {
return std::make_unique<FullyConnectedBuffers>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(FullyConnectedTest, MatrixByVectorMultiplication) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 1, 2);
FullyConnectedAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 4;
bias.id = 1;
bias.data = {1, 2, 3, 4};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(4, 1, 1, 2);
weights.id = 2;
weights.data = {1, 2, 3, 4, 5, 6, 7, 8};
attr.weights = std::move(weights);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 1, 1, 4);
SingleOpModel model({ToString(OperationType::FULLY_CONNECTED), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2}));
ASSERT_OK(model.Invoke(*NewFullyConnectedNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {6, 13, 20, 27}));
}
}
}
}
} |
894 | cpp | tensorflow/tensorflow | conv | tensorflow/lite/delegates/gpu/gl/kernels/conv.cc | tensorflow/lite/delegates/gpu/gl/kernels/conv_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONV_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONV_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewConvolutionNodeShader();
std::unique_ptr<NodeShader> NewConvolution1x1NodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
#include "tensorflow/lite/delegates/gpu/gl/workgroups/ideal_workgroup_picker.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class Convolution : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const Convolution2DAttributes&>(ctx.op_attr);
if (attr.groups != 1) {
return absl::UnimplementedError(
"Convolution does not support more than 1 group");
}
auto weights = attr.weights.shape;
const int offsets_count = weights.h * weights.w;
const bool offsets_count_too_large = offsets_count > kMaxConstArraySize;
std::vector<Variable> parameters;
if (offsets_count_too_large) {
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"padding_w", attr.padding.prepended.w},
{"padding_h", attr.padding.prepended.h},
{"dilation_w", attr.dilations.w},
{"dilation_h", attr.dilations.h},
{"kernel_w", weights.w},
{"kernel_h", weights.h},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
} else {
std::vector<int2> offsets;
for (int h = 0; h < weights.h; ++h) {
for (int w = 0; w < weights.w; ++w) {
offsets.emplace_back(w * attr.dilations.w - attr.padding.prepended.w,
h * attr.dilations.h - attr.padding.prepended.h);
}
}
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"offsets_count", offsets_count},
{"offsets", offsets},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
}
bool non_empty_padding =
attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0;
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(Get3DSizeForPHWO4I4(attr.weights.shape),
ConvertToPHWO4I4(attr.weights))}};
std::string source;
if (offsets_count_too_large) {
source = R"(
int i = 0;
for (int ky = 0; ky < $kernel_h$; ky++) {
for (int kx = 0; kx < $kernel_w$; kx++, i++) {
ivec2 coord = gid.xy * $stride$ + ivec2(kx * $dilation_w$ - $padding_w$, ky * $dilation_h$ - $padding_h$);)";
} else {
source = R"(
for (int i = 0; i < $offsets_count$; ++i) {
ivec2 coord = gid.xy * $stride$ + $offsets[i]$;)";
}
if (non_empty_padding) {
source += R"(
if (coord.x < 0 || coord.y < 0 || coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
})";
}
source += R"(
for (int l = 0; l < $src_depth$; ++l) {
vec4 input_ = $input_data_0[coord.x, coord.y, l]$;
value_0.x += dot(input_, $weights[l * 4 + 0, i, gid.z]$);
value_0.y += dot(input_, $weights[l * 4 + 1, i, gid.z]$);
value_0.z += dot(input_, $weights[l * 4 + 2, i, gid.z]$);
value_0.w += dot(input_, $weights[l * 4 + 3, i, gid.z]$);
}
}
)";
if (offsets_count_too_large) {
source += R"(
}
)";
}
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::CONVOLUTION_2D,
HW(weights.h, weights.w), attr.strides, uint3(0, 0, 0),
OHWI(weights.o, ctx.input_shapes[0][1], ctx.input_shapes[0][2],
ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
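// Picks how many adjacent output pixels along a row one invocation of the
// 1x1 convolution shader computes: 4 or 2 when the width divides evenly,
// always 1 on AMD, and at most 2 on Mali when precision loss is not allowed.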
int SelectMultiplier(int32_t input_width,
const NodeShader::GenerationContext& ctx) {
std::vector<int> multipliers = {4, 2};
if (ctx.gpu_info->IsAMD()) {
return 1;
}
if (!ctx.compiler_options.allow_precision_loss && ctx.gpu_info->IsMali()) {
multipliers = {2};
}
for (int i : multipliers) {
if (input_width % i == 0) {
return i;
}
}
return 1;
}
class Convolution1x1 : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const Convolution2DAttributes&>(ctx.op_attr);
if (attr.weights.shape.h != 1 || attr.weights.shape.w != 1) {
return absl::UnimplementedError("Height and width should be 1.");
}
if (attr.dilations.h != 1 || attr.dilations.w != 1) {
return absl::UnimplementedError("Dilations are not supported.");
}
if (attr.strides.h != 1 || attr.strides.w != 1) {
return absl::UnimplementedError("Strides are not supported.");
}
if (attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0) {
return absl::UnimplementedError("Padding is not supported.");
}
int multiplier = SelectMultiplier(ctx.input_shapes[0][2], ctx);
std::vector<Variable> parameters = {
{"src_depth",
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights",
MakeReadonlyObject(uint3(4, DivideRoundUp(attr.weights.shape.i, 4),
DivideRoundUp(attr.weights.shape.o, 4)),
ConvertToPHWO4I4(attr.weights))}};
std::string source;
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, "highp vec4 result", i, " = vec4(0);\n");
}
absl::StrAppend(&source, "vec4 f;\n");
absl::StrAppend(&source, "for (int l = 0; l < $src_depth$; ++l) {\n");
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, " vec4 input", i, " = $input_data_0[gid.x * ",
multiplier, " + ", i, ",gid.y,l]$;\n");
}
for (int k = 0; k < 4; k++) {
absl::StrAppend(&source, " f = $weights[", k, ", l, gid.z]$;\n");
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, " result", i, "[", k, "] += dot(input", i,
", f);\n");
}
}
absl::StrAppend(&source, "}\n");
if (!attr.bias.data.empty()) {
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
absl::StrAppend(&source, "vec4 b = $bias[gid.z]$;\n");
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, "result", i, " += b;\n");
}
}
if (multiplier != 1) {
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, "$inplace_update:result", i, "$\n");
absl::StrAppend(&source, "$output_data_0[gid.x * ", multiplier, " + ",
i, ",gid.y,gid.z] = result", i, "$;\n");
}
} else {
absl::StrAppend(&source, "value_0 = result0;\n");
}
auto dst_depth = DivideRoundUp(ctx.output_shapes[0][3], 4);
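    // Workgroup size heuristic: as the output depth (in 4-channel slices)
    // grows, threads shift from the spatial dimensions toward the depth
    // dimension, with a separate table tuned for Adreno GPUs.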
uint3 workgroup = uint3(16, 16, 1);
if (ctx.gpu_info->IsAdreno()) {
if (dst_depth >= 2) {
workgroup = uint3(8, 8, 2);
}
if (dst_depth >= 4) {
workgroup = uint3(4, 8, 4);
}
if (dst_depth >= 8) {
workgroup = uint3(4, 4, 8);
}
if (dst_depth >= 32) {
workgroup = uint3(4, 4, 16);
}
if (dst_depth >= 64) {
workgroup = uint3(2, 8, 16);
}
} else {
if (dst_depth >= 2) {
workgroup = uint3(16, 8, 2);
}
if (dst_depth >= 4) {
workgroup = uint3(16, 4, 4);
}
if (dst_depth >= 8) {
workgroup = uint3(8, 4, 8);
}
if (dst_depth >= 32) {
workgroup = uint3(8, 4, 8);
}
if (dst_depth >= 64) {
workgroup = uint3(8, 4, 8);
}
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(ctx.output_shapes[0][2] / multiplier, ctx.output_shapes[0][1],
DivideRoundUp(ctx.output_shapes[0][3], 4)),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::CONVOLUTION_2D,
HW(attr.weights.shape.h, attr.weights.shape.w), attr.strides,
workgroup,
OHWI(attr.weights.shape.o, ctx.input_shapes[0][1],
ctx.input_shapes[0][2], ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
multiplier == 1 ? IOStructure::AUTO
: IOStructure::ONLY_DEFINITIONS,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewConvolutionNodeShader() {
return std::make_unique<Convolution>();
}
std::unique_ptr<NodeShader> NewConvolution1x1NodeShader() {
return std::make_unique<Convolution1x1>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/conv.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(ConvTest, O2H2W1I1Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data = {1, 1};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 2, 1, 1);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(1, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {4, 8, 4, 8, 2, 4, 2, 4}));
}
TEST(ConvTest, O1H2W2I1Stride1x1Dilation2x2) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data.push_back(0.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 2, 2, 1);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.dilations = HW(2, 2);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 1, 1, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {10}));
}
TEST(ConvTest, O1H3W3I1Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 1;
bias.id = 1;
bias.data.push_back(1.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 3, 3, 1);
weights.id = 2;
weights.data = {1, 2, 3, 1, 2, 3, 1, 2, 3};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 1, 1, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {11}));
}
TEST(ConvTest, O2H1W1I2Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 1, 2);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data = {1, 1};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 1, 1, 2);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 1, 2);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolution1x1NodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {4, 8, 4, 8}));
}
TEST(ConvTest, O1H1W1I1Stride2x2Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data.push_back(0.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 1, 1, 1);
weights.id = 2;
weights.data.push_back(2.0);
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 2, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 8, 16}));
}
}
}
}
} |
895 | cpp | tensorflow/tensorflow | transpose_conv | tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc | tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_TRANSPOSE_CONV_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_TRANSPOSE_CONV_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewConvolutionTransposedNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class ConvolutionTransposedBuffers : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution Transposed does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const ConvolutionTransposedAttributes&>(ctx.op_attr);
auto weights = attr.weights.shape;
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"kernel_size", int2(weights.w, weights.h)},
{"stride", int2(attr.stride.w, attr.stride.h)},
{"padding", int2(weights.w - 1 - attr.padding.prepended.w,
weights.h - 1 - attr.padding.prepended.h)},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights",
MakeReadonlyObject(Get3DSizeForPHWO4I4(attr.weights.shape),
ConvertToPHWO4I4Transposed(attr.weights))}};
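    // For every output pixel the shader visits only the kernel taps whose
    // stride phase (p0) lines up with that pixel and accumulates the matching
    // input contributions.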
std::string source = R"(
#define IN_BOUNDS(p, p0, p1) (all(greaterThanEqual(p, p0)) && all(lessThan(p, p1)))
ivec2 p0 = ($padding$ + $stride$ - gid.xy % $stride$) % $stride$;
for (int y = p0.y; y < $kernel_size.y$; y += $stride.y$) {
for (int x = p0.x; x < $kernel_size.x$; x += $stride.x$) {
int i = int(float(y * $kernel_size.x$) + float(x));
ivec2 idx = ivec2(vec2(gid.xy + ivec2(x, y)) - vec2($padding$));
if (IN_BOUNDS(idx, ivec2(0), ivec2($input_data_0_w$, $input_data_0_h$) * $stride$)) {
ivec2 coord = idx / $stride$;
for (int l = 0; l < $src_depth$; ++l) {
vec4 src_color = $input_data_0[coord.x, coord.y, l]$;
value_0.x += dot(src_color, $weights[l * 4 + 0, i, gid.z]$);
value_0.y += dot(src_color, $weights[l * 4 + 1, i, gid.z]$);
value_0.z += dot(src_color, $weights[l * 4 + 2, i, gid.z]$);
value_0.w += dot(src_color, $weights[l * 4 + 3, i, gid.z]$);
}
}
}
}
)";
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
uint3(),
source,
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewConvolutionTransposedNodeShader() {
return std::make_unique<ConvolutionTransposedBuffers>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(TransposeConvTest, O2H2W1I1Stride1x1DAdjacent1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
ConvolutionTransposedAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data = {1, 1};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 2, 1, 1);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(1, 0);
attr.adjacent = HW(1, 1);
attr.stride = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 3, 3, 2);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2, 4, 2, 4, 1, 1, 4, 8, 4, 8, 1, 1, 3,
5, 3, 5, 1, 1}));
}
TEST(TransposeConvTest, O1H2W2I1Stride1x1Adjacent2x2) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
ConvolutionTransposedAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data.push_back(0.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 2, 2, 1);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.adjacent = HW(2, 2);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.stride = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 6, 6, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{1, 3, 3, 2, 0, 0, 4, 10, 10, 6, 0, 0, 4, 10, 10, 6, 0, 0,
3, 7, 7, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(TransposeConvTest, O1H3W3I1Stride1x1Adjacent1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
ConvolutionTransposedAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 1;
bias.id = 1;
bias.data.push_back(1.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 3, 3, 1);
weights.id = 2;
weights.data = {1, 2, 3, 1, 2, 3, 1, 2, 3};
attr.weights = std::move(weights);
attr.adjacent = HW(1, 1);
attr.padding.prepended = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.stride = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 4, 4, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{7, 11, 7, 1, 7, 11, 7, 1, 4, 6, 4, 1, 1, 1, 1, 1}));
}
TEST(TransposeConvTest, O2H1W1I2Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 1, 2);
ConvolutionTransposedAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data = {1, 1};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 1, 1, 2);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.adjacent = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.stride = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 3, 2, 2);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {4, 8, 1, 1, 4, 8, 1, 1, 1, 1, 1, 1}));
}
TEST(TransposeConvTest, O1H1W1I1Stride2x2Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
ConvolutionTransposedAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data.push_back(0.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 1, 1, 1);
weights.id = 2;
weights.data.push_back(2.0);
attr.weights = std::move(weights);
attr.adjacent = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.stride = HW(2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 6, 6, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{2, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0}));
}
}
}
}
} |
896 | cpp | tensorflow/tensorflow | non_max_suppression | tensorflow/lite/kernels/non_max_suppression.cc | tensorflow/lite/kernels/internal/non_max_suppression_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NON_MAX_SUPPRESSION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NON_MAX_SUPPRESSION_H_
#include <algorithm>
#include <cmath>
#include <deque>
#include <queue>
namespace tflite {
namespace reference_ops {
struct BoxCornerEncoding {
float y1;
float x1;
float y2;
float x2;
};
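// Intersection-over-union of boxes i and j. Corner coordinates are
// normalized with min/max, so flipped boxes are handled; degenerate boxes
// yield 0.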
inline float ComputeIntersectionOverUnion(const float* boxes, const int i,
const int j) {
auto& box_i = reinterpret_cast<const BoxCornerEncoding*>(boxes)[i];
auto& box_j = reinterpret_cast<const BoxCornerEncoding*>(boxes)[j];
const float box_i_y_min = std::min<float>(box_i.y1, box_i.y2);
const float box_i_y_max = std::max<float>(box_i.y1, box_i.y2);
const float box_i_x_min = std::min<float>(box_i.x1, box_i.x2);
const float box_i_x_max = std::max<float>(box_i.x1, box_i.x2);
const float box_j_y_min = std::min<float>(box_j.y1, box_j.y2);
const float box_j_y_max = std::max<float>(box_j.y1, box_j.y2);
const float box_j_x_min = std::min<float>(box_j.x1, box_j.x2);
const float box_j_x_max = std::max<float>(box_j.x1, box_j.x2);
const float area_i =
(box_i_y_max - box_i_y_min) * (box_i_x_max - box_i_x_min);
const float area_j =
(box_j_y_max - box_j_y_min) * (box_j_x_max - box_j_x_min);
if (area_i <= 0 || area_j <= 0) return 0.0;
const float intersection_ymax = std::min<float>(box_i_y_max, box_j_y_max);
const float intersection_xmax = std::min<float>(box_i_x_max, box_j_x_max);
const float intersection_ymin = std::max<float>(box_i_y_min, box_j_y_min);
const float intersection_xmin = std::max<float>(box_i_x_min, box_j_x_min);
const float intersection_area =
std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
std::max<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
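// Single-class non-max suppression. Candidates above score_threshold are
// processed in descending score order; a positive soft_nms_sigma applies
// Gaussian score decay (soft-NMS) on top of hard IoU suppression.
// selected_scores may be null when decayed scores are not needed.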
inline void NonMaxSuppression(const float* boxes, const int num_boxes,
const float* scores, const int max_output_size,
const float iou_threshold,
const float score_threshold,
const float soft_nms_sigma, int* selected_indices,
float* selected_scores,
int* num_selected_indices) {
struct Candidate {
int index;
float score;
int suppress_begin_index;
};
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
for (int i = 0; i < num_boxes; ++i) {
if (scores[i] > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, scores[i], 0}));
}
}
*num_selected_indices = 0;
int num_outputs = std::min(static_cast<int>(candidate_priority_queue.size()),
max_output_size);
if (num_outputs == 0) return;
float scale = 0;
if (soft_nms_sigma > 0.0) {
scale = -0.5 / soft_nms_sigma;
}
while (*num_selected_indices < num_outputs &&
!candidate_priority_queue.empty()) {
Candidate next_candidate = candidate_priority_queue.top();
const float original_score = next_candidate.score;
candidate_priority_queue.pop();
bool should_hard_suppress = false;
for (int j = *num_selected_indices - 1;
j >= next_candidate.suppress_begin_index; --j) {
const float iou = ComputeIntersectionOverUnion(
boxes, next_candidate.index, selected_indices[j]);
if (iou >= iou_threshold) {
should_hard_suppress = true;
break;
}
if (soft_nms_sigma > 0.0) {
next_candidate.score =
next_candidate.score * std::exp(scale * iou * iou);
}
if (next_candidate.score <= score_threshold) break;
}
next_candidate.suppress_begin_index = *num_selected_indices;
if (!should_hard_suppress) {
if (next_candidate.score == original_score) {
selected_indices[*num_selected_indices] = next_candidate.index;
if (selected_scores) {
selected_scores[*num_selected_indices] = next_candidate.score;
}
++*num_selected_indices;
}
if (next_candidate.score > score_threshold) {
candidate_priority_queue.push(next_candidate);
}
}
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/non_max_suppression.h"
#include <initializer_list>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace non_max_suppression {
constexpr int kInputTensorBoxes = 0;
constexpr int kInputTensorScores = 1;
constexpr int kInputTensorMaxOutputSize = 2;
constexpr int kInputTensorIouThreshold = 3;
constexpr int kInputTensorScoreThreshold = 4;
constexpr int kInputTensorSigma = 5;
constexpr int kNMSOutputTensorSelectedIndices = 0;
constexpr int kNMSOutputTensorNumSelectedIndices = 1;
constexpr int kSoftNMSOutputTensorSelectedIndices = 0;
constexpr int kSoftNMSOutputTensorSelectedScores = 1;
constexpr int kSoftNMSOutputTensorNumSelectedIndices = 2;
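// Regular NMS (5 inputs) emits {selected_indices, num_selected_indices};
// soft-NMS (6 inputs, the extra one being sigma) additionally emits
// selected_scores.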
TfLiteStatus SetTensorSizes(TfLiteContext* context, TfLiteTensor* tensor,
std::initializer_list<int> values) {
TfLiteIntArray* size = TfLiteIntArrayCreate(values.size());
int index = 0;
for (const auto& v : values) {
size->data[index++] = v;
}
return context->ResizeTensor(context, tensor, size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int num_inputs = NumInputs(node);
const bool is_soft_nms = num_inputs == 6;
if (num_inputs != 5 && num_inputs != 6) {
TF_LITE_KERNEL_LOG(context, "Found NMS op with invalid num inputs: %d",
NumInputs(node));
return kTfLiteError;
}
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
TF_LITE_ENSURE_EQ(context, input_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_boxes), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_boxes, 1), 4);
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
TF_LITE_ENSURE_EQ(context, input_scores->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_scores), 1);
TF_LITE_ENSURE_EQ(context, num_boxes, SizeOfDimension(input_scores, 0));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
TF_LITE_ENSURE_EQ(context, input_max_output_size->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_max_output_size), 0);
const bool is_max_output_size_const =
IsConstantOrPersistentTensor(input_max_output_size);
int max_output_size_value = 0;
if (is_max_output_size_const) {
max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
}
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
TF_LITE_ENSURE_EQ(context, input_iou_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_iou_threshold), 0);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
  TF_LITE_ENSURE_EQ(context, input_score_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_score_threshold), 0);
if (is_soft_nms) {
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
TF_LITE_ENSURE_EQ(context, input_sigma->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_sigma), 0);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_selected_scores;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
output_selected_scores->type = kTfLiteFloat32;
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});
if (is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
} else {
SetTensorToDynamic(output_selected_indices);
SetTensorToDynamic(output_selected_scores);
}
} else {
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});
if (is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
} else {
SetTensorToDynamic(output_selected_indices);
}
}
return kTfLiteOk;
}
void ResetUnusedElementsToZeroes(const int max_output_size,
const int num_selected_indices,
int* selected_indices,
float* selected_scores) {
for (int i = num_selected_indices; i < max_output_size; ++i) {
selected_indices[i] = 0;
if (selected_scores) {
selected_scores[i] = 0.0;
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const bool is_soft_nms = NumInputs(node) == 6;
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
const int max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
const bool is_max_output_size_const =
IsConstantOrPersistentTensor(input_max_output_size);
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
const float iou_threshold = *GetTensorData<float>(input_iou_threshold);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
const float score_threshold = *GetTensorData<float>(input_score_threshold);
TfLiteTensor* output_selected_indices = nullptr;
TfLiteTensor* output_selected_scores = nullptr;
TfLiteTensor* output_num_selected_indices = nullptr;
if (is_soft_nms) {
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
const float soft_nms_sigma = *GetTensorData<float>(input_sigma);
if (soft_nms_sigma < 0) {
TF_LITE_KERNEL_LOG(context, "Invalid sigma value for soft NMS: %f",
soft_nms_sigma);
return kTfLiteError;
}
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
}
reference_ops::NonMaxSuppression(
input_boxes->data.f, num_boxes, input_scores->data.f,
max_output_size_value, iou_threshold, score_threshold, soft_nms_sigma,
output_selected_indices->data.i32, output_selected_scores->data.f,
output_num_selected_indices->data.i32);
ResetUnusedElementsToZeroes(
max_output_size_value, *output_num_selected_indices->data.i32,
output_selected_indices->data.i32, output_selected_scores->data.f);
} else {
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
}
reference_ops::NonMaxSuppression(
input_boxes->data.f, num_boxes, input_scores->data.f,
max_output_size_value, iou_threshold, score_threshold, 0.0,
output_selected_indices->data.i32, nullptr,
output_num_selected_indices->data.i32);
ResetUnusedElementsToZeroes(max_output_size_value,
*output_num_selected_indices->data.i32,
output_selected_indices->data.i32, nullptr);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V4() {
static TfLiteRegistration r = {nullptr, nullptr, non_max_suppression::Prepare,
non_max_suppression::Eval};
return &r;
}
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V5() {
static TfLiteRegistration r = {nullptr, nullptr, non_max_suppression::Prepare,
non_max_suppression::Eval};
return &r;
}
}
}
} | #include "tensorflow/lite/kernels/internal/reference/non_max_suppression.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
constexpr int kNumBoxes = 6;
void InitializeCandidates(std::vector<float>* boxes, std::vector<float>* scores,
bool flip_coordinates = false) {
if (!flip_coordinates) {
*boxes = {
0, 0, 1, 1,
0, 0.1, 1, 1.1,
0, -0.1, 1, 0.9,
0, 10, 1, 11,
0, 10.1, 1, 11.1,
0, 100, 1, 101
};
} else {
*boxes = {
1, 1, 0, 0,
0, 0.1, 1, 1.1,
0, .9f, 1, -0.1,
0, 10, 1, 11,
1, 10.1f, 0, 11.1,
1, 101, 0, 100
};
}
*scores = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
}
template <typename T>
void MatchFirstNElements(int num_elements, const std::vector<T>& test_values,
const std::vector<T>& reference_values) {
EXPECT_LT(num_elements, test_values.size());
EXPECT_EQ(num_elements, reference_values.size());
for (int i = 0; i < num_elements; ++i) {
EXPECT_EQ(test_values[i], reference_values[i]);
}
}
TEST(NonMaxSuppression, TestZeroBoxes) {
std::vector<float> boxes(1);
std::vector<float> scores(1);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
const int max_output_size = 4;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), 0, scores.data(), max_output_size,
iou_threshold, score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromIdenticalBoxes) {
std::vector<float> boxes(kNumBoxes * 4);
std::vector<float> scores(kNumBoxes);
for (int i = 0; i < kNumBoxes; ++i) {
boxes[i * 4 + 0] = 0;
boxes[i * 4 + 1] = 0;
boxes[i * 4 + 2] = 1;
boxes[i * 4 + 3] = 1;
scores[i] = 0.75;
}
const float iou_threshold = 0.5;
float score_threshold = 0.5;
const int max_output_size = kNumBoxes;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_scores, {.75});
score_threshold = 0.95;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithZeroScoreThreshold) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
int max_output_size;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 3);
MatchFirstNElements(3, selected_indices, {3, 0, 5});
MatchFirstNElements(3, selected_scores, {0.95, 0.9, 0.3});
max_output_size = 2;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, max_output_size);
MatchFirstNElements(max_output_size, selected_indices, {3, 0});
MatchFirstNElements(max_output_size, selected_scores, {0.95, 0.9});
max_output_size = 0;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 0);
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithScoreThreshold) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
int max_output_size;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
MatchFirstNElements(2, selected_indices, {3, 0});
MatchFirstNElements(2, selected_scores, {0.95, 0.9});
max_output_size = 1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_indices, {3});
MatchFirstNElements(1, selected_scores, {0.95});
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithFlippedCoordinates) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores, true);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
const int max_output_size = 3;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
MatchFirstNElements(2, selected_indices, {3, 0});
MatchFirstNElements(2, selected_scores, {0.95, 0.9});
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 3);
MatchFirstNElements(3, selected_indices, {3, 0, 5});
MatchFirstNElements(3, selected_scores, {0.95, 0.9, 0.3});
}
TEST(NonMaxSuppression, TestIoUThresholdBoundaryCases) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float score_threshold = 0.4;
const int max_output_size = 4;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size,
0.0, score_threshold, 0.0,
selected_indices.data(), selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 1);
MatchFirstNElements(1, selected_indices, {3});
MatchFirstNElements(1, selected_scores, {0.95});
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size,
0.9999,
0.0, 0.0, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, max_output_size);
MatchFirstNElements(max_output_size, selected_indices, {3, 0, 1, 2});
MatchFirstNElements(max_output_size, selected_scores, {0.95, 0.9, 0.75, 0.6});
}
TEST(NonMaxSuppression, TestSelectFromThreeClustersWithSoftNMS) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 1.0;
float score_threshold = 0.0;
const float soft_nms_sigma = 0.5;
int max_output_size = 6;
std::vector<int> selected_indices(6);
std::vector<float> selected_scores(6);
int num_selected_indices = -1;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, soft_nms_sigma, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 6);
EXPECT_THAT(selected_indices, ElementsAreArray({3, 0, 1, 5, 4, 2}));
EXPECT_THAT(selected_scores,
ElementsAreArray(
ArrayFloatNear({0.95, 0.9, 0.384, 0.3, 0.256, 0.197}, 1e-3)));
score_threshold = 0.299;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, soft_nms_sigma, selected_indices.data(),
selected_scores.data(), &num_selected_indices);
EXPECT_EQ(num_selected_indices, 4);
MatchFirstNElements(4, selected_indices, {3, 0, 1, 5});
}
TEST(NonMaxSuppression, TestNullSelectedScoresOutput) {
std::vector<float> boxes;
std::vector<float> scores;
InitializeCandidates(&boxes, &scores);
const float iou_threshold = 0.5;
const float score_threshold = 0.4;
int max_output_size;
std::vector<int> selected_indices(6);
int num_selected_indices = -1;
max_output_size = 100;
reference_ops::NonMaxSuppression(
boxes.data(), kNumBoxes, scores.data(), max_output_size, iou_threshold,
score_threshold, 0.0, selected_indices.data(),
nullptr, &num_selected_indices);
EXPECT_EQ(num_selected_indices, 2);
}
}
} |
897 | cpp | tensorflow/tensorflow | rng_util | tensorflow/lite/kernels/rng_util.cc | tensorflow/lite/kernels/rng_util_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_RNG_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_RNG_UTIL_H_
#include <array>
#include <cstdint>
namespace tflite {
namespace rng {
std::array<uint32_t, 2> Threefry2x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 2> ctr);
std::array<uint32_t, 4> Philox4x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 4> ctr);
}
}
#endif
#include "tensorflow/lite/kernels/rng_util.h"
#include <array>
#include <cstdint>
namespace tflite {
namespace rng {
static constexpr uint32_t kThreefryParity = 0x1BD11BDA;
static constexpr uint64_t kPhiloxM4x32A = 0xD2511F53;
static constexpr uint64_t kPhiloxM4x32B = 0xCD9E8D57;
static constexpr uint32_t kPhiloxW32A = 0x9E3779B9;
static constexpr uint32_t kPhiloxW32B = 0xBB67AE85;
std::array<uint32_t, 2> Threefry2x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 2> ctr) {
constexpr std::array<std::array<int, 4>, 2> rotations{
std::array<int, 4>{13, 15, 26, 6}, std::array<int, 4>{17, 29, 16, 24}};
uint32_t key_2 = key_0 ^ key_1 ^ kThreefryParity;
ctr[0] += key_0;
ctr[1] += key_1;
auto apply_round = [&](int r, uint32_t ks0, uint32_t ks1, int b) {
for (int rot : rotations[r]) {
ctr[0] += ctr[1];
ctr[1] = (ctr[1] << rot) | (ctr[1] >> (32 - rot));
ctr[1] ^= ctr[0];
}
ctr[0] += ks0;
ctr[1] += ks1 + b;
};
apply_round(0, key_1, key_2, 1);
apply_round(1, key_2, key_0, 2);
apply_round(0, key_0, key_1, 3);
apply_round(1, key_1, key_2, 4);
apply_round(0, key_2, key_0, 5);
return ctr;
}
std::array<uint32_t, 4> Philox4x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 4> ctr) {
struct u32pair {
uint32_t low;
uint32_t high;
};
union prod {
u32pair hilo;
uint64_t prod;
};
for (int i = 0; i < 10; ++i) {
prod p0, p1;
p0.prod = kPhiloxM4x32A * static_cast<uint64_t>(ctr[0]);
p1.prod = kPhiloxM4x32B * static_cast<uint64_t>(ctr[2]);
ctr = {{p1.hilo.high ^ ctr[1] ^ key_0, p1.hilo.low,
p0.hilo.high ^ ctr[3] ^ key_1, p0.hilo.low}};
key_0 += kPhiloxW32A;
key_1 += kPhiloxW32B;
}
return ctr;
}
}
} | #include "tensorflow/lite/kernels/rng_util.h"
#include <array>
#include <cstdint>
#include <limits>
#include <gtest/gtest.h>
namespace tflite {
namespace {
using tflite::rng::Philox4x32;
using tflite::rng::Threefry2x32;
TEST(RngUtilTest, Threefry2x32Test) {
std::array<uint32_t, 2> results = Threefry2x32(0, 0, {0, 0});
std::array<uint32_t, 2> expected = {0x6B200159u, 0x99BA4EFEu};
ASSERT_EQ(results, expected);
uint32_t u32_max = std::numeric_limits<uint32_t>::max();
results = Threefry2x32(u32_max, u32_max, {u32_max, u32_max});
expected = {0x1CB996FCu, 0xBB002BE7u};
ASSERT_EQ(results, expected);
results = Threefry2x32(0x13198A2Eu, 0x03707344u, {0x243F6A88u, 0x85A308D3u});
expected = {0xC4923A9Cu, 0x483DF7A0u};
ASSERT_EQ(results, expected);
}
TEST(RngUtilTest, Philox4x32Test) {
std::array<uint32_t, 4> results = Philox4x32(0, 0, {0, 0, 0, 0});
std::array<uint32_t, 4> expected = {0x6627E8D5u, 0xE169C58Du, 0xBC57AC4Cu,
0x9B00DBD8u};
ASSERT_EQ(results, expected);
uint32_t u32_max = std::numeric_limits<uint32_t>::max();
results = Philox4x32(u32_max, u32_max, {u32_max, u32_max, u32_max, u32_max});
expected = {0x408F276Du, 0x41C83B0Eu, 0xA20BC7C6u, 0x6D5451FDu};
ASSERT_EQ(results, expected);
results = Philox4x32(0xA4093822u, 0x299F31D0u,
{0x243F6A88u, 0x85A308D3u, 0x13198A2Eu, 0x03707344u});
expected = {0xD16CFE09u, 0x94FDCCEBu, 0x5001E420u, 0x24126EA1u};
ASSERT_EQ(results, expected);
}
}
} |
898 | cpp | tensorflow/tensorflow | fill | tensorflow/lite/kernels/fill.cc | tensorflow/lite/kernels/fill_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
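// Broadcasts the scalar `value_data` (a 0-D value tensor) into every element
// of the output buffer.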
template <typename T>
void Fill(const RuntimeShape& value_shape, const T* value_data,
const RuntimeShape& output_shape, T* output_data) {
TFLITE_DCHECK_EQ(value_shape.DimensionsCount(), 0);
const int flat_size = output_shape.FlatSize();
for (int i = 0; i < flat_size; ++i) {
output_data[i] = *value_data;
}
}
}
}
#endif
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace fill {
namespace {
constexpr int kDimsTensor = 0;
constexpr int kValueTensor = 1;
constexpr int kOutputTensor = 0;
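// Reads the 1-D dims tensor (element type T) and resizes the output tensor to
// that shape; negative dimensions are rejected with an error.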
template <typename T>
TfLiteStatus ResizeOutputImpl(TfLiteContext* context, const TfLiteTensor* dims,
TfLiteTensor* output) {
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dims->dims->data[0]);
for (int i = 0; i < output_shape->size; ++i) {
T data = GetTensorData<T>(dims)[i];
if (data < 0) {
TfLiteIntArrayFree(output_shape);
TF_LITE_KERNEL_LOG(context, "Fill dimensions must be >= 0 got %d",
dims->type);
return kTfLiteError;
}
output_shape->data[i] = data;
}
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* dims,
TfLiteTensor* output) {
switch (dims->type) {
case kTfLiteInt32:
return ResizeOutputImpl<int32_t>(context, dims, output);
case kTfLiteInt64:
return ResizeOutputImpl<int64_t>(context, dims, output);
default:
TF_LITE_KERNEL_LOG(
context,
"Fill only currently supports int32, int64 for input 0, "
"got %d.",
dims->type);
return kTfLiteError;
}
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* dims;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDimsTensor, &dims));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TF_LITE_ENSURE_EQ(context, NumDimensions(dims), 1);
const auto dtype = dims->type;
TF_LITE_ENSURE(context, dtype == kTfLiteInt32 || dtype == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = value->type;
TF_LITE_ENSURE_EQ(context, output->params.scale, value->params.scale);
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
value->params.zero_point);
if (value->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, value->params.zero_point, 0);
}
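  // The output shape can only be computed now if the dims tensor is constant;
  // otherwise mark the output dynamic and resize it during Eval.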
if (IsConstantOrPersistentTensor(dims)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
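// Copies the single string value into every element of the output's string
// buffer.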
TfLiteStatus FillString(const TfLiteTensor* value, TfLiteTensor* output) {
DynamicBuffer buffer;
const auto string_ref = GetString(value, 0);
int n = 1;
for (int i = 0; i < output->dims->size; ++i) {
n *= output->dims->data[i];
}
for (int i = 0; i < n; ++i) {
buffer.AddString(string_ref.str, string_ref.len);
}
buffer.WriteToTensor(output, nullptr);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
const TfLiteTensor* dims;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDimsTensor, &dims));
TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
}
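// Dispatches to the reference Fill kernel for the concrete element type.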
#define TF_LITE_FILL(data_type) \
reference_ops::Fill(GetTensorShape(value), GetTensorData<data_type>(value), \
GetTensorShape(output), \
GetTensorData<data_type>(output))
switch (output->type) {
case kTfLiteInt8:
TF_LITE_FILL(int8_t);
break;
case kTfLiteInt16:
TF_LITE_FILL(int16_t);
break;
case kTfLiteInt32:
TF_LITE_FILL(int32_t);
break;
case kTfLiteInt64:
TF_LITE_FILL(int64_t);
break;
case kTfLiteFloat16:
TF_LITE_FILL(Eigen::half);
break;
case kTfLiteFloat32:
TF_LITE_FILL(float);
break;
case kTfLiteBool:
TF_LITE_FILL(bool);
break;
case kTfLiteString:
FillString(value, output);
break;
default:
TF_LITE_KERNEL_LOG(
context,
"Fill only currently supports int8, int16, int32, int64, float32, "
"bool, string for input 1, got %d.",
value->type);
return kTfLiteError;
}
#undef TF_LITE_FILL
return kTfLiteOk;
}
}
TfLiteRegistration* Register_FILL() {
static TfLiteRegistration r = {nullptr, nullptr,
fill::Prepare, fill::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "Eigen/Core"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
template <typename dims_type, typename value_type>
class FillOpModel : public SingleOpModel {
public:
explicit FillOpModel(TensorType dims_tensor_type,
std::initializer_list<int> dims_shape,
std::initializer_list<dims_type> dims_data,
value_type value, TestType input_tensor_types) {
if (input_tensor_types == TestType::kDynamic) {
dims_ = AddInput(dims_tensor_type);
} else {
dims_ = AddConstInput(dims_tensor_type, dims_data, dims_shape);
}
value_ = AddInput(GetTensorType<value_type>());
output_ = AddOutput(GetTensorType<value_type>());
SetBuiltinOp(BuiltinOperator_FILL, BuiltinOptions_FillOptions,
CreateFillOptions(builder_).Union());
BuildInterpreter({dims_shape, {}});
if (input_tensor_types == TestType::kDynamic) {
if (dims_data.size() > 0) {
PopulateTensor<dims_type>(dims_, dims_data);
}
}
PopulateTensor<value_type>(value_, {value});
}
std::vector<value_type> GetOutput() {
return ExtractVector<value_type>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int dims_;
int value_;
int output_;
};
template <typename dims_type, typename quant_type>
class QuantizedFillOpModel : public SingleOpModel {
public:
explicit QuantizedFillOpModel(TensorType dims_tensor_type,
std::initializer_list<int> dims_shape,
std::initializer_list<dims_type> dims_data,
const TensorData& tensor_data, float value) {
dims_ = AddInput(dims_tensor_type);
value_ = AddInput(tensor_data);
output_ = AddOutput(tensor_data);
SetBuiltinOp(BuiltinOperator_FILL, BuiltinOptions_FillOptions,
CreateFillOptions(builder_).Union());
BuildInterpreter({dims_shape, {}});
if (dims_data.size() > 0) {
PopulateTensor<dims_type>(dims_, dims_data);
}
QuantizeAndPopulate<quant_type>(value_, {value});
}
std::vector<quant_type> GetOutput() {
return ExtractVector<quant_type>(output_);
}
std::vector<float> GetDequantizedOutput() {
TfLiteTensor* t = interpreter_->tensor(output_);
return Dequantize(GetOutput(), t->params.scale, t->params.zero_point);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int dims_;
int value_;
int output_;
};
class FillOpTest : public ::testing::TestWithParam<TestType> {};
TEST_P(FillOpTest, FillInt32) {
FillOpModel<int32_t, int32_t> m(TensorType_INT32, {2}, {2, 3}, -11,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-11, -11, -11, -11, -11, -11}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(FillOpTest, FillInt64) {
FillOpModel<int64_t, int64_t> m(TensorType_INT64, {2}, {2, 4}, 1LL << 45,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1LL << 45, 1LL << 45, 1LL << 45, 1LL << 45,
1LL << 45, 1LL << 45, 1LL << 45, 1LL << 45}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4}));
}
TEST_P(FillOpTest, FillFloat) {
FillOpModel<int64_t, float> m(TensorType_INT64, {3}, {2, 2, 2}, 4.0,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillFloat16) {
FillOpModel<int64_t, Eigen::half> m(TensorType_INT64, {3}, {2, 2, 2},
Eigen::half(4.0f), GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillFloatInt32Dims) {
FillOpModel<int32_t, float> m(TensorType_INT32, {3}, {2, 2, 2}, 4.0,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillOutputScalar) {
FillOpModel<int64_t, float> m(TensorType_INT64, {0}, {}, 4.0, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4.0}));
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
}
TEST_P(FillOpTest, FillBool) {
FillOpModel<int64_t, bool> m(TensorType_INT64, {3}, {2, 2, 2}, true,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({true, true, true, true, true,
true, true, true}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST(FillOpTest, FillString) {
FillOpModel<int64_t, std::string> m(TensorType_INT64, {3}, {2, 2, 2}, "AB",
TestType::kDynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({"AB", "AB", "AB", "AB", "AB",
"AB", "AB", "AB"}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillInt8) {
FillOpModel<int64_t, int8_t> m(TensorType_INT64, {3}, {2, 2, 2}, 5,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
template <typename quant_type>
void QuantizedFill(float value) {
const float kMin = -1;
const float kMax =
std::numeric_limits<quant_type>::max() /
static_cast<float>(std::numeric_limits<quant_type>::max() + 1);
const TensorData tensor_data(GetTensorType<quant_type>(), {},
std::abs(value) * kMin, std::abs(value) * kMax);
QuantizedFillOpModel<int32_t, quant_type> m(TensorType_INT32, {2}, {2, 3},
tensor_data, value);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
constexpr float epsilon = 0.01f;
const float min_value = tensor_data.min - epsilon;
const float max_value = tensor_data.max + epsilon;
const float kQuantizedTolerance =
(max_value - min_value) / (std::numeric_limits<quant_type>::max() -
std::numeric_limits<quant_type>::min());
EXPECT_THAT(
m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear(
{value, value, value, value, value, value}, kQuantizedTolerance)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST(FillOpTest, QuantizedFillInt8) { QuantizedFill<int8_t>(3.14f); }
TEST(FillOpTest, QuantizedFillInt16) { QuantizedFill<int16_t>(3.14f); }
INSTANTIATE_TEST_SUITE_P(FillOpTest, FillOpTest,
::testing::Values(TestType::kConst,
TestType::kDynamic));
}
} |
899 | cpp | tensorflow/tensorflow | lstm_eval | tensorflow/lite/kernels/lstm_eval.cc | tensorflow/lite/kernels/lstm_eval_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_LSTM_EVAL_H_
#define TENSORFLOW_LITE_KERNELS_LSTM_EVAL_H_
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace lstm_eval {
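// Precomputed quantization parameters for the integer LSTM kernels. Each
// effective_*_scale_a / _b pair is a fixed-point multiplier and shift used to
// rescale intermediate products.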
struct IntegerLstmParameter {
int32_t effective_input_to_input_scale_a;
int32_t effective_input_to_input_scale_b;
int32_t effective_recurrent_to_input_scale_a;
int32_t effective_recurrent_to_input_scale_b;
int32_t effective_cell_to_input_scale_a;
int32_t effective_cell_to_input_scale_b;
int32_t effective_input_to_forget_scale_a;
int32_t effective_input_to_forget_scale_b;
int32_t effective_recurrent_to_forget_scale_a;
int32_t effective_recurrent_to_forget_scale_b;
int32_t effective_cell_to_forget_scale_a;
int32_t effective_cell_to_forget_scale_b;
int32_t effective_input_to_cell_scale_a;
int32_t effective_input_to_cell_scale_b;
int32_t effective_recurrent_to_cell_scale_a;
int32_t effective_recurrent_to_cell_scale_b;
int32_t effective_input_to_output_scale_a;
int32_t effective_input_to_output_scale_b;
int32_t effective_recurrent_to_output_scale_a;
int32_t effective_recurrent_to_output_scale_b;
int32_t effective_cell_to_output_scale_a;
int32_t effective_cell_to_output_scale_b;
int32_t effective_proj_scale_a;
int32_t effective_proj_scale_b;
int32_t effective_hidden_scale_a;
int32_t effective_hidden_scale_b;
int32_t layer_norm_input_scale_a;
int32_t layer_norm_input_scale_b;
int32_t layer_norm_forget_scale_a;
int32_t layer_norm_forget_scale_b;
int32_t layer_norm_cell_scale_a;
int32_t layer_norm_cell_scale_b;
int32_t layer_norm_output_scale_a;
int32_t layer_norm_output_scale_b;
int16_t quantized_cell_clip;
int8_t quantized_proj_clip;
int32_t hidden_zp;
int32_t cell_scale;
int32_t input_variance_guard;
int32_t forget_variance_guard;
int32_t cell_variance_guard;
int32_t output_variance_guard;
std::unique_ptr<int32_t[]> input_to_forget_effective_bias;
std::unique_ptr<int32_t[]> recurrent_to_forget_effective_bias;
std::unique_ptr<int32_t[]> input_to_cell_effective_bias;
std::unique_ptr<int32_t[]> recurrent_to_cell_effective_bias;
std::unique_ptr<int32_t[]> input_to_output_effective_bias;
std::unique_ptr<int32_t[]> recurrent_to_output_effective_bias;
std::unique_ptr<int32_t[]> input_to_input_effective_bias;
std::unique_ptr<int32_t[]> recurrent_to_input_effective_bias;
std::unique_ptr<int32_t[]> projection_effective_bias;
int32_t intermediate_scale_a[8];
int32_t intermediate_scale_b[8];
int32_t intermediate_zp[12];
};
TfLiteStatus EvalFloat(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* aux_input,
const TfLiteTensor* aux_input_to_input_weights,
const TfLiteTensor* aux_input_to_forget_weights,
const TfLiteTensor* aux_input_to_cell_weights,
const TfLiteTensor* aux_input_to_output_weights,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, bool forward_sequence, bool time_major,
int output_offset, TfLiteTensor* scratch_buffer, TfLiteTensor* output_state,
TfLiteTensor* cell_state, TfLiteTensor* output,
bool recurrent_to_input_is_diag, bool recurrent_to_forget_is_diag,
bool recurrent_to_cell_is_diag, bool recurrent_to_output_is_diag,
CpuBackendContext* context);
TfLiteStatus EvalHybrid(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_input_weights_ledger,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_forget_weights_ledger,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_cell_weights_ledger,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* input_to_output_weights_ledger,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_input_weights_ledger,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_forget_weights_ledger,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_cell_weights_ledger,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* recurrent_to_output_weights_ledger,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* aux_input,
const TfLiteTensor* aux_input_to_input_weights,
const TfLiteTensor* aux_input_to_forget_weights,
const TfLiteTensor* aux_input_to_cell_weights,
const TfLiteTensor* aux_input_to_output_weights,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights,
const TfLiteTensor* projection_weights_ledger,
const TfLiteTensor* projection_bias, const TfLiteLSTMParams* params,
bool forward_sequence, bool time_major, int output_offset,
TfLiteTensor* scratch_buffer, TfLiteTensor* input_sf,
TfLiteTensor* aux_input_sf, TfLiteTensor* output_state_sf,
TfLiteTensor* prod_scaling_factors, TfLiteTensor* recovered_cell_weights,
TfLiteTensor* input_quantized, TfLiteTensor* aux_input_quantized,
TfLiteTensor* output_state_quantized, TfLiteTensor* cell_state_quantized,
TfLiteTensor* output_state, TfLiteTensor* cell_state,
TfLiteTensor* output_scratch_buffer, TfLiteTensor* output,
TfLiteTensor* input_zp, TfLiteTensor* aux_input_zp,
TfLiteTensor* output_state_zp, TfLiteTensor* row_sums, int row_sums_size,
bool* compute_row_sums, bool recurrent_to_input_is_diag,
bool recurrent_to_forget_is_diag, bool recurrent_to_cell_is_diag,
bool recurrent_to_output_is_diag, CpuBackendContext* context);
TfLiteStatus EvalInteger8x8_16(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, bool forward_sequence, bool time_major,
const lstm_eval::IntegerLstmParameter* integer_lstm_param,
TfLiteTensor* output_state, TfLiteTensor* cell_state, TfLiteTensor* output,
TfLiteTensor* scratch0, TfLiteTensor* scratch1, TfLiteTensor* scratch2,
TfLiteTensor* scratch3, TfLiteTensor* scratch4, TfLiteTensor* scratch5,
CpuBackendContext* context);
TfLiteStatus EvalInteger8x8_8(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, TfLiteTensor* output_state,
TfLiteTensor* cell_state, TfLiteTensor* output,
const lstm_eval::IntegerLstmParameter* integer_lstm_param,
TfLiteTensor* scratch0, TfLiteTensor* scratch1, TfLiteTensor* scratch2,
TfLiteTensor* scratch3, TfLiteTensor* scratch4, TfLiteTensor* scratch5,
TfLiteTensor* scratch6, TfLiteTensor* scratch7);
}
}
}
}
#endif
#include "tensorflow/lite/kernels/lstm_eval.h"
#include <math.h>
#include <string.h>
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "ruy/matrix.h"
#include "ruy/mul_params.h"
#include "ruy/profiler/instrumentation.h"
#include "ruy/ruy.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace lstm_eval {
namespace {
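// Computes output = matrix * vector + result for a batch of vectors by
// routing through the optimized FullyConnected kernel. For n_batch > 1 the
// bias path is bypassed and `result` is added explicitly afterwards.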
void MatrixBatchVectorMultiplyAccumulate(
const float* matrix, const float* vector, const float* result,
float* output, int m_rows, int m_cols, int n_batch,
CpuBackendContext* cpu_backend_context) {
tflite::FullyConnectedParams float_fc_params;
float_fc_params.float_activation_min = std::numeric_limits<float>::lowest();
float_fc_params.float_activation_max = std::numeric_limits<float>::max();
float_fc_params.lhs_cacheable = true;
float_fc_params.rhs_cacheable = false;
tflite::RuntimeShape weight_shape({m_rows, m_cols});
tflite::RuntimeShape input_shape({n_batch, m_cols});
tflite::RuntimeShape output_shape({n_batch, m_rows});
if (n_batch == 1) {
tflite::optimized_ops::FullyConnected(
float_fc_params, input_shape, vector, weight_shape, matrix,
output_shape, result, output_shape, output, cpu_backend_context);
} else {
tflite::optimized_ops::FullyConnected(
float_fc_params, input_shape, vector, weight_shape, matrix,
output_shape, nullptr, output_shape, output, cpu_backend_context);
for (int i = 0; i < m_rows * n_batch; ++i) {
output[i] += result[i];
}
}
}
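// Precomputes the per-row sums of each weight matrix used by the hybrid
// kernels; the sums are combined with input zero points to correct for
// asymmetric quantization. CIFG and diagonal recurrent weights skip the
// corresponding sums.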
void ComputeRowSums(
int32_t* input_to_input_row_sums, int32_t* input_to_forget_row_sums,
int32_t* input_to_cell_row_sums, int32_t* input_to_output_row_sums,
int32_t* aux_input_to_input_row_sums, int32_t* aux_input_to_forget_row_sums,
int32_t* aux_input_to_cell_row_sums, int32_t* aux_input_to_output_row_sums,
int32_t* recurrent_to_input_row_sums, int32_t* recurrent_to_forget_row_sums,
int32_t* recurrent_to_cell_row_sums, int32_t* recurrent_to_output_row_sums,
int32_t* projection_weights_row_sums, int32_t* row_sums, int n_cell,
int n_input, int n_aux_input, int n_output,
const int8_t* input_to_input_weights_ptr,
const int8_t* input_to_forget_weights_ptr,
const int8_t* input_to_cell_weights_ptr,
const int8_t* input_to_output_weights_ptr,
const int8_t* aux_input_to_input_weights_ptr,
const int8_t* aux_input_to_forget_weights_ptr,
const int8_t* aux_input_to_cell_weights_ptr,
const int8_t* aux_input_to_output_weights_ptr,
const int8_t* recurrent_to_input_weights_ptr,
const int8_t* recurrent_to_forget_weights_ptr,
const int8_t* recurrent_to_cell_weights_ptr,
const int8_t* recurrent_to_output_weights_ptr,
const int8_t* projection_weights_ptr, bool use_cifg,
const float* aux_input_ptr, bool recurrent_to_input_is_diag = false,
bool recurrent_to_forget_is_diag = false,
bool recurrent_to_cell_is_diag = false,
bool recurrent_to_output_is_diag = false) {
if (!use_cifg) {
tensor_utils::ReductionSumVector(input_to_input_weights_ptr,
input_to_input_row_sums, n_cell, n_input);
}
tensor_utils::ReductionSumVector(input_to_forget_weights_ptr,
input_to_forget_row_sums, n_cell, n_input);
tensor_utils::ReductionSumVector(input_to_cell_weights_ptr,
input_to_cell_row_sums, n_cell, n_input);
tensor_utils::ReductionSumVector(input_to_output_weights_ptr,
input_to_output_row_sums, n_cell, n_input);
if (aux_input_ptr) {
if (!use_cifg) {
tensor_utils::ReductionSumVector(aux_input_to_input_weights_ptr,
aux_input_to_input_row_sums, n_cell,
n_aux_input);
}
tensor_utils::ReductionSumVector(aux_input_to_forget_weights_ptr,
aux_input_to_forget_row_sums, n_cell,
n_aux_input);
tensor_utils::ReductionSumVector(aux_input_to_cell_weights_ptr,
aux_input_to_cell_row_sums, n_cell,
n_aux_input);
tensor_utils::ReductionSumVector(aux_input_to_output_weights_ptr,
aux_input_to_output_row_sums, n_cell,
n_aux_input);
}
if (!use_cifg) {
if (!recurrent_to_input_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_input_weights_ptr,
recurrent_to_input_row_sums, n_cell,
n_output);
}
}
if (!recurrent_to_forget_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_forget_weights_ptr,
recurrent_to_forget_row_sums, n_cell,
n_output);
}
if (!recurrent_to_cell_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_cell_weights_ptr,
recurrent_to_cell_row_sums, n_cell,
n_output);
}
if (!recurrent_to_output_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_output_weights_ptr,
recurrent_to_output_row_sums, n_cell,
n_output);
}
if (projection_weights_ptr != nullptr) {
tensor_utils::ReductionSumVector(
projection_weights_ptr, projection_weights_row_sums, n_output, n_cell);
}
}
inline float GetTensorScale(const TfLiteTensor* tensor) {
return tensor == nullptr ? 1.0f : tensor->params.scale;
}
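// Computes one LSTM gate in float: starts from the gate bias (or zeros when
// layer norm is used), accumulates the input, auxiliary-input and recurrent
// contributions (plus an optional peephole term), applies layer normalization
// if present, and finally the gate activation. The all-zero flags let the
// corresponding matrix multiplications be skipped.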
inline void CalculateLstmGateFloat(
const float* input, const float* input_to_gate_weights,
const float* aux_input, const float* aux_input_to_gate_weights,
const float* output_state, const float* recurrent_to_gate_weights,
const float* cell_state, const float* cell_to_gate_weights,
const float* layer_norm_coefficients, const float* gate_bias,
const int n_batch, const int n_input, const int n_aux_input,
const int n_output, const int n_cell,
const TfLiteFusedActivation activation, float* gate,
const bool is_input_all_zeros, const bool is_aux_input_all_zeros,
float* output, bool recurrent_is_diag, CpuBackendContext* context) {
const bool use_peephole = (cell_to_gate_weights != nullptr);
const bool use_layer_norm = (layer_norm_coefficients != nullptr);
if (use_layer_norm) {
std::fill_n(gate, n_cell * n_batch, 0.0f);
} else {
tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate);
}
float* accumulation_buffer = gate;
if (!is_input_all_zeros) {
MatrixBatchVectorMultiplyAccumulate(input_to_gate_weights, input,
accumulation_buffer, output, n_cell,
n_input, n_batch, context);
std::swap(accumulation_buffer, output);
}
if (!is_aux_input_all_zeros) {
MatrixBatchVectorMultiplyAccumulate(aux_input_to_gate_weights, aux_input,
accumulation_buffer, output, n_cell,
n_aux_input, n_batch, context);
std::swap(accumulation_buffer, output);
}
if (recurrent_is_diag) {
tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recurrent_to_gate_weights, n_cell, output_state, n_batch,
accumulation_buffer);
std::swap(accumulation_buffer, output);
} else {
MatrixBatchVectorMultiplyAccumulate(recurrent_to_gate_weights, output_state,
accumulation_buffer, output, n_cell,
n_output, n_batch, context);
}
if (use_peephole) {
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
cell_to_gate_weights, n_cell, cell_state, n_batch, output);
}
if (use_layer_norm) {
tensor_utils::MeanStddevNormalization(output, output, n_cell, n_batch);
tensor_utils::VectorBatchVectorCwiseProduct(layer_norm_coefficients, n_cell,
output, n_batch, output);
tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, output);
}
tensor_utils::ApplyActivationToVector(output, n_batch * n_cell, activation,
gate);
}
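// Updates the cell state in place: cell = forget_gate .* cell + input_gate .*
// cell_gate (with input_gate replaced by 1 - forget_gate under CIFG),
// followed by optional clipping.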
void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state,
const float* input_gate, float* forget_gate,
const float* cell_gate, bool use_cifg, float clip) {
tensor_utils::VectorVectorCwiseProduct(forget_gate, cell_state,
n_batch * n_cell, cell_state);
if (use_cifg) {
float* scratch = forget_gate;
tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, scratch);
tensor_utils::VectorVectorCwiseProductAccumulate(
cell_gate, scratch, n_batch * n_cell, cell_state);
} else {
tensor_utils::VectorVectorCwiseProductAccumulate(
cell_gate, input_gate, n_batch * n_cell, cell_state);
}
if (clip > 0.0f) {
tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
}
}
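// Computes the output (hidden) state: h = output_gate .* activation(cell),
// optionally passed through the projection weights/bias and clipped.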
void CalculateLstmOutputFloat(int n_batch, int n_cell, int n_output,
const float* cell_state, const float* output_gate,
TfLiteFusedActivation activation,
const float* projection_weights,
const float* projection_bias,
const float proj_clip, float* output_state,
float* scratch, float* projection_bias_scratch,
CpuBackendContext* context) {
tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell,
activation, scratch);
tensor_utils::VectorVectorCwiseProduct(output_gate, scratch, n_batch * n_cell,
scratch);
const bool use_projection = (projection_weights != nullptr);
const bool use_projection_bias = (projection_bias != nullptr);
if (use_projection) {
if (use_projection_bias) {
tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, n_batch,
projection_bias_scratch);
} else {
std::fill_n(projection_bias_scratch, n_batch * n_output, 0.0f);
}
MatrixBatchVectorMultiplyAccumulate(projection_weights, scratch,
projection_bias_scratch, output_state,
n_output, n_cell, n_batch, context);
if (proj_clip > 0.0f) {
tensor_utils::CwiseClipping(output_state, n_batch * n_output, proj_clip);
}
} else {
std::copy_n(scratch, n_batch * n_output, output_state);
}
}
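// Hybrid (8-bit weight, float activation) variant of the gate calculation.
// Inputs arrive pre-quantized per batch with scaling factors and zero points;
// sparse weights (with a ledger) and diagonal recurrent weights take
// dedicated paths.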
void CalculateLstmGateHybrid(
const int8_t* input, const float* input_sf, const int32_t* input_zp,
const int8_t* input_to_gate_weights,
const uint8_t* input_to_gate_weights_ledger,
const float input_to_gate_weights_scale, int32_t* input_to_gate_row_sums,
const int8_t* aux_input, const float* aux_input_sf,
const int32_t* aux_input_zp, const int8_t* aux_input_to_gate_weights,
const float aux_input_to_gate_weights_scale,
int32_t* aux_input_to_gate_row_sums,
const int8_t* output_state, const float* output_state_float,
const float* output_state_sf, const int32_t* output_state_zp,
const int8_t* recurrent_to_gate_weights,
const float* recurrent_to_gate_diag,
const uint8_t* recurrent_to_gate_weights_ledger,
const float recurrent_to_gate_weights_scale,
int32_t* recurrent_to_gate_row_sums,
const float* cell_state, const int8_t* cell_to_gate_weights,
const float cell_to_gate_weights_scale,
const float* layer_norm_coefficients, const float* gate_bias,
const int n_batch, const int n_input, const int n_aux_input,
const int n_output, const int n_cell,
const TfLiteFusedActivation activation,
float* gate,
const bool is_input_all_zeros, const bool is_aux_input_all_zeros,
const bool is_output_state_all_zeros, bool* compute_row_sums,
CpuBackendContext* context,
float* scratch0,
float* scratch1,
int32_t* accum_scratch,
bool recurrent_is_diag) {
const bool use_peephole = (cell_to_gate_weights != nullptr);
const bool use_layer_norm = (layer_norm_coefficients != nullptr);
if (use_layer_norm) {
std::fill_n(gate, n_cell * n_batch, 0.0f);
} else {
tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate);
}
if (!is_input_all_zeros) {
if (input_to_gate_weights_ledger != nullptr) {
std::vector<float> scales(n_batch);
for (int i = 0; i < n_batch; i++) {
scales[i] = input_to_gate_weights_scale * input_sf[i];
}
tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights, input_to_gate_weights_ledger, n_cell, n_input,
input, scales.data(), n_batch, gate);
} else {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights, n_cell, n_input, input,
input_to_gate_weights_scale, input_sf, n_batch, gate,
nullptr, input_zp, accum_scratch,
input_to_gate_row_sums, compute_row_sums, scratch0, context);
}
}
if (!is_aux_input_all_zeros) {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
aux_input_to_gate_weights, n_cell, n_aux_input, aux_input,
aux_input_to_gate_weights_scale, aux_input_sf, n_batch, gate,
nullptr, aux_input_zp, accum_scratch,
aux_input_to_gate_row_sums, compute_row_sums, scratch0, context);
}
if (!is_output_state_all_zeros) {
if (recurrent_to_gate_weights_ledger != nullptr) {
std::vector<float> scales(n_batch);
for (int i = 0; i < n_batch; i++) {
scales[i] = recurrent_to_gate_weights_scale * input_sf[i];
}
tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
recurrent_to_gate_weights, recurrent_to_gate_weights_ledger, n_cell,
n_output, output_state, scales.data(), n_batch, gate);
} else {
if (recurrent_is_diag) {
tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recurrent_to_gate_diag, n_cell, output_state_float, n_batch, gate);
} else {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
recurrent_to_gate_weights, n_cell, n_output, output_state,
recurrent_to_gate_weights_scale, output_state_sf, n_batch, gate,
nullptr, output_state_zp, accum_scratch,
recurrent_to_gate_row_sums, compute_row_sums, scratch0, context);
} | #include "tensorflow/lite/kernels/lstm_eval.h"
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace {
template <typename T>
bool ArrayEq(const T* result, const T* expected_result, int size) {
for (int i = 0; i < size; ++i) {
if (result[i] != expected_result[i]) {
return false;
}
}
return true;
}
template <typename T>
bool ArrayFloatNear(const T* result, const T* expected_result, int size,
double threshold) {
for (int i = 0; i < size; ++i) {
if (std::abs(result[i] - expected_result[i]) > threshold) {
return false;
}
}
return true;
}
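// Holds the raw int8 weight buffers and TfLiteTensor metadata shared by the
// quantized and hybrid LSTM test fixtures below (n_batch = 2, n_input = 18,
// n_cell = 10, n_output = 6).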
class BaseLstmParam {
public:
TfLiteTensor* Geti2i() {
PackWeightToTensor(&i2i_tensor_, i2i_, i2i_size_);
i2i_tensor_.data.int8 = i2i_.data();
return &i2i_tensor_;
}
TfLiteTensor* Geti2f() {
PackWeightToTensor(&i2f_tensor_, i2f_, i2f_size_);
i2f_tensor_.data.int8 = i2f_.data();
return &i2f_tensor_;
}
TfLiteTensor* Geti2c() {
PackWeightToTensor(&i2c_tensor_, i2c_, i2c_size_);
i2c_tensor_.data.int8 = i2c_.data();
return &i2c_tensor_;
}
TfLiteTensor* Geti2o() {
PackWeightToTensor(&i2o_tensor_, i2o_, i2o_size_);
i2o_tensor_.data.int8 = i2o_.data();
return &i2o_tensor_;
}
TfLiteTensor* Getr2i() {
PackWeightToTensor(&r2i_tensor_, r2i_, r2i_size_);
r2i_tensor_.data.int8 = r2i_.data();
return &r2i_tensor_;
}
TfLiteTensor* Getr2f() {
PackWeightToTensor(&r2f_tensor_, r2f_, r2f_size_);
r2f_tensor_.data.int8 = r2f_.data();
return &r2f_tensor_;
}
TfLiteTensor* Getr2c() {
PackWeightToTensor(&r2c_tensor_, r2c_, r2c_size_);
r2c_tensor_.data.int8 = r2c_.data();
return &r2c_tensor_;
}
TfLiteTensor* Getr2o() {
PackWeightToTensor(&r2o_tensor_, r2o_, r2o_size_);
r2o_tensor_.data.int8 = r2o_.data();
return &r2o_tensor_;
}
TfLiteTensor* GetProjection() {
PackWeightToTensor(&projection_tensor_, projection_, projection_size_);
projection_tensor_.data.int8 = projection_.data();
return &projection_tensor_;
}
~BaseLstmParam() {
TfLiteIntArrayFree(input_tensor_.dims);
TfLiteIntArrayFree(i2i_tensor_.dims);
TfLiteIntArrayFree(i2f_tensor_.dims);
TfLiteIntArrayFree(i2c_tensor_.dims);
TfLiteIntArrayFree(i2o_tensor_.dims);
TfLiteIntArrayFree(r2i_tensor_.dims);
TfLiteIntArrayFree(r2f_tensor_.dims);
TfLiteIntArrayFree(r2c_tensor_.dims);
TfLiteIntArrayFree(r2o_tensor_.dims);
TfLiteIntArrayFree(layer_norm_input_tensor_.dims);
TfLiteIntArrayFree(layer_norm_forget_tensor_.dims);
TfLiteIntArrayFree(layer_norm_cell_tensor_.dims);
TfLiteIntArrayFree(layer_norm_output_tensor_.dims);
TfLiteIntArrayFree(input_gate_bias_tensor_.dims);
TfLiteIntArrayFree(forget_gate_bias_tensor_.dims);
TfLiteIntArrayFree(cell_gate_bias_tensor_.dims);
TfLiteIntArrayFree(output_gate_bias_tensor_.dims);
TfLiteIntArrayFree(projection_tensor_.dims);
TfLiteIntArrayFree(projection_bias_tensor_.dims);
TfLiteIntArrayFree(activation_tensor_.dims);
TfLiteIntArrayFree(cell_tensor_.dims);
TfLiteIntArrayFree(output_tensor_.dims);
}
protected:
template <typename T>
void PackWeightToTensor(TfLiteTensor* tensor, std::vector<T>& data,
std::vector<int32_t> dims) {
if (data.empty()) {
int total = 1;
for (int i = 0; i < dims.size(); ++i) {
total *= dims[i];
}
for (int i = 0; i < total; ++i) {
data.push_back(0);
}
}
tensor->dims = TfLiteIntArrayCreate(dims.size());
for (int i = 0; i < dims.size(); ++i) {
tensor->dims->data[i] = dims[i];
}
}
const int n_batch_ = 2;
const int n_input_ = 18;
const int n_cell_ = 10;
const int n_output_ = 6;
std::vector<int32_t> input_size_ = {n_batch_, n_input_};
TfLiteTensor input_tensor_;
std::vector<int8_t> i2i_ = {
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 1, 2, 3, 14, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
};
std::vector<int32_t> i2i_size_ = {n_cell_, n_input_};
TfLiteTensor i2i_tensor_;
std::vector<int8_t> i2f_ = {
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 11, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, -6, 1, 2, 3, 14, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 13, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
};
std::vector<int32_t> i2f_size_ = {n_cell_, n_input_};
TfLiteTensor i2f_tensor_;
std::vector<int8_t> i2c_ = {
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 16, 1, 2, 3, 14, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6,
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
};
std::vector<int32_t> i2c_size_ = {n_cell_, n_input_};
TfLiteTensor i2c_tensor_;
std::vector<int8_t> i2o_ = {
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, -1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 1, 2, 3, 14, 5, 6,
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, -6, 1, 2, 3, 4, 5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, -1, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
};
std::vector<int32_t> i2o_size_ = {n_cell_, n_input_};
TfLiteTensor i2o_tensor_;
std::vector<int8_t> r2i_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2i_size_ = {n_cell_, n_output_};
TfLiteTensor r2i_tensor_;
std::vector<int8_t> r2f_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2f_size_ = {n_cell_, n_output_};
TfLiteTensor r2f_tensor_;
std::vector<int8_t> r2c_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2c_size_ = {n_cell_, n_output_};
TfLiteTensor r2c_tensor_;
std::vector<int8_t> r2o_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2o_size_ = {n_cell_, n_output_};
TfLiteTensor r2o_tensor_;
std::vector<int32_t> layer_norm_input_size_ = {n_cell_};
TfLiteTensor layer_norm_input_tensor_;
TfLiteTensor layer_norm_forget_tensor_;
std::vector<int32_t> layer_norm_forget_size_ = {n_cell_};
std::vector<int32_t> layer_norm_cell_size_ = {n_cell_};
TfLiteTensor layer_norm_cell_tensor_;
std::vector<int32_t> layer_norm_output_size_ = {n_cell_};
TfLiteTensor layer_norm_output_tensor_;
std::vector<int32_t> input_gate_bias_size_ = {n_cell_};
TfLiteTensor input_gate_bias_tensor_;
std::vector<int32_t> forget_gate_bias_size_ = {n_cell_};
TfLiteTensor forget_gate_bias_tensor_;
std::vector<int32_t> cell_gate_bias_size_ = {n_cell_};
TfLiteTensor cell_gate_bias_tensor_;
std::vector<int32_t> output_gate_bias_size_ = {n_cell_};
TfLiteTensor output_gate_bias_tensor_;
std::vector<int8_t> projection_ = {
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
};
std::vector<int32_t> projection_size_ = {n_cell_, n_output_};
TfLiteTensor projection_tensor_;
std::vector<int32_t> projection_bias_ = {
16, 4, 5, 6, 1, 1
};
std::vector<int32_t> projection_bias_size_ = {n_output_};
TfLiteTensor projection_bias_tensor_;
std::vector<int32_t> activation_size_ = {n_batch_, n_output_};
TfLiteTensor activation_tensor_;
std::vector<int32_t> cell_size_ = {n_batch_, n_cell_};
TfLiteTensor cell_tensor_;
std::vector<int32_t> output_size_ = {n_batch_, n_output_};
TfLiteTensor output_tensor_;
};
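// Fixture for the fully-integer 8x8_16 path: int8 input/activation/output,
// int16 cell state and layer-norm coefficients, int32 biases, plus the
// IntegerLstmParameter populated in GetQuantParam().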
class QuantizedLstmParam : public BaseLstmParam {
public:
TfLiteTensor* GetInput() {
PackWeightToTensor(&input_tensor_, input_, input_size_);
input_tensor_.data.int8 = input_.data();
return &input_tensor_;
}
TfLiteTensor* GetInputLayerNorm() {
PackWeightToTensor(&layer_norm_input_tensor_, layer_norm_input_,
layer_norm_input_size_);
layer_norm_input_tensor_.data.i16 = layer_norm_input_.data();
return &layer_norm_input_tensor_;
}
TfLiteTensor* GetForgetLayerNorm() {
PackWeightToTensor(&layer_norm_forget_tensor_, layer_norm_forget_,
layer_norm_forget_size_);
layer_norm_forget_tensor_.data.i16 = layer_norm_forget_.data();
return &layer_norm_forget_tensor_;
}
TfLiteTensor* GetCellLayerNorm() {
PackWeightToTensor(&layer_norm_cell_tensor_, layer_norm_cell_,
layer_norm_cell_size_);
layer_norm_cell_tensor_.data.i16 = layer_norm_cell_.data();
return &layer_norm_cell_tensor_;
}
TfLiteTensor* GetOutputLayerNorm() {
PackWeightToTensor(&layer_norm_output_tensor_, layer_norm_output_,
layer_norm_output_size_);
layer_norm_output_tensor_.data.i16 = layer_norm_output_.data();
return &layer_norm_output_tensor_;
}
TfLiteTensor* GetInputBias() {
PackWeightToTensor(&input_gate_bias_tensor_, input_gate_bias_,
input_gate_bias_size_);
input_gate_bias_tensor_.data.i32 = input_gate_bias_.data();
return &input_gate_bias_tensor_;
}
TfLiteTensor* GetForgetBias() {
PackWeightToTensor(&forget_gate_bias_tensor_, forget_gate_bias_,
forget_gate_bias_size_);
forget_gate_bias_tensor_.data.i32 = forget_gate_bias_.data();
return &forget_gate_bias_tensor_;
}
TfLiteTensor* GetCellBias() {
PackWeightToTensor(&cell_gate_bias_tensor_, cell_gate_bias_,
cell_gate_bias_size_);
cell_gate_bias_tensor_.data.i32 = cell_gate_bias_.data();
return &cell_gate_bias_tensor_;
}
TfLiteTensor* GetOutputBias() {
PackWeightToTensor(&output_gate_bias_tensor_, output_gate_bias_,
output_gate_bias_size_);
output_gate_bias_tensor_.data.i32 = output_gate_bias_.data();
return &output_gate_bias_tensor_;
}
TfLiteTensor* GetProjectionBias() {
PackWeightToTensor(&projection_bias_tensor_, projection_bias_,
projection_bias_size_);
projection_bias_tensor_.data.i32 = projection_bias_.data();
return &projection_bias_tensor_;
}
ops::builtin::lstm_eval::IntegerLstmParameter* GetQuantParam() {
integer_lstm_param_.effective_input_to_input_scale_a = 1808677632;
integer_lstm_param_.effective_input_to_input_scale_b = -1;
integer_lstm_param_.effective_recurrent_to_input_scale_a = 1078887680;
integer_lstm_param_.effective_recurrent_to_input_scale_b = -1;
integer_lstm_param_.effective_cell_to_input_scale_a = 1073741824;
integer_lstm_param_.effective_cell_to_input_scale_b = 1;
integer_lstm_param_.effective_input_to_forget_scale_a = 1845996800;
integer_lstm_param_.effective_input_to_forget_scale_b = -3;
integer_lstm_param_.effective_recurrent_to_forget_scale_a = 1477412736;
integer_lstm_param_.effective_recurrent_to_forget_scale_b = -2;
integer_lstm_param_.effective_cell_to_forget_scale_a = 1073741824;
integer_lstm_param_.effective_cell_to_forget_scale_b = 1;
integer_lstm_param_.effective_input_to_cell_scale_a = 1648385408;
integer_lstm_param_.effective_input_to_cell_scale_b = -2;
integer_lstm_param_.effective_recurrent_to_cell_scale_a = 1185544192,
integer_lstm_param_.effective_recurrent_to_cell_scale_b = -1;
integer_lstm_param_.effective_input_to_output_scale_a = 1328153600;
integer_lstm_param_.effective_input_to_output_scale_b = -1;
integer_lstm_param_.effective_recurrent_to_output_scale_a = 1479582592;
integer_lstm_param_.effective_recurrent_to_output_scale_b = -1;
integer_lstm_param_.effective_cell_to_output_scale_a = 1073741824,
integer_lstm_param_.effective_cell_to_output_scale_b = 1;
integer_lstm_param_.effective_proj_scale_a = 1105682560;
integer_lstm_param_.effective_proj_scale_b = -8;
integer_lstm_param_.effective_hidden_scale_a = 0;
integer_lstm_param_.effective_hidden_scale_b = 0;
integer_lstm_param_.layer_norm_input_scale_a = 2011617664;
integer_lstm_param_.layer_norm_input_scale_b = -11;
integer_lstm_param_.layer_norm_forget_scale_a = 1968024960;
integer_lstm_param_.layer_norm_forget_scale_b = -13;
integer_lstm_param_.layer_norm_cell_scale_a = 1097334528,
integer_lstm_param_.layer_norm_cell_scale_b = -12;
integer_lstm_param_.layer_norm_output_scale_a = 1837163008;
integer_lstm_param_.layer_norm_output_scale_b = -12;
integer_lstm_param_.quantized_cell_clip = 20480;
integer_lstm_param_.quantized_proj_clip = 0;
integer_lstm_param_.cell_scale = -11;
integer_lstm_param_.input_variance_guard = 1;
integer_lstm_param_.forget_variance_guard = 2;
integer_lstm_param_.cell_variance_guard = 2;
integer_lstm_param_.output_variance_guard = 1;
integer_lstm_param_.hidden_zp = 0;
integer_lstm_param_.input_to_forget_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_forget_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.input_to_cell_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_cell_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.input_to_output_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_output_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.input_to_input_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_input_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.projection_effective_bias.reset(new int32_t[n_output_]);
std::fill_n(integer_lstm_param_.input_to_forget_effective_bias.get(),
n_cell_, 152);
std::fill_n(integer_lstm_param_.recurrent_to_forget_effective_bias.get(),
n_cell_, 315);
std::fill_n(integer_lstm_param_.input_to_cell_effective_bias.get(), n_cell_,
165);
std::fill_n(integer_lstm_param_.recurrent_to_cell_effective_bias.get(),
n_cell_, 1165);
std::fill_n(integer_lstm_param_.input_to_output_effective_bias.get(),
n_cell_, 159);
std::fill_n(integer_lstm_param_.recurrent_to_output_effective_bias.get(),
n_cell_, 915);
std::fill_n(integer_lstm_param_.input_to_input_effective_bias.get(),
n_cell_, -15);
std::fill_n(integer_lstm_param_.recurrent_to_input_effective_bias.get(),
n_cell_, 315);
std::fill_n(integer_lstm_param_.projection_effective_bias.get(), n_output_,
115);
return &integer_lstm_param_;
}
TfLiteTensor* GetScratch0() {
PackWeightToTensor(&scratch0_tensor_, scratch0_, scratch0_size_);
scratch0_tensor_.data.i16 = scratch0_.data();
return &scratch0_tensor_;
}
TfLiteTensor* GetScratch1() {
PackWeightToTensor(&scratch1_tensor_, scratch1_, scratch1_size_);
scratch1_tensor_.data.i16 = scratch1_.data();
return &scratch1_tensor_;
}
TfLiteTensor* GetScratch2() {
PackWeightToTensor(&scratch2_tensor_, scratch2_, scratch2_size_);
scratch2_tensor_.data.i16 = scratch2_.data();
return &scratch2_tensor_;
}
TfLiteTensor* GetScratch3() {
PackWeightToTensor(&scratch3_tensor_, scratch3_, scratch3_size_);
scratch3_tensor_.data.i16 = scratch3_.data();
return &scratch3_tensor_;
}
TfLiteTensor* GetScratch4() {
PackWeightToTensor(&scratch4_tensor_, scratch4_, scratch4_size_);
scratch4_tensor_.data.int8 = scratch4_.data();
return &scratch4_tensor_;
}
TfLiteTensor* GetScratch5() {
PackWeightToTensor(&scratch5_tensor_, scratch5_, scratch5_size_);
scratch5_tensor_.data.i32 = scratch5_.data();
return &scratch5_tensor_;
}
TfLiteTensor* GetActivation() {
PackWeightToTensor(&activation_tensor_, activation_, activation_size_);
activation_tensor_.data.int8 = activation_.data();
activation_tensor_.params.zero_point = 50;
return &activation_tensor_;
}
TfLiteTensor* GetOutput() {
PackWeightToTensor(&output_tensor_, output_, output_size_);
output_tensor_.data.int8 = output_.data();
return &output_tensor_;
}
TfLiteTensor* GetCell() {
PackWeightToTensor(&cell_tensor_, cell_, cell_size_);
cell_tensor_.data.i16 = cell_.data();
return &cell_tensor_;
}
~QuantizedLstmParam() {
TfLiteIntArrayFree(scratch0_tensor_.dims);
TfLiteIntArrayFree(scratch1_tensor_.dims);
TfLiteIntArrayFree(scratch2_tensor_.dims);
TfLiteIntArrayFree(scratch3_tensor_.dims);
TfLiteIntArrayFree(scratch4_tensor_.dims);
TfLiteIntArrayFree(scratch5_tensor_.dims);
}
private:
std::vector<int8_t> input_ = {
8, 2, 3, 4, 5, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, -3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
};
std::vector<int16_t> layer_norm_input_ = {8, 2, 3, 4, 5, 6, 1, 2, 3, 4};
std::vector<int16_t> layer_norm_forget_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
};
std::vector<int16_t> layer_norm_cell_ = {
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int16_t> layer_norm_output_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> input_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> forget_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> cell_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> output_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int8_t> activation_;
std::vector<int16_t> cell_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
1, 14, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int8_t> output_ = {
1, 1, 3, 4, -5, 6,
1, 4, 3, 4, -5, 6,
};
ops::builtin::lstm_eval::IntegerLstmParameter integer_lstm_param_;
std::vector<int16_t> scratch0_;
std::vector<int32_t> scratch0_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch0_tensor_;
std::vector<int16_t> scratch1_;
std::vector<int32_t> scratch1_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch1_tensor_;
std::vector<int16_t> scratch2_;
std::vector<int32_t> scratch2_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch2_tensor_;
std::vector<int16_t> scratch3_;
std::vector<int32_t> scratch3_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch3_tensor_;
std::vector<int8_t> scratch4_;
std::vector<int32_t> scratch4_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch4_tensor_;
std::vector<int32_t> scratch5_;
std::vector<int32_t> scratch5_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch5_tensor_;
};
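// Runs EvalInteger8x8_16 once on the quantized fixture and compares the
// resulting cell state and activation against golden values.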
void TestOneFullyQuantizedLSTM() {
CpuBackendContext context;
QuantizedLstmParam one_parameter;
auto activation = one_parameter.GetActivation();
auto output = one_parameter.GetOutput();
auto cell = one_parameter.GetCell();
auto param = one_parameter.GetQuantParam();
ops::builtin::lstm_eval::EvalInteger8x8_16(
one_parameter.GetInput(), one_parameter.Geti2i(), one_parameter.Geti2f(),
one_parameter.Geti2c(), one_parameter.Geti2o(), one_parameter.Getr2i(),
one_parameter.Getr2f(), one_parameter.Getr2c(), one_parameter.Getr2o(),
nullptr, nullptr, nullptr, one_parameter.GetInputLayerNorm(),
one_parameter.GetForgetLayerNorm(), one_parameter.GetCellLayerNorm(),
one_parameter.GetOutputLayerNorm(), one_parameter.GetInputBias(),
one_parameter.GetForgetBias(), one_parameter.GetCellBias(),
one_parameter.GetOutputBias(), one_parameter.GetProjection(),
one_parameter.GetProjectionBias(), nullptr, true,
true, param, activation, cell, output,
one_parameter.GetScratch0(), one_parameter.GetScratch1(),
one_parameter.GetScratch2(), one_parameter.GetScratch3(),
one_parameter.GetScratch4(), one_parameter.GetScratch5(), &context);
const std::vector<int16_t> expected_cell = {
7, 1, 3, 2, 0, 1, 0, 2, -2, 4, 1, 6, 4, 3, 0, 1, 0, 2, -2, 4,
};
const std::vector<int8_t> expected_activation = {
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
};
EXPECT_TRUE(ArrayEq(cell->data.i16, expected_cell.data(), 20));
EXPECT_TRUE(ArrayEq(activation->data.int8, expected_activation.data(), 12));
EXPECT_TRUE(ArrayEq(output->data.int8, expected_activation.data(), 12));
}
TEST(TestOneFullyQuantizedLSTM, TestOneFullyQuantizedLSTM) {
TestOneFullyQuantizedLSTM();
}
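// Fixture for the hybrid path: float inputs, activations and biases with the
// int8 weights from the base class, plus the extra scratch tensors (scaling
// factors, zero points, row sums, quantized buffers) that hybrid evaluation
// needs.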
class HybridLstmParam : public BaseLstmParam {
public:
TfLiteTensor* GetFloatOutput() {
PackWeightToTensor(&output_tensor_, output_float_, output_size_);
output_tensor_.data.f = output_float_.data();
return &output_tensor_;
}
const TfLiteLSTMParams GetLSTMParam() {
return {kTfLiteActRelu, 0, 0, kTfLiteLSTMFullKernel, true};
}
TfLiteTensor* GetScratchBuffer() {
PackWeightToTensor(&scratch_buffer_tensor_, scratch_buffer_,
scratch_buffer_size_);
scratch_buffer_tensor_.data.f = scratch_buffer_.data();
return &scratch_buffer_tensor_;
}
TfLiteTensor* GetInputScalingFactors() {
PackWeightToTensor(&input_sf_tensor_, input_sf_,
quantization_extra_scratch_buffer_sizes_);
input_sf_tensor_.data.f = input_sf_.data();
return &input_sf_tensor_;
}
TfLiteTensor* GetAuxInputScalingFactors() {
PackWeightToTensor(&aux_input_sf_tensor_, aux_input_sf_,
quantization_extra_scratch_buffer_sizes_);
aux_input_sf_tensor_.data.f = aux_input_sf_.data();
return &aux_input_sf_tensor_;
}
TfLiteTensor* GetOutputStateScalingFactors() {
PackWeightToTensor(&output_state_sf_tensor_, output_state_sf_,
quantization_extra_scratch_buffer_sizes_);
output_state_sf_tensor_.data.f = output_state_sf_.data();
return &output_state_sf_tensor_;
}
TfLiteTensor* GetProdScalingFactors() {
PackWeightToTensor(&prod_scaling_factors_tensor_, prod_scaling_factors_,
quantization_extra_scratch_buffer_sizes_);
prod_scaling_factors_tensor_.data.f = prod_scaling_factors_.data();
return &prod_scaling_factors_tensor_;
}
TfLiteTensor* GetInputQuantized() {
PackWeightToTensor(&input_quantized_tensor_, input_quantized_, input_size_);
input_quantized_tensor_.data.int8 = input_quantized_.data();
return &input_quantized_tensor_;
}
TfLiteTensor* GetActivationStateQuantized() {
PackWeightToTensor(&activation_quantized_tensor_, activation_quantized_,
activation_size_);
activation_quantized_tensor_.data.int8 = activation_quantized_.data();
return &activation_quantized_tensor_;
}
TfLiteTensor* GetCellStateQuantized() {
PackWeightToTensor(&cell_quantized_tensor_, cell_quantized_, cell_size_);
cell_quantized_tensor_.data.int8 = cell_quantized_.data();
return &cell_quantized_tensor_;
}
TfLiteTensor* GetInputZeroPoints() {
PackWeightToTensor(&input_zp_tensor_, input_zp_,
quantization_extra_scratch_buffer_sizes_);
input_zp_tensor_.data.i32 = input_zp_.data();
return &input_zp_tensor_;
}
TfLiteTensor* GetAuxInputZeroPoints() {
PackWeightToTensor(&aux_input_zp_tensor_, aux_input_zp_,
quantization_extra_scratch_buffer_sizes_);
aux_input_zp_tensor_.data.i32 = aux_input_zp_.data();
return &aux_input_zp_tensor_;
}
TfLiteTensor* GetOutputStateZeroPoints() {
PackWeightToTensor(&output_state_zp_tensor_, output_state_zp_,
quantization_extra_scratch_buffer_sizes_);
output_state_zp_tensor_.data.i32 = output_state_zp_.data();
return &output_state_zp_tensor_;
}
TfLiteTensor* GetRowSums() {
PackWeightToTensor(&row_sums_tensor_, row_sums_, row_sums_size_);
row_sums_tensor_.data.i32 = row_sums_.data();
return &row_sums_tensor_;
}
TfLiteTensor* GetFloatInput() {
PackWeightToTensor(&input_tensor_, input_float_, input_size_);
input_tensor_.data.f = input_float_.data();
return &input_tensor_;
}
TfLiteTensor* GetActivation() {
PackWeightToTensor(&activation_tensor_, activation_state_,
activation_size_);
activation_tensor_.data.f = activation_state_.data();
return &activation_tensor_;
}
TfLiteTensor* GetCell() {
PackWeightToTensor(&cell_tensor_, cell_state_, cell_size_);
cell_tensor_.data.f = cell_state_.data();
return &cell_tensor_;
}
TfLiteTensor* GetAccumScratchBuffer() {
PackWeightToTensor(&accum_scratch_tensor_, accum_scratch_,
accum_scratch_size_);
accum_scratch_tensor_.data.i32 = accum_scratch_.data();
return &accum_scratch_tensor_;
}
TfLiteTensor* GetInputBias() {
PackWeightToTensor(&input_gate_bias_tensor_, input_float_bias_,
input_gate_bias_size_);
input_gate_bias_tensor_.data.f = input_float_bias_.data();
return &input_gate_bias_tensor_;
}
TfLiteTensor* GetForgetBias() {
PackWeightToTensor(&forget_gate_bias_tensor_, forget_float_bias_,
forget_gate_bias_size_);
forget_gate_bias_tensor_.data.f = forget_float_bias_.data();
return &forget_gate_bias_tensor_;
}
TfLiteTensor* GetCellBias() {
PackWeightToTensor(&cell_gate_bias_tensor_, cell_float_bias_,
cell_gate_bias_size_);
cell_gate_bias_tensor_.data.f = cell_float_bias_.data();
return &cell_gate_bias_tensor_;
}
TfLiteTensor* GetOutputBias() {
PackWeightToTensor(&output_gate_bias_tensor_, output_float_bias_,
output_gate_bias_size_);
output_gate_bias_tensor_.data.f = output_float_bias_.data();
return &output_gate_bias_tensor_;
}
TfLiteTensor* GetProjectionBias() {
PackWeightToTensor(&projection_bias_tensor_, projection_float_bias_,
projection_bias_size_);
projection_bias_tensor_.data.f = projection_float_bias_.data();
return &projection_bias_tensor_;
}
int GetNumRowSums() { return n_row_sums_; }
TfLiteTensor* GetInputLayerNorm() {
PackWeightToTensor(&layer_norm_input_tensor_, layer_norm_float_input_,
layer_norm_input_size_);
layer_norm_input_tensor_.data.f = layer_norm_float_input_.data();
return &layer_norm_input_tensor_;
}
TfLiteTensor* GetForgetLayerNorm() {
PackWeightToTensor(&layer_norm_forget_tensor_, layer_norm_float_forget_,
layer_norm_forget_size_);
layer_norm_forget_tensor_.data.f = layer_norm_float_forget_.data();
return &layer_norm_forget_tensor_;
}
TfLiteTensor* GetCellLayerNorm() {
PackWeightToTensor(&layer_norm_cell_tensor_, layer_norm_float_cell_,
layer_norm_cell_size_);
layer_norm_cell_tensor_.data.f = layer_norm_float_cell_.data();
return &layer_norm_cell_tensor_;
}
TfLiteTensor* GetOutputLayerNorm() {
PackWeightToTensor(&layer_norm_output_tensor_, layer_norm_float_output_,
layer_norm_output_size_);
layer_norm_output_tensor_.data.f = layer_norm_float_output_.data();
return &layer_norm_output_tensor_;
}
static TfLiteTensor* addScale(TfLiteTensor* t, float scale) {
t->params.scale = scale;
return t;
}
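  // Release the dims arrays allocated by PackWeightToTensor for the
  // hybrid-specific scratch tensors.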
~HybridLstmParam() {
TfLiteIntArrayFree(scratch_buffer_tensor_.dims);
TfLiteIntArrayFree(accum_scratch_tensor_.dims);
TfLiteIntArrayFree(input_sf_tensor_.dims);
TfLiteIntArrayFree(aux_input_sf_tensor_.dims);
TfLiteIntArrayFree(output_state_sf_tensor_.dims);
TfLiteIntArrayFree(prod_scaling_factors_tensor_.dims);
TfLiteIntArrayFree(input_quantized_tensor_.dims);
TfLiteIntArrayFree(activation_quantized_tensor_.dims);
TfLiteIntArrayFree(cell_quantized_tensor_.dims);
TfLiteIntArrayFree(input_zp_tensor_.dims);
TfLiteIntArrayFree(aux_input_zp_tensor_.dims);
TfLiteIntArrayFree(output_state_zp_tensor_.dims);
TfLiteIntArrayFree(row_sums_tensor_.dims);
}
private:
const int n_row_sums_ = 9;
std::vector<float> scratch_buffer_;
std::vector<int32_t> scratch_buffer_size_ = {n_batch_, n_cell_ * 4};
  TfLiteTensor scratch_buffer_tensor_;