| ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 93fad05c-1bed-42af-b8bd-1cf21c7c02af | cpp | tensorflow/tensorflow | model_cmdline_flags | tensorflow/lite/toco/model_cmdline_flags.cc | tensorflow/lite/toco/model_cmdline_flags_test.cc |
#include "tensorflow/lite/toco/model_cmdline_flags.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
#include "tensorflow/lite/toco/toco_port.h"
#ifdef PLATFORM_GOOGLE
#include "base/commandlineflags.h"
#endif
namespace toco {
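// Registers all model-related command-line flags and parses them from
// argc/argv into *parsed_model_flags_ptr. Returns false if --help was
// requested (filling *msg with usage text) or if parsing failed.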
bool ParseModelFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedModelFlags* parsed_model_flags_ptr) {
ParsedModelFlags& parsed_flags = *parsed_model_flags_ptr;
using tensorflow::Flag;
std::vector<tensorflow::Flag> flags = {
Flag("input_array", parsed_flags.input_array.bind(),
parsed_flags.input_array.default_value(),
"Deprecated: use --input_arrays instead. Name of the input array. "
"If not specified, will try to read "
"that information from the input file."),
Flag("input_arrays", parsed_flags.input_arrays.bind(),
parsed_flags.input_arrays.default_value(),
"Names of the input arrays, comma-separated. If not specified, "
"will try to read that information from the input file."),
Flag("output_array", parsed_flags.output_array.bind(),
parsed_flags.output_array.default_value(),
"Deprecated: use --output_arrays instead. Name of the output array, "
"when specifying a unique output array. "
"If not specified, will try to read that information from the "
"input file."),
Flag("output_arrays", parsed_flags.output_arrays.bind(),
parsed_flags.output_arrays.default_value(),
"Names of the output arrays, comma-separated. "
"If not specified, will try to read "
"that information from the input file."),
Flag("input_shape", parsed_flags.input_shape.bind(),
parsed_flags.input_shape.default_value(),
"Deprecated: use --input_shapes instead. Input array shape. For "
"many models the shape takes the form "
"batch size, input array height, input array width, input array "
"depth."),
Flag("input_shapes", parsed_flags.input_shapes.bind(),
parsed_flags.input_shapes.default_value(),
"Shapes corresponding to --input_arrays, colon-separated. For "
"many models each shape takes the form batch size, input array "
"height, input array width, input array depth."),
Flag("batch_size", parsed_flags.batch_size.bind(),
parsed_flags.batch_size.default_value(),
"Deprecated. Batch size for the model. Replaces the first dimension "
"of an input size array if undefined. Use only with SavedModels "
"when --input_shapes flag is not specified. Always use "
"--input_shapes flag with frozen graphs."),
Flag("input_data_type", parsed_flags.input_data_type.bind(),
parsed_flags.input_data_type.default_value(),
"Deprecated: use --input_data_types instead. Input array type, if "
"not already provided in the graph. "
"Typically needs to be specified when passing arbitrary arrays "
"to --input_arrays."),
Flag("input_data_types", parsed_flags.input_data_types.bind(),
parsed_flags.input_data_types.default_value(),
"Input arrays types, comma-separated, if not already provided in "
"the graph. "
"Typically needs to be specified when passing arbitrary arrays "
"to --input_arrays."),
Flag("mean_value", parsed_flags.mean_value.bind(),
parsed_flags.mean_value.default_value(),
"Deprecated: use --mean_values instead. mean_value parameter for "
"image models, used to compute input "
"activations from input pixel data."),
Flag("mean_values", parsed_flags.mean_values.bind(),
parsed_flags.mean_values.default_value(),
"mean_values parameter for image models, comma-separated list of "
"doubles, used to compute input activations from input pixel "
"data. Each entry in the list should match an entry in "
"--input_arrays."),
Flag("std_value", parsed_flags.std_value.bind(),
parsed_flags.std_value.default_value(),
"Deprecated: use --std_values instead. std_value parameter for "
"image models, used to compute input "
"activations from input pixel data."),
Flag("std_values", parsed_flags.std_values.bind(),
parsed_flags.std_values.default_value(),
"std_value parameter for image models, comma-separated list of "
"doubles, used to compute input activations from input pixel "
"data. Each entry in the list should match an entry in "
"--input_arrays."),
Flag("variable_batch", parsed_flags.variable_batch.bind(),
parsed_flags.variable_batch.default_value(),
"If true, the model accepts an arbitrary batch size. Mutually "
"exclusive "
"with the 'batch' field: at most one of these two fields can be "
"set."),
Flag("rnn_states", parsed_flags.rnn_states.bind(),
parsed_flags.rnn_states.default_value(), ""),
Flag("model_checks", parsed_flags.model_checks.bind(),
parsed_flags.model_checks.default_value(),
"A list of model checks to be applied to verify the form of the "
"model. Applied after the graph transformations after import."),
Flag("dump_graphviz", parsed_flags.dump_graphviz.bind(),
parsed_flags.dump_graphviz.default_value(),
"Dump graphviz during LogDump call. If string is non-empty then "
"it defines path to dump, otherwise will skip dumping."),
Flag("dump_graphviz_video", parsed_flags.dump_graphviz_video.bind(),
parsed_flags.dump_graphviz_video.default_value(),
"If true, will dump graphviz at each "
"graph transformation, which may be used to generate a video."),
Flag("conversion_summary_dir", parsed_flags.conversion_summary_dir.bind(),
parsed_flags.conversion_summary_dir.default_value(),
"Local file directory to store the conversion logs."),
Flag("allow_nonexistent_arrays",
parsed_flags.allow_nonexistent_arrays.bind(),
parsed_flags.allow_nonexistent_arrays.default_value(),
"If true, will allow passing inexistent arrays in --input_arrays "
"and --output_arrays. This makes little sense, is only useful to "
"more easily get graph visualizations."),
Flag("allow_nonascii_arrays", parsed_flags.allow_nonascii_arrays.bind(),
parsed_flags.allow_nonascii_arrays.default_value(),
"If true, will allow passing non-ascii-printable characters in "
"--input_arrays and --output_arrays. By default (if false), only "
"ascii printable characters are allowed, i.e. character codes "
"ranging from 32 to 127. This is disallowed by default so as to "
"catch common copy-and-paste issues where invisible unicode "
"characters are unwittingly added to these strings."),
Flag(
"arrays_extra_info_file", parsed_flags.arrays_extra_info_file.bind(),
parsed_flags.arrays_extra_info_file.default_value(),
"Path to an optional file containing a serialized ArraysExtraInfo "
"proto allowing to pass extra information about arrays not specified "
"in the input model file, such as extra MinMax information."),
Flag("model_flags_file", parsed_flags.model_flags_file.bind(),
parsed_flags.model_flags_file.default_value(),
"Path to an optional file containing a serialized ModelFlags proto. "
"Options specified on the command line will override the values in "
"the proto."),
Flag("change_concat_input_ranges",
parsed_flags.change_concat_input_ranges.bind(),
parsed_flags.change_concat_input_ranges.default_value(),
"Boolean to change the behavior of min/max ranges for inputs and"
" output of the concat operators."),
};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
if (!tensorflow::Flags::Parse(argc, argv, flags)) return false;
}
auto& dump_options = *GraphVizDumpOptions::singleton();
dump_options.dump_graphviz_video = parsed_flags.dump_graphviz_video.value();
dump_options.dump_graphviz = parsed_flags.dump_graphviz.value();
return true;
}
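// Transfers the parsed command-line flag values into the ModelFlags proto,
// enforcing mutual exclusion between the singular-form flags (--input_array,
// --mean_value, ...) and the plural-form flags (--input_arrays,
// --mean_values, ...), and parsing the list-valued flags (shapes, mean/std
// values, data types, rnn_states, model_checks).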
void ReadModelFlagsFromCommandLineFlags(
const ParsedModelFlags& parsed_model_flags, ModelFlags* model_flags) {
toco::port::CheckInitGoogleIsDone("InitGoogle is not done yet");
if (parsed_model_flags.model_flags_file.specified()) {
std::string model_flags_file_contents;
QCHECK(port::file::GetContents(parsed_model_flags.model_flags_file.value(),
&model_flags_file_contents,
port::file::Defaults())
.ok())
<< "Specified --model_flags_file="
<< parsed_model_flags.model_flags_file.value()
<< " was not found or could not be read";
QCHECK(ParseFromStringEitherTextOrBinary(model_flags_file_contents,
model_flags))
<< "Specified --model_flags_file="
<< parsed_model_flags.model_flags_file.value()
<< " could not be parsed";
}
#ifdef PLATFORM_GOOGLE
CHECK(!((base::WasPresentOnCommandLine("batch") &&
parsed_model_flags.variable_batch.specified())))
<< "The --batch and --variable_batch flags are mutually exclusive.";
#endif
CHECK(!(parsed_model_flags.output_array.specified() &&
parsed_model_flags.output_arrays.specified()))
<< "The --output_array and --vs flags are mutually exclusive.";
if (parsed_model_flags.output_array.specified()) {
model_flags->add_output_arrays(parsed_model_flags.output_array.value());
}
if (parsed_model_flags.output_arrays.specified()) {
std::vector<std::string> output_arrays =
absl::StrSplit(parsed_model_flags.output_arrays.value(), ',');
for (const std::string& output_array : output_arrays) {
model_flags->add_output_arrays(output_array);
}
}
const bool uses_single_input_flags =
parsed_model_flags.input_array.specified() ||
parsed_model_flags.mean_value.specified() ||
parsed_model_flags.std_value.specified() ||
parsed_model_flags.input_shape.specified();
const bool uses_multi_input_flags =
parsed_model_flags.input_arrays.specified() ||
parsed_model_flags.mean_values.specified() ||
parsed_model_flags.std_values.specified() ||
parsed_model_flags.input_shapes.specified();
QCHECK(!(uses_single_input_flags && uses_multi_input_flags))
<< "Use either the singular-form input flags (--input_array, "
"--input_shape, --mean_value, --std_value) or the plural form input "
"flags (--input_arrays, --input_shapes, --mean_values, --std_values), "
"but not both forms within the same command line.";
if (parsed_model_flags.input_array.specified()) {
QCHECK(uses_single_input_flags);
model_flags->add_input_arrays()->set_name(
parsed_model_flags.input_array.value());
}
if (parsed_model_flags.input_arrays.specified()) {
QCHECK(uses_multi_input_flags);
for (const auto& input_array :
absl::StrSplit(parsed_model_flags.input_arrays.value(), ',')) {
model_flags->add_input_arrays()->set_name(std::string(input_array));
}
}
if (parsed_model_flags.mean_value.specified()) {
QCHECK(uses_single_input_flags);
model_flags->mutable_input_arrays(0)->set_mean_value(
parsed_model_flags.mean_value.value());
}
if (parsed_model_flags.mean_values.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> mean_values =
absl::StrSplit(parsed_model_flags.mean_values.value(), ',');
QCHECK(static_cast<int>(mean_values.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < mean_values.size(); ++i) {
char* last = nullptr;
model_flags->mutable_input_arrays(i)->set_mean_value(
strtod(mean_values[i].data(), &last));
CHECK(last != mean_values[i].data());
}
}
if (parsed_model_flags.std_value.specified()) {
QCHECK(uses_single_input_flags);
model_flags->mutable_input_arrays(0)->set_std_value(
parsed_model_flags.std_value.value());
}
if (parsed_model_flags.std_values.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> std_values =
absl::StrSplit(parsed_model_flags.std_values.value(), ',');
QCHECK(static_cast<int>(std_values.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < std_values.size(); ++i) {
char* last = nullptr;
model_flags->mutable_input_arrays(i)->set_std_value(
strtod(std_values[i].data(), &last));
CHECK(last != std_values[i].data());
}
}
if (parsed_model_flags.input_data_type.specified()) {
QCHECK(uses_single_input_flags);
IODataType type;
QCHECK(IODataType_Parse(parsed_model_flags.input_data_type.value(), &type));
model_flags->mutable_input_arrays(0)->set_data_type(type);
}
if (parsed_model_flags.input_data_types.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> input_data_types =
absl::StrSplit(parsed_model_flags.input_data_types.value(), ',');
QCHECK(static_cast<int>(input_data_types.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < input_data_types.size(); ++i) {
IODataType type;
QCHECK(IODataType_Parse(input_data_types[i], &type));
model_flags->mutable_input_arrays(i)->set_data_type(type);
}
}
if (parsed_model_flags.input_shape.specified()) {
QCHECK(uses_single_input_flags);
if (model_flags->input_arrays().empty()) {
model_flags->add_input_arrays();
}
auto* shape = model_flags->mutable_input_arrays(0)->mutable_shape();
shape->clear_dims();
const IntList& list = parsed_model_flags.input_shape.value();
for (auto& dim : list.elements) {
shape->add_dims(dim);
}
}
if (parsed_model_flags.input_shapes.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> input_shapes =
absl::StrSplit(parsed_model_flags.input_shapes.value(), ':');
QCHECK(static_cast<int>(input_shapes.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < input_shapes.size(); ++i) {
auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape();
shape->clear_dims();
if (input_shapes[i].empty()) {
continue;
}
for (const auto& dim_str : absl::StrSplit(input_shapes[i], ',')) {
int size;
CHECK(absl::SimpleAtoi(dim_str, &size))
<< "Failed to parse input_shape: " << input_shapes[i];
shape->add_dims(size);
}
}
}
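// Copies a scalar flag into the proto only if it was explicitly specified
// on the command line.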
#define READ_MODEL_FLAG(name) \
do { \
if (parsed_model_flags.name.specified()) { \
model_flags->set_##name(parsed_model_flags.name.value()); \
} \
} while (false)
READ_MODEL_FLAG(variable_batch);
#undef READ_MODEL_FLAG
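// Each --rnn_states element is a {key:value,...} map, e.g.
// {state_array:...,back_edge_source_array:...,size:4}; state_array,
// back_edge_source_array and size are required, num_dims is optional.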
for (const auto& element : parsed_model_flags.rnn_states.value().elements) {
auto* rnn_state_proto = model_flags->add_rnn_states();
for (const auto& kv_pair : element) {
const std::string& key = kv_pair.first;
const std::string& value = kv_pair.second;
if (key == "state_array") {
rnn_state_proto->set_state_array(value);
} else if (key == "back_edge_source_array") {
rnn_state_proto->set_back_edge_source_array(value);
} else if (key == "size") {
int32_t size = 0;
CHECK(absl::SimpleAtoi(value, &size));
CHECK_GT(size, 0);
rnn_state_proto->set_size(size);
} else if (key == "num_dims") {
int32_t size = 0;
CHECK(absl::SimpleAtoi(value, &size));
CHECK_GT(size, 0);
rnn_state_proto->set_num_dims(size);
} else {
LOG(FATAL) << "Unknown key '" << key << "' in --rnn_states";
}
}
CHECK(rnn_state_proto->has_state_array() &&
rnn_state_proto->has_back_edge_source_array() &&
rnn_state_proto->has_size())
<< "--rnn_states must include state_array, back_edge_source_array and "
"size.";
}
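// Each --model_checks element accepts the keys count_type, count_min and
// count_max; count values must be >= -1.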
for (const auto& element : parsed_model_flags.model_checks.value().elements) {
auto* model_check_proto = model_flags->add_model_checks();
for (const auto& kv_pair : element) {
const std::string& key = kv_pair.first;
const std::string& value = kv_pair.second;
if (key == "count_type") {
model_check_proto->set_count_type(value);
} else if (key == "count_min") {
int32_t count = 0;
CHECK(absl::SimpleAtoi(value, &count));
CHECK_GE(count, -1);
model_check_proto->set_count_min(count);
} else if (key == "count_max") {
int32_t count = 0;
CHECK(absl::SimpleAtoi(value, &count));
CHECK_GE(count, -1);
model_check_proto->set_count_max(count);
} else {
LOG(FATAL) << "Unknown key '" << key << "' in --model_checks";
}
}
}
if (!model_flags->has_allow_nonascii_arrays()) {
model_flags->set_allow_nonascii_arrays(
parsed_model_flags.allow_nonascii_arrays.value());
}
if (!model_flags->has_allow_nonexistent_arrays()) {
model_flags->set_allow_nonexistent_arrays(
parsed_model_flags.allow_nonexistent_arrays.value());
}
if (!model_flags->has_change_concat_input_ranges()) {
model_flags->set_change_concat_input_ranges(
parsed_model_flags.change_concat_input_ranges.value());
}
if (parsed_model_flags.arrays_extra_info_file.specified()) {
std::string arrays_extra_info_file_contents;
CHECK(port::file::GetContents(
parsed_model_flags.arrays_extra_info_file.value(),
&arrays_extra_info_file_contents, port::file::Defaults())
.ok());
ParseFromStringEitherTextOrBinary(arrays_extra_info_file_contents,
model_flags->mutable_arrays_extra_info());
}
}
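// Returns the process-wide ParsedModelFlags singleton, creating it on first
// use. If the first request passes must_already_exist=true, the flags were
// never initialized, so this aborts with an error.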
ParsedModelFlags* UncheckedGlobalParsedModelFlags(bool must_already_exist) {
static auto* flags = [must_already_exist]() {
if (must_already_exist) {
fprintf(stderr, __FILE__
":"
"GlobalParsedModelFlags() used without initialization\n");
fflush(stderr);
abort();
}
return new toco::ParsedModelFlags;
}();
return flags;
}
ParsedModelFlags* GlobalParsedModelFlags() {
return UncheckedGlobalParsedModelFlags(true);
}
void ParseModelFlagsOrDie(int* argc, char* argv[]) {
auto* flags = UncheckedGlobalParsedModelFlags(false);
std::string msg;
bool model_success =
toco::ParseModelFlagsFromCommandLineFlags(argc, argv, &msg, flags);
if (!model_success || !msg.empty()) {
fprintf(stderr, "%s", msg.c_str());
fflush(stderr);
abort();
}
}
}
|
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/model_cmdline_flags.h"
namespace toco {
namespace {
TEST(ModelCmdlineFlagsTest, ParseArgsStringMapList) {
int args_count = 3;
const char* args[] = {
"toco", "--input_arrays=input_1",
"--rnn_states={state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}",
nullptr};
std::string expected_input_arrays = "input_1";
std::vector<std::unordered_map<std::string, std::string>> expected_rnn_states;
expected_rnn_states.push_back(
{{"state_array", "rnn/BasicLSTMCellZeroState/zeros"},
{"back_edge_source_array", "rnn/basic_lstm_cell/Add_1"},
{"size", "4"}});
expected_rnn_states.push_back(
{{"state_array", "rnn/BasicLSTMCellZeroState/zeros_1"},
{"back_edge_source_array", "rnn/basic_lstm_cell/Mul_2"},
{"size", "4"}});
std::string message;
ParsedModelFlags result_flags;
EXPECT_TRUE(ParseModelFlagsFromCommandLineFlags(
&args_count, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.input_arrays.value(), expected_input_arrays);
EXPECT_EQ(result_flags.rnn_states.value().elements, expected_rnn_states);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/model_cmdline_flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/model_cmdline_flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
| a08fa36a-6348-4f9f-89fa-8d02736d352a | cpp | tensorflow/tensorflow | export | tensorflow/compiler/mlir/tfrt/utils/export.cc | tensorflow/lite/toco/tflite/export_test.cc |
#include "tensorflow/compiler/mlir/tfrt/utils/export.h"
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/tf_dialect_to_executor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
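// Lowers the TF-dialect MLIR module to tf_executor form, converts every
// function in it to a tensorflow::FunctionDef, and hands each FunctionDef
// to `callback`.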
absl::Status ExportFunctionDefs(
mlir::ModuleOp module,
absl::AnyInvocable<absl::Status(tensorflow::FunctionDef)> callback,
bool export_tf_original_func_name) {
tsl::profiler::TraceMe traceme([&]() {
return tsl::profiler::TraceMeEncode(
"ExportFunctionDefs",
{{"module_name", absl::string_view(module.getName().value_or("?"))}});
});
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v1::ExportFromTensorflowDialectToExecutor(module));
{
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
mlir::PassManager pm(module.getContext());
pm.addPass(mlir::CreateBreakUpIslandsPass());
if (mlir::failed(pm.run(module))) {
return diag_handler.ConsumeStatus();
}
}
tensorflow::GraphExportConfig configs;
configs.export_original_tf_func_name = export_tf_original_func_name;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
tensorflow::FunctionDef function_def;
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(
func, configs, &function_def));
TF_RETURN_IF_ERROR(callback(std::move(function_def)));
}
return absl::OkStatus();
}
} | #include "tensorflow/lite/toco/tflite/export.h"
#include <algorithm>
#include <initializer_list>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tflite/builtin_operator.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace toco {
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
class ExportTest : public ::testing::Test {
protected:
void ResetOperators() { input_model_.operators.clear(); }
void AddTensorsByName(std::initializer_list<std::string> names) {
for (const std::string& name : names) {
input_model_.GetOrCreateArray(name);
}
}
void AddOperatorsByName(std::initializer_list<std::string> names) {
for (const std::string& name : names) {
if (name == "Conv") {
auto* op = new ConvOperator;
op->padding.type = PaddingType::kSame;
op->inputs = {"input", "filter"};
op->outputs = {"output"};
Array& input_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& filter_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
} else if (name == "Add") {
auto* op = new AddOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
} else if (name == "Sub") {
auto* op = new SubOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input1_array.copy_shape({1, 2, 2, 2});
input2_array.copy_shape({1, 2, 2, 2});
output_array.copy_shape({1, 2, 2, 2});
input_model_.operators.emplace_back(op);
} else if (name == "Assert") {
auto* op = new TensorFlowAssertOperator;
::tensorflow::NodeDef node_def;
node_def.set_name("Assert");
node_def.set_op("Assert");
node_def.SerializeToString(&op->tensorflow_node_def);
input_model_.operators.emplace_back(op);
} else {
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = name;
input_model_.operators.emplace_back(op);
}
}
}
void BuildQuantizableTestModel() {
input_model_.GetOrCreateArray("inputs");
Array& weight_array = input_model_.GetOrCreateArray("weights");
int buf_size = 1296;
auto weight_buf = std::make_unique<float[]>(buf_size);
for (int i = 0; i < buf_size; i++) {
weight_buf[i] = static_cast<float>(i % 128);
}
weight_array.data_type = ArrayDataType::kFloat;
Shape* weight_array_shape = weight_array.mutable_shape();
std::vector<int>* weight_array_shape_dim =
weight_array_shape->mutable_dims();
weight_array_shape_dim->resize(4, 6);
auto& weight_array_buffer =
weight_array.GetMutableBuffer<ArrayDataType::kFloat>();
weight_array_buffer.data.resize(buf_size);
float* buf_ptr =
weight_array.GetMutableBuffer<ArrayDataType::kFloat>().data.data();
std::copy(weight_buf.get(), weight_buf.get() + buf_size, buf_ptr);
{
auto* op = new ConvOperator;
op->padding.type = PaddingType::kSame;
op->inputs = {"inputs", "weights"};
op->outputs = {"output"};
Array& input_array = input_model_.GetArray(op->inputs[0]);
Array& filter_array = input_model_.GetArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
}
{
auto* op = new AddOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
}
}
tensorflow::Status ExportAndReturnStatus(const ExportParams& params) {
std::string result;
return Export(input_model_, &result, params);
}
std::vector<std::string> ExportAndSummarizeOperators(
const ExportParams& params) {
std::vector<std::string> names;
std::string result;
auto status = Export(input_model_, &result, params);
if (!status.ok()) {
LOG(INFO) << status.message();
return names;
}
auto* model = ::tflite::GetModel(result.data());
for (const ::tflite::OperatorCode* opcode : *model->operator_codes()) {
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
names.push_back(std::string("builtin:") +
::tflite::EnumNameBuiltinOperator(builtin_code));
} else {
names.push_back(std::string("custom:") +
opcode->custom_code()->c_str());
}
}
return names;
}
std::vector<uint32_t> ExportAndGetOperatorIndices(
const ExportParams& params) {
std::vector<uint32_t> indices;
std::string result;
if (!Export(input_model_, &result, params).ok()) return indices;
auto* model = ::tflite::GetModel(result.data());
auto operators = (*model->subgraphs())[0]->operators();
for (const auto* op : *operators) {
indices.push_back(op->opcode_index());
}
return indices;
}
Model input_model_;
};
TEST_F(ExportTest, LoadTensorsMap) {
AddTensorsByName({"tensor_one", "tensor_two"});
details::TensorsMap tensors;
details::LoadTensorsMap(input_model_, &tensors);
EXPECT_EQ(0, tensors["tensor_one"]);
EXPECT_EQ(1, tensors["tensor_two"]);
}
TEST_F(ExportTest, LoadOperatorsMap) {
AddOperatorsByName({"Conv", "Add", "MyCrazyOp", "Sub"});
details::OperatorsMap operators;
const auto ops_by_type = BuildOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(
0, operators[details::OperatorKey(::tflite::BuiltinOperator_ADD, "", 1)]);
EXPECT_EQ(1, operators[details::OperatorKey(::tflite::BuiltinOperator_CONV_2D,
"", 1)]);
EXPECT_EQ(2, operators[details::OperatorKey(::tflite::BuiltinOperator_CUSTOM,
"MyCrazyOp", 1)]);
EXPECT_EQ(
3, operators[details::OperatorKey(::tflite::BuiltinOperator_SUB, "", 1)]);
}
TEST_F(ExportTest, UnsupportedFunctionality) {
AddOperatorsByName({"Conv"});
ExportParams params;
params.allow_dynamic_tensors = false;
auto status = ExportAndReturnStatus(params);
EXPECT_EQ(status.code(), ::tensorflow::error::UNIMPLEMENTED);
EXPECT_THAT(status.message(),
HasSubstr("Unsupported flag: allow_dynamic_tensors."));
}
TEST_F(ExportTest, Export) {
AddOperatorsByName({"Conv", "Add", "MyCrazyOp", "Sub"});
ExportParams params;
params.allow_custom_ops = true;
params.enable_select_tf_ops = false;
params.quantize_weights = QuantizedBufferType::NONE;
EXPECT_THAT(ExportAndSummarizeOperators(params),
ElementsAre("builtin:ADD", "builtin:CONV_2D", "custom:MyCrazyOp",
"builtin:SUB"));
EXPECT_THAT(ExportAndGetOperatorIndices(params), ElementsAre(1, 0, 2, 3));
}
TEST_F(ExportTest, ExportMinRuntime) {
AddOperatorsByName({"Conv", "Add", "Sub"});
ExportParams params;
params.allow_custom_ops = true;
params.enable_select_tf_ops = false;
params.quantize_weights = QuantizedBufferType::NONE;
std::string output;
auto status = Export(input_model_, &output, params);
auto* model = ::tflite::GetModel(output.data());
EXPECT_EQ(model->metadata()->size(), 1);
EXPECT_EQ(model->metadata()->Get(0)->name()->str(), "min_runtime_version");
auto buf = model->metadata()->Get(0)->buffer();
auto* buffer = (*model->buffers())[buf];
auto* array = buffer->data();
EXPECT_EQ(reinterpret_cast<const char*>(array->data()), std::string("1.6.0"));
}
TEST_F(ExportTest, ExportEmptyMinRuntime) {
AddOperatorsByName({"Switch", "MyCustomOp", "Assert"});
ExportParams params;
params.allow_custom_ops = true;
std::string output;
auto status = Export(input_model_, &output, params);
auto* model = ::tflite::GetModel(output.data());
EXPECT_EQ(model->metadata()->size(), 1);
EXPECT_EQ(model->metadata()->Get(0)->name()->str(), "min_runtime_version");
auto buf = model->metadata()->Get(0)->buffer();
auto* buffer = (*model->buffers())[buf];
auto* array = buffer->data();
EXPECT_EQ(reinterpret_cast<const char*>(array->data()), std::string(""));
}
TEST_F(ExportTest, UnsupportedControlFlowErrors) {
AddOperatorsByName({"Conv", "Add", "Switch", "Merge"});
ExportParams params;
params.allow_custom_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(status.message(),
"We are continually in the process of adding support to TensorFlow "
"Lite for more ops. It would be helpful if you could inform us of "
"how this conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nTensorFlow Lite currently doesn't support control "
"flow ops: Merge, Switch. We are working on supporting control "
"flow ops, please see github issue at "
"https:
}
TEST_F(ExportTest, UnsupportedOpsAndNeedEnableFlex) {
AddOperatorsByName({"Conv", "Add", "BatchNormWithGlobalNormalization"});
ExportParams params;
params.allow_custom_ops = false;
params.enable_select_tf_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nSome of the operators in the model are not supported by "
"the standard TensorFlow Lite runtime. If those are native TensorFlow "
"operators, you might be able to use the extended runtime by passing "
"--enable_select_tf_ops, or by setting "
"target_ops=TFLITE_BUILTINS,SELECT_TF_OPS when calling "
"tf.lite.TFLiteConverter(). Otherwise, if you have a custom "
"implementation for them you can disable this error with "
"--allow_custom_ops, or by setting allow_custom_ops=True when calling "
"tf.lite.TFLiteConverter(). Here is a list of builtin operators you are "
"using: ADD, CONV_2D. Here is a list of operators for which you will "
"need custom implementations: BatchNormWithGlobalNormalization.");
}
TEST_F(ExportTest, UnsupportedOpsNeedCustomImplementation) {
AddOperatorsByName({"Conv", "Add", "MyCustomOp1", "MyCustomOp2"});
ExportParams params;
params.allow_custom_ops = false;
params.enable_select_tf_ops = true;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nSome of the operators in the model are not supported by "
"the standard TensorFlow Lite runtime and are not recognized by "
"TensorFlow. If you have a custom implementation for them you can "
"disable this error with --allow_custom_ops, or by setting "
"allow_custom_ops=True when calling tf.lite.TFLiteConverter(). Here is a "
"list of builtin operators you are using: ADD, CONV_2D. Here is a list "
"of operators for which you will need custom implementations: "
"MyCustomOp1, MyCustomOp2.");
}
TEST_F(ExportTest, UnsupportedControlFlowAndCustomOpsErrors) {
AddOperatorsByName(
{"Conv", "Add", "Switch", "Merge", "MyCustomOp1", "MyCustomOp2"});
ExportParams params;
params.allow_custom_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nTensorFlow Lite currently doesn't support control flow "
"ops: Merge, Switch. We are working on supporting control flow ops, "
"please see github issue at "
"https:
"operators in the model are not supported by the standard TensorFlow "
"Lite runtime. If those are native TensorFlow operators, you might be "
"able to use the extended runtime by passing --enable_select_tf_ops, or "
"by setting target_ops=TFLITE_BUILTINS,SELECT_TF_OPS when calling "
"tf.lite.TFLiteConverter(). Otherwise, if you have a custom "
"implementation for them you can disable this error with "
"--allow_custom_ops, or by setting allow_custom_ops=True when calling "
"tf.lite.TFLiteConverter(). Here is a list of builtin operators you are "
"using: ADD, CONV_2D. Here is a list of operators for which you will "
"need custom implementations: MyCustomOp1, MyCustomOp2.");
}
TEST_F(ExportTest, QuantizeWeights) {
BuildQuantizableTestModel();
std::string unquantized_result;
Export(input_model_, true, false, &unquantized_result);
BuildQuantizableTestModel();
std::string quantized_result;
Export(input_model_, true, true, &quantized_result);
EXPECT_LT(quantized_result.size(), unquantized_result.size());
}
class OpSetsTest : public ExportTest {
public:
enum OpSet { kTfLiteBuiltins, kSelectTfOps, kCustomOps };
void SetAllowedOpSets(std::initializer_list<OpSet> sets) {
import_all_ops_as_unsupported_ = true;
params_.allow_custom_ops = false;
params_.enable_select_tf_ops = false;
params_.quantize_weights = QuantizedBufferType::NONE;
for (const OpSet& i : sets) {
switch (i) {
case kTfLiteBuiltins:
import_all_ops_as_unsupported_ = false;
break;
case kSelectTfOps:
params_.enable_select_tf_ops = true;
break;
case kCustomOps:
params_.allow_custom_ops = true;
break;
}
}
}
std::vector<std::string> ImportExport(
std::initializer_list<std::string> op_names) {
ResetOperators();
if (!import_all_ops_as_unsupported_) {
AddOperatorsByName(op_names);
} else {
for (const std::string& name : op_names) {
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = name;
input_model_.operators.emplace_back(op);
}
}
return ExportAndSummarizeOperators(params_);
}
private:
bool import_all_ops_as_unsupported_;
ExportParams params_;
};
TEST_F(OpSetsTest, BuiltinsOnly) {
SetAllowedOpSets({kTfLiteBuiltins});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add"}), ElementsAre("builtin:ADD"));
SetAllowedOpSets({kTfLiteBuiltins, kCustomOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre("builtin:ADD", "custom:AdjustHue", "custom:Assert",
"custom:UnrollAndFold"));
}
TEST_F(OpSetsTest, TfSelectOnly) {
SetAllowedOpSets({kSelectTfOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "RandomUniform",
"UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add"}), ElementsAre("custom:FlexAdd"));
SetAllowedOpSets({kSelectTfOps, kCustomOps});
EXPECT_THAT(
ImportExport(
{"Add", "AdjustHue", "RandomUniform", "UnrollAndFold", "Assert"}),
ElementsAre("custom:FlexAdd", "custom:FlexAdjustHue", "custom:FlexAssert",
"custom:FlexRandomUniform", "custom:UnrollAndFold"));
}
TEST_F(OpSetsTest, BuiltinsAndTfSelect) {
SetAllowedOpSets({kTfLiteBuiltins, kSelectTfOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add", "RandomUniform"}),
ElementsAre("builtin:ADD", "custom:FlexRandomUniform"));
SetAllowedOpSets({kTfLiteBuiltins, kSelectTfOps, kCustomOps});
EXPECT_THAT(
ImportExport(
{"Add", "AdjustHue", "RandomUniform", "UnrollAndFold", "Assert"}),
ElementsAre("builtin:ADD", "custom:FlexAdjustHue", "custom:FlexAssert",
"custom:FlexRandomUniform", "custom:UnrollAndFold"));
}
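// A stand-in convolution operator whose GetVersion() returns 2 when dilation
// factors are used and 1 otherwise, so the tests below can exercise
// versioned operator export.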
class FakeConvolutionOperator
: public BuiltinOperator<ConvOperator, ::tflite::Conv2DOptions,
::tflite::BuiltinOptions_Conv2DOptions> {
public:
FakeConvolutionOperator()
: BuiltinOperator(::tflite::BuiltinOperator_CONV_2D,
OperatorType::kConv) {}
int GetVersion(const OperatorSignature& op_signature) const override {
const TocoOperator& conv_op =
static_cast<const TocoOperator&>(*op_signature.op);
if (conv_op.dilation_width_factor != 1 ||
conv_op.dilation_height_factor != 1) {
return 2;
}
return 1;
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateConv2DOptions(*builder, padding, op.stride_width,
op.stride_height, activation_function,
op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class VersionedOpExportTest : public ::testing::Test {
protected:
void SetUp() override {
input_model_.GetOrCreateArray("input");
input_model_.GetOrCreateArray("filter");
input_model_.GetOrCreateArray("output");
}
void AddConvOp(bool use_dilation) {
{
auto* op = new ConvOperator;
op->inputs.push_back("input");
op->inputs.push_back("filter");
op->outputs.push_back("output");
op->padding.type = PaddingType::kSame;
op->stride_width = 1;
op->stride_height = 1;
if (use_dilation) {
op->dilation_width_factor = 2;
op->dilation_height_factor = 2;
} else {
op->dilation_width_factor = 1;
op->dilation_height_factor = 1;
}
input_model_.operators.emplace_back(op);
}
}
std::map<OperatorType, std::unique_ptr<BaseOperator>>
BuildFakeOperatorByTypeMap() {
std::map<OperatorType, std::unique_ptr<BaseOperator>> result;
result[OperatorType::kConv] =
std::unique_ptr<BaseOperator>(new FakeConvolutionOperator);
return result;
}
Model input_model_;
};
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithOpV1) {
AddConvOp(false);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(1, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 1)));
}
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithOpV2) {
AddConvOp(true);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(1, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 2)));
}
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithBothVersions) {
AddConvOp(false);
AddConvOp(true);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(2, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 1)));
EXPECT_EQ(1, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 2)));
}
TEST_F(VersionedOpExportTest, Export) {
AddConvOp(false);
AddConvOp(true);
std::string result;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
Export(input_model_, true, false, &result, ops_by_type);
auto* model = ::tflite::GetModel(result.data());
auto operator_codes = model->operator_codes();
EXPECT_EQ(2, operator_codes->size());
EXPECT_EQ(::tflite::BuiltinOperator_CONV_2D,
GetBuiltinCode((*operator_codes)[0]));
EXPECT_EQ(1, (*operator_codes)[0]->version());
EXPECT_EQ(::tflite::BuiltinOperator_CONV_2D,
GetBuiltinCode((*operator_codes)[1]));
EXPECT_EQ(2, (*operator_codes)[1]->version());
auto operators = (*model->subgraphs())[0]->operators();
EXPECT_EQ(2, operators->size());
EXPECT_EQ(0, (*operators)[0]->opcode_index());
EXPECT_EQ(1, (*operators)[1]->opcode_index());
}
TEST(OperatorKeyTest, TestBuiltinOp) {
Model model;
auto op = std::make_unique<ConvOperator>();
op->inputs = {"input", "filter"};
op->outputs = {"output"};
Array& input_array = model.GetOrCreateArray(op->inputs[0]);
Array& filter_array = model.GetOrCreateArray(op->inputs[1]);
Array& output_array = model.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CONV_2D);
EXPECT_EQ(key.custom_code(), "");
EXPECT_EQ(key.version(), 1);
}
TEST(OperatorKeyTest, TestBuiltinOpWithVersionedInputTypes) {
Model model;
auto op = std::make_unique<DequantizeOperator>();
op->inputs = {"input"};
op->outputs = {"output"};
Array& input_array = model.GetOrCreateArray(op->inputs[0]);
Array& output_array = model.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kInt8;
output_array.data_type = ArrayDataType::kFloat;
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_DEQUANTIZE);
EXPECT_EQ(key.custom_code(), "");
EXPECT_EQ(key.version(), 2);
}
TEST(OperatorKeyTest, TestCustomOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "MyCrazyCustomOp";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "MyCrazyCustomOp");
EXPECT_EQ(key.version(), 1);
}
TEST(OperatorKeyTest, TestFlexOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "BatchMatMul";
const auto ops_by_type = BuildOperatorByTypeMap();
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "BatchMatMul");
EXPECT_EQ(key.version(), 1);
EXPECT_TRUE(key.is_custom_op());
EXPECT_FALSE(key.is_flex_op());
}
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexBatchMatMul");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
}
}
TEST(OperatorKeyTest, TestFlexWithControlFlowOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "Merge";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexMerge");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
EXPECT_TRUE(key.is_unsupported_flex_op());
}
TEST(OperatorKeyTest, TestFlexWithUnsupportedOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "UnsupportedOp";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "UnsupportedOp");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_flex_op());
EXPECT_FALSE(key.is_unsupported_flex_op());
}
TEST(OperatorKeyTest, TestFlexWithPartiallySupportedOps) {
Model model;
auto op = std::make_unique<TensorFlowAssertOperator>();
const auto ops_by_type = BuildOperatorByTypeMap();
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "Assert");
EXPECT_EQ(key.version(), 1);
EXPECT_TRUE(key.is_custom_op());
EXPECT_FALSE(key.is_flex_op());
}
::tensorflow::NodeDef node_def;
node_def.set_name("TensorFlowAssert");
node_def.set_op("TensorFlowAssert");
node_def.SerializeToString(&op->tensorflow_node_def);
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexAssert");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
}
}
}
}
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/utils/export.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/export_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
| 0a821a6c-44b2-46e5-928c-96e5c8548a77 | cpp | tensorflow/tensorflow | import | tensorflow/lite/toco/tflite/import.cc | tensorflow/lite/toco/tflite/import_test.cc |
#include "tensorflow/lite/toco/tflite/import.h"
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/core/tools/verifier.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace tflite {
namespace details {
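// Collects the names of all tensors in the first subgraph, in index order,
// so that operators can later refer to tensors by index.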
void LoadTensorsTable(const ::tflite::Model& input_model,
TensorsTable* tensors_table) {
auto tensors = (*input_model.subgraphs())[0]->tensors();
if (!tensors) return;
for (const auto* tensor : *tensors) {
tensors_table->push_back(tensor->name()->c_str());
}
}
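// Collects one name per operator code: the builtin operator name, or the
// custom code for custom operators.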
void LoadOperatorsTable(const ::tflite::Model& input_model,
OperatorsTable* operators_table) {
auto opcodes = input_model.operator_codes();
if (!opcodes) return;
for (const auto* opcode : *opcodes) {
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
operators_table->push_back(EnumNameBuiltinOperator(builtin_code));
} else {
operators_table->push_back(opcode->custom_code()->c_str());
}
}
}
}
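// Creates a toco::Array for every tensor in the model, copying its data
// type, buffer contents, shape, and (if present) min/max and quantization
// parameters.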
void ImportTensors(const ::tflite::Model& input_model, Model* model) {
auto tensors = (*input_model.subgraphs())[0]->tensors();
auto* buffers = input_model.buffers();
if (!tensors) return;
for (const auto* input_tensor : *tensors) {
Array& array = model->GetOrCreateArray(input_tensor->name()->c_str());
array.data_type = DataType::Deserialize(input_tensor->type());
int buffer_index = input_tensor->buffer();
auto* buffer = buffers->Get(buffer_index);
DataBuffer::Deserialize(*input_tensor, *buffer, &array);
auto shape = input_tensor->shape();
if (shape) {
array.mutable_shape()->mutable_dims()->clear();
for (uint32_t i = 0; i < shape->Length(); ++i) {
auto d = shape->Get(i);
array.mutable_shape()->mutable_dims()->push_back(d);
}
}
auto quantization = input_tensor->quantization();
if (quantization) {
if (quantization->min() && quantization->max()) {
CHECK_EQ(1, quantization->min()->Length());
CHECK_EQ(1, quantization->max()->Length());
MinMax& minmax = array.GetOrCreateMinMax();
minmax.min = quantization->min()->Get(0);
minmax.max = quantization->max()->Get(0);
}
if (quantization->scale() && quantization->zero_point()) {
CHECK_EQ(1, quantization->scale()->Length());
CHECK_EQ(1, quantization->zero_point()->Length());
QuantizationParams& q = array.GetOrCreateQuantizationParams();
q.scale = quantization->scale()->Get(0);
q.zero_point = quantization->zero_point()->Get(0);
}
}
}
}
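// Recreates each operator from the flatbuffer, falling back to
// TensorFlowUnsupportedOperator for op names with no registered
// deserializer, and wires up its input/output tensor names (optional
// inputs, encoded as -1, become optional arrays).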
void ImportOperators(
const ::tflite::Model& input_model,
const std::map<std::string, std::unique_ptr<BaseOperator>>& ops_by_name,
const details::TensorsTable& tensors_table,
const details::OperatorsTable& operators_table, Model* model) {
auto ops = (*input_model.subgraphs())[0]->operators();
if (!ops) return;
for (const auto* input_op : *ops) {
uint32_t index = input_op->opcode_index();
    if (index >= operators_table.size()) {
      LOG(FATAL) << "Index " << index << " must be less than "
                 << operators_table.size();
}
std::string opname = operators_table.at(index);
std::unique_ptr<Operator> new_op = nullptr;
if (ops_by_name.count(opname) == 0) {
std::string effective_opname = "TENSORFLOW_UNSUPPORTED";
if (ops_by_name.count(effective_opname) == 0) {
LOG(FATAL) << "Internal logic error: TENSORFLOW_UNSUPPORTED not found.";
}
new_op = ops_by_name.at(effective_opname)
->Deserialize(input_op->builtin_options(),
input_op->custom_options());
if (new_op->type == OperatorType::kUnsupported) {
auto* unsupported_op =
static_cast<TensorFlowUnsupportedOperator*>(new_op.get());
unsupported_op->tensorflow_op = opname;
unsupported_op->quantized = true;
} else {
LOG(FATAL) << "Expected a TensorFlowUnsupportedOperator";
}
} else {
new_op = ops_by_name.at(opname)->Deserialize(input_op->builtin_options(),
input_op->custom_options());
}
model->operators.emplace_back(new_op.release());
auto* op = model->operators.back().get();
auto inputs = input_op->inputs();
for (uint32_t i = 0; i < inputs->Length(); i++) {
auto input_index = inputs->Get(i);
if (input_index != -1) {
const std::string& input_name = tensors_table.at(input_index);
op->inputs.push_back(input_name);
} else {
const std::string& tensor_name =
toco::AvailableArrayName(*model, "OptionalTensor");
model->CreateOptionalArray(tensor_name);
op->inputs.push_back(tensor_name);
}
}
auto outputs = input_op->outputs();
for (int i = 0, end = outputs->Length(); i < end; i++) {
auto output_index = outputs->Get(i);
const std::string& output_name = tensors_table.at(output_index);
op->outputs.push_back(output_name);
}
}
}
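// Fills in the model's input/output arrays from the subgraph's declared
// inputs/outputs, unless they were already given via model flags.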
void ImportIOTensors(const ModelFlags& model_flags,
const ::tflite::Model& input_model,
const details::TensorsTable& tensors_table, Model* model) {
if (model_flags.input_arrays().empty()) {
auto inputs = (*input_model.subgraphs())[0]->inputs();
if (inputs) {
for (int input : *inputs) {
const std::string& input_name = tensors_table.at(input);
model->flags.add_input_arrays()->set_name(input_name);
}
}
}
if (model_flags.output_arrays().empty()) {
auto outputs = (*input_model.subgraphs())[0]->outputs();
if (outputs) {
for (int output : *outputs) {
const std::string& output_name = tensors_table.at(output);
model->flags.add_output_arrays(output_name);
}
}
}
}
namespace {
bool Verify(const void* buf, size_t len) {
::flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
return ::tflite::VerifyModelBuffer(verifier);
}
}
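// Verifies the flatbuffer, then builds a toco::Model by importing tensors,
// operators, and I/O arrays. Exactly one subgraph is required.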
std::unique_ptr<Model> Import(const ModelFlags& model_flags,
const std::string& input_file_contents) {
::tflite::AlwaysTrueResolver r;
if (!::tflite::Verify(input_file_contents.data(), input_file_contents.size(),
r, ::tflite::DefaultErrorReporter())) {
LOG(FATAL) << "Invalid flatbuffer.";
}
const ::tflite::Model* input_model =
::tflite::GetModel(input_file_contents.data());
const auto ops_by_name = BuildOperatorByNameMap();
if (!input_model->subgraphs() || input_model->subgraphs()->size() != 1) {
LOG(FATAL) << "Number of subgraphs in tflite should be exactly 1.";
}
std::unique_ptr<Model> model;
model = std::make_unique<Model>();
details::TensorsTable tensors_table;
details::LoadTensorsTable(*input_model, &tensors_table);
details::OperatorsTable operators_table;
details::LoadOperatorsTable(*input_model, &operators_table);
ImportTensors(*input_model, model.get());
ImportOperators(*input_model, ops_by_name, tensors_table, operators_table,
model.get());
ImportIOTensors(model_flags, *input_model, tensors_table, model.get());
UndoWeightsShuffling(model.get());
return model;
}
}
} | #include "tensorflow/lite/toco/tflite/import.h"
#include <initializer_list>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/lite/version.h"
namespace toco {
namespace tflite {
namespace {
using ::testing::ElementsAre;
using flatbuffers::Offset;
using flatbuffers::Vector;
class ImportTest : public ::testing::Test {
protected:
template <typename T>
Offset<Vector<unsigned char>> CreateDataVector(const std::vector<T>& data) {
return builder_.CreateVector(reinterpret_cast<const uint8_t*>(data.data()),
sizeof(T) * data.size());
}
Offset<Vector<Offset<::tflite::Buffer>>> BuildBuffers() {
auto buf0 = ::tflite::CreateBuffer(builder_, CreateDataVector<float>({}));
auto buf1 = ::tflite::CreateBuffer(
builder_, CreateDataVector<float>({1.0f, 2.0f, 3.0f, 4.0f}));
auto buf2 =
::tflite::CreateBuffer(builder_, CreateDataVector<float>({3.0f, 4.0f}));
return builder_.CreateVector(
std::vector<Offset<::tflite::Buffer>>({buf0, buf1, buf2}));
}
Offset<Vector<Offset<::tflite::Tensor>>> BuildTensors() {
auto q = ::tflite::CreateQuantizationParameters(
builder_,
builder_.CreateVector<float>({0.1f}),
builder_.CreateVector<float>({0.2f}),
builder_.CreateVector<float>({0.3f}),
builder_.CreateVector<int64_t>({100LL}));
auto t1 =
::tflite::CreateTensor(builder_, builder_.CreateVector<int>({1, 2, 2}),
::tflite::TensorType_FLOAT32, 1,
builder_.CreateString("tensor_one"), q);
auto t2 =
::tflite::CreateTensor(builder_, builder_.CreateVector<int>({2, 1}),
::tflite::TensorType_FLOAT32, 0,
builder_.CreateString("tensor_two"), q);
return builder_.CreateVector(
std::vector<Offset<::tflite::Tensor>>({t1, t2}));
}
Offset<Vector<Offset<::tflite::OperatorCode>>> BuildOpCodes(
std::initializer_list<::tflite::BuiltinOperator> op_codes) {
std::vector<Offset<::tflite::OperatorCode>> op_codes_vector;
for (auto op : op_codes) {
op_codes_vector.push_back(::tflite::CreateOperatorCode(builder_, op, 0));
}
return builder_.CreateVector(op_codes_vector);
}
Offset<Vector<Offset<::tflite::OperatorCode>>> BuildOpCodes() {
return BuildOpCodes({::tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::BuiltinOperator_CONV_2D});
}
Offset<Vector<Offset<::tflite::Operator>>> BuildOperators(
std::initializer_list<int> inputs, std::initializer_list<int> outputs) {
auto is = builder_.CreateVector<int>(inputs);
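    // An empty list is written as a null (zero) offset so the field is
    // omitted from the flatbuffer entirely, letting the death tests below
    // exercise models with missing inputs or outputs.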
if (inputs.size() == 0) is = 0;
auto os = builder_.CreateVector<int>(outputs);
if (outputs.size() == 0) os = 0;
auto op = ::tflite::CreateOperator(
builder_, 0, is, os, ::tflite::BuiltinOptions_Conv2DOptions,
::tflite::CreateConv2DOptions(builder_, ::tflite::Padding_VALID, 1, 1,
::tflite::ActivationFunctionType_NONE)
.Union(),
0, ::tflite::CustomOptionsFormat_FLEXBUFFERS);
return builder_.CreateVector(std::vector<Offset<::tflite::Operator>>({op}));
}
Offset<Vector<Offset<::tflite::Operator>>> BuildOperators() {
return BuildOperators({0}, {1});
}
Offset<Vector<Offset<::tflite::SubGraph>>> BuildSubGraphs(
Offset<Vector<Offset<::tflite::Tensor>>> tensors,
Offset<Vector<Offset<::tflite::Operator>>> operators,
int num_sub_graphs = 1) {
std::vector<int32_t> inputs = {0};
std::vector<int32_t> outputs = {1};
std::vector<Offset<::tflite::SubGraph>> v;
for (int i = 0; i < num_sub_graphs; ++i) {
v.push_back(::tflite::CreateSubGraph(
builder_, tensors, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), operators,
builder_.CreateString("subgraph")));
}
return builder_.CreateVector(v);
}
void BuildTestModel() {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto s = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION,
opcodes, subgraphs, s, buffers));
input_model_ = ::tflite::GetModel(builder_.GetBufferPointer());
}
std::string InputModelAsString() {
return std::string(reinterpret_cast<char*>(builder_.GetBufferPointer()),
builder_.GetSize());
}
flatbuffers::FlatBufferBuilder builder_;
const ::tflite::Model* input_model_ = nullptr;
};
TEST_F(ImportTest, LoadTensorsTable) {
BuildTestModel();
details::TensorsTable tensors;
details::LoadTensorsTable(*input_model_, &tensors);
EXPECT_THAT(tensors, ElementsAre("tensor_one", "tensor_two"));
}
TEST_F(ImportTest, LoadOperatorsTable) {
BuildTestModel();
details::OperatorsTable operators;
details::LoadOperatorsTable(*input_model_, &operators);
EXPECT_THAT(operators, ElementsAre("MAX_POOL_2D", "CONV_2D"));
}
TEST_F(ImportTest, Tensors) {
BuildTestModel();
auto model = Import(ModelFlags(), InputModelAsString());
  ASSERT_TRUE(model->HasArray("tensor_one"));
Array& a1 = model->GetArray("tensor_one");
EXPECT_EQ(ArrayDataType::kFloat, a1.data_type);
EXPECT_THAT(a1.GetBuffer<ArrayDataType::kFloat>().data,
ElementsAre(1.0f, 2.0f, 3.0f, 4.0f));
ASSERT_TRUE(a1.has_shape());
EXPECT_THAT(a1.shape().dims(), ElementsAre(1, 2, 2));
const auto& mm = a1.minmax;
ASSERT_TRUE(mm.get());
EXPECT_FLOAT_EQ(0.1, mm->min);
EXPECT_FLOAT_EQ(0.2, mm->max);
const auto& q = a1.quantization_params;
ASSERT_TRUE(q.get());
EXPECT_FLOAT_EQ(0.3, q->scale);
EXPECT_EQ(100, q->zero_point);
}
TEST_F(ImportTest, NoBuffers) {
auto buffers = 0;
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'buffers' section.");
}
TEST_F(ImportTest, NoInputs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators({}, {1});
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'inputs' for operator.");
}
TEST_F(ImportTest, NoOutputs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators({0}, {});
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'outputs' for operator.");
}
TEST_F(ImportTest, InvalidOpCode) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes({static_cast<::tflite::BuiltinOperator>(-1),
::tflite::BuiltinOperator_CONV_2D});
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Operator id '-1' is out of range.");
}
TEST_F(ImportTest, MultipleSubGraphs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators, 2);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
input_model_ = ::tflite::GetModel(builder_.GetBufferPointer());
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Number of subgraphs in tflite should be exactly 1.");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/import.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/import_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
64f4977d-b4ee-450d-b8ec-812ee549e6e9 | cpp | tensorflow/tensorflow | resolve_constant_concatenation | tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc | tensorflow/lite/toco/graph_transformations/tests/resolve_constant_concatenation_test.cc | #include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
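// Copies the tensor data of every input array into the concatenated array,
// interleaving one segment per input on each copy step. Returns early if any
// input has no buffer yet.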
template <ArrayDataType A, typename T>
void CopyTensorSegments(const std::vector<Array*>& input_arrays,
const std::vector<int>& array_copy_size,
const int num_elements_concatenated_array,
Array* concatenated_array) {
for (Array* input_array : input_arrays) {
if (!input_array->buffer) {
return;
}
}
auto& concatenated_array_buffer =
concatenated_array->GetMutableBuffer<A>().data;
concatenated_array_buffer.resize(num_elements_concatenated_array);
CHECK(!input_arrays.empty());
CHECK_NE(array_copy_size[0], 0);
const int total_copy_steps =
input_arrays[0]->GetBuffer<A>().data.size() / array_copy_size[0];
std::vector<const T*> src_ptr;
src_ptr.reserve(input_arrays.size());
for (Array* input_array : input_arrays) {
src_ptr.push_back(input_array->GetBuffer<A>().data.data());
}
T* dest_ptr = concatenated_array_buffer.data();
for (int s = 0; s < total_copy_steps; s++) {
for (size_t i = 0; i < input_arrays.size(); i++) {
std::copy(src_ptr[i], src_ptr[i] + array_copy_size[i], dest_ptr);
src_ptr[i] += array_copy_size[i];
dest_ptr += array_copy_size[i];
}
}
}
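// Computes each input's copy-segment size (the product of the dimensions at
// and after the concatenation axis) and delegates the data movement to
// CopyTensorSegments.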
template <ArrayDataType A>
void ConcatenateTensorBuffers(const std::vector<Array*>& input_arrays,
int concatenation_axis,
Array* concatenated_array) {
int num_elements_concatenated_array = 1;
for (int i = 0; i < concatenated_array->shape().dimensions_count(); i++) {
num_elements_concatenated_array *= concatenated_array->shape().dims()[i];
}
std::vector<int> array_copy_size(input_arrays.size());
int count = 0;
for (Array* input_array : input_arrays) {
const Shape array_shape = input_array->shape();
array_copy_size[count] = 1;
for (int i = concatenation_axis; i < array_shape.dimensions_count(); i++) {
array_copy_size[count] *= array_shape.dims()[i];
}
count++;
}
CopyTensorSegments<A, DataType<A>>(input_arrays, array_copy_size,
num_elements_concatenated_array,
concatenated_array);
}
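// Sets the concatenated array's min/max to the union of the input ranges,
// provided every input carries min/max information.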
void SetMinMaxForConcatenatedArray(GraphTransformation* transformation,
const std::vector<Array*>& input_arrays,
Array* concatenated_array) {
CHECK(concatenated_array->data_type == ArrayDataType::kFloat);
if (concatenated_array->minmax) return;
double concat_min = std::numeric_limits<double>::infinity();
double concat_max = -std::numeric_limits<double>::infinity();
for (Array* input_array : input_arrays) {
if (!input_array->minmax) return;
const MinMax& input_minmax = input_array->GetMinMax();
concat_min = std::min(concat_min, input_minmax.min);
concat_max = std::max(concat_max, input_minmax.max);
}
MinMax& minmax = concatenated_array->GetOrCreateMinMax();
minmax.min = concat_min;
minmax.max = concat_max;
transformation->AddMessageF("Setting concatenated array min/max to %g,%g",
concat_min, concat_max);
}
}
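// Folds a Concatenation operator whose inputs are all constant, shaped,
// unquantized, and discardable arrays into a single constant output array.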
::tensorflow::Status ResolveConstantConcatenation::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto concat_it = model->operators.begin() + op_index;
const auto* concat_base_op = concat_it->get();
if (concat_base_op->type != OperatorType::kConcatenation) {
return absl::OkStatus();
}
const auto* concat_op =
static_cast<const ConcatenationOperator*>(concat_base_op);
for (const std::string& input_name : concat_op->inputs) {
const Operator* input_op = GetOpWithOutput(*model, input_name);
if (input_op) return absl::OkStatus();
if (!IsConstantParameterArray(*model, input_name)) return absl::OkStatus();
if (!model->GetArray(input_name).has_shape()) return absl::OkStatus();
if (model->GetArray(input_name).quantization_params)
return absl::OkStatus();
if (!IsDiscardableArray(*model, input_name)) return absl::OkStatus();
}
const int concatenation_axis = concat_op->axis;
CHECK_EQ(concat_op->outputs.size(), 1);
std::string concatenated_array_name = concat_op->outputs[0];
Array& concatenated_array = model->GetOrCreateArray(concatenated_array_name);
std::vector<Array*> input_arrays;
input_arrays.reserve(concat_op->inputs.size());
for (const std::string& input_name : concat_op->inputs) {
input_arrays.push_back(&model->GetArray(input_name));
}
AddMessageF("Performing constant concat of %s into %s",
absl::StrJoin(concat_op->inputs, ", "), concatenated_array_name);
switch (concatenated_array.data_type) {
case ArrayDataType::kFloat:
ConcatenateTensorBuffers<ArrayDataType::kFloat>(
input_arrays, concatenation_axis, &concatenated_array);
      SetMinMaxForConcatenatedArray(this, input_arrays, &concatenated_array);
break;
case ArrayDataType::kUint8:
ConcatenateTensorBuffers<ArrayDataType::kUint8>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kInt32:
ConcatenateTensorBuffers<ArrayDataType::kInt32>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kInt64:
ConcatenateTensorBuffers<ArrayDataType::kInt64>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kString:
ConcatenateTensorBuffers<ArrayDataType::kString>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kComplex64:
ConcatenateTensorBuffers<ArrayDataType::kComplex64>(
input_arrays, concatenation_axis, &concatenated_array);
break;
default:
LOG(FATAL) << "ArrayDataType not supported";
}
DeleteOpAndArrays(model, concat_op);
*modified = true;
return absl::OkStatus();
}
} | #include <algorithm>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
std::vector<testing::Matcher<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5) {
std::vector<testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
matchers.emplace_back(testing::FloatNear(v, max_abs_error));
}
return matchers;
}
}
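// Fixture that builds a model in which four constant 2x2x2 float arrays feed
// one Concatenation operator along the axis under test.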
class ResolveConstantConcatenationTest : public ::testing::Test {
protected:
ResolveConstantConcatenationTest() {}
void PrepareModel(Model* model, int axis) {
const std::string output_name("concat_op_output");
model->flags.add_output_arrays(output_name);
std::vector<std::string> concat_input_names = {"array0", "array1", "array2",
"array3"};
const int kDim = 3;
const int kElementPerDim = 2;
const int kBufSize = 8;
const int kNumArrays = 4;
static float in_buf[kNumArrays][kBufSize] = {
{0., 1., 2., 3., 4., 5., 6., 7.},
{10., 11., 12., 13., 14., 15., 16., 17.},
{20., 21., 22., 23., 24., 25., 26., 27.},
{30., 31., 32., 33., 34., 35., 36., 37.}};
int cnt = 0;
for (const std::string& concat_input_name : concat_input_names) {
Array& in_array = model->GetOrCreateArray(concat_input_name);
in_array.data_type = ArrayDataType::kFloat;
Shape* in_array_shape = in_array.mutable_shape();
std::vector<int>* in_array_shape_dim = in_array_shape->mutable_dims();
for (int i = 0; i < kDim; i++) {
in_array_shape_dim->push_back(kElementPerDim);
}
auto& in_array_buffer =
in_array.GetMutableBuffer<toco::ArrayDataType::kFloat>();
in_array_buffer.data.resize(kBufSize);
float* buf_ptr =
in_array.GetMutableBuffer<toco::ArrayDataType::kFloat>().data.data();
std::copy(in_buf[cnt], in_buf[cnt] + kBufSize, buf_ptr);
cnt++;
}
auto* concatenation_op = new ConcatenationOperator;
concatenation_op->axis = axis;
concatenation_op->inputs = concat_input_names;
concatenation_op->outputs = {output_name};
Array& out_array = model->GetOrCreateArray(concatenation_op->outputs[0]);
out_array.data_type = ArrayDataType::kFloat;
Shape* out_array_shape = out_array.mutable_shape();
std::vector<int>* out_array_shape_dim = out_array_shape->mutable_dims();
out_array_shape_dim->resize(kDim);
for (int i = 0; i < kDim; i++) {
if (i == axis) {
(*out_array_shape_dim)[i] = kNumArrays * kElementPerDim;
} else {
(*out_array_shape_dim)[i] = kElementPerDim;
}
}
model->operators.push_back(std::unique_ptr<Operator>(concatenation_op));
}
};
TEST_F(ResolveConstantConcatenationTest, ConcatAtAxis0) {
Model model;
const int axis = 0;
PrepareModel(&model, axis);
GraphTransformationsSet graph_transformation_set;
graph_transformation_set.Add(new toco::ResolveConstantConcatenation);
EXPECT_THAT(model.GetArrayMap().size(), 5);
bool modified;
ASSERT_TRUE((*graph_transformation_set.begin())
->Run(&model, 0, &modified)
.ok());
EXPECT_THAT(model.GetArrayMap().size(), 1);
const auto& concatenated_array = model.GetArray(model.flags.output_arrays(0));
EXPECT_THAT(concatenated_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear(
{0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12.,
13., 14., 15., 16., 17., 20., 21., 22., 23., 24., 25.,
26., 27., 30., 31., 32., 33., 34., 35., 36., 37.})));
}
TEST_F(ResolveConstantConcatenationTest, ConcatAtAxis1) {
Model model;
const int axis = 1;
PrepareModel(&model, axis);
GraphTransformationsSet graph_transformation_set;
graph_transformation_set.Add(new toco::ResolveConstantConcatenation);
EXPECT_THAT(model.GetArrayMap().size(), 5);
bool modified;
ASSERT_TRUE((*graph_transformation_set.begin())
->Run(&model, 0, &modified)
.ok());
EXPECT_THAT(model.GetArrayMap().size(), 1);
auto& concatenated_array = (*model.GetArrayMap().begin()).second;
EXPECT_THAT(concatenated_array->GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear(
{0., 1., 2., 3., 10., 11., 12., 13., 20., 21., 22.,
23., 30., 31., 32., 33., 4., 5., 6., 7., 14., 15.,
16., 17., 24., 25., 26., 27., 34., 35., 36., 37.})));
}
TEST_F(ResolveConstantConcatenationTest, ConcatAtAxis2) {
Model model;
const int axis = 2;
PrepareModel(&model, axis);
GraphTransformationsSet graph_transformation_set;
graph_transformation_set.Add(new toco::ResolveConstantConcatenation);
EXPECT_THAT(model.GetArrayMap().size(), 5);
bool modified;
ASSERT_TRUE((*graph_transformation_set.begin())
->Run(&model, 0, &modified)
.ok());
EXPECT_THAT(model.GetArrayMap().size(), 1);
auto& concatenated_array = (*model.GetArrayMap().begin()).second;
EXPECT_THAT(concatenated_array->GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear(
{0., 1., 10., 11., 20., 21., 30., 31., 2., 3., 12.,
13., 22., 23., 32., 33., 4., 5., 14., 15., 24., 25.,
34., 35., 6., 7., 16., 17., 26., 27., 36., 37.})));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/resolve_constant_concatenation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
72c4e89d-737d-4456-b334-74f2ee5903af | cpp | tensorflow/tensorflow | remove_successive_transpose | tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc | tensorflow/lite/toco/graph_transformations/tests/remove_successive_transpose_test.cc | #include <string>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
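// Returns true if applying permutation `perm2` followed by `perm1` yields the
// identity, i.e. the two transposes cancel out.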
bool TransformsToIdentity(std::vector<int> const& perm1,
std::vector<int> const& perm2) {
if (perm2.size() != perm1.size() || perm1.empty()) {
return false;
}
for (size_t i = 0; i < perm1.size(); ++i) {
if (perm1[i] < 0 || perm1[i] >= static_cast<int>(perm1.size()) ||
perm2[i] < 0 || perm2[i] >= static_cast<int>(perm1.size())) {
return false;
}
if (perm1[perm2[i]] != static_cast<int>(i)) {
return false;
}
}
return true;
}
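// Rewrites every operator input named `lookfor` to `replacewith`.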
void ReplaceOpInputsWith(Model* model, const std::string& lookfor,
const std::string& replacewith) {
for (const auto& op : model->operators) {
for (size_t i = 0; i < op->inputs.size(); ++i) {
if (op->inputs[i] == lookfor) {
op->inputs[i] = replacewith;
}
}
}
}
}
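// Removes two back-to-back Transpose operators whose permutations cancel,
// provided the intermediate result has no other consumers and the second
// transpose's output is actually used.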
::tensorflow::Status RemoveSuccessiveTranspose::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
auto op = model->operators.begin() + op_index;
if (op->get()->type != OperatorType::kTranspose) {
return absl::OkStatus();
}
TransposeOperator* t_op = static_cast<TransposeOperator*>(op->get());
if (CountOpsWithInput(*model, t_op->outputs[0]) != 1) {
return absl::OkStatus();
}
Operator* next = GetOpWithInput(*model, t_op->outputs[0]);
if (!next || next->type != OperatorType::kTranspose) {
return absl::OkStatus();
}
TransposeOperator* t_next = static_cast<TransposeOperator*>(next);
if (!CountOpsWithInput(*model, t_next->outputs[0])) {
return absl::OkStatus();
}
if (TransformsToIdentity(t_op->perm, t_next->perm)) {
ReplaceOpInputsWith(model, t_next->outputs[0], t_op->inputs[0]);
DeleteOpAndArrays(model, t_next);
DeleteOpAndArrays(model, t_op);
*modified = true;
}
return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace {
using ::testing::Test;
class RemoveSuccessiveTransposeTest : public Test {
protected:
RemoveSuccessiveTransposeTest() {}
void SetUp() override { model_ = std::make_unique<toco::Model>(); }
void CreateArray(const std::string& name, const std::vector<int>& shape) {
toco::Array& array = model_->GetOrCreateArray(name);
array.data_type = toco::ArrayDataType::kFloat;
toco::Shape* array_shape = array.mutable_shape();
*(array_shape->mutable_dims()) = shape;
}
void CreateConstantArray(const std::string& name,
const std::vector<int>& shape,
const std::vector<float>& data) {
CreateArray(name, shape);
toco::Array& array = model_->GetOrCreateArray(name);
auto& array_buffer = array.GetMutableBuffer<toco::ArrayDataType::kFloat>();
int bufsize = 1;
for (int dim : shape) {
bufsize *= dim;
}
array_buffer.data.resize(bufsize);
float* buf_ptr = array_buffer.data.data();
for (int i = 0; i < bufsize; ++i) {
buf_ptr[i] = data[i];
}
}
void CreateGraph(const std::vector<int>& perm1,
const std::vector<int>& perm2) {
CreateArray("InputA", {2, 2});
CreateArray("InputB", {2, 2});
CreateArray("Input", {2, 2});
CreateArray("InputTranspose", {2, 2});
CreateArray("InputTransposeTranspose", {2, 2});
CreateArray("InputTransposeTransposePlusB", {2, 2});
auto* add_op = new toco::AddOperator;
add_op->inputs = {"InputA", "InputB"};
add_op->outputs = {"Input"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(add_op));
auto* transpose_op = new toco::TransposeOperator;
transpose_op->inputs = {"Input"};
transpose_op->perm = perm1;
transpose_op->outputs = {"InputTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose_op));
auto* transpose2_op = new toco::TransposeOperator;
transpose2_op->inputs = {"InputTranspose"};
transpose2_op->perm = perm2;
transpose2_op->outputs = {"InputTransposeTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose2_op));
auto* add2_op = new toco::AddOperator;
add2_op->inputs = {"InputTransposeTranspose", "InputB"};
add2_op->outputs = {"InputTransposeTransposePlusB"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(add2_op));
}
std::unique_ptr<toco::Model> model_;
};
TEST_F(RemoveSuccessiveTransposeTest, RemoveTranspose) {
CreateGraph({1, 0}, {1, 0});
toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_TRUE(modified);
ASSERT_EQ(model_->operators.size(), 2);
ASSERT_EQ(model_->operators[0]->type, toco::OperatorType::kAdd);
ASSERT_EQ(model_->operators[1]->type, toco::OperatorType::kAdd);
ASSERT_EQ(model_->operators[1]->inputs[0], model_->operators[0]->outputs[0]);
}
TEST_F(RemoveSuccessiveTransposeTest, DontRemoveNotIdentityTranspose) {
CreateGraph({0, 2, 1}, {1, 0, 2});
toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_FALSE(modified);
}
TEST_F(RemoveSuccessiveTransposeTest, DontRemoveTransposeOutputUnused) {
CreateArray("InputA", {2, 2});
CreateArray("InputB", {2, 2});
CreateArray("Input", {2, 2});
CreateArray("InputTranspose", {2, 2});
CreateArray("InputTransposeTranspose", {2, 2});
auto* add_op = new toco::AddOperator;
add_op->inputs = {"InputA", "InputB"};
add_op->outputs = {"Input"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(add_op));
auto* transpose_op = new toco::TransposeOperator;
transpose_op->inputs = {"Input"};
transpose_op->perm = {0, 2, 1};
transpose_op->outputs = {"InputTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose_op));
auto* transpose2_op = new toco::TransposeOperator;
transpose2_op->inputs = {"InputTranspose"};
transpose2_op->perm = {0, 2, 1};
transpose2_op->outputs = {"InputTransposeTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose2_op));
toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_FALSE(modified);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/remove_successive_transpose_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf5c041b-feb6-4665-97af-0b6a497e1521 | cpp | tensorflow/tensorflow | identify_l2_normalization | tensorflow/lite/toco/graph_transformations/identify_l2_normalization.cc | tensorflow/lite/toco/graph_transformations/tests/identify_l2_normalization_test.cc | #include <cmath>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
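// Matches the subgraph computing x / sqrt(sum(x^2)) (Div/Sqrt form) or
// x * rsqrt(sum(x^2)) (Mul/Rsqrt form), optionally with a small epsilon
// folded in through an Add or Maximum, and replaces it with a single fused
// L2Normalization operator.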
::tensorflow::Status IdentifyL2Normalization::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto div_it = model->operators.begin() + op_index;
const auto* div_or_mul_op = div_it->get();
OperatorType expected_op_type_producing_div_or_mul_input;
if (div_or_mul_op->type == OperatorType::kDiv) {
expected_op_type_producing_div_or_mul_input = OperatorType::kSqrt;
} else if (div_or_mul_op->type == OperatorType::kMul) {
expected_op_type_producing_div_or_mul_input = OperatorType::kRsqrt;
} else {
return absl::OkStatus();
}
CHECK_EQ(div_or_mul_op->inputs.size(), 2);
Operator* op_producing_div_or_mul_input[2] = {
GetOpWithOutput(*model, div_or_mul_op->inputs[0]),
GetOpWithOutput(*model, div_or_mul_op->inputs[1]),
};
if (!op_producing_div_or_mul_input[1] ||
op_producing_div_or_mul_input[1]->type !=
expected_op_type_producing_div_or_mul_input) {
return absl::OkStatus();
}
Operator* sqrt_or_rsqrt_op = op_producing_div_or_mul_input[1];
CHECK_EQ(sqrt_or_rsqrt_op->inputs.size(), 1);
Operator* op_producing_sqrt_or_rsqrt_input =
GetOpWithOutput(*model, sqrt_or_rsqrt_op->inputs[0]);
if (!op_producing_sqrt_or_rsqrt_input) {
return absl::OkStatus();
}
Operator* add_op = nullptr;
Operator* op_producing_add_input = nullptr;
if (op_producing_sqrt_or_rsqrt_input->type == OperatorType::kAdd ||
op_producing_sqrt_or_rsqrt_input->type == OperatorType::kMaximum) {
add_op = op_producing_sqrt_or_rsqrt_input;
bool add_can_be_removed = false;
CHECK_EQ(op_producing_sqrt_or_rsqrt_input->inputs.size(), 2);
for (int i = 0; i < 2; i++) {
const auto& input_array =
model->GetArray(op_producing_sqrt_or_rsqrt_input->inputs[i]);
if (!input_array.buffer) {
continue;
}
if (input_array.buffer->type != ArrayDataType::kFloat) {
continue;
}
if (RequiredBufferSizeForShape(input_array.shape()) != 1) {
continue;
}
const auto& input_float_data =
input_array.GetBuffer<ArrayDataType::kFloat>().data;
if (std::abs(input_float_data[0]) > 1e-3f) {
continue;
}
add_can_be_removed = true;
op_producing_add_input = GetOpWithOutput(*model, add_op->inputs[1 - i]);
break;
}
if (!add_can_be_removed) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph "
" because the operator producing the input to the square root, %s,"
", does not match the expected pattern",
LogName(*op_producing_sqrt_or_rsqrt_input));
return absl::OkStatus();
}
}
Operator* sum_op =
add_op ? op_producing_add_input : op_producing_sqrt_or_rsqrt_input;
if (sum_op->type != OperatorType::kSum) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph: "
"expected Sum op, got %s",
LogName(*sum_op));
return absl::OkStatus();
}
Operator* square_op = GetOpWithOutput(*model, sum_op->inputs[0]);
if (square_op->type != OperatorType::kSquare) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph: "
"expected Square op, got %s",
LogName(*square_op));
return absl::OkStatus();
}
CHECK_EQ(square_op->inputs.size(), 1);
if (square_op->inputs[0] != div_or_mul_op->inputs[0]) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph: %s does not "
"take the same input as the Mul/Div node",
LogName(*square_op));
return absl::OkStatus();
}
auto* l2norm_op = new L2NormalizationOperator;
l2norm_op->inputs = {div_or_mul_op->inputs[0]};
l2norm_op->outputs = div_or_mul_op->outputs;
model->operators.emplace(div_it, l2norm_op);
AddMessageF("Creating %s replacing equivalent subgraph", LogName(*l2norm_op));
DeleteOpAndArrays(model, square_op);
DeleteOpAndArrays(model, sum_op);
if (add_op) {
DeleteOpAndArrays(model, add_op);
}
DeleteOpAndArrays(model, sqrt_or_rsqrt_op);
DeleteOpAndArrays(model, div_or_mul_op);
*modified = true;
return absl::OkStatus();
}
} | #include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
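// Builds either the Div(x, Sqrt(Sum(Square(x)))) pattern or the
// Mul(x, Rsqrt(Sum(Square(x)))) pattern, runs the transformation, and checks
// that the matched operators have been removed.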
void RunIdentifyL2Normalization(const std::vector<float>& input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape,
const bool div_square = false) {
Model model;
Array& input0 = model.GetOrCreateArray("input0");
Array& output = model.GetOrCreateArray("output");
*input0.mutable_shape()->mutable_dims() = input_shape;
input0.data_type = ArrayDataType::kFloat;
input0.GetMutableBuffer<ArrayDataType::kFloat>().data = input;
*output.mutable_shape()->mutable_dims() = output_shape;
auto sq_op = new TensorFlowSquareOperator;
sq_op->inputs = {"input0"};
sq_op->outputs = {"output"};
Array& sumoutput = model.GetOrCreateArray("Sumoutput");
*sumoutput.mutable_shape()->mutable_dims() = output_shape;
auto sum_op = new TensorFlowSumOperator;
sum_op->inputs = {sq_op->outputs[0]};
sum_op->outputs = {"Sumoutput"};
if (div_square) {
Array& sqrtoutput = model.GetOrCreateArray("squarertoutput");
*sqrtoutput.mutable_shape()->mutable_dims() = output_shape;
auto sqrt_op = new TensorFlowSqrtOperator;
sqrt_op->inputs = {sum_op->outputs[0]};
sqrt_op->outputs = {"squarertoutput"};
Array& divoutput = model.GetOrCreateArray("Divoutput");
*divoutput.mutable_shape()->mutable_dims() = output_shape;
auto div_op = new DivOperator;
div_op->inputs = {"input0", sqrt_op->outputs[0]};
div_op->outputs = {"Divoutput"};
model.operators.push_back(std::unique_ptr<Operator>(div_op));
model.operators.push_back(std::unique_ptr<Operator>(sqrt_op));
model.operators.push_back(std::unique_ptr<Operator>(sum_op));
model.operators.push_back(std::unique_ptr<Operator>(sq_op));
} else {
Array& rsqoutput = model.GetOrCreateArray("Rsquareoutput");
*rsqoutput.mutable_shape()->mutable_dims() = output_shape;
auto rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs = {sum_op->outputs[0]};
rsqrt_op->outputs = {"Rsquareoutput"};
Array& muloutput = model.GetOrCreateArray("Muloutput");
*muloutput.mutable_shape()->mutable_dims() = output_shape;
auto mul_op = new MulOperator;
mul_op->inputs = {"input0", rsqrt_op->outputs[0]};
mul_op->outputs = {"Muloutput"};
model.operators.push_back(std::unique_ptr<Operator>(mul_op));
model.operators.push_back(std::unique_ptr<Operator>(rsqrt_op));
model.operators.push_back(std::unique_ptr<Operator>(sum_op));
model.operators.push_back(std::unique_ptr<Operator>(sq_op));
}
bool modified;
ASSERT_TRUE(IdentifyL2Normalization().Run(&model, 0, &modified).ok());
for (auto& op_it : model.operators) {
Operator* op = op_it.get();
if (div_square) {
EXPECT_FALSE(op->type == OperatorType::kDiv);
EXPECT_FALSE(op->type == OperatorType::kSqrt);
} else {
EXPECT_FALSE(op->type == OperatorType::kMul);
EXPECT_FALSE(op->type == OperatorType::kRsqrt);
}
EXPECT_FALSE(op->type == OperatorType::kAdd);
EXPECT_FALSE(op->type == OperatorType::kSquare);
}
}
TEST(IdentifyL2Normalization, MulRsqrtTest) {
RunIdentifyL2Normalization(
{3, 1, 4, 1, -5, 9, -2, 6, 5, 3, 5, 8},
{3, 4},
{3, 4},
false);
}
TEST(IdentifyL2Normalization, DivSqrtNormTest) {
RunIdentifyL2Normalization(
{3, 1, 4, 1, -5, 9, -2, 6, 5, 3, 5, 8},
{3, 4},
{3, 4},
true);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/identify_l2_normalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/identify_l2_normalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e1a0094-cd3f-49b9-9457-3114eaec0af7 | cpp | tensorflow/tensorflow | lstm_utils | tensorflow/compiler/mlir/lite/utils/lstm_utils.cc | tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc | #include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include <algorithm>
#include <optional>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/utils/utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
namespace mlir {
namespace TFL {
namespace {
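// Helpers for materializing splat and dense constants (int32, int64, float)
// at a given location.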
Value CreateI32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
int32_t val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, val);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateF32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
float val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto attr = DenseElementsAttr::get(type, val);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateTfF32ConstOp(OpBuilder* builder, ArrayRef<int64_t> shape, float val,
mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto ele_type = RankedTensorType::get({1}, builder->getF32Type());
auto attr = DenseElementsAttr::get(ele_type, val);
return builder->create<TF::ConstOp>(location, type, attr);
}
Value CreateI64DenseConst(OpBuilder* builder, ArrayRef<int64_t> shape,
ArrayRef<int64_t> values, mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(shape.size()),
builder->getIntegerType(64));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateI32DenseConst(OpBuilder* builder, ArrayRef<int32_t> values,
mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(values.size()),
builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateNoneValue(OpBuilder* builder, mlir::Location location) {
return builder->create<TFL::NoValueOp>(location, builder->getNoneType(),
builder->getUnitAttr());
}
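// Emits a tf.Transpose of `value_to_transpose` using the given permutation,
// deriving the result shape by permuting the dimensions of `original_type`.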
Value Transpose(OpBuilder* builder, Value value_to_transpose,
SmallVector<int32_t, 4> perm, RankedTensorType original_type,
mlir::Location location) {
auto perm_op = CreateI32DenseConst(builder, perm, location);
auto transpose_type = original_type;
auto transpose_shape =
llvm::to_vector<8>(llvm::map_range(perm, [transpose_type](int32_t dim) {
return transpose_type.getDimSize(dim);
}));
auto elem_type = transpose_type.getElementType();
auto result_type = RankedTensorType::get(transpose_shape, elem_type);
return builder->create<TF::TransposeOp>(location, result_type,
value_to_transpose, perm_op);
}
Value Transpose2D(OpBuilder* builder, Value value_to_transpose,
RankedTensorType type, mlir::Location location) {
SmallVector<int32_t, 4> perm = {1, 0};
return Transpose(builder, value_to_transpose, perm, type, location);
}
Value Reverse(OpBuilder* builder, Value value_to_reverse, int axis,
RankedTensorType type, mlir::Location location) {
auto axis_op = CreateI32SplatConst(builder, {1}, axis, location);
return builder->create<TF::ReverseV2Op>(location, type, value_to_reverse,
axis_op);
}
ArrayRef<int64_t> GetRankedTensorShape(Value value) {
return mlir::cast<RankedTensorType>(value.getType()).getShape();
}
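// Slices `input` with tf.Slice. When the requested slice falls outside the
// input bounds (e.g. a gate that is absent in the coupled input/forget
// layout), a zero-filled constant of the slice shape is returned instead.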
Value SliceRankedTensor(OpBuilder* builder, Value input,
ArrayRef<int64_t> begin_shape,
ArrayRef<int64_t> begin_values,
ArrayRef<int64_t> size_shape,
ArrayRef<int64_t> size_values,
mlir::Location location) {
ArrayRef<int64_t> input_shape = GetRankedTensorShape(input);
for (int i = 0, end = input_shape.size(); i < end; i++) {
if (begin_values[i] < 0 ||
(begin_values[i] + size_values[i] > input_shape[i])) {
return CreateF32SplatConst(builder, size_shape, 0, location);
}
}
auto slice_i2c_begin =
CreateI64DenseConst(builder, begin_shape, begin_values, location);
auto slice_i2c_size =
CreateI64DenseConst(builder, size_shape, size_values, location);
return builder->create<TF::SliceOp>(
location,
RankedTensorType::get(
size_values,
mlir::cast<RankedTensorType>(input.getType()).getElementType()),
input, slice_i2c_begin, slice_i2c_size);
}
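// Emits a tf.StridedSlice over `input` with the given begin/end/strides
// vectors and mask attributes, producing a tensor of `output_shape`.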
Value CreateStridedSliceOp(mlir::Location loc, ArrayRef<int64_t> output_shape,
Value input, ArrayRef<int32_t> begin,
ArrayRef<int32_t> end, ArrayRef<int32_t> strides,
int64_t begin_mask, int64_t end_mask,
int64_t ellipsis_mask, int64_t new_axis_mask,
int64_t shrink_axis_mask, OpBuilder* builder) {
auto output_type = RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input.getType()).getElementType());
auto begin_tensor = CreateI32DenseConst(builder, begin, loc);
auto end_tensor = CreateI32DenseConst(builder, end, loc);
auto strides_tensor = CreateI32DenseConst(builder, strides, loc);
return builder->create<TF::StridedSliceOp>(
loc, output_type, input, begin_tensor, end_tensor, strides_tensor,
builder->getI64IntegerAttr(begin_mask),
builder->getI64IntegerAttr(end_mask),
builder->getI64IntegerAttr(ellipsis_mask),
builder->getI64IntegerAttr(new_axis_mask),
builder->getI64IntegerAttr(shrink_axis_mask));
}
}
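// The combined kernel is transposed and then sliced one gate at a time. Along
// the first dimension of the transposed weight the gates are laid out as
// [cell, input, forget, output]; when the input and forget gates are coupled,
// the input-gate slices become none and the later offsets shrink by one
// n_cell block.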
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToCellGate() {
SmallVector<int64_t, 2> begin_i2c_values = {0, 0};
input2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2c_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToInputGate() {
SmallVector<int64_t, 2> begin_i2i_values = {n_cell_, 0};
input2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_i2i_values,
weight_slice_shape_,
weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToForgetGate() {
int input_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_i2f_values = {input_forget_start, 0};
input2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2f_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToOutputGate() {
int input_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_i2o_values = {input_output_start, 0};
input2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2o_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToCellGate() {
SmallVector<int64_t, 2> begin_rec2c_values = {0, n_input_};
rec2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2c_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToInputGate() {
SmallVector<int64_t, 2> begin_rec2i_values = {n_cell_, n_input_};
rec2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_rec2i_values,
weight_slice_shape_,
weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToForgetGate() {
int rec_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_rec2f_values = {rec_forget_start, n_input_};
rec2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2f_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToOutputGate() {
int rec_output_start = couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_rec2o_values = {rec_output_start, n_input_};
rec2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2o_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToCellGate() {
SmallVector<int64_t, 1> begin_bias2c_values = {0};
bias2cell_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2c_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToInputGate() {
SmallVector<int64_t, 1> begin_bias2i_values = {n_cell_};
bias2input_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2i_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToForgetGate() {
int bias_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 1> begin_bias2f_values = {bias_forget_start};
bias2forget_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2f_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToOutputGate() {
int bias_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 1> begin_bias2o_values = {bias_output_start};
bias2output_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2o_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjection() {
SmallVector<int64_t, 2> projection_slice_shape = {
1, num_cols_projection_transposed_};
SmallVector<int64_t, 2> projection_slice_size_values = {n_output_, n_cell_};
SmallVector<int64_t, 2> projection_slice_begin_values = {0, 0};
proj_weight_ =
!projection_
? none_
: SliceRankedTensor(
&builder_, projection_transposed_, projection_slice_shape,
projection_slice_begin_values, projection_slice_shape,
projection_slice_size_values, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjectionBias() {
proj_bias_ = !projection_type_
? none_
: CreateF32SplatConst(&builder_, {n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputActivationState() {
input_activation_state_ = CreateF32SplatConst(&builder_, {1, n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputCellState() {
input_cell_state_ =
CreateF32SplatConst(&builder_, {1, n_cell_}, 0, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetCellLayerNormCoefficients() {
cell_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputLayerNormCoefficients() {
input_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetForgetLayerNormCoefficients() {
forget_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetOutputLayerNormCoefficients() {
output_layer_norm_coefficients_ = none_;
}
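// Transposes the weight and projection kernels and derives every operand of
// the fused op: per-gate weights and biases, projection, initial states, and
// layer-norm coefficients.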
void ConvertLSTMCellSimpleToFusedLSTM::GenerateFusedOpOperands() {
weight_transposed_ =
Transpose2D(&builder_, weight_, weight_type_, fused_func_op_.getLoc());
projection_transposed_ = Transpose2D(&builder_, projection_, projection_type_,
fused_func_op_.getLoc());
none_ = CreateNoneValue(&builder_, fused_func_op_.getLoc());
SetWeightForInputToCellGate();
SetWeightForInputToInputGate();
SetWeightForInputToForgetGate();
SetWeightForInputToOutputGate();
SetWeightForRecurrentToCellGate();
SetWeightForRecurrentToInputGate();
SetWeightForRecurrentToForgetGate();
SetWeightForRecurrentToOutputGate();
SetBiasToCellGate();
SetBiasToInputGate();
SetBiasToForgetGate();
SetBiasToOutputGate();
SetProjection();
SetProjectionBias();
SetInputActivationState();
SetInputCellState();
SetCellLayerNormCoefficients();
SetInputLayerNormCoefficients();
SetForgetLayerNormCoefficients();
SetOutputLayerNormCoefficients();
}
void ConvertLSTMCellSimpleToFusedLSTM::UpdateFuncSignature() {
SmallVector<int64_t, 2> output_shape{1, tensorflow::kTFDynamicSize};
auto input_types = fused_func_op_.getFunctionType().getInputs();
auto output_type = tensorflow::GetTypeFromTFTensorShape(
output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
fused_func_op_.setType(mlir::FunctionType::get(fused_func_op_.getContext(),
input_types, output_type));
}
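// Rewrites the composite function's body into a single TFL::LSTMOp and casts
// the result back to the function's dynamic output shape.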
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::RewriteFunc() {
LogicalResult result = Initialize();
if (failed(result)) {
return result;
}
UpdateFuncSignature();
GenerateFusedOpOperands();
SmallVector<int64_t, 2> output_shape = {1, n_output_};
auto result_type = mlir::RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
lstm_ = builder_.create<mlir::TFL::LSTMOp>(
fused_func_op_.getLoc(), result_type, input_, input2input_, input2forget_,
input2cell_, input2output_, rec2input_, rec2forget_, rec2cell_,
rec2output_, none_,
none_,
none_, bias2input_, bias2forget_, bias2cell_,
bias2output_, proj_weight_, proj_bias_, input_activation_state_,
input_cell_state_, input_layer_norm_coefficients_,
forget_layer_norm_coefficients_, cell_layer_norm_coefficients_,
output_layer_norm_coefficients_, builder_.getStringAttr("TANH"),
builder_.getF32FloatAttr(10.0), builder_.getF32FloatAttr(0.0),
mlir::TFL::LSTMKernelTypeAttr::get(builder_.getContext(),
mlir::TFL::LSTMKernelType::FULL),
mlir::BoolAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr());
SmallVector<int64_t, 2> func_output_shape = {1, tensorflow::kTFDynamicSize};
auto func_result_type = tensorflow::GetTypeFromTFTensorShape(
func_output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
auto tensor_cast = builder_.create<mlir::tensor::CastOp>(
fused_func_op_.getLoc(), func_result_type, lstm_.getResult());
builder_.create<mlir::func::ReturnOp>(fused_func_op_.getLoc(),
tensor_cast.getResult());
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::InitializeFromFuncAttributes() {
auto attr = fused_func_op_->getAttrOfType<StringAttr>(kTFImplements);
if (!attr) {
    return fused_func_op_.emitError()
           << "Invalid function attribute: expected the " << kTFImplements
           << " attribute, but it was not found";
}
llvm::SmallVector<llvm::StringRef, 4> attr_tokens;
attr.getValue().split(attr_tokens, ",");
if (attr_tokens.empty()) {
return fused_func_op_.emitError()
<< kTFImplements << " attribute should be set";
}
if (GetCompositeOpName().str() != attr_tokens[0]) {
return fused_func_op_.emitError()
<< "Unexpected interface for the composite op. Expected: "
<< GetCompositeOpName() << " Actual: " << attr_tokens[0];
}
couple_input_forget_gates_ =
std::find(attr_tokens.begin() + 1, attr_tokens.end(),
kCoupleInputForgetGates) != attr_tokens.end();
return success();
}
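// Derives n_cell, n_input, and n_output from the weight and projection
// argument shapes and precomputes the slice shapes used for the per-gate
// operands.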
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(InitializeFromFuncAttributes())) {
return fused_func_op_.emitError()
<< "Expected function attributes were not set on the function "
"encapsulating the composite op";
}
num_gates_ = couple_input_forget_gates_ ? 3 : 4;
input_ = fused_func_op_.getArgument(0);
bias_ = fused_func_op_.getArgument(2);
weight_ = fused_func_op_.getArgument(1);
weight_type_ = mlir::cast<RankedTensorType>(weight_.getType());
if (weight_type_.getRank() != 2) {
return fused_func_op_.emitError() << "The weight tensor was not of rank 2";
}
if (weight_type_.getDimSize(1) % num_gates_ != 0) {
return fused_func_op_.emitError()
<< "Invalid dimension 1 of weight tensor, "
"should be divisible by the number of gates";
}
n_cell_ = weight_type_.getDimSize(1) / num_gates_;
projection_ = fused_func_op_.getArgument(3);
projection_type_ = mlir::cast<RankedTensorType>(projection_.getType());
if (projection_type_.getRank() != 2) {
n_output_ = n_cell_;
} else {
n_output_ = projection_type_.getDimSize(1);
}
n_input_ = weight_type_.getDimSize(0) - n_output_;
num_cols_weight_transposed_ = weight_type_.getDimSize(0);
num_cols_projection_transposed_ = projection_type_.getDimSize(0);
bias_slice_shape_ = {n_cell_};
bias_size_values_ = {n_cell_};
weight_slice_shape_ = {1, num_cols_weight_transposed_};
weight_slice_size_input_values_ = {n_cell_, n_input_};
weight_slice_size_recurrent_values_ = {n_cell_, n_output_};
return success();
}
LogicalResult ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(ConvertLSTMCellSimpleToFusedLSTM::Initialize())) {
return fused_func_op_.emitError()
<< "Specified LayerNormalizedLSTMCellSimple was not of the expected "
"interface and cannot not be converted to the fused LSTM op";
}
layer_norm_scale_ = fused_func_op_.getArgument(4);
layer_norm_scale_type_ =
mlir::cast<RankedTensorType>(layer_norm_scale_.getType());
if (layer_norm_scale_type_.getRank() != 1) {
return fused_func_op_.emitError()
<< "The layer_norm_scale tensor was not of rank 1";
}
layer_norm_slice_shape_ = {n_cell_};
layer_norm_size_values_ = {n_cell_};
return success();
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetCellLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_cell_layer_norm_values = {0};
cell_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_cell_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetInputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_input_layer_norm_values = {n_cell_};
input_layer_norm_coefficients_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(
&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_input_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetForgetLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_forget_layer_norm_values = {2 * n_cell_};
forget_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_forget_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetOutputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_output_layer_norm_values = {3 * n_cell_};
output_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_output_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
TF::ConstOp Create1DConstantOp(const std::vector<int>& value, Location loc,
OpBuilder* builder) {
auto type =
mlir::RankedTensorType::get(value.size(), builder->getIntegerType(32));
auto dense_values = mlir::DenseIntElementsAttr::get(type, value);
return builder->create<TF::ConstOp>(loc, dense_values);
}
TF::ConstOp CreateScalarConstantOp(int value, Location loc,
OpBuilder* builder) {
return builder->create<TF::ConstOp>(loc, builder->getI32IntegerAttr(value));
}
TF::ReshapeOp CreateFlattenOP(const Value& input, Location loc,
OpBuilder* builder) {
auto output_shape = Create1DConstantOp({-1}, loc, builder);
return builder->create<mlir::TF::ReshapeOp>(
loc,
input,
output_shape.getResult());
}
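// Splits `input` into `splits` equal pieces along `axis` via tf.SplitV;
// fails if the axis is out of range or the dimension is not evenly
// divisible.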
LogicalResult CreateEqualSizeSplitVOp(Value input, int axis, int splits,
Location loc, OpBuilder* builder,
Operation** result) {
auto input_type = mlir::cast<RankedTensorType>(input.getType());
SmallVector<int64_t, 4> output_shape;
  int size_of_splits = 0;
  // The split axis must be a valid dimension index of the input.
  if (axis < 0 || axis >= input_type.getRank()) return failure();
for (int i = 0; i < input_type.getRank(); ++i) {
int64_t dim = input_type.getDimSize(i);
if (i == axis) {
if (dim % splits != 0) {
return failure();
}
size_of_splits = dim / splits;
output_shape.push_back(size_of_splits);
} else {
output_shape.push_back(dim);
}
}
SmallVector<mlir::Type, 4> output_types;
for (int i = 0; i < splits; ++i) {
output_types.push_back(
mlir::RankedTensorType::get(output_shape, input_type.getElementType()));
}
auto size_of_splits_op = Create1DConstantOp(
{size_of_splits, size_of_splits, size_of_splits, size_of_splits}, loc,
builder);
auto axis_op = CreateScalarConstantOp(axis, loc, builder);
*result = builder->create<TF::SplitVOp>(loc, output_types, input,
size_of_splits_op.getResult(),
axis_op.getResult());
return success();
}
LogicalResult ConvertKerasLSTMLayer(mlir::func::FuncOp func_op,
OpBuilder* builder) {
return ConvertKerasLSTMLayer(func_op, builder, false);
}
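// Lowers a Keras LSTM composite function to a TFL::UnidirectionalSequenceLSTMOp:
// the (transposed) kernels and the bias are split into four per-gate pieces,
// tf.time_major and tf.go_backwards are honored, the recurrent weights are
// flattened for the diagonal (INDY) variant, and the last time step is
// extracted from the full-sequence output with a strided slice.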
LogicalResult ConvertKerasLSTMLayer(mlir::func::FuncOp func_op,
OpBuilder* builder, bool indy) {
Value input = func_op.getArgument(0);
Value output_init_state = func_op.getArgument(1);
Value hidden_init_state = func_op.getArgument(2);
Value weight_kernel = func_op.getArgument(3);
Value recurrent_kernel = func_op.getArgument(4);
Value bias = func_op.getArgument(5);
if (func_op.getNumResults() != 5) return failure();
auto time_major_attr = func_op->getAttrOfType<BoolAttr>("tf.time_major");
if (time_major_attr == nullptr) return failure();
bool time_majored = time_major_attr.getValue();
auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
if (!input_type) {
func_op.emitError() << "Input type is not a ranked tensor type";
return failure();
}
auto final_inputs = input;
auto final_input_type = input_type;
auto go_backwards_attr = func_op->getAttrOfType<BoolAttr>("tf.go_backwards");
if (go_backwards_attr != nullptr && go_backwards_attr.getValue()) {
int time_dim = time_majored ? 0 : 1;
final_inputs = Reverse(builder, final_inputs, time_dim, final_input_type,
func_op.getLoc());
}
int64_t batch = time_majored ? final_input_type.getDimSize(1)
: final_input_type.getDimSize(0);
int64_t time = time_majored ? final_input_type.getDimSize(0)
: final_input_type.getDimSize(1);
RankedTensorType weight_type =
mlir::cast<RankedTensorType>(weight_kernel.getType());
if (weight_type.getRank() != 2)
return func_op.emitError() << "The weight should be rank of 2";
Value transposed_weight_kernel =
Transpose2D(builder, weight_kernel, weight_type, func_op.getLoc());
RankedTensorType recurrent_kernel_type =
mlir::cast<RankedTensorType>(recurrent_kernel.getType());
const int64_t n_output = recurrent_kernel_type.getDimSize(0);
Value transpose_recurrent_kernel = Transpose2D(
builder, recurrent_kernel, recurrent_kernel_type, func_op.getLoc());
const int splits = 4;
Operation* weights_array;
if (failed(CreateEqualSizeSplitVOp(transposed_weight_kernel, 0, splits,
func_op.getLoc(), builder,
&weights_array)))
return failure();
Operation* recurrent_weights_array;
if (failed(CreateEqualSizeSplitVOp(transpose_recurrent_kernel, 0, splits,
func_op.getLoc(), builder,
&recurrent_weights_array)))
return failure();
Value recurrent_to_input_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(0),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(0);
Value recurrent_to_forget_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(1),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(1);
Value recurrent_to_cell_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(2),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(2);
Value recurrent_to_output_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(3),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(3);
Operation* bias_array;
if (failed(CreateEqualSizeSplitVOp(bias, 0, splits, func_op.getLoc(), builder,
&bias_array)))
return failure();
SmallVector<int64_t, 3> output_shape;
if (time_majored) {
output_shape = {time, batch, n_output};
} else {
output_shape = {batch, time, n_output};
}
auto result_type = mlir::RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(final_inputs.getType()).getElementType());
Value none = CreateNoneValue(builder, func_op.getLoc());
auto lstm = builder->create<mlir::TFL::UnidirectionalSequenceLSTMOp>(
func_op.getLoc(), result_type, final_inputs,
weights_array->getResult(0),
weights_array->getResult(1),
weights_array->getResult(2),
weights_array->getResult(3),
recurrent_to_input_weights,
recurrent_to_forget_weights,
recurrent_to_cell_weights,
recurrent_to_output_weights,
none,
none,
none,
bias_array->getResult(0),
bias_array->getResult(1),
bias_array->getResult(2),
bias_array->getResult(3),
none,
none,
output_init_state,
hidden_init_state,
none,
none,
none,
none,
builder->getStringAttr("TANH"),
builder->getF32FloatAttr(10.0),
builder->getF32FloatAttr(0.0),
builder->getBoolAttr(time_majored),
mlir::BoolAttr(),
builder->getBoolAttr(indy),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr());
auto final_output_full_sequences = lstm.getResult();
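  // Keras LSTM also returns the last time step; carve it out of the full
  // sequence with a StridedSlice over the time dimension.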
SmallVector<int64_t, 2> last_output_shape({batch, n_output});
SmallVector<int32_t, 3> end({0, 0, 0});
SmallVector<int32_t, 3> strides({1, 1, 1});
SmallVector<int32_t, 3> begin;
int64_t new_axis_mask = 0;
int64_t ellipsis_mask = 0;
int64_t begin_mask;
int64_t end_mask;
int64_t shrink_axis_mask;
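  // begin/end masks keep the full range of the batch and feature dimensions;
  // the shrink bit removes the sliced (size-1) time dimension.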
if (time_majored) {
begin_mask = 6;
end_mask = 6;
shrink_axis_mask = 1;
begin = {-1, 0, 0};
} else {
begin_mask = 5;
end_mask = 5;
shrink_axis_mask = 2;
begin = {0, -1, 0};
}
auto last_output = CreateStridedSliceOp(
func_op.getLoc(), last_output_shape, final_output_full_sequences, begin,
end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask,
shrink_axis_mask, builder);
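  // The composite function exposes five results: last output, full output
  // sequence, and three state outputs that are filled with zero constants.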
SmallVector<Value, 5> outputs;
SmallVector<Type, 5> output_types;
outputs.push_back(last_output);
output_types.push_back(last_output.getType());
outputs.push_back(final_output_full_sequences);
output_types.push_back(final_output_full_sequences.getType());
for (int i = 2; i < 5; ++i) {
auto result_type =
mlir::dyn_cast<RankedTensorType>(func_op.getResultTypes()[i]);
outputs.push_back(CreatTfF32ConstOp(builder, result_type.getShape(), 0.0f,
func_op.getLoc()));
output_types.push_back(result_type);
}
func_op.setType(mlir::FunctionType::get(func_op.getContext(),
func_op.getFunctionType().getInputs(),
output_types));
builder->create<mlir::func::ReturnOp>(func_op.getLoc(), outputs);
return success();
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
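// Builds a stub composite function whose kTFImplements attribute matches the
// LSTM cell patterns; `ln` selects layer normalization and `cifg` couples
// the input and forget gates.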
func::FuncOp createLstmCompositeFunc(mlir::Builder* builder, bool ln,
bool cifg) {
SmallVector<int64_t, 2> input_shape{1, 2};
SmallVector<int64_t, 2> weight_shape{3, 12};
SmallVector<int64_t, 1> bias_shape{2};
SmallVector<int64_t, 2> projection_shape{1, 2};
SmallVector<int64_t, 1> layer_norm_scale{4};
SmallVector<int64_t, 2> output_shape{1, 2};
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto weight_type = RankedTensorType::get(weight_shape, builder->getF32Type());
auto bias_type = RankedTensorType::get(bias_shape, builder->getF32Type());
auto projection_type =
RankedTensorType::get(projection_shape, builder->getF32Type());
auto layer_norm_scale_type =
RankedTensorType::get(layer_norm_scale, builder->getF32Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type,
projection_type,
layer_norm_scale_type};
auto func_type = builder->getFunctionType(input_types, output_type);
auto func = func::FuncOp::create(
mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
func_type, {});
func.addEntryBlock();
std::vector<std::string> attributes;
if (ln) {
attributes.push_back(kLayerNormalizedLstmCellSimple);
} else {
attributes.push_back(kLstmCellSimple);
}
if (cifg) {
attributes.push_back(kCoupleInputForgetGates);
}
mlir::StringAttr attr_values =
builder->getStringAttr(llvm::join(attributes, ","));
func->setAttr(kTFImplements, attr_values);
return func;
}
class LstmUtilsTest : public ::testing::Test {
protected:
LstmUtilsTest() {}
void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
context_->loadDialect<arith::ArithDialect, mlir::func::FuncDialect,
tensor::TensorDialect, mlir::TF::TensorFlowDialect,
TensorFlowLiteDialect>();
builder_ = std::make_unique<mlir::Builder>(context_.get());
    fused_lstm_func_ =
        createLstmCompositeFunc(builder_.get(), /*ln=*/false, /*cifg=*/false);
    fused_lstm_func_cifg_ =
        createLstmCompositeFunc(builder_.get(), /*ln=*/false, /*cifg=*/true);
    fused_ln_lstm_func_ =
        createLstmCompositeFunc(builder_.get(), /*ln=*/true, /*cifg=*/false);
}
void TearDown() override {
fused_lstm_func_.erase();
fused_lstm_func_cifg_.erase();
fused_ln_lstm_func_.erase();
builder_.reset();
}
func::FuncOp fused_lstm_func_;
func::FuncOp fused_lstm_func_cifg_;
func::FuncOp fused_ln_lstm_func_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
TEST_F(LstmUtilsTest, ConvertLSTMCellSimple) {
mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_lstm_func_.dump();
EXPECT_EQ(
fused_lstm_func_->getAttrOfType<StringAttr>(kTFImplements).getValue(),
convert.GetCompositeOpName());
EXPECT_EQ(fused_lstm_func_.getNumArguments(), 5);
EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1);
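  // The second op in the entry block should transpose the 3x12 weight kernel
  // into 12x3.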
auto transpose_op = fused_lstm_func_.getBody().front().begin();
transpose_op++;
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getOperand(0).getType())
.getDimSize(0),
3);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getOperand(0).getType())
.getDimSize(1),
12);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getResult(0).getType())
.getDimSize(0),
12);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getResult(0).getType())
.getDimSize(1),
3);
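  // Walk backwards from the terminator: the fused LSTM sits two ops before
  // the return.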
auto it = fused_lstm_func_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(1).getType()));
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(20).getType()));
EXPECT_TRUE(mlir::cast<RankedTensorType>(it->getOperand(17).getType())
.getElementType()
.isF32());
EXPECT_TRUE(
mlir::cast<ElementsAttr>(mlir::cast<mlir::arith::ConstantOp>(
it->getOpOperand(15).get().getDefiningOp())
.getValue())
.getValues<FloatAttr>()[0]
.getValue()
.isExactlyValue(0.0f));
EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1);
auto output_types = fused_lstm_func_.getFunctionType().getResults();
SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic};
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(),
output_shape.size());
for (int i = 0; i < output_shape.size(); i++) {
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getDimSize(i),
output_shape[i]);
}
}
TEST_F(LstmUtilsTest, ConvertLSTMCellSimpleToFusedLSTMCoupleInputForget) {
mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_cifg_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_lstm_func_cifg_.dump();
llvm::SmallVector<std::string, 2> attributes{kLstmCellSimple,
kCoupleInputForgetGates};
EXPECT_EQ(fused_lstm_func_cifg_->getAttrOfType<StringAttr>(kTFImplements)
.getValue(),
llvm::join(attributes, ","));
auto it = fused_lstm_func_cifg_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(1).getType()));
}
TEST_F(LstmUtilsTest, ConvertLayerNormLSTMCellSimpleToFusedLSTM) {
mlir::TFL::ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM convert(
fused_ln_lstm_func_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_ln_lstm_func_.dump();
EXPECT_EQ(
fused_ln_lstm_func_->getAttrOfType<StringAttr>(kTFImplements).getValue(),
convert.GetCompositeOpName());
EXPECT_EQ(fused_ln_lstm_func_.getNumArguments(), 5);
EXPECT_EQ(fused_ln_lstm_func_.getFunctionType().getNumResults(), 1);
auto it = fused_ln_lstm_func_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(1).getType()));
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(20).getType()));
EXPECT_EQ(mlir::cast<RankedTensorType>(it->getOperand(20).getType())
.getShape()
.size(),
1);
EXPECT_EQ(
mlir::cast<RankedTensorType>(it->getOperand(20).getType()).getDimSize(0),
3);
EXPECT_EQ(fused_ln_lstm_func_.getFunctionType().getNumResults(), 1);
auto output_types = fused_ln_lstm_func_.getFunctionType().getResults();
SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic};
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(),
output_shape.size());
for (int i = 0; i < output_shape.size(); i++) {
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getDimSize(i),
output_shape[i]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/lstm_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a62e24f-68c6-41a6-a04b-7c1619ac4aad | cpp | tensorflow/tensorflow | resolve_constant_unary | tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc | tensorflow/lite/toco/graph_transformations/tests/resolve_constant_unary_test.cc | #include <string.h>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
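// Reduces `input` over `axes` using `reducer`, writing the reduced values to
// `output` and the resulting shape to `check_output_shape`.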
void ReduceGeneric(bool keep_dims, const std::vector<int>& axes,
const Shape& input_shape, const std::vector<float>& input,
Shape* check_output_shape, std::vector<float>* output,
const std::function<float(float, float)>& reducer) {
if (!IsNonEmpty(input_shape)) {
return;
}
Shape output_shape = input_shape;
std::vector<int> reduction_mask(input_shape.dimensions_count(), 1);
for (const auto& axis : axes) {
CHECK_GE(axis, 0);
CHECK_LT(axis, input_shape.dimensions_count());
reduction_mask[axis] = 0;
output_shape.mutable_dims()->at(axis) = 1;
}
std::vector<int> output_indices(input_shape.dimensions_count());
for (size_t input_offset = 0; input_offset < input.size(); ++input_offset) {
std::vector<int> input_indices = ReverseOffset(input_shape, input_offset);
for (int i = 0; i < input_shape.dimensions_count(); ++i) {
output_indices[i] = input_indices[i] * reduction_mask[i];
}
int output_offset = Offset(output_shape, output_indices);
if (input_indices == output_indices) {
output->at(output_offset) = input.at(input_offset);
} else {
output->at(output_offset) =
reducer(output->at(output_offset), input.at(input_offset));
}
}
if (!keep_dims) {
std::vector<int> new_dims;
for (int i = 0; i < output_shape.dimensions_count(); ++i) {
if (reduction_mask[i]) {
new_dims.push_back(output_shape.dims(i));
}
}
output_shape.mutable_dims()->swap(new_dims);
}
*check_output_shape = output_shape;
}
}
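// Copies min/max from the first input onto the output array if the output
// has none yet; returns true if something changed.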
bool CopyMinMaxFromFirstInput(const Operator& op, Model* model) {
auto& output_array = model->GetArray(op.outputs[0]);
if (output_array.minmax) {
return false;
}
const auto& input_array = model->GetArray(op.inputs[0]);
if (!input_array.minmax) {
return false;
}
const auto& input_minmax = input_array.GetMinMax();
CHECK(!output_array.minmax);
auto& output_minmax = output_array.GetOrCreateMinMax();
output_minmax.min = input_minmax.min;
output_minmax.max = input_minmax.max;
return true;
}
::tensorflow::Status ResolveConstantUnaryOperator::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto unary_it = model->operators.begin() + op_index;
const auto* unary_op = unary_it->get();
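  // Only this closed set of elementwise and reduction ops is constant-folded
  // here; anything else is left untouched.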
switch (unary_op->type) {
case OperatorType::kCast:
case OperatorType::kExp:
case OperatorType::kLog:
case OperatorType::kNeg:
case OperatorType::kRsqrt:
case OperatorType::kSqrt:
case OperatorType::kSquare:
case OperatorType::kSum:
case OperatorType::kReduceMin:
case OperatorType::kReduceMax:
case OperatorType::kReshape:
case OperatorType::kRelu6:
case OperatorType::kRelu1:
case OperatorType::kRelu:
break;
default:
return absl::OkStatus();
}
if (!IsConstantParameterArray(*model, unary_op->inputs[0])) {
return absl::OkStatus();
}
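  // Arrays feeding RNN back-edges are stateful and must not be folded away.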
for (const auto& rnn_state : model->flags.rnn_states()) {
if (unary_op->inputs[0] == rnn_state.back_edge_source_array()) {
return absl::OkStatus();
}
if (unary_op->inputs[0] == rnn_state.state_array()) {
return absl::OkStatus();
}
}
auto& output_array = model->GetArray(unary_op->outputs[0]);
if (!output_array.has_shape()) {
return absl::OkStatus();
}
if (unary_op->fused_activation_function !=
FusedActivationFunctionType::kNone) {
AddMessageF(
"Not resolving constant %s "
" because it has a fused activation function",
LogName(*unary_op));
return absl::OkStatus();
}
if (unary_op->type == OperatorType::kReshape) {
CopyMinMaxFromFirstInput(*unary_op, model);
}
const auto& input_array = model->GetArray(unary_op->inputs[0]);
CHECK(input_array.buffer);
std::vector<DataType<ArrayDataType::kFloat>> const* input_float_data =
nullptr;
if (unary_op->type == OperatorType::kCast) {
CastOperator const* cast_op = static_cast<CastOperator const*>(unary_op);
if (cast_op->dst_data_type != ArrayDataType::kFloat) {
AddMessageF(
"Not resolving constant %s because we currently only support casting "
"to float",
LogName(*unary_op));
return absl::OkStatus();
}
  if (cast_op->src_data_type != input_array.buffer->type) {
    AddMessageF(
        "Not resolving constant %s because cast op source type does not "
        "match input type",
        LogName(*unary_op));
    // Bail out to match the message above: folding would otherwise proceed
    // using the buffer's actual type rather than the declared source type.
    return absl::OkStatus();
  }
} else {
if (input_array.buffer->type != ArrayDataType::kFloat) {
return absl::OkStatus();
}
input_float_data = &(input_array.GetBuffer<ArrayDataType::kFloat>().data);
}
const Shape& output_shape = output_array.shape();
const int output_dims_count = output_shape.dimensions_count();
const int output_buffer_size = RequiredBufferSizeForShape(output_shape);
auto& output_float_data =
output_array.GetMutableBuffer<ArrayDataType::kFloat>().data;
output_float_data.resize(output_buffer_size);
const Shape& input_shape = input_array.shape();
const int input_buffer_size = RequiredBufferSizeForShape(input_shape);
if (unary_op->type == OperatorType::kCast) {
for (int i = 0; i < output_buffer_size; i++) {
float outval = 0.0f;
if (input_array.buffer->type == ArrayDataType::kFloat) {
outval = static_cast<float>(
input_array.GetBuffer<ArrayDataType::kFloat>().data[i]);
} else if (input_array.buffer->type == ArrayDataType::kUint8) {
outval = static_cast<float>(
input_array.GetBuffer<ArrayDataType::kUint8>().data[i]);
} else if (input_array.buffer->type == ArrayDataType::kInt32) {
outval = static_cast<float>(
input_array.GetBuffer<ArrayDataType::kInt32>().data[i]);
} else if (input_array.buffer->type == ArrayDataType::kInt64) {
outval = static_cast<float>(
input_array.GetBuffer<ArrayDataType::kInt64>().data[i]);
} else if (input_array.buffer->type == ArrayDataType::kBool) {
outval = static_cast<float>(
input_array.GetBuffer<ArrayDataType::kBool>().data[i]);
} else {
LOG(FATAL) << "Unsupported cast op input type";
}
output_float_data[i] = outval;
}
} else if (unary_op->type == OperatorType::kReshape) {
CHECK(input_buffer_size == output_buffer_size);
output_float_data = *input_float_data;
} else if (unary_op->type == OperatorType::kSum) {
CHECK_EQ(unary_op->inputs.size(), 2) << "Sum needs 2 inputs";
if (!IsConstantParameterArray(*model, unary_op->inputs[1])) {
AddMessageF("Axis input is non-constant");
return absl::OkStatus();
}
auto& axis_array = model->GetArray(unary_op->inputs[1]);
CHECK(axis_array.data_type == ArrayDataType::kInt32);
auto sum_op = static_cast<const TensorFlowSumOperator*>(unary_op);
Shape check_output_shape;
ReduceGeneric(
sum_op->keep_dims, axis_array.GetBuffer<ArrayDataType::kInt32>().data,
input_shape, *input_float_data, &check_output_shape, &output_float_data,
[](float existing, float current) -> float {
return existing + current;
});
CHECK(check_output_shape == output_shape)
<< "Shape propagation output shape doesn't match output shape from op";
} else if (unary_op->type == OperatorType::kReduceMin) {
for (int i = 0; i < output_dims_count; i++) {
CHECK_EQ(output_shape.dims(i), 1);
}
float min = (*input_float_data)[0];
for (int i = 0; i < input_buffer_size; i++) {
min = std::min(min, (*input_float_data)[i]);
}
output_float_data[0] = min;
} else if (unary_op->type == OperatorType::kReduceMax) {
for (int i = 0; i < output_dims_count; i++) {
CHECK_EQ(output_shape.dims(i), 1);
}
float max = (*input_float_data)[0];
for (int i = 0; i < input_buffer_size; i++) {
max = std::max(max, (*input_float_data)[i]);
}
output_float_data[0] = max;
} else if (unary_op->type == OperatorType::kExp ||
unary_op->type == OperatorType::kNeg ||
unary_op->type == OperatorType::kLog ||
unary_op->type == OperatorType::kRsqrt ||
unary_op->type == OperatorType::kSqrt ||
unary_op->type == OperatorType::kSquare) {
for (int i = 0; i < output_dims_count; i++) {
CHECK_EQ(output_shape.dims(i), input_shape.dims(i));
}
for (int i = 0; i < output_buffer_size; i++) {
const float val = (*input_float_data)[i];
float outval = 0.f;
if (unary_op->type == OperatorType::kExp) {
outval = std::exp(val);
} else if (unary_op->type == OperatorType::kNeg) {
outval = -val;
} else if (unary_op->type == OperatorType::kLog) {
outval = std::log(val);
} else if (unary_op->type == OperatorType::kRsqrt) {
outval = 1.0f / std::sqrt(val);
} else if (unary_op->type == OperatorType::kSqrt) {
outval = std::sqrt(val);
} else if (unary_op->type == OperatorType::kSquare) {
outval = val * val;
} else {
LOG(FATAL) << "should not get here.";
}
output_float_data[i] = outval;
}
} else if (unary_op->type == OperatorType::kRelu6 ||
unary_op->type == OperatorType::kRelu1 ||
unary_op->type == OperatorType::kRelu) {
for (int i = 0; i < output_buffer_size; ++i) {
const float value = (*input_float_data)[i];
float new_value = 0.0f;
switch (unary_op->type) {
case OperatorType::kRelu: {
static constexpr float kLower = 0;
new_value = value < kLower ? kLower : value;
break;
}
case OperatorType::kRelu1: {
static constexpr float kUpper = 1;
static constexpr float kLower = -1;
new_value = value > kUpper ? kUpper : value < kLower ? kLower : value;
break;
}
case OperatorType::kRelu6: {
static constexpr float kUpper = 6;
static constexpr float kLower = 0;
new_value = value > kUpper ? kUpper : value < kLower ? kLower : value;
break;
}
default:
LOG(FATAL) << "Unsupported activation function "
<< LogName(*unary_op);
return absl::OkStatus();
}
output_float_data[i] = new_value;
}
} else {
LOG(FATAL) << "should not get here.";
}
DeleteOpAndArrays(model, unary_op);
*modified = true;
return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
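// Builds a model containing a single Sum op over constant inputs, runs the
// transformation, and checks the folded output buffer and shape.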
void RunResolveSum(const std::vector<float>& input,
const std::vector<int>& input_shape,
const std::vector<int>& axis,
const std::vector<int>& output_shape,
const std::vector<float>& expected_output) {
Model model;
const std::string output_name("output");
model.flags.add_output_arrays(output_name);
Array& input0 = model.GetOrCreateArray("input0");
Array& input1 = model.GetOrCreateArray("input1");
Array& output = model.GetOrCreateArray(output_name);
*input0.mutable_shape()->mutable_dims() = input_shape;
input0.data_type = ArrayDataType::kFloat;
input0.GetMutableBuffer<ArrayDataType::kFloat>().data = input;
*input1.mutable_shape()->mutable_dims() = {static_cast<int>(axis.size())};
input1.GetMutableBuffer<ArrayDataType::kInt32>().data = axis;
input1.data_type = ArrayDataType::kInt32;
*output.mutable_shape()->mutable_dims() = output_shape;
auto sum_op = std::make_unique<TensorFlowSumOperator>();
sum_op->keep_dims = true;
sum_op->inputs = {"input0", "input1"};
sum_op->outputs = {output_name};
model.operators.push_back(std::move(sum_op));
bool modified;
ASSERT_TRUE(ResolveConstantUnaryOperator().Run(&model, 0, &modified).ok());
EXPECT_EQ(model.GetArray("output").GetBuffer<ArrayDataType::kFloat>().data,
expected_output);
EXPECT_EQ(model.GetArray("output").shape().dims(), output_shape);
}
TEST(ResolveConstantUnary, ResolveSumAxis0_2D) {
RunResolveSum(
      /*input=*/
      {3, 1, 4, 1,
       5, 9, 2, 6,
       5, 3, 5, 8},
      /*input_shape=*/{3, 4},
      /*axis=*/{0},
      /*output_shape=*/{1, 4},
      /*expected_output=*/{13, 13, 11, 15});
}
TEST(ResolveConstantUnary, ResolveSumAxis1_2D) {
RunResolveSum(
      /*input=*/
      {3, 1, 4, 1,
       5, 9, 2, 6,
       5, 3, 5, 8},
      /*input_shape=*/{3, 4},
      /*axis=*/{1},
      /*output_shape=*/{3, 1},
      /*expected_output=*/{9, 22, 21});
}
TEST(ResolveConstantUnary, ResolveSumAxis0_2_3D) {
RunResolveSum(
      /*input=*/
      {  0,   1,   2,
         3,  10,  11,
        12,  13,  20,
        21,  22,  23,
       100, 101, 102,
       103, 110, 111,
       112, 113, 120,
       121, 122, 123,
       200, 201, 202,
       203, 210, 211,
       212, 213, 220,
       221, 222, 223},
      /*input_shape=*/{3, 4, 3},
      /*axis=*/{0, 2},
      /*output_shape=*/{1, 4, 1},
      /*expected_output=*/{909, 972, 1035, 1098});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/resolve_constant_unary_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2503dbb1-5ed5-4d36-981b-77edba1e0db3 | cpp | tensorflow/tensorflow | fuse_binary_into_following_affine | tensorflow/lite/toco/graph_transformations/fuse_binary_into_following_affine.cc | tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_following_affine_test.cc | #include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
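// Folds a constant scalar Add/Sub that feeds an affine op into that op's
// bias: adding `s` to every input adds s * sum(weights per channel) to each
// output channel.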
void FuseAddOrSubParamsIntoFollowingAffine(Model* model, Operator* following_op,
const Operator* add_or_sub_op,
int index_of_constant_input) {
CHECK(add_or_sub_op->type == OperatorType::kAdd ||
add_or_sub_op->type == OperatorType::kSub);
CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
CHECK(add_or_sub_op->type != OperatorType::kSub ||
index_of_constant_input == 1);
if (following_op->inputs.size() < 3) {
LOG(FATAL) << "Missing bias parameter";
}
const auto& weights = model->GetArray(following_op->inputs[1]);
auto& bias = model->GetArray(following_op->inputs[2]);
bias.minmax = nullptr;
const auto& operand =
model->GetArray(add_or_sub_op->inputs[index_of_constant_input]);
CHECK_EQ(RequiredBufferSizeForShape(operand.shape()), 1);
const float scalar_operand =
operand.GetBuffer<ArrayDataType::kFloat>().data[0];
float add_scalar_operand = 0.f;
if (add_or_sub_op->type == OperatorType::kAdd) {
add_scalar_operand = scalar_operand;
} else if (add_or_sub_op->type == OperatorType::kSub &&
index_of_constant_input == 1) {
add_scalar_operand = -scalar_operand;
} else {
LOG(FATAL) << "Should not get here";
}
const Shape& weights_shape = weights.shape();
const Shape& bias_shape = bias.shape();
const auto& weights_buffer = weights.GetBuffer<ArrayDataType::kFloat>();
const float* const weights_data = weights_buffer.data.data();
auto& bias_buffer = bias.GetMutableBuffer<ArrayDataType::kFloat>();
float* const bias_data = bias_buffer.data.data();
if (following_op->type == OperatorType::kConv ||
following_op->type == OperatorType::kFullyConnected) {
const int output_depth = weights_shape.dims(0);
CHECK_EQ(output_depth, bias_shape.dims(bias_shape.dimensions_count() - 1));
const int weights_size = RequiredBufferSizeForShape(weights_shape);
const int weights_per_depth = weights_size / output_depth;
CHECK_EQ(weights_size, weights_per_depth * output_depth);
for (int d = 0; d < output_depth; d++) {
float accumulation = 0;
for (int i = 0; i < weights_per_depth; i++) {
accumulation +=
add_scalar_operand * weights_data[d * weights_per_depth + i];
}
bias_data[d] += accumulation;
}
} else if (following_op->type == OperatorType::kDepthwiseConv) {
const int output_depth =
weights_shape.dims(weights_shape.dimensions_count() - 1);
const int weights_size = RequiredBufferSizeForShape(weights_shape);
const int weights_per_depth = weights_size / output_depth;
CHECK_EQ(weights_size, weights_per_depth * output_depth);
for (int c = 0; c < output_depth; c++) {
float accumulation = 0;
for (int k = 0; k < weights_per_depth; k++) {
accumulation += add_scalar_operand * weights_data[k * output_depth + c];
}
bias_data[c] += accumulation;
}
} else {
LOG(FATAL) << "Should not get here.";
}
}
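// Folds a constant scalar Mul/Div that feeds an affine op into its weights,
// since (x * s) * W == x * (s * W).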
void FuseMulOrDivParamsIntoFollowingAffine(Model* model, Operator* following_op,
const Operator* mul_or_div_op,
int index_of_constant_input) {
CHECK(mul_or_div_op->type == OperatorType::kMul ||
mul_or_div_op->type == OperatorType::kDiv);
CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
CHECK(mul_or_div_op->type != OperatorType::kDiv ||
index_of_constant_input == 1);
const auto& weights_name = following_op->inputs[1];
const auto& bias_name = following_op->inputs[2];
auto& weights = model->GetArray(weights_name);
DropMinMax(model, weights_name);
DropMinMax(model, bias_name);
const auto& operand =
model->GetArray(mul_or_div_op->inputs[index_of_constant_input]);
CHECK_EQ(RequiredBufferSizeForShape(operand.shape()), 1);
const float scalar_operand =
operand.GetBuffer<ArrayDataType::kFloat>().data[0];
float* weights_data =
weights.GetMutableBuffer<ArrayDataType::kFloat>().data.data();
const int weights_size = RequiredBufferSizeForShape(weights.shape());
for (int i = 0; i < weights_size; i++) {
if (mul_or_div_op->type == OperatorType::kMul) {
weights_data[i] *= scalar_operand;
} else if (mul_or_div_op->type == OperatorType::kDiv) {
weights_data[i] /= scalar_operand;
} else {
LOG(FATAL) << "Should not get here";
}
}
}
}
::tensorflow::Status FuseBinaryIntoFollowingAffine::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto binary_it = model->operators.begin() + op_index;
auto* binary_op = binary_it->get();
if (binary_op->type != OperatorType::kAdd &&
binary_op->type != OperatorType::kMul &&
binary_op->type != OperatorType::kSub &&
binary_op->type != OperatorType::kDiv) {
return absl::OkStatus();
}
CHECK_EQ(binary_op->inputs.size(), 2);
const bool is_input_constant[2] = {
IsConstantParameterArray(*model, binary_op->inputs[0]),
IsConstantParameterArray(*model, binary_op->inputs[1]),
};
if (!is_input_constant[0] && !is_input_constant[1]) {
return absl::OkStatus();
}
if (is_input_constant[0] && is_input_constant[1]) {
return absl::OkStatus();
}
const int index_of_constant_input = is_input_constant[0] ? 0 : 1;
const int index_of_variable_input = is_input_constant[0] ? 1 : 0;
CHECK(is_input_constant[index_of_constant_input]);
CHECK(!is_input_constant[index_of_variable_input]);
if (binary_op->type == OperatorType::kDiv) {
if (index_of_constant_input != 1) {
AddMessageF("Not fusing %s because the denominator is not constant",
LogName(*binary_op));
return absl::OkStatus();
}
}
const auto& operand_shape =
model->GetArray(binary_op->inputs[index_of_constant_input]).shape();
for (const auto& dim : operand_shape.dims()) {
if (dim > 1) {
AddMessageF(
"Not fusing %s into the following affine op, because we only know "
"how to do so when the constant operand is a scalar",
LogName(*binary_op));
return absl::OkStatus();
}
}
if (binary_op->fused_activation_function !=
FusedActivationFunctionType::kNone) {
AddMessageF("Not fusing %s because it has a fused activation function",
LogName(*binary_op));
return absl::OkStatus();
}
if (CountOpsWithInput(*model, binary_op->outputs[0]) != 1) {
AddMessageF("Not fusing %s because it's consumed by multiple ops",
LogName(*binary_op));
return absl::OkStatus();
}
Operator* following_op = GetOpWithInput(*model, binary_op->outputs[0]);
if (!following_op) {
AddMessageF("Not fusing %s because it is not consumed by any op",
LogName(*binary_op));
return absl::OkStatus();
}
if (following_op->type != OperatorType::kConv &&
following_op->type != OperatorType::kFullyConnected &&
following_op->type != OperatorType::kDepthwiseConv) {
AddMessageF(
"Not fusing %s because the following %s is not of one of the supported "
"types",
LogName(*binary_op), LogName(*following_op));
return absl::OkStatus();
}
if (following_op->inputs.size() < 3) {
AddMessageF(
"Not fusing %s because the following %s does not have a bias vector",
        LogName(*binary_op), LogName(*following_op));
return absl::OkStatus();
}
const auto& weights = model->GetArray(following_op->inputs[1]);
const auto& bias = model->GetArray(following_op->inputs[2]);
if (!weights.buffer || !bias.buffer) {
AddMessageF(
"Not fusing %s because the following %s has non-constant weights or "
"bias arrays",
LogName(*binary_op), LogName(*following_op));
return absl::OkStatus();
}
if (binary_op->type == OperatorType::kAdd ||
binary_op->type == OperatorType::kSub) {
if (following_op->type == OperatorType::kConv) {
if (static_cast<ConvOperator*>(following_op)->padding.type !=
PaddingType::kValid) {
AddMessageF(
"Not fusing %s because the following %s does not use VALID padding",
LogName(*binary_op), LogName(*following_op));
return absl::OkStatus();
}
}
if (following_op->type == OperatorType::kDepthwiseConv) {
if (static_cast<DepthwiseConvOperator*>(following_op)->padding.type !=
PaddingType::kValid) {
AddMessageF(
"Not fusing %s because the following %s does not use VALID padding",
LogName(*binary_op), LogName(*following_op));
return absl::OkStatus();
}
}
FuseAddOrSubParamsIntoFollowingAffine(model, following_op, binary_op,
index_of_constant_input);
} else if (binary_op->type == OperatorType::kMul ||
binary_op->type == OperatorType::kDiv) {
FuseMulOrDivParamsIntoFollowingAffine(model, following_op, binary_op,
index_of_constant_input);
} else {
LOG(FATAL) << "should not get here";
}
AddMessageF("Fusing %s into the following %s", LogName(*binary_op),
LogName(*following_op));
model->EraseArray(binary_op->outputs[0]);
following_op->inputs[0] = binary_op->inputs[index_of_variable_input];
DeleteOpAndArrays(model, binary_op);
*modified = true;
return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
using ::testing::ElementsAreArray;
namespace {
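// Builds per-element FloatNear matchers so float vectors can be compared
// with a tolerance via ElementsAreArray.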
std::vector<testing::Matcher<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5) {
std::vector<testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
matchers.emplace_back(testing::FloatNear(v, max_abs_error));
}
return matchers;
}
}
class FuseBinaryIntoFollowingAffineTest : public ::testing::Test {
protected:
FuseBinaryIntoFollowingAffineTest() {}
void SetUp() override { model_ = std::make_unique<Model>(); }
void CreateArray(const std::string& name, const std::vector<int>& shape) {
Array& array = model_->GetOrCreateArray(name);
array.data_type = ArrayDataType::kFloat;
Shape* array_shape = array.mutable_shape();
*(array_shape->mutable_dims()) = shape;
}
void CreateConstantArray(const std::string& name,
const std::vector<int>& shape,
const std::vector<float>& data) {
CreateArray(name, shape);
Array& array = model_->GetOrCreateArray(name);
auto& array_buffer = array.GetMutableBuffer<ArrayDataType::kFloat>();
int bufsize = 1;
for (int dim : shape) {
bufsize *= dim;
}
array_buffer.data.resize(bufsize);
float* buf_ptr = array_buffer.data.data();
for (int i = 0; i < bufsize; ++i) {
buf_ptr[i] = data[i];
}
}
std::unique_ptr<Model> model_;
};
TEST_F(FuseBinaryIntoFollowingAffineTest, FuseMulIntoFullyConnected) {
{
CreateArray("Input", {2, 2});
CreateConstantArray("MulInput2", {1}, {2.0});
CreateArray("MulOutput", {2, 2});
CreateConstantArray("FCWeight", {2, 2}, {1.0, 2.0, 3.0, 4.0});
CreateConstantArray("FCBias", {1}, {1.0});
CreateArray("Output", {2, 2});
auto* mul_op = new MulOperator;
mul_op->inputs = {"Input", "MulInput2"};
mul_op->outputs = {"MulOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(mul_op));
auto* fc_op = new FullyConnectedOperator;
fc_op->inputs = {"MulOutput", "FCWeight", "FCBias"};
fc_op->outputs = {"Output"};
model_->operators.push_back(std::unique_ptr<Operator>(fc_op));
}
toco::FuseBinaryIntoFollowingAffine transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 0, &modified).ok());
EXPECT_TRUE(modified);
ASSERT_EQ(model_->operators.size(), 1);
const auto& op = model_->operators[0];
ASSERT_EQ(op->type, OperatorType::kFullyConnected);
ASSERT_EQ(op->inputs.size(), 3);
auto& weights_array = model_->GetArray(op->inputs[1]);
EXPECT_THAT(weights_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear({2.0, 4.0, 6.0, 8.0})));
auto& bias_array = model_->GetArray(op->inputs[2]);
EXPECT_THAT(bias_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear({1.0})));
}
TEST_F(FuseBinaryIntoFollowingAffineTest, DoNotFuseWithMultipleConsumers) {
{
CreateArray("Input", {2, 2});
CreateConstantArray("MulInput2", {1}, {2.0});
CreateArray("MulOutput", {2, 2});
CreateConstantArray("FCWeight", {2, 2}, {1.0, 2.0, 3.0, 4.0});
CreateConstantArray("FCBias", {1}, {1.0});
CreateArray("Output", {2, 2});
CreateArray("AnotherOutput", {2, 2});
auto* mul_op = new MulOperator;
mul_op->inputs = {"Input", "MulInput2"};
mul_op->outputs = {"MulOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(mul_op));
auto* fc_op = new FullyConnectedOperator;
fc_op->inputs = {"MulOutput", "FCWeight", "FCBias"};
fc_op->outputs = {"Output"};
model_->operators.push_back(std::unique_ptr<Operator>(fc_op));
    auto* identity_op = new TensorFlowIdentityOperator;
identity_op->inputs = {"MulOutput"};
identity_op->outputs = {"AnotherOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(identity_op));
}
toco::FuseBinaryIntoFollowingAffine transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 0, &modified).ok());
EXPECT_FALSE(modified);
EXPECT_EQ(model_->operators.size(), 3);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/fuse_binary_into_following_affine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_following_affine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7531dc5-9be2-411c-9d36-8d079fac79c4 | cpp | tensorflow/tensorflow | identify_l2_pool | tensorflow/lite/toco/graph_transformations/identify_l2_pool.cc | tensorflow/lite/toco/graph_transformations/tests/identify_l2_pool_test.cc | #include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
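// Matches the subgraph Square -> AveragePool -> Sqrt, i.e.
// sqrt(avg_pool(x^2)), and replaces it with a single L2Pool op.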
::tensorflow::Status IdentifyL2Pool::Run(Model* model, std::size_t op_index,
bool* modified) {
*modified = false;
const auto sqrt_it = model->operators.begin() + op_index;
const auto* sqrt_op = sqrt_it->get();
if (sqrt_op->type != OperatorType::kSqrt) {
return absl::OkStatus();
}
CHECK_EQ(sqrt_op->inputs.size(), 1);
CHECK_EQ(sqrt_op->outputs.size(), 1);
const AveragePoolOperator* avpool_op;
const Operator* square_op;
Operator* prev_to_sqrt_op = GetOpWithOutput(*model, sqrt_op->inputs[0]);
if (prev_to_sqrt_op == nullptr) {
AddMessageF(
"Giving up trying to identify L2Pool subgraph: "
"expected AveragePool op, but Sqrt op has no preceding op");
return absl::OkStatus();
}
if (prev_to_sqrt_op->type != OperatorType::kAveragePool) {
AddMessageF(
"Giving up trying to identify L2Pool subgraph: "
"expected AveragePool op, got %s",
LogName(*prev_to_sqrt_op));
return absl::OkStatus();
}
avpool_op = static_cast<const AveragePoolOperator*>(prev_to_sqrt_op);
CHECK_EQ(avpool_op->inputs.size(), 1);
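  // Walk one producer further back: the AveragePool input must come from a
  // Square op.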
square_op = GetOpWithOutput(*model, avpool_op->inputs[0]);
CHECK_EQ(square_op->inputs.size(), 1);
if (square_op->type != OperatorType::kSquare) {
AddMessageF(
"Giving up trying to identify L2Pool subgraph: "
"expected Square op, got %s",
LogName(*square_op));
return absl::OkStatus();
}
auto* l2pool_op = new L2PoolOperator;
l2pool_op->inputs = {square_op->inputs[0]};
l2pool_op->outputs = sqrt_op->outputs;
l2pool_op->padding.type = avpool_op->padding.type;
l2pool_op->stride_height = avpool_op->stride_height;
l2pool_op->stride_width = avpool_op->stride_width;
l2pool_op->kheight = avpool_op->kheight;
l2pool_op->kwidth = avpool_op->kwidth;
model->operators.emplace(sqrt_it, l2pool_op);
AddMessageF("Creating %s replacing equivalent subgraph", LogName(*l2pool_op));
DeleteOpAndArrays(model, square_op);
DeleteOpAndArrays(model, avpool_op);
DeleteOpAndArrays(model, sqrt_op);
*modified = true;
return absl::OkStatus();
}
} | #include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
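// Builds the sqrt(avg_pool(square(x))) pattern and verifies that none of the
// three original ops survive the transformation.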
void RunIdentifyL2Pool(const std::vector<float>& input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape) {
Model model;
Array& input0 = model.GetOrCreateArray("input0");
Array& output = model.GetOrCreateArray("output");
*input0.mutable_shape()->mutable_dims() = input_shape;
input0.data_type = ArrayDataType::kFloat;
input0.GetMutableBuffer<ArrayDataType::kFloat>().data = input;
*output.mutable_shape()->mutable_dims() = output_shape;
auto sq_op = new TensorFlowSquareOperator;
sq_op->inputs = {"input0"};
sq_op->outputs = {"output"};
Array& avgpooloutput = model.GetOrCreateArray("Avgpooloutput");
*avgpooloutput.mutable_shape()->mutable_dims() = output_shape;
auto avgpool_op = new AveragePoolOperator;
avgpool_op->inputs = {sq_op->outputs[0]};
avgpool_op->outputs = {"Avgpooloutput"};
Array& sqrtoutput = model.GetOrCreateArray("Sqrtoutput");
*sqrtoutput.mutable_shape()->mutable_dims() = output_shape;
auto sqrt_op = new TensorFlowSqrtOperator;
sqrt_op->inputs = {avgpool_op->outputs[0]};
sqrt_op->outputs = {"Sqrtoutput"};
model.operators.push_back(std::unique_ptr<Operator>(sqrt_op));
model.operators.push_back(std::unique_ptr<Operator>(avgpool_op));
model.operators.push_back(std::unique_ptr<Operator>(sq_op));
bool modified;
ASSERT_TRUE(IdentifyL2Pool().Run(&model, 0, &modified).ok());
for (auto& op_it : model.operators) {
Operator* op = op_it.get();
EXPECT_FALSE(op->type == OperatorType::kSqrt);
EXPECT_FALSE(op->type == OperatorType::kAveragePool);
EXPECT_FALSE(op->type == OperatorType::kSquare);
}
}
}
TEST(IdentifyL2Pool, SimpleTest) {
RunIdentifyL2Pool(
      /*input=*/{3, 1, 4, 1, -5, 9, -2, 6, 5, 3, 5, 8},
      /*input_shape=*/{3, 4},
      /*output_shape=*/{3, 4});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/identify_l2_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/identify_l2_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49811fe3-1d03-4cbf-b0e2-07760a03d2b0 | cpp | tensorflow/tensorflow | fuse_binary_into_preceding_affine | tensorflow/lite/toco/graph_transformations/fuse_binary_into_preceding_affine.cc | tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_preceding_affine_test.cc | #include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
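// Returns the operand index of the bias for the supported affine ops;
// TransposeConv carries its bias as the fourth operand.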
int GetBiasIndex(const Operator& op) {
if (op.type == OperatorType::kConv ||
op.type == OperatorType::kFullyConnected ||
op.type == OperatorType::kDepthwiseConv) {
return 2;
} else if (op.type == OperatorType::kTransposeConv) {
return 3;
}
LOG(FATAL) << "Unhandled operator type";
return 0;
}
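// Folds a constant Add/Sub that consumes an affine op's output directly into
// the affine op's bias, channel by channel.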
void FuseAddOrSubParamsIntoPrecedingAffine(Model* model, Operator* preceding_op,
const Operator* add_or_sub_op,
int index_of_constant_input) {
CHECK(add_or_sub_op->type == OperatorType::kAdd ||
add_or_sub_op->type == OperatorType::kSub);
CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
if (preceding_op->inputs.size() < 3) {
LOG(FATAL) << "Missing bias parameter";
}
const auto bias_ind = GetBiasIndex(*preceding_op);
auto& bias = model->GetArray(preceding_op->inputs[bias_ind]);
bias.minmax = nullptr;
const auto& operand =
model->GetArray(add_or_sub_op->inputs[index_of_constant_input]);
const Shape& bias_shape = bias.shape();
const Shape& operand_shape = operand.shape();
auto& bias_buffer = bias.GetMutableBuffer<ArrayDataType::kFloat>();
float* const bias_data = bias_buffer.data.data();
const auto& operand_buffer = operand.GetBuffer<ArrayDataType::kFloat>();
const float* const operand_data = operand_buffer.data.data();
const int depth = bias_shape.dims(bias_shape.dimensions_count() - 1);
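  // Broadcast rule: a per-channel operand advances through its last
  // dimension in step with the bias, while a scalar operand keeps index 0.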
int operand_channel_increment = 0;
if (operand_shape.dimensions_count() >= 1 &&
operand_shape.dims(operand_shape.dimensions_count() - 1) ==
bias_shape.dims(bias_shape.dimensions_count() - 1)) {
operand_channel_increment = 1;
} else if (operand_shape.dimensions_count() == 0 ||
operand_shape.dims(operand_shape.dimensions_count() - 1) == 1) {
operand_channel_increment = 0;
} else {
LOG(FATAL) << "Operand shape mismatch.";
}
enum class OpType { BiasPlusOperand, BiasMinusOperand, OperandMinusBias };
const OpType optype = (add_or_sub_op->type == OperatorType::kAdd)
? OpType::BiasPlusOperand
: (index_of_constant_input == 1)
? OpType::BiasMinusOperand
: OpType::OperandMinusBias;
int operand_channel = 0;
for (int i = 0; i < depth; i++) {
float& bias_val = bias_data[i];
const float operand_val = operand_data[operand_channel];
if (optype == OpType::BiasPlusOperand) {
bias_val += operand_val;
} else if (optype == OpType::BiasMinusOperand) {
bias_val -= operand_val;
} else if (optype == OpType::OperandMinusBias) {
bias_val = operand_val - bias_val;
} else {
LOG(FATAL) << "Should not get here.";
}
operand_channel += operand_channel_increment;
}
}
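// Folds a constant Mul/Div that consumes an affine op's output into the
// op's weights and bias: (x*W + b) * s == x*(W*s) + (b*s).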
void FuseMulOrDivParamsIntoPrecedingAffine(Model* model, Operator* preceding_op,
const Operator* mul_or_div_op,
int index_of_constant_input) {
CHECK(mul_or_div_op->type == OperatorType::kMul ||
mul_or_div_op->type == OperatorType::kDiv);
CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
CHECK(mul_or_div_op->type != OperatorType::kDiv ||
index_of_constant_input == 1);
if (preceding_op->inputs.size() < 3) {
LOG(FATAL) << "Missing bias parameter";
}
const auto& weights_name = preceding_op->inputs[1];
const auto bias_ind = GetBiasIndex(*preceding_op);
const auto& bias_name = preceding_op->inputs[bias_ind];
auto& weights = model->GetArray(weights_name);
DropMinMax(model, weights_name);
auto& bias = model->GetArray(bias_name);
DropMinMax(model, bias_name);
const auto& operand =
model->GetArray(mul_or_div_op->inputs[index_of_constant_input]);
const Shape& weights_shape = weights.shape();
const Shape& bias_shape = bias.shape();
const Shape& operand_shape = operand.shape();
auto& weights_buffer = weights.GetMutableBuffer<ArrayDataType::kFloat>();
float* const weights_data = weights_buffer.data.data();
auto& bias_buffer = bias.GetMutableBuffer<ArrayDataType::kFloat>();
float* const bias_data = bias_buffer.data.data();
const auto& operand_buffer = operand.GetBuffer<ArrayDataType::kFloat>();
const float* const operand_data = operand_buffer.data.data();
int operand_channel_increment = 0;
if (operand_shape.dimensions_count() >= 1 &&
operand_shape.dims(operand_shape.dimensions_count() - 1) ==
bias_shape.dims(bias_shape.dimensions_count() - 1)) {
operand_channel_increment = 1;
} else if (operand_shape.dimensions_count() == 0 ||
operand_shape.dims(operand_shape.dimensions_count() - 1) == 1) {
operand_channel_increment = 0;
} else {
LOG(FATAL) << "Operand shape mismatch.";
}
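  // The output depth lives in a different weight dimension per op type:
  // dim 0 for Conv/FullyConnected/TransposeConv, the last dim for
  // DepthwiseConv.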
int output_depth;
if (preceding_op->type == OperatorType::kConv ||
preceding_op->type == OperatorType::kFullyConnected ||
preceding_op->type == OperatorType::kTransposeConv) {
output_depth = weights_shape.dims(0);
} else if (preceding_op->type == OperatorType::kDepthwiseConv) {
output_depth = weights_shape.dims(weights_shape.dimensions_count() - 1);
} else {
LOG(FATAL) << "Should not get here";
}
const int weights_size = RequiredBufferSizeForShape(weights_shape);
const int weights_per_depth = weights_size / output_depth;
CHECK_EQ(weights_size, weights_per_depth * output_depth);
int operand_channel = 0;
for (int c = 0; c < output_depth; c++) {
if (mul_or_div_op->type == OperatorType::kMul) {
bias_data[c] *= operand_data[operand_channel];
} else if (mul_or_div_op->type == OperatorType::kDiv) {
bias_data[c] /= operand_data[operand_channel];
} else {
LOG(FATAL) << "Should not get here";
}
if (preceding_op->type == OperatorType::kConv ||
preceding_op->type == OperatorType::kFullyConnected) {
for (int i = 0; i < weights_per_depth; i++) {
if (mul_or_div_op->type == OperatorType::kMul) {
weights_data[c * weights_per_depth + i] *=
operand_data[operand_channel];
} else if (mul_or_div_op->type == OperatorType::kDiv) {
weights_data[c * weights_per_depth + i] /=
operand_data[operand_channel];
} else {
LOG(FATAL) << "Should not get here";
}
}
} else if (preceding_op->type == OperatorType::kDepthwiseConv) {
for (int k = 0; k < weights_per_depth; k++) {
if (mul_or_div_op->type == OperatorType::kMul) {
weights_data[k * output_depth + c] *= operand_data[operand_channel];
} else if (mul_or_div_op->type == OperatorType::kDiv) {
weights_data[k * output_depth + c] /= operand_data[operand_channel];
} else {
LOG(FATAL) << "Should not get here";
}
}
} else {
LOG(FATAL) << "Should not get here";
}
operand_channel += operand_channel_increment;
}
}
}
::tensorflow::Status FuseBinaryIntoPrecedingAffine::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto binary_it = model->operators.begin() + op_index;
const auto* binary_op = binary_it->get();
if (binary_op->type != OperatorType::kAdd &&
binary_op->type != OperatorType::kMul &&
binary_op->type != OperatorType::kSub &&
binary_op->type != OperatorType::kDiv) {
return absl::OkStatus();
}
CHECK_EQ(binary_op->inputs.size(), 2);
const bool is_input_constant[2] = {
IsConstantParameterArray(*model, binary_op->inputs[0]),
IsConstantParameterArray(*model, binary_op->inputs[1]),
};
if (!is_input_constant[0] && !is_input_constant[1]) {
return absl::OkStatus();
}
if (is_input_constant[0] && is_input_constant[1]) {
return absl::OkStatus();
}
const int index_of_constant_input = is_input_constant[0] ? 0 : 1;
const int index_of_variable_input = is_input_constant[0] ? 1 : 0;
CHECK(is_input_constant[index_of_constant_input]);
CHECK(!is_input_constant[index_of_variable_input]);
if (binary_op->type == OperatorType::kDiv) {
if (index_of_constant_input != 1) {
AddMessageF("Not fusing %s because the denominator is not constant",
LogName(*binary_op));
return absl::OkStatus();
}
}
Operator* preceding_op =
GetOpWithOutput(*model, binary_op->inputs[index_of_variable_input]);
if (!preceding_op) {
AddMessageF("Not fusing %s because it is not the output of another op",
LogName(*binary_op));
return absl::OkStatus();
}
for (const std::string& output_array : model->flags.output_arrays()) {
if (preceding_op->outputs[0] == output_array) {
return absl::OkStatus();
}
}
if (preceding_op->type != OperatorType::kConv &&
preceding_op->type != OperatorType::kFullyConnected &&
preceding_op->type != OperatorType::kDepthwiseConv &&
preceding_op->type != OperatorType::kTransposeConv) {
AddMessageF(
"Not fusing %s because the preceding %s is not of one of the supported "
"types",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (preceding_op->type == OperatorType::kTransposeConv &&
binary_op->type != OperatorType::kAdd) {
AddMessageF("Not fusing %s to preceding %s", LogName(*binary_op),
LogName(*preceding_op));
return absl::OkStatus();
}
if (preceding_op->fused_activation_function !=
FusedActivationFunctionType::kNone) {
AddMessageF(
"Not fusing %s because the preceding %s has a fused activation "
"function",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (preceding_op->inputs.size() < 3) {
AddMessageF(
"Not fusing %s because the preceding %s does not have a bias vector",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
const auto& weights_name = preceding_op->inputs[1];
const auto bias_ind = GetBiasIndex(*preceding_op);
const auto& bias_name = preceding_op->inputs[bias_ind];
const auto& weights = model->GetArray(weights_name);
const auto& bias = model->GetArray(bias_name);
if (weights.data_type != ArrayDataType::kFloat ||
bias.data_type != ArrayDataType::kFloat) {
AddMessageF(
"Not fusing %s into preceding %s because one of weights or bias array "
"is not float (types are %s and %s)",
LogName(*binary_op), LogName(*preceding_op),
ArrayDataTypeName(weights.data_type),
ArrayDataTypeName(bias.data_type));
return absl::OkStatus();
}
const int count_ops_consuming_bias = CountOpsWithInput(*model, bias_name);
const int count_ops_consuming_weights =
CountOpsWithInput(*model, weights_name);
if (binary_op->type == OperatorType::kAdd ||
binary_op->type == OperatorType::kSub) {
if (!bias.buffer) {
AddMessageF(
"Not fusing %s because the preceding %s has a non-constant bias "
"array",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (count_ops_consuming_bias > 1) {
AddMessageF(
"Not fusing %s because the bias of the preceding %s is consumed by "
"another op",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
} else {
if (!weights.buffer || !bias.buffer) {
AddMessageF(
"Not fusing %s because the preceding %s has non-constant weights or "
"bias arrays",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
if (count_ops_consuming_weights > 1 || count_ops_consuming_bias > 1) {
AddMessageF(
"Not fusing %s because the weights or bias of the preceding %s is "
"consumed by another op",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
}
int count_ops_consuming_output =
CountOpsWithInput(*model, preceding_op->outputs[0]);
DCHECK_GE(count_ops_consuming_output, 1);
if (count_ops_consuming_output > 1) {
AddMessageF(
"Not fusing %s because the output of the preceding %s is consumed by "
"another op",
LogName(*binary_op), LogName(*preceding_op));
return absl::OkStatus();
}
AddMessageF("Fusing %s into the preceding %s", LogName(*binary_op),
LogName(*preceding_op));
if (binary_op->type == OperatorType::kAdd ||
binary_op->type == OperatorType::kSub) {
FuseAddOrSubParamsIntoPrecedingAffine(model, preceding_op, binary_op,
index_of_constant_input);
} else if (binary_op->type == OperatorType::kMul ||
binary_op->type == OperatorType::kDiv) {
FuseMulOrDivParamsIntoPrecedingAffine(model, preceding_op, binary_op,
index_of_constant_input);
} else {
LOG(FATAL) << "should not get here";
}
model->EraseArray(preceding_op->outputs[0]);
preceding_op->outputs[0] = binary_op->outputs[0];
preceding_op->fused_activation_function =
binary_op->fused_activation_function;
DeleteOpAndArrays(model, binary_op);
*modified = true;
return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
using ::testing::ElementsAreArray;
namespace {
std::vector<testing::Matcher<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5) {
std::vector<testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
matchers.emplace_back(testing::FloatNear(v, max_abs_error));
}
return matchers;
}
}
class FuseBinaryIntoPrecedingAffineTest : public ::testing::Test {
protected:
FuseBinaryIntoPrecedingAffineTest() {}
void SetUp() override { model_ = std::make_unique<Model>(); }
void CreateArray(const std::string& name, const std::vector<int>& shape) {
Array& array = model_->GetOrCreateArray(name);
array.data_type = ArrayDataType::kFloat;
Shape* array_shape = array.mutable_shape();
*(array_shape->mutable_dims()) = shape;
}
void CreateConstantArray(const std::string& name,
const std::vector<int>& shape,
const std::vector<float>& data) {
CreateArray(name, shape);
Array& array = model_->GetOrCreateArray(name);
auto& array_buffer = array.GetMutableBuffer<ArrayDataType::kFloat>();
int bufsize = 1;
for (int dim : shape) {
bufsize *= dim;
}
array_buffer.data.resize(bufsize);
float* buf_ptr = array_buffer.data.data();
for (int i = 0; i < bufsize; ++i) {
buf_ptr[i] = data[i];
}
}
std::unique_ptr<Model> model_;
};
TEST_F(FuseBinaryIntoPrecedingAffineTest, FuseAddIntoTransposeConv) {
{
CreateConstantArray("OutputShape",
{1, 2}, {2, 2});
CreateConstantArray("TransConvWeight", {2, 2}, {1.0, 2.0, 3.0, 4.0});
CreateConstantArray("TransConvBias", {1}, {1.0});
CreateArray("TransConvInput",
{2, 2});
CreateArray("TransConvOutput", {2, 2});
CreateConstantArray("AddInput2", {1}, {2.0});
CreateArray("AddOutput", {2, 2});
auto* tc_op = new TransposeConvOperator;
tc_op->inputs = {"OutputShape", "TransConvWeight", "TransConvInput",
"TransConvBias"};
tc_op->outputs = {"TransConvOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(tc_op));
auto* add_op = new AddOperator;
add_op->inputs = {"TransConvOutput", "AddInput2"};
add_op->outputs = {"AddOutput"};
model_->operators.push_back(std::unique_ptr<Operator>(add_op));
}
toco::FuseBinaryIntoPrecedingAffine transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), 1, &modified).ok());
EXPECT_TRUE(modified);
ASSERT_EQ(model_->operators.size(), 1);
const auto& op = model_->operators[0];
ASSERT_EQ(op->type, OperatorType::kTransposeConv);
ASSERT_EQ(op->inputs.size(), 4);
auto& weights_array = model_->GetArray(op->inputs[1]);
EXPECT_THAT(weights_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear({1.0, 2.0, 3.0, 4.0})));
auto& bias_array = model_->GetArray(op->inputs[3]);
EXPECT_THAT(bias_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear({3.0})));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/fuse_binary_into_preceding_affine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_preceding_affine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cba3fe7d-25f6-4c31-b4b6-466fcdf3a27e | cpp | tensorflow/tensorflow | conversion_log_util | tensorflow/lite/toco/logging/conversion_log_util.cc | tensorflow/lite/toco/logging/conversion_log_util_test.cc | #include "tensorflow/lite/toco/logging/conversion_log_util.h"
#include <string>
#ifdef __linux__
#include <sys/utsname.h>
#endif
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tflite/export.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tooling_util.h"
#include "tensorflow/lite/version.h"
namespace toco {
namespace {
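// Returns the best available name for an operator: the TensorFlow NodeDef op
// if one is attached, the wrapped op name for unsupported ops, and otherwise
// the toco operator type name.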
std::string TryGetOperatorName(const Operator& op) {
std::string op_name;
if (!op.tensorflow_node_def.empty()) {
tensorflow::NodeDef node_def;
if (!node_def.ParseFromString(op.tensorflow_node_def)) {
LOG(ERROR) << "Failed to parse Tensorflow NodeDef";
} else {
op_name = node_def.op();
if (!op_name.empty()) return op_name;
}
}
if (op.type == OperatorType::kUnsupported) {
const TensorFlowUnsupportedOperator& unsupported_op =
static_cast<const TensorFlowUnsupportedOperator&>(op);
if (!unsupported_op.tensorflow_op.empty()) {
op_name = unsupported_op.tensorflow_op;
return op_name;
}
}
op_name = OperatorTypeName(op.type);
return op_name;
}
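// Returns "<sysname>;OSVer=<release>;" on Linux, or an empty string on other
// platforms.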
std::string GetOSVersion() {
std::string os_info;
#ifdef __linux__
utsname info;
if (uname(&info)) {
LOG(ERROR) << "Cannot get OS info.";
return "";
}
os_info =
std::string(info.sysname) + ";OSVer=" + std::string(info.release) + ";";
#endif
return os_info;
}
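// Formats a shape as "[d0,d1,...]" without spaces; rank-0 shapes become "[]".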
std::string ShapeToStringNoSpace(const Shape& shape) {
if (shape.dimensions_count() == 0) {
return "[]";
}
return absl::StrCat("[", absl::StrJoin(shape.dims(), ","), "]");
}
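// Builds a signature string of the form
// "INPUT:<shape>::<type>::...OUTPUT:<shape>::<type>::...NAME:<op>::VERSION:<n>".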
std::string GetOperatorSignature(
const Model& model, const Operator& op,
const std::map<OperatorType, std::unique_ptr<tflite::BaseOperator>>&
op_types_map) {
std::string op_signature;
constexpr char delimiter[] = "::";
op_signature.append("INPUT:");
for (const auto& input : op.inputs) {
const auto& array = model.GetArray(input);
if (array.has_shape()) {
op_signature.append(ShapeToStringNoSpace(array.shape()));
} else {
op_signature.append("None");
}
op_signature.append(delimiter);
op_signature.append(ArrayDataTypeName(array.data_type) + delimiter);
}
op_signature.append("OUTPUT:");
for (const auto& output : op.outputs) {
const auto& array = model.GetArray(output);
if (array.has_shape()) {
op_signature.append(ShapeToStringNoSpace(array.shape()));
} else {
op_signature.append("None");
}
op_signature.append(delimiter);
op_signature.append(ArrayDataTypeName(array.data_type) + delimiter);
}
op_signature.append("NAME:");
op_signature.append(TryGetOperatorName(op) + delimiter);
op_signature.append("VERSION:");
OperatorSignature toco_op_signature;
toco_op_signature.op = &op;
toco_op_signature.model = &model;
if (op_types_map.find(op.type) != op_types_map.end()) {
const int version = op_types_map.at(op.type)->GetVersion(toco_op_signature);
op_signature.append(std::to_string(version));
} else {
op_signature.append("None");
}
return op_signature;
}
}  // namespace
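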
std::vector<std::string> GetOperatorNames(const Model& model) {
std::vector<std::string> op_names;
op_names.reserve(model.operators.size());
for (const auto& op : model.operators) {
op_names.push_back(TryGetOperatorName(*op));
}
return op_names;
}
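// Buckets every operator into built-in, custom, or select (flex) counts using
// the TFLite operator-key classification.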
void CountOperatorsByType(const Model& model,
std::map<std::string, int>* built_in_ops,
std::map<std::string, int>* custom_ops,
std::map<std::string, int>* select_ops) {
for (const auto& op : model.operators) {
OperatorSignature op_signature = {op.get(), &model};
    const auto ops_by_type =
        tflite::BuildOperatorByTypeMap(true);
    tflite::details::OperatorKey op_key(op_signature, ops_by_type, true);
const std::string op_name = TryGetOperatorName(*op);
if (op_key.is_custom_op()) {
(*custom_ops)[op_name]++;
} else if (op_key.is_flex_op()) {
(*select_ops)[op_name]++;
} else {
(*built_in_ops)[op_name]++;
}
}
}
void GetInputAndOutputTypes(
const Model& model,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* input_types,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* output_types) {
for (const auto& input_array : model.flags.input_arrays()) {
const Array& array = model.GetArray(input_array.name());
input_types->Add(ArrayDataTypeName(array.data_type));
}
for (const auto& output_array : model.flags.output_arrays()) {
const Array& array = model.GetArray(output_array);
output_types->Add(ArrayDataTypeName(array.data_type));
}
}
std::string GetTfLiteVersion() { return TFLITE_VERSION_STRING; }
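// Caches the OS version string so uname() is only invoked once per process.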
std::string GetCachedOSVersion() {
static std::string* version = new std::string(GetOSVersion());
return *version;
}
void GetOpSignatures(
const Model& model,
TFLITE_PROTO_NS::RepeatedPtrField<std::string>* op_signatures) {
const auto& op_types_map =
      tflite::BuildOperatorByTypeMap(true);
for (const auto& op : model.operators) {
op_signatures->Add(GetOperatorSignature(model, *op, op_types_map));
}
}
std::string GetModelHash(const Model& model) {
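  // Model hashing is not implemented here; an empty string is returned.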
return "";
}
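// Keeps only the sentences listing flex and custom ops, dropping the rest of
// the converter error message (e.g. Python tracebacks).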
std::string SanitizeErrorMessage(absl::string_view error_message) {
const std::string s1 = "Ops that can be supported by the flex runtime";
const std::string s2 = "Ops that need custom implementation";
std::string pruned_message;
size_t pos = error_message.find(s1);
if (pos != std::string::npos) {
auto end = error_message.find('.', pos);
pruned_message.append(error_message.substr(pos, end - pos + 1));
}
pos = error_message.find(s2);
if (pos != std::string::npos) {
auto end = error_message.find('.', pos);
pruned_message.append(error_message.substr(pos, end - pos + 1));
}
return pruned_message;
}
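// Fills a TocoConversionLog with op names and signatures, per-kind op counts,
// I/O tensor types, and environment metadata for the given model.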
void PopulateConversionLog(const Model& model, TocoConversionLog* log) {
const std::vector<std::string> op_names = GetOperatorNames(model);
for (const auto& op_name : op_names) {
log->add_op_list(op_name);
}
TFLITE_PROTO_NS::RepeatedPtrField<std::string> op_signatures;
GetOpSignatures(model, &op_signatures);
log->mutable_op_signatures()->CopyFrom(op_signatures);
std::map<std::string, int> custom_ops, select_ops, built_in_ops;
CountOperatorsByType(model, &built_in_ops, &custom_ops, &select_ops);
log->mutable_custom_ops()->insert(custom_ops.cbegin(), custom_ops.cend());
log->mutable_built_in_ops()->insert(built_in_ops.cbegin(),
built_in_ops.cend());
log->mutable_select_ops()->insert(select_ops.cbegin(), select_ops.cend());
TFLITE_PROTO_NS::RepeatedPtrField<std::string> input_types, output_types;
GetInputAndOutputTypes(model, &input_types, &output_types);
log->mutable_input_tensor_types()->CopyFrom(input_types);
log->mutable_output_tensor_types()->CopyFrom(output_types);
log->set_log_generation_ts(absl::ToUnixMicros(absl::Now()));
log->set_model_size(model.operators.size());
log->set_tf_lite_version(GetTfLiteVersion());
log->set_os_version(GetCachedOSVersion());
log->set_model_hash(GetModelHash(model));
}
} | #include "tensorflow/lite/toco/logging/conversion_log_util.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
namespace toco {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
TEST(ConversionLogUtilTest, TestGetOperatorNames) {
Model model;
model.operators.push_back(std::make_unique<ConvOperator>());
model.operators.push_back(std::make_unique<MeanOperator>());
model.operators.push_back(std::make_unique<NegOperator>());
auto avg_pool_3d = std::make_unique<TensorFlowUnsupportedOperator>();
avg_pool_3d->tensorflow_op = "AvgPool3D";
tensorflow::NodeDef node_def;
node_def.set_op("AvgPool3D");
node_def.SerializeToString(&avg_pool_3d->tensorflow_node_def);
model.operators.push_back(std::move(avg_pool_3d));
auto my_custom_op = std::make_unique<TensorFlowUnsupportedOperator>();
my_custom_op->tensorflow_op = "MyAwesomeCustomOp";
model.operators.push_back(std::move(my_custom_op));
const auto& output = GetOperatorNames(model);
EXPECT_THAT(output, ElementsAre("Conv", "Mean", "Neg", "AvgPool3D",
"MyAwesomeCustomOp"));
}
TEST(ConversionLogUtilTest, TestCountOperatorsByType) {
Model model;
std::unique_ptr<ConvOperator> conv1(new ConvOperator());
const std::string conv1_input_name = "conv_input1";
const std::string conv1_filter_name = "conv_filter1";
const std::string conv1_output_name = "conv_output1";
conv1->inputs.push_back(conv1_input_name);
conv1->inputs.push_back(conv1_filter_name);
conv1->outputs.push_back(conv1_output_name);
auto& array_map = model.GetMutableArrayMap();
array_map[conv1_input_name] = std::make_unique<Array>();
array_map[conv1_filter_name] = std::make_unique<Array>();
array_map[conv1_output_name] = std::make_unique<Array>();
std::unique_ptr<ConvOperator> conv2(new ConvOperator());
const std::string conv2_input_name = "conv_input2";
const std::string conv2_filter_name = "conv_filter2";
const std::string conv2_output_name = "conv_output2";
conv2->inputs.push_back(conv2_input_name);
conv2->inputs.push_back(conv2_filter_name);
conv2->outputs.push_back(conv2_output_name);
array_map[conv2_input_name] = std::make_unique<Array>();
array_map[conv2_filter_name] = std::make_unique<Array>();
array_map[conv2_output_name] = std::make_unique<Array>();
std::unique_ptr<MeanOperator> mean(new MeanOperator());
const std::string mean_input_name = "mean_input";
mean->inputs.push_back(mean_input_name);
array_map[mean_input_name] = std::make_unique<Array>();
auto avg_pool_3d = std::make_unique<TensorFlowUnsupportedOperator>();
avg_pool_3d->tensorflow_op = "AvgPool3D";
tensorflow::NodeDef node_def;
node_def.set_op("AvgPool3D");
node_def.SerializeToString(&avg_pool_3d->tensorflow_node_def);
auto elu_grad = std::make_unique<TensorFlowUnsupportedOperator>();
elu_grad->tensorflow_op = "EluGrad";
node_def.set_op("EluGrad");
node_def.SerializeToString(&elu_grad->tensorflow_node_def);
auto my_custom_op = std::make_unique<TensorFlowUnsupportedOperator>();
my_custom_op->tensorflow_op = "MyAwesomeCustomOp";
model.operators.push_back(std::move(conv1));
model.operators.push_back(std::move(conv2));
model.operators.push_back(std::move(mean));
model.operators.push_back(std::move(avg_pool_3d));
model.operators.push_back(std::move(elu_grad));
model.operators.push_back(std::move(my_custom_op));
std::map<std::string, int> built_in_ops, select_ops, custom_ops;
CountOperatorsByType(model, &built_in_ops, &custom_ops, &select_ops);
EXPECT_THAT(built_in_ops,
UnorderedElementsAre(std::pair<std::string, int>("Conv", 2),
std::pair<std::string, int>("Mean", 1)));
EXPECT_THAT(select_ops,
UnorderedElementsAre(std::pair<std::string, int>("AvgPool3D", 1),
std::pair<std::string, int>("EluGrad", 1)));
EXPECT_THAT(custom_ops, UnorderedElementsAre(std::pair<std::string, int>(
"MyAwesomeCustomOp", 1)));
}
TEST(ConversionLogUtilTest, TestGetInputAndOutputTypes) {
Model model;
auto& array_map = model.GetMutableArrayMap();
const std::string input1 = "conv_input";
const std::string input2 = "conv_filter";
const std::string input3 = "feature";
const std::string output = "softmax";
array_map[input1] = std::make_unique<Array>();
array_map[input1]->data_type = ArrayDataType::kFloat;
array_map[input2] = std::make_unique<Array>();
array_map[input2]->data_type = ArrayDataType::kFloat;
array_map[input3] = std::make_unique<Array>();
array_map[input3]->data_type = ArrayDataType::kInt16;
array_map[output] = std::make_unique<Array>();
array_map[output]->data_type = ArrayDataType::kFloat;
InputArray input_arrays[3];
input_arrays[0].set_name(input1);
input_arrays[1].set_name(input2);
input_arrays[2].set_name(input3);
*model.flags.add_input_arrays() = input_arrays[0];
*model.flags.add_input_arrays() = input_arrays[1];
*model.flags.add_input_arrays() = input_arrays[2];
model.flags.add_output_arrays(output);
TFLITE_PROTO_NS::RepeatedPtrField<std::string> input_types, output_types;
GetInputAndOutputTypes(model, &input_types, &output_types);
EXPECT_THAT(input_types, ElementsAre("float", "float", "int16"));
EXPECT_THAT(output_types, ElementsAre("float"));
}
TEST(ConversionLogUtilTest, TestGetOpSignatures) {
Model model;
auto& array_map = model.GetMutableArrayMap();
std::unique_ptr<ConvOperator> conv(new ConvOperator());
const std::string conv_input_name = "conv_input";
const std::string conv_filter_name = "conv_filter";
const std::string conv_output_name = "conv_output";
conv->inputs.push_back(conv_input_name);
conv->inputs.push_back(conv_filter_name);
conv->outputs.push_back(conv_output_name);
array_map[conv_input_name] = std::make_unique<Array>();
array_map[conv_input_name]->data_type = ArrayDataType::kFloat;
array_map[conv_input_name]->copy_shape({4, 4, 3});
array_map[conv_filter_name] = std::make_unique<Array>();
array_map[conv_filter_name]->data_type = ArrayDataType::kFloat;
array_map[conv_filter_name]->copy_shape({2, 2});
array_map[conv_output_name] = std::make_unique<Array>();
array_map[conv_output_name]->data_type = ArrayDataType::kFloat;
array_map[conv_output_name]->copy_shape({4, 4, 2});
const std::string mean_input_name = "mean_input";
const std::string mean_output_name = "mean_output";
std::unique_ptr<MeanOperator> mean(new MeanOperator());
mean->inputs.push_back(mean_input_name);
mean->outputs.push_back(mean_output_name);
array_map[mean_input_name] = std::make_unique<Array>();
array_map[mean_output_name] = std::make_unique<Array>();
const std::string avg_pool_3d_output_name = "avg_pool_output";
auto avg_pool_3d = std::make_unique<TensorFlowUnsupportedOperator>();
avg_pool_3d->tensorflow_op = "AvgPool3D";
tensorflow::NodeDef node_def;
node_def.set_op("AvgPool3D");
node_def.SerializeToString(&avg_pool_3d->tensorflow_node_def);
avg_pool_3d->inputs.push_back(conv_output_name);
avg_pool_3d->outputs.push_back(avg_pool_3d_output_name);
array_map[avg_pool_3d_output_name] = std::make_unique<Array>();
array_map[avg_pool_3d_output_name]->data_type = ArrayDataType::kInt32;
array_map[avg_pool_3d_output_name]->copy_shape({2, 2});
const std::string custom_op_output_name = "custom_op_output";
auto my_custom_op = std::make_unique<TensorFlowUnsupportedOperator>();
my_custom_op->tensorflow_op = "MyAwesomeCustomOp";
my_custom_op->inputs.push_back(avg_pool_3d_output_name);
my_custom_op->outputs.push_back(custom_op_output_name);
array_map[custom_op_output_name] = std::make_unique<Array>();
array_map[custom_op_output_name]->data_type = ArrayDataType::kFloat;
array_map[custom_op_output_name]->copy_shape({3});
model.operators.push_back(std::move(conv));
model.operators.push_back(std::move(mean));
model.operators.push_back(std::move(avg_pool_3d));
model.operators.push_back(std::move(my_custom_op));
TFLITE_PROTO_NS::RepeatedPtrField<std::string> op_signatures;
GetOpSignatures(model, &op_signatures);
EXPECT_THAT(op_signatures,
UnorderedElementsAre(
"INPUT:[4,4,3]::float::[2,2]::float::OUTPUT:[4,4,2]::float::"
"NAME:Conv::VERSION:1",
"INPUT:None::None::OUTPUT:None::None::NAME:Mean::VERSION:1",
"INPUT:[4,4,2]::float::OUTPUT:[2,2]::int32::NAME:AvgPool3D::"
"VERSION:1",
"INPUT:[2,2]::int32::OUTPUT:[3]::float::NAME:"
"MyAwesomeCustomOp::VERSION:1"));
}
TEST(ConversionLogUtilTest, TestSanitizeErrorMessage) {
const std::string error =
"error: failed while converting: 'main': Ops that can be supported by "
"the flex runtime (enabled via setting the -emit-select-tf-ops flag): "
"ResizeNearestNeighbor,ResizeNearestNeighbor. Ops that need custom "
"implementation (enabled via setting the -emit-custom-ops flag): "
"CombinedNonMaxSuppression.\nTraceback (most recent call last): File "
"/usr/local/bin/toco_from_protos, line 8, in <module>";
const std::string pruned_error =
"Ops that can be supported by "
"the flex runtime (enabled via setting the -emit-select-tf-ops flag): "
"ResizeNearestNeighbor,ResizeNearestNeighbor.Ops that need custom "
"implementation (enabled via setting the -emit-custom-ops flag): "
"CombinedNonMaxSuppression.";
EXPECT_EQ(SanitizeErrorMessage(error), pruned_error);
}
TEST(ConversionLogUtilTest, TestSanitizeErrorMessageNoMatching) {
const std::string error =
"error: failed while converting: 'main': Traceback (most recent call "
"last): File "
"/usr/local/bin/toco_from_protos, line 8, in <module>";
EXPECT_EQ(SanitizeErrorMessage(error), "");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/logging/conversion_log_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/logging/conversion_log_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb36523d-3c6a-4c05-8511-c90aa28acc00 | cpp | tensorflow/tensorflow | resolve_svdf | tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.cc | tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf_test.cc | #include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.h"
#include <ctype.h>
#include <stddef.h>
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster_utils.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/toco_types.h"
using tensorflow::GraphDef;
using tensorflow::NodeDef;
namespace toco {
namespace {
void FilterPartitionedConstNodes(
const std::string& const_pattern,
const std::vector<const NodeDef*>& cluster_nodes,
std::vector<const NodeDef*>* const_node_parts) {
for (const NodeDef* node : cluster_nodes) {
std::string node_name_to_upper = node->name();
std::transform(node_name_to_upper.begin(), node_name_to_upper.end(),
node_name_to_upper.begin(), ::toupper);
if (StrContains(node->name(), const_pattern) && node->op() == "Const") {
if (StrContains(node_name_to_upper, "/PART_")) {
const_node_parts->push_back(node);
} else if (StrContains(node->name(), "AXIS") &&
StrContains(node->name(), "CONCAT")) {
const auto& value_attr = node->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
CHECK_EQ(tensor.int_val(0), 0);
}
}
}
std::sort(const_node_parts->begin(), const_node_parts->end(),
[](const NodeDef* a, const NodeDef* b) {
return (a->name().compare(b->name()) < 0 &&
(a->name().size() < b->name().size()));
});
}
}  // namespace
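// Infers the SVDF filter rank from the "Reshape/shape" constant, whose value
// is expected to end in -1 and carry the rank at index 1.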
int SvdfCluster::InferFilterRank() {
for (const NodeDef* node : nodes_) {
if (StrContains(node->name(), "Reshape/shape")) {
const auto& value_attr = node->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
std::vector<int32> shape_values(
tensor.tensor_content().size() / sizeof(int), 0);
port::CopyToBuffer(tensor.tensor_content(),
reinterpret_cast<char*>(shape_values.data()));
CHECK_EQ(shape_values.size(), 3);
CHECK_EQ(shape_values[2], -1);
return shape_values[1];
}
}
return -1;
}
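// Creates merged Const nodes for each weights/bias pattern, then a single
// "Svdf" node wired to them with Rank and ActivationFunction attributes.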
void SvdfCluster::CreateNodes() {
for (const std::string& const_pattern : const_node_patterns_) {
CreateConstNode(const_pattern);
}
std::unique_ptr<tensorflow::NodeDef> svdf_node(new NodeDef);
svdf_node->set_op("Svdf");
svdf_node->set_name(name_);
svdf_node->set_device(device_);
svdf_node->add_input(inputs_[0]);
CHECK(new_nodes_.size() == 3 || new_nodes_.size() == 2);
std::string* weights_feature_input = svdf_node->add_input();
std::string* weights_time_input = svdf_node->add_input();
  // Initialized to nullptr so the CHECK below fails cleanly rather than
  // reading an uninitialized pointer when only two Const nodes exist.
  std::string* bias_input = nullptr;
if (new_nodes_.size() == 3) {
bias_input = svdf_node->add_input();
}
for (const std::unique_ptr<tensorflow::NodeDef>& node : new_nodes_) {
const std::string node_name = node->name();
if (StrContains(node_name, "SVDF_weights_feature")) {
*weights_feature_input = node_name;
} else if (StrContains(node_name, "SVDF_weights_time")) {
*weights_time_input = node_name;
} else if (StrContains(node_name, "SVDF_bias")) {
CHECK(bias_input) << "Bias input cannot be provided when there are only "
"two Const input nodes!";
*bias_input = node_name;
} else {
LOG(FATAL) << "Unexpected input node for SVDF op! Accepted inputs are: "
"weights_feature, weights_time and bias.";
}
}
const int rank = InferFilterRank();
CHECK_GT(rank, 0);
std::string activation_function =
StrContains(outputs_[0], "Relu") ? "Relu" : "None";
(*svdf_node->mutable_attr())["ActivationFunction"].set_s(activation_function);
(*svdf_node->mutable_attr())["Rank"].set_i(rank);
new_nodes_.push_back(std::move(svdf_node));
}
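// Collects the partitioned Const parts matching const_pattern and merges them
// into one Const node; feature weights are transposed while merging.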
void SvdfCluster::CreateConstNode(const std::string& const_pattern) {
std::vector<const NodeDef*> const_node_parts;
FilterPartitionedConstNodes(const_pattern, nodes_, &const_node_parts);
if (const_node_parts.empty()) return;
bool transpose_tensor_value =
StrContains(const_pattern, "SVDF_weights_feature");
std::unique_ptr<tensorflow::NodeDef> merged_node(new NodeDef);
MaybeMergeConstNodes(const_node_parts, transpose_tensor_value, merged_node);
new_nodes_.push_back(std::move(merged_node));
}
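// Concatenates the tensor contents of the partitioned Const nodes along
// dimension 0, optionally transposing the merged 2-D tensor.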
void SvdfCluster::MaybeMergeConstNodes(
const std::vector<const NodeDef*>& const_node_parts,
bool transpose_tensor_value,
const std::unique_ptr<tensorflow::NodeDef>& merged_node) {
merged_node->set_name(const_node_parts[0]->name());
merged_node->set_op("Const");
merged_node->set_device(const_node_parts[0]->device());
(*merged_node->mutable_attr())["dtype"].set_type(
const_node_parts[0]->attr().at("dtype").type());
int dim0_size = 0;
int dim1_size = 1;
tensorflow::TensorProto* allocated_tensor =
(*merged_node->mutable_attr())["value"].mutable_tensor();
tensorflow::TensorShapeProto* allocated_tensor_shape =
allocated_tensor->mutable_tensor_shape();
auto tensor_shape_dim0 = allocated_tensor_shape->add_dim();
int allocated_content_flat_size = 0;
for (size_t i = 0; i < const_node_parts.size(); i++) {
const auto& value_attr = const_node_parts[i]->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
if (i == 0) {
allocated_tensor->set_dtype(tensor.dtype());
} else {
CHECK_EQ(allocated_tensor->dtype(), tensor.dtype());
}
allocated_content_flat_size += tensor.tensor_content().size();
CHECK(tensor.has_tensor_shape());
const tensorflow::TensorShapeProto shape = tensor.tensor_shape();
dim0_size += shape.dim(0).size();
for (int d = 1; d < shape.dim_size(); d++) {
if (i == 0) {
allocated_tensor_shape->add_dim()->set_size(shape.dim(d).size());
allocated_tensor_shape->set_unknown_rank(shape.unknown_rank());
dim1_size *= shape.dim(d).size();
} else {
CHECK_EQ(shape.dim(d).size(), allocated_tensor_shape->dim(d).size());
CHECK_EQ(allocated_tensor_shape->unknown_rank(), shape.unknown_rank());
}
}
}
std::unique_ptr<char[]> allocated_content(
new char[allocated_content_flat_size]);
char* content_ptr = allocated_content.get();
for (size_t i = 0; i < const_node_parts.size(); i++) {
const auto& value_attr = const_node_parts[i]->attr().at("value");
const tensorflow::TensorProto& tensor = value_attr.tensor();
port::CopyToBuffer(tensor.tensor_content(), content_ptr);
content_ptr += tensor.tensor_content().size();
}
if (transpose_tensor_value) {
std::unique_ptr<float[]> transposed_tensor(
new float[dim0_size * dim1_size]);
Transpose2DTensor(reinterpret_cast<float*>(allocated_content.get()),
dim0_size, dim1_size, transposed_tensor.get());
allocated_tensor_shape->clear_dim();
allocated_tensor_shape->add_dim()->set_size(dim1_size);
allocated_tensor_shape->add_dim()->set_size(dim0_size);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(transposed_tensor.get()),
allocated_content_flat_size));
} else {
tensor_shape_dim0->set_size(dim0_size);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(allocated_content.get()),
allocated_content_flat_size));
}
}
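// Recognizes an SVDF cluster from a node whose name contains
// "SVDF_weights_feature"; the cell name is taken from the enclosing scope.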
std::unique_ptr<Cluster> SvdfClusterFactory::CreateCluster(
const NodeDef& node, const GraphDef& graph_def) const {
std::vector<std::string> node_patterns = {"SVDF_weights_feature",
"SVDF_weights_time", "SVDF_bias"};
std::string node_name_to_upper = node.name();
std::transform(node_name_to_upper.begin(), node_name_to_upper.end(),
node_name_to_upper.begin(), ::toupper);
std::unique_ptr<SvdfCluster> cluster = nullptr;
if (node_name_to_upper.find("SVDF", 0) != std::string::npos) {
size_t weights_pos = node.name().find(node_patterns[0]);
if (weights_pos != std::string::npos) {
size_t cell_pos = node.name().rfind('/', weights_pos - 2) + 1;
std::string cell_name =
node.name().substr(cell_pos, weights_pos - cell_pos - 1);
cluster = std::make_unique<SvdfCluster>();
cluster->SetName(cell_name);
cluster->SetDevice(node.device());
cluster->SetGraphDefInfo(&graph_def);
CHECK(cluster->FindClusterInputsAndOutputs());
for (const std::string& const_pattern : node_patterns) {
cluster->AddConstNodePattern(const_pattern);
}
}
}
return std::move(cluster);
}
} | #include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.h"
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/cluster_utils.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_cluster.h"
#include "tensorflow/lite/toco/toco_port.h"
using tensorflow::GraphDef;
using tensorflow::NodeDef;
namespace toco {
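// Fixture building a graph with two SVDF cells ("Svdf1" rank 1, "Svdf2"
// rank 2), each with partitioned weight constants and a Relu output.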
class ResolveSvdfTest : public ::testing::Test {
public:
ResolveSvdfTest() {
AddNewNode("Input1", "Const", {});
AddNewNode("Svdf1/SVDF_weights_feature/part_0", "Const", {},
{0.1, 0.2, 0.3});
AddNewNode("Svdf1/SVDF_weights_feature/part_0/read", "Identity",
{"Svdf1/SVDF_weights_feature/part_0"});
AddNewNode("Svdf1/SVDF_weights_time/part_0", "Const", {}, {0.1, 0.2, 0.3});
AddNewNode("Svdf1/SVDF_weights_time/part_0/read", "Identity",
{"Svdf1/SVDF_weights_time/part_0"});
AddNewNode("Svdf1/f1", "SVDF_F1",
{"Input1", "Svdf1/SVDF_weights_feature/part_0/read"});
AddNewNode("Svdf1/f2", "SVDF_F2",
{"Svdf1/SVDF_weights_time/part_0/read", "Svdf1/f1"});
AddNewNode("Svdf1/Relu", "Relu", {"Svdf1/f2"});
AddShapeNode("Svdf1/Reshape/shape", {10, 1, -1});
AddNewNode("Output1", "Const", {"Svdf1/Relu"});
AddNewNode("Input2", "Const", {});
AddNewNode("Svdf2/SVDF_weights_feature/part_0", "Const", {},
{0.1, 0.2, 0.3});
AddNewNode("Svdf2/SVDF_weights_feature/part_0/read", "Identity",
{"Svdf2/SVDF_weights_feature/part_0"});
AddNewNode("Svdf2/SVDF_weights_time/part_0", "Const", {}, {0.1, 0.2, 0.3});
AddNewNode("Svdf2/SVDF_weights_time/part_0/read", "Identity",
{"Svdf2/SVDF_weights_time/part_0"});
AddNewNode("Svdf2/f1", "SVDF_F1",
{"Input1", "Svdf2/SVDF_weights_feature/part_0/read"});
AddNewNode("Svdf2/f2", "SVDF_F2",
{"Svdf2/SVDF_weights_time/part_0/read", "Svdf2/f1"});
AddNewNode("Svdf2/Relu", "Relu", {"Svdf2/f2"});
AddShapeNode("Svdf2/Reshape/shape", {10, 2, -1});
AddNewNode("Output2", "Const", {"Svdf2/Relu"});
}
~ResolveSvdfTest() override {}
protected:
void AddNewNode(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs) {
NodeDef* node = graph_.add_node();
node->set_name(name);
node->set_op(op);
node->set_device("");
for (int i = 0; i < inputs.size(); i++) {
node->add_input();
node->set_input(i, inputs[i]);
}
}
void AddNewNode(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs,
const std::vector<float>& values) {
NodeDef* node = graph_.add_node();
node->set_name(name);
node->set_op(op);
node->set_device("");
for (int i = 0; i < inputs.size(); i++) {
node->add_input();
node->set_input(i, inputs[i]);
}
(*node->mutable_attr())["dtype"].set_type(tensorflow::DT_FLOAT);
tensorflow::TensorProto* allocated_tensor = new tensorflow::TensorProto;
tensorflow::TensorShapeProto* allocated_tensor_shape =
new tensorflow::TensorShapeProto;
auto tensor_shape_dim0 = allocated_tensor_shape->add_dim();
tensor_shape_dim0->set_size(values.size());
allocated_tensor->set_allocated_tensor_shape(allocated_tensor_shape);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(values.data()),
values.size() * sizeof(float)));
(*node->mutable_attr())["value"].set_allocated_tensor(allocated_tensor);
}
void AddShapeNode(const std::string& name, const std::vector<int>& values) {
NodeDef* node = graph_.add_node();
node->set_name(name);
node->set_op("Const");
node->set_device("");
(*node->mutable_attr())["dtype"].set_type(tensorflow::DT_INT32);
tensorflow::TensorProto* allocated_tensor = new tensorflow::TensorProto;
tensorflow::TensorShapeProto* allocated_tensor_shape =
new tensorflow::TensorShapeProto;
auto tensor_shape_dim0 = allocated_tensor_shape->add_dim();
tensor_shape_dim0->set_size(values.size());
allocated_tensor->set_allocated_tensor_shape(allocated_tensor_shape);
allocated_tensor->set_tensor_content(
std::string(reinterpret_cast<const char*>(values.data()),
values.size() * sizeof(int)));
(*node->mutable_attr())["value"].set_allocated_tensor(allocated_tensor);
}
GraphDef graph_;
SvdfClusterFactory svdf_cluster_factory_;
std::vector<std::unique_ptr<Cluster>> clusters_;
};
TEST_F(ResolveSvdfTest, TestTranspose2DTensor) {
static float matrix[] = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.};
static float expected_transposed_matrix[] = {1., 5., 9., 2., 6., 10.,
3., 7., 11., 4., 8., 12.};
float* transposed_matrix = new float[12];
Transpose2DTensor(matrix, 3, 4, transposed_matrix);
std::vector<float> actual;
actual.insert(
actual.end(), transposed_matrix,
transposed_matrix + sizeof(expected_transposed_matrix) / sizeof(float));
std::vector<float> expected;
expected.insert(expected.end(), expected_transposed_matrix,
expected_transposed_matrix +
                      sizeof(expected_transposed_matrix) / sizeof(float));
  // Restored assertion: without it this test builds `actual` and `expected`
  // but never compares them.
  EXPECT_THAT(actual, testing::ElementsAreArray(expected));
  delete[] transposed_matrix;
}
TEST_F(ResolveSvdfTest, TestResolveSvdfFlow) {
std::unordered_map<std::string, bool> is_node_in_cluster;
for (const NodeDef& node : graph_.node()) {
is_node_in_cluster[node.name()] = false;
}
std::vector<std::string> cluster_names;
CHECK(FindCluster(svdf_cluster_factory_, graph_, &is_node_in_cluster,
&clusters_));
for (const std::unique_ptr<Cluster>& cluster : clusters_) {
cluster_names.push_back(cluster->GetName());
cluster->CreateNodes();
}
EXPECT_THAT(cluster_names,
testing::UnorderedElementsAreArray({"Svdf1", "Svdf2"}));
std::vector<std::string> new_node_names;
std::vector<float> content_array(3);
for (const std::unique_ptr<Cluster>& cluster : clusters_) {
CHECK_EQ(cluster->GetNewNodes().size(), 3);
for (const std::unique_ptr<tensorflow::NodeDef>& node :
cluster->GetNewNodes()) {
new_node_names.push_back(node->name());
if (node->op() == "Const") {
CHECK_EQ(node->attr().at("dtype").type(), tensorflow::DT_FLOAT);
toco::port::CopyToBuffer(
node->attr().at("value").tensor().tensor_content(),
reinterpret_cast<char*>(content_array.data()));
EXPECT_THAT(content_array,
testing::UnorderedElementsAreArray({0.1, 0.2, 0.3}));
} else {
if (node->name() == "Svdf1") {
CHECK_EQ(node->attr().at("Rank").i(), 1);
} else if (node->name() == "Svdf2") {
CHECK_EQ(node->attr().at("Rank").i(), 2);
}
CHECK_EQ(node->attr().at("ActivationFunction").s(), "Relu");
}
}
}
EXPECT_THAT(new_node_names, testing::UnorderedElementsAreArray(
{"Svdf2/SVDF_weights_feature/part_0",
"Svdf2/SVDF_weights_time/part_0", "Svdf2",
"Svdf1/SVDF_weights_feature/part_0",
"Svdf1/SVDF_weights_time/part_0", "Svdf1"}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tensorflow_graph_matching/resolve_svdf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce739db8-d021-48f4-9f21-a73dda38b5ef | cpp | tensorflow/tensorflow | source_writer | tensorflow/java/src/gen/cc/source_writer.cc | tensorflow/java/src/gen/cc/source_writer_test.cc | #include "tensorflow/java/src/gen/cc/source_writer.h"
#include <algorithm>
#include <list>
#include <string>
#include "absl/log/check.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace java {
SourceWriter::SourceWriter() {
generic_namespaces_.push(new GenericNamespace());
}
SourceWriter::~SourceWriter() {
while (!generic_namespaces_.empty()) {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
}
SourceWriter& SourceWriter::Indent(int tab) {
left_margin_.resize(
std::max(static_cast<int>(left_margin_.size() + tab), 0), ' ');
return *this;
}
SourceWriter& SourceWriter::Prefix(const char* line_prefix) {
line_prefix_ = line_prefix;
return *this;
}
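// Writes str, splitting on '\n' so the margin and prefix are re-applied at
// the start of each new line.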
SourceWriter& SourceWriter::Write(const StringPiece& str) {
size_t line_pos = 0;
do {
size_t start_pos = line_pos;
line_pos = str.find('\n', start_pos);
if (line_pos != string::npos) {
++line_pos;
Append(str.substr(start_pos, line_pos - start_pos));
newline_ = true;
} else {
Append(str.substr(start_pos, str.size() - start_pos));
}
} while (line_pos != string::npos && line_pos < str.size());
return *this;
}
SourceWriter& SourceWriter::WriteFromFile(const string& fname, Env* env) {
string data_;
TF_CHECK_OK(ReadFileToString(env, fname, &data_));
return Write(data_);
}
SourceWriter& SourceWriter::Append(const StringPiece& str) {
if (!str.empty()) {
if (newline_) {
DoAppend(left_margin_ + line_prefix_);
newline_ = false;
}
DoAppend(str);
}
return *this;
}
SourceWriter& SourceWriter::AppendType(const Type& type) {
if (type.wildcard()) {
Append("?");
} else {
Append(type.name());
if (!type.parameters().empty()) {
Append("<");
bool first = true;
for (const Type& t : type.parameters()) {
if (!first) {
Append(", ");
}
AppendType(t);
first = false;
}
Append(">");
}
}
return *this;
}
SourceWriter& SourceWriter::EndLine() {
Append("\n");
newline_ = true;
return *this;
}
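// Opens a brace block (appending " {" or "<expression> {") and indents.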
SourceWriter& SourceWriter::BeginBlock(const string& expression) {
if (!expression.empty()) {
Append(expression + " {");
} else {
Append(newline_ ? "{" : " {");
}
return EndLine().Indent(2);
}
SourceWriter& SourceWriter::EndBlock() {
return Indent(-2).Append("}").EndLine();
}
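// Starts a method: collects generic types from the return and argument types,
// writes javadoc, annotations, and modifiers, then opens the body block.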
SourceWriter& SourceWriter::BeginMethod(const Method& method, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
if (!method.constructor()) {
generic_namespace->Visit(method.return_type());
}
for (const Variable& v : method.arguments()) {
generic_namespace->Visit(v.type());
}
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!method.annotations().empty()) {
WriteAnnotations(method.annotations());
}
WriteModifiers(modifiers);
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
Append(" ");
}
if (!method.constructor()) {
AppendType(method.return_type()).Append(" ");
}
Append(method.name()).Append("(");
bool first = true;
for (const Variable& v : method.arguments()) {
if (!first) {
Append(", ");
}
AppendType(v.type()).Append(v.variadic() ? "... " : " ").Append(v.name());
first = false;
}
return Append(")").BeginBlock();
}
SourceWriter& SourceWriter::EndMethod() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::BeginType(const Type& type, int modifiers,
const std::list<Type>* extra_dependencies,
const Javadoc* javadoc) {
if (!type.package().empty()) {
Append("package ").Append(type.package()).Append(";").EndLine();
}
TypeImporter type_importer(type.package());
type_importer.Visit(type);
if (extra_dependencies != nullptr) {
for (const Type& t : *extra_dependencies) {
type_importer.Visit(t);
}
}
if (!type_importer.imports().empty()) {
EndLine();
for (const string& s : type_importer.imports()) {
Append("import ").Append(s).Append(";").EndLine();
}
}
return BeginInnerType(type, modifiers, javadoc);
}
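// Starts a (possibly generic) class declaration, including its supertypes.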
SourceWriter& SourceWriter::BeginInnerType(const Type& type, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
generic_namespace->Visit(type);
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!type.annotations().empty()) {
WriteAnnotations(type.annotations());
}
WriteModifiers(modifiers);
CHECK_EQ(Type::Kind::CLASS, type.kind()) << ": Not supported yet";
Append("class ").Append(type.name());
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
}
if (!type.supertypes().empty()) {
bool first_interface = true;
for (const Type& t : type.supertypes()) {
if (t.kind() == Type::CLASS) {
Append(" extends ");
} else if (first_interface) {
Append(" implements ");
first_interface = false;
} else {
Append(", ");
}
AppendType(t);
}
}
return BeginBlock();
}
SourceWriter& SourceWriter::EndType() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::WriteField(const Variable& field, int modifiers,
const Javadoc* javadoc) {
if (javadoc != nullptr && !javadoc->brief().empty()) {
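    // NOTE: the javadoc brief text emitted here appears to have been stripped
    // along with comments; only the line break remains.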
Append("").EndLine();
}
WriteModifiers(modifiers);
AppendType(field.type()).Append(" ").Append(field.name()).Append(";");
EndLine();
return *this;
}
SourceWriter& SourceWriter::WriteModifiers(int modifiers) {
if (modifiers & PUBLIC) {
Append("public ");
} else if (modifiers & PROTECTED) {
Append("protected ");
} else if (modifiers & PRIVATE) {
Append("private ");
}
if (modifiers & STATIC) {
Append("static ");
}
if (modifiers & FINAL) {
Append("final ");
}
return *this;
}
SourceWriter& SourceWriter::WriteJavadoc(const Javadoc& javadoc) {
Append("").EndLine();
}
SourceWriter& SourceWriter::WriteAnnotations(
const std::list<Annotation>& annotations) {
for (const Annotation& a : annotations) {
Append("@" + a.name());
if (!a.attributes().empty()) {
Append("(").Append(a.attributes()).Append(")");
}
EndLine();
}
return *this;
}
SourceWriter& SourceWriter::WriteGenerics(
const std::list<const Type*>& generics) {
Append("<");
bool first = true;
for (const Type* pt : generics) {
if (!first) {
Append(", ");
}
Append(pt->name());
if (!pt->supertypes().empty()) {
Append(" extends ").AppendType(pt->supertypes().front());
}
first = false;
}
return Append(">");
}
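// Static scopes start a fresh generic namespace; non-static scopes inherit
// the generics of the enclosing scope.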
SourceWriter::GenericNamespace* SourceWriter::PushGenericNamespace(
int modifiers) {
GenericNamespace* generic_namespace;
if (modifiers & STATIC) {
generic_namespace = new GenericNamespace();
} else {
generic_namespace = new GenericNamespace(generic_namespaces_.top());
}
generic_namespaces_.push(generic_namespace);
return generic_namespace;
}
void SourceWriter::PopGenericNamespace() {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
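// Recursively visits a type, its parameters, annotations, and supertypes.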
void SourceWriter::TypeVisitor::Visit(const Type& type) {
DoVisit(type);
for (const Type& t : type.parameters()) {
Visit(t);
}
for (const Annotation& t : type.annotations()) {
DoVisit(t);
}
for (const Type& t : type.supertypes()) {
Visit(t);
}
}
void SourceWriter::GenericNamespace::DoVisit(const Type& type) {
if (type.kind() == Type::GENERIC && !type.wildcard() &&
generic_names_.find(type.name()) == generic_names_.end()) {
declared_types_.push_back(&type);
generic_names_.insert(type.name());
}
}
void SourceWriter::TypeImporter::DoVisit(const Type& type) {
if (!type.package().empty() && type.package() != current_package_) {
imports_.insert(type.canonical_name());
}
}
}
} | #include "tensorflow/java/src/gen/cc/source_writer.h"
#include <list>
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
namespace tensorflow {
namespace java {
namespace {
TEST(AppendTest, SingleLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Append("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, SingleLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Write("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\n --and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Basic) {
SourceBufferWriter writer;
writer.Append("You say goodbye").EndLine().Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Indent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!");
const char* expected = "You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndOutdent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Append("Hello, hello!");
const char* expected = "You say goodbye\n and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Prefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!");
const char* expected = "You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, PrefixAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n--and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndPrefixAndOutdentAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n --and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, NegativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(-10)
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, CumulativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(2)
.Append("Hello, hello!");
const char* expected =
"You say goodbye\n and I say hello!\n Hello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, EmptyPrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("")
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, BlocksAndLines) {
SourceBufferWriter writer;
writer.Append("int i = 0;").EndLine()
.Append("int j = 10;").EndLine()
.Append("if (true)")
.BeginBlock()
.Append("int aLongWayToTen = 0;").EndLine()
.Append("while (++i <= j)")
.BeginBlock()
.Append("++aLongWayToTen;").EndLine()
.EndBlock()
.EndBlock();
const char* expected =
"int i = 0;\n"
"int j = 10;\n"
"if (true) {\n"
" int aLongWayToTen = 0;\n"
" while (++i <= j) {\n"
" ++aLongWayToTen;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, Types) {
SourceBufferWriter writer;
Type generic = Type::Generic("T").add_supertype(Type::Class("Number"));
writer.AppendType(Type::Int())
.Append(", ")
.AppendType(Type::Class("String"))
.Append(", ")
.AppendType(generic)
.Append(", ")
.AppendType(Type::ListOf(generic))
.Append(", ")
.AppendType(Type::ListOf(Type::IterableOf(generic)))
.Append(", ")
.AppendType(Type::ListOf(Type::Wildcard()));
const char* expected =
"int, String, T, List<T>, List<Iterable<T>>, List<?>";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, FileSnippet) {
SourceBufferWriter writer;
const string fname = tensorflow::io::JoinPath(
tensorflow::testing::TensorFlowSrcRoot(),
"java/src/gen/resources/test.java.snippet");
writer.WriteFromFile(fname)
.BeginBlock()
.WriteFromFile(fname)
.EndBlock();
  // NOTE: two snippet comment lines were stripped out of this expected
  // literal; empty placeholder strings keep it syntactically well-formed.
  const char* expected =
      ""
      "System.out.println(\"Hello!\");\n"
      "{\n"
      ""
      "  System.out.println(\"Hello!\");\n"
      "}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClassWithDependencies) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
std::list<Type> deps;
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeB", "org.other"));
deps.push_back(Type::Class("SamePackageType", "org.tensorflow"));
deps.push_back(Type::Class("NoPackageType"));
writer.BeginType(clazz, PUBLIC, &deps).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"import org.other.TypeB;\n"
"import org.test.sub.TypeA;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, AnnotatedAndDocumentedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Javadoc clazz_doc = Javadoc::Create("Javadoc test")
.details("This is a\nmultiline description.");
clazz.add_annotation(Annotation::Create("Bean"));
clazz.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC, nullptr, &clazz_doc).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"\n"
"@Bean\n"
"@SuppressWarnings(\"rawtypes\")\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
clazz.add_parameter(Type::Generic("T"));
clazz.add_parameter(Type::Generic("U").add_supertype(Type::Class("Number")));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number> {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassAndSupertypes) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T");
clazz.add_parameter(type_t);
Type type_u = Type::Generic("U").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_u);
clazz.add_supertype(Type::Interface("Parameterizable").add_parameter(type_u));
clazz.add_supertype(Type::Interface("Runnable"));
clazz.add_supertype(Type::Class("SuperTest").add_parameter(type_t));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number>"
" extends SuperTest<T> implements Parameterizable<U>, Runnable {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassFields) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Variable field1 = Variable::Create("field1", Type::Class("String"));
Variable field2 = Variable::Create("field2", Type::Class("String"));
Variable field3 = Variable::Create("field3", type_t);
Javadoc field3_doc = Javadoc::Create("This variable is documented");
writer.BeginType(clazz, PUBLIC)
.WriteField(field1, STATIC | PUBLIC | FINAL)
.WriteField(field2, PRIVATE)
.WriteField(field3, PRIVATE, &field3_doc)
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" public static final String field1;\n"
" private String field2;\n"
" \n"
" private T field3;\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type inner_class = Type::Class("InnerTest");
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public class InnerTest {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, StaticParameterizedInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Type inner_class = Type::Class("InnerTest");
inner_class.add_parameter(type_t);
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC | STATIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static class InnerTest<T extends Number> {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, SimpleMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, AnnotatedAndDocumentedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
Javadoc method_doc =
Javadoc::Create("Javadoc test")
.details("This method has a\nmultiline description.");
method.add_annotation(Annotation::Create("Override"));
method.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" @Override\n"
" @SuppressWarnings(\"rawtypes\")\n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, DocumentedMethodWithArguments) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Variable reverse = Variable::Create("reverse", Type::Boolean());
Method method = Method::Create("boolToInt", Type::Int());
method.add_argument(Variable::Create("b", Type::Boolean()));
method.add_argument(reverse);
Javadoc method_doc =
Javadoc::Create("Converts a boolean to an int")
.details("This method will convert\na boolean to an int")
.add_param_tag(reverse.name(), "if true, value is reversed")
.add_tag("return", "int value for this boolean");
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.Append("if (b && !reverse)")
.BeginBlock()
.Append("return 1;")
.EndLine()
.EndBlock()
.Append("return 0;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" public int boolToInt(boolean b, boolean reverse) {\n"
" if (b && !reverse) {\n"
" return 1;\n"
" }\n"
" return 0;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, ParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, StaticParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC | STATIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static <T extends Number> T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/java/src/gen/cc/source_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/java/src/gen/cc/source_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1da53164-e7c9-403c-8625-eaec6fdf5874 | cpp | tensorflow/tensorflow | wav_to_spectrogram | tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc | tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc | #include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"
#include <vector>
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
using tensorflow::DT_FLOAT;
using tensorflow::DT_UINT8;
using tensorflow::Output;
using tensorflow::TensorShape;
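// Builds and runs a small graph: read WAV -> decode -> spectrogram -> scale
// by brightness -> clamp at 255 -> cast to uint8 -> encode PNG -> write file.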
tensorflow::Status WavToSpectrogram(const tensorflow::string& input_wav,
int32_t window_size, int32_t stride,
float brightness,
const tensorflow::string& output_image) {
auto root = tensorflow::Scope::NewRootScope();
using namespace tensorflow::ops;
Output file_reader =
tensorflow::ops::ReadFile(root.WithOpName("input_wav"), input_wav);
DecodeWav wav_decoder =
DecodeWav(root.WithOpName("wav_decoder"), file_reader);
Output spectrogram = AudioSpectrogram(root.WithOpName("spectrogram"),
wav_decoder.audio, window_size, stride);
Output brightness_placeholder =
Placeholder(root.WithOpName("brightness_placeholder"), DT_FLOAT,
Placeholder::Attrs().Shape(TensorShape({})));
Output mul = Mul(root.WithOpName("mul"), spectrogram, brightness_placeholder);
Output min_const = Const(root.WithOpName("min_const"), 255.0f);
Output min = Minimum(root.WithOpName("min"), mul, min_const);
Output cast = Cast(root.WithOpName("cast"), min, DT_UINT8);
Output expand_dims_const = Const(root.WithOpName("expand_dims_const"), -1);
Output expand_dims =
ExpandDims(root.WithOpName("expand_dims"), cast, expand_dims_const);
Output squeeze = Squeeze(root.WithOpName("squeeze"), expand_dims,
Squeeze::Attrs().Axis({0}));
Output png_encoder = EncodePng(root.WithOpName("png_encoder"), squeeze);
tensorflow::ops::WriteFile file_writer = tensorflow::ops::WriteFile(
root.WithOpName("output_image"), output_image, png_encoder);
tensorflow::GraphDef graph;
TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_RETURN_IF_ERROR(session->Create(graph));
tensorflow::Tensor brightness_tensor(DT_FLOAT, TensorShape({}));
brightness_tensor.scalar<float>()() = brightness;
TF_RETURN_IF_ERROR(
session->Run({{"brightness_placeholder", brightness_tensor}}, {},
{"output_image"}, nullptr));
return absl::OkStatus();
} | #include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/wav/wav_io.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
TEST(WavToSpectrogramTest, WavToSpectrogramTest) {
const tensorflow::string input_wav =
tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "input_wav.wav");
const tensorflow::string output_image = tensorflow::io::JoinPath(
tensorflow::testing::TmpDir(), "output_image.png");
float audio[8] = {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f};
tensorflow::string wav_string;
TF_ASSERT_OK(
tensorflow::wav::EncodeAudioAsS16LEWav(audio, 44100, 1, 8, &wav_string));
TF_ASSERT_OK(tensorflow::WriteStringToFile(tensorflow::Env::Default(),
input_wav, wav_string));
TF_ASSERT_OK(WavToSpectrogram(input_wav, 4, 4, 64.0f, output_image));
TF_EXPECT_OK(tensorflow::Env::Default()->FileExists(output_image));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
752addfa-8501-4302-a483-b6663f69ea03 | cpp | tensorflow/tensorflow | recognize_commands | tensorflow/examples/speech_commands/recognize_commands.cc | tensorflow/examples/speech_commands/recognize_commands_test.cc | #include "tensorflow/examples/speech_commands/recognize_commands.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
RecognizeCommands::RecognizeCommands(const std::vector<string>& labels,
int32_t average_window_duration_ms,
float detection_threshold,
int32_t suppression_ms,
int32_t minimum_count)
: labels_(labels),
average_window_duration_ms_(average_window_duration_ms),
detection_threshold_(detection_threshold),
suppression_ms_(suppression_ms),
minimum_count_(minimum_count) {
labels_count_ = labels.size();
previous_top_label_ = "_silence_";
previous_top_label_time_ = std::numeric_limits<int64_t>::min();
}
Status RecognizeCommands::ProcessLatestResults(const Tensor& latest_results,
const int64_t current_time_ms,
string* found_command,
float* score,
bool* is_new_command) {
if (latest_results.NumElements() != labels_count_) {
return errors::InvalidArgument(
"The results for recognition should contain ", labels_count_,
" elements, but there are ", latest_results.NumElements());
}
if ((!previous_results_.empty()) &&
(current_time_ms < previous_results_.front().first)) {
return errors::InvalidArgument(
"Results must be fed in increasing time order, but received a "
"timestamp of ",
current_time_ms, " that was earlier than the previous one of ",
previous_results_.front().first);
}
previous_results_.push_back({current_time_ms, latest_results});
const int64_t time_limit = current_time_ms - average_window_duration_ms_;
while (previous_results_.front().first < time_limit) {
previous_results_.pop_front();
}
const int64_t how_many_results = previous_results_.size();
const int64_t earliest_time = previous_results_.front().first;
const int64_t samples_duration = current_time_ms - earliest_time;
if ((how_many_results < minimum_count_) ||
(samples_duration < (average_window_duration_ms_ / 4))) {
*found_command = previous_top_label_;
*score = 0.0f;
*is_new_command = false;
return absl::OkStatus();
}
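  // Average the scores for each label across every result retained in the
  // window.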
std::vector<float> average_scores(labels_count_);
for (const auto& previous_result : previous_results_) {
const Tensor& scores_tensor = previous_result.second;
auto scores_flat = scores_tensor.flat<float>();
for (int i = 0; i < scores_flat.size(); ++i) {
average_scores[i] += scores_flat(i) / how_many_results;
}
}
std::vector<std::pair<int, float>> sorted_average_scores;
sorted_average_scores.reserve(labels_count_);
for (int i = 0; i < labels_count_; ++i) {
sorted_average_scores.push_back(
std::pair<int, float>({i, average_scores[i]}));
}
std::sort(sorted_average_scores.begin(), sorted_average_scores.end(),
[](const std::pair<int, float>& left,
const std::pair<int, float>& right) {
return left.second > right.second;
});
const int current_top_index = sorted_average_scores[0].first;
const string current_top_label = labels_[current_top_index];
const float current_top_score = sorted_average_scores[0].second;
int64_t time_since_last_top;
if ((previous_top_label_ == "_silence_") ||
(previous_top_label_time_ == std::numeric_limits<int64_t>::min())) {
time_since_last_top = std::numeric_limits<int64_t>::max();
} else {
time_since_last_top = current_time_ms - previous_top_label_time_;
}
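  // Report a new command only when the top label clears the detection
  // threshold, differs from the previously reported label, and lies outside
  // the suppression interval.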
if ((current_top_score > detection_threshold_) &&
(current_top_label != previous_top_label_) &&
(time_since_last_top > suppression_ms_)) {
previous_top_label_ = current_top_label;
previous_top_label_time_ = current_time_ms;
*is_new_command = true;
} else {
*is_new_command = false;
}
*found_command = current_top_label;
*score = current_top_score;
return absl::OkStatus();
}
} | #include "tensorflow/examples/speech_commands/recognize_commands.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(RecognizeCommandsTest, Basic) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"});
Tensor results(DT_FLOAT, {3});
test::FillValues<float>(&results, {1.0f, 0.0f, 0.0f});
string found_command;
float score;
bool is_new_command;
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, 0, &found_command, &score, &is_new_command));
}
TEST(RecognizeCommandsTest, FindCommands) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f);
Tensor results(DT_FLOAT, {3});
test::FillValues<float>(&results, {0.0f, 1.0f, 0.0f});
bool has_found_new_command = false;
string new_command;
for (int i = 0; i < 10; ++i) {
string found_command;
float score;
bool is_new_command;
int64_t current_time_ms = 0 + (i * 100);
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, current_time_ms, &found_command, &score, &is_new_command));
if (is_new_command) {
EXPECT_FALSE(has_found_new_command);
has_found_new_command = true;
new_command = found_command;
}
}
EXPECT_TRUE(has_found_new_command);
EXPECT_EQ("a", new_command);
test::FillValues<float>(&results, {0.0f, 0.0f, 1.0f});
has_found_new_command = false;
new_command = "";
for (int i = 0; i < 10; ++i) {
string found_command;
float score;
bool is_new_command;
int64_t current_time_ms = 1000 + (i * 100);
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, current_time_ms, &found_command, &score, &is_new_command));
if (is_new_command) {
EXPECT_FALSE(has_found_new_command);
has_found_new_command = true;
new_command = found_command;
}
}
EXPECT_TRUE(has_found_new_command);
EXPECT_EQ("b", new_command);
}
TEST(RecognizeCommandsTest, BadInputLength) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f);
Tensor bad_results(DT_FLOAT, {2});
test::FillValues<float>(&bad_results, {1.0f, 0.0f});
string found_command;
float score;
bool is_new_command;
EXPECT_FALSE(recognize_commands
.ProcessLatestResults(bad_results, 0, &found_command, &score,
&is_new_command)
.ok());
}
TEST(RecognizeCommandsTest, BadInputTimes) {
RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f);
Tensor results(DT_FLOAT, {3});
test::FillValues<float>(&results, {1.0f, 0.0f, 0.0f});
string found_command;
float score;
bool is_new_command;
TF_EXPECT_OK(recognize_commands.ProcessLatestResults(
results, 100, &found_command, &score, &is_new_command));
EXPECT_FALSE(recognize_commands
.ProcessLatestResults(results, 0, &found_command, &score,
&is_new_command)
.ok());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/speech_commands/recognize_commands.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/speech_commands/recognize_commands_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f08271a-5063-4bbc-afe0-338a7720baa9 | cpp | tensorflow/tensorflow | accuracy_utils | tensorflow/examples/speech_commands/accuracy_utils.cc | tensorflow/examples/speech_commands/accuracy_utils_test.cc | #include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include <fstream>
#include <iomanip>
#include <unordered_set>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
Status ReadGroundTruthFile(const string& file_name,
std::vector<std::pair<string, int64_t>>* result) {
std::ifstream file(file_name);
if (!file) {
return tensorflow::errors::NotFound("Ground truth file '", file_name,
"' not found.");
}
result->clear();
string line;
while (std::getline(file, line)) {
std::vector<string> pieces = tensorflow::str_util::Split(line, ',');
if (pieces.size() != 2) {
continue;
}
float timestamp;
    if (!tensorflow::strings::safe_strtof(pieces[1], &timestamp)) {
return tensorflow::errors::InvalidArgument(
"Wrong number format at line: ", line);
}
string label = pieces[0];
auto timestamp_int64 = static_cast<int64_t>(timestamp);
result->push_back({label, timestamp_int64});
}
std::sort(result->begin(), result->end(),
[](const std::pair<string, int64>& left,
const std::pair<string, int64>& right) {
return left.second < right.second;
});
return absl::OkStatus();
}
void CalculateAccuracyStats(
const std::vector<std::pair<string, int64_t>>& ground_truth_list,
const std::vector<std::pair<string, int64_t>>& found_words,
int64_t up_to_time_ms, int64_t time_tolerance_ms,
StreamingAccuracyStats* stats) {
int64_t latest_possible_time;
if (up_to_time_ms == -1) {
latest_possible_time = std::numeric_limits<int64_t>::max();
} else {
latest_possible_time = up_to_time_ms + time_tolerance_ms;
}
stats->how_many_ground_truth_words = 0;
for (const std::pair<string, int64_t>& ground_truth : ground_truth_list) {
const int64_t ground_truth_time = ground_truth.second;
if (ground_truth_time > latest_possible_time) {
break;
}
++stats->how_many_ground_truth_words;
}
stats->how_many_false_positives = 0;
stats->how_many_correct_words = 0;
stats->how_many_wrong_words = 0;
std::unordered_set<int64_t> has_ground_truth_been_matched;
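  // Match each found word to the earliest ground-truth entry within
  // +/- time_tolerance_ms; found words with no match count as false positives.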
for (const std::pair<string, int64_t>& found_word : found_words) {
const string& found_label = found_word.first;
const int64_t found_time = found_word.second;
const int64_t earliest_time = found_time - time_tolerance_ms;
const int64_t latest_time = found_time + time_tolerance_ms;
bool has_match_been_found = false;
for (const std::pair<string, int64_t>& ground_truth : ground_truth_list) {
const int64_t ground_truth_time = ground_truth.second;
if ((ground_truth_time > latest_time) ||
(ground_truth_time > latest_possible_time)) {
break;
}
if (ground_truth_time < earliest_time) {
continue;
}
const string& ground_truth_label = ground_truth.first;
if ((ground_truth_label == found_label) &&
(has_ground_truth_been_matched.count(ground_truth_time) == 0)) {
++stats->how_many_correct_words;
} else {
++stats->how_many_wrong_words;
}
has_ground_truth_been_matched.insert(ground_truth_time);
has_match_been_found = true;
break;
}
if (!has_match_been_found) {
++stats->how_many_false_positives;
}
}
stats->how_many_ground_truth_matched = has_ground_truth_been_matched.size();
}
void PrintAccuracyStats(const StreamingAccuracyStats& stats) {
if (stats.how_many_ground_truth_words == 0) {
LOG(INFO) << "No ground truth yet, " << stats.how_many_false_positives
<< " false positives";
} else {
float any_match_percentage =
(stats.how_many_ground_truth_matched * 100.0f) /
stats.how_many_ground_truth_words;
float correct_match_percentage = (stats.how_many_correct_words * 100.0f) /
stats.how_many_ground_truth_words;
float wrong_match_percentage = (stats.how_many_wrong_words * 100.0f) /
stats.how_many_ground_truth_words;
float false_positive_percentage =
(stats.how_many_false_positives * 100.0f) /
stats.how_many_ground_truth_words;
LOG(INFO) << std::setprecision(1) << std::fixed << any_match_percentage
<< "% matched, " << correct_match_percentage << "% correctly, "
<< wrong_match_percentage << "% wrongly, "
<< false_positive_percentage << "% false positives ";
}
}
} | #include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(AccuracyUtilsTest, ReadGroundTruthFile) {
string file_name = tensorflow::io::JoinPath(tensorflow::testing::TmpDir(),
"ground_truth.txt");
string file_data = "a,10\nb,12\n";
TF_ASSERT_OK(WriteStringToFile(Env::Default(), file_name, file_data));
std::vector<std::pair<string, int64_t>> ground_truth;
TF_ASSERT_OK(ReadGroundTruthFile(file_name, &ground_truth));
ASSERT_EQ(2, ground_truth.size());
EXPECT_EQ("a", ground_truth[0].first);
EXPECT_EQ(10, ground_truth[0].second);
EXPECT_EQ("b", ground_truth[1].first);
EXPECT_EQ(12, ground_truth[1].second);
}
TEST(AccuracyUtilsTest, CalculateAccuracyStats) {
StreamingAccuracyStats stats;
CalculateAccuracyStats({{"a", 1000}, {"b", 9000}},
{{"a", 1200}, {"b", 5000}, {"a", 8700}}, 10000, 500,
&stats);
EXPECT_EQ(2, stats.how_many_ground_truth_words);
EXPECT_EQ(2, stats.how_many_ground_truth_matched);
EXPECT_EQ(1, stats.how_many_false_positives);
EXPECT_EQ(1, stats.how_many_correct_words);
EXPECT_EQ(1, stats.how_many_wrong_words);
}
TEST(AccuracyUtilsTest, PrintAccuracyStats) {
StreamingAccuracyStats stats;
PrintAccuracyStats(stats);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/speech_commands/accuracy_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/speech_commands/accuracy_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
519dbf87-0273-4254-a829-e982aa632a86 | cpp | tensorflow/tensorflow | load | tensorflow/cc/experimental/libexport/load.cc | tensorflow/cc/experimental/libexport/load_test.cc | #include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace libexport {
using protobuf::RepeatedPtrField;
absl::StatusOr<TFPackage> TFPackage::Load(const std::string& path) {
TFPackage tf_package;
const string saved_model_pb_path = io::JoinPath(path, kSavedModelFilenamePb);
const string saved_model_pbtxt_path =
io::JoinPath(path, kSavedModelFilenamePbTxt);
if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
TF_RETURN_IF_ERROR(ReadBinaryProto(Env::Default(), saved_model_pb_path,
&tf_package.saved_model_proto_));
} else if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
TF_RETURN_IF_ERROR(ReadTextProto(Env::Default(), saved_model_pbtxt_path,
&tf_package.saved_model_proto_));
} else {
return Status(absl::StatusCode::kNotFound,
"Could not find SavedModel .pb or .pbtxt at supplied export "
"directory path: " +
path);
}
const std::string variables_dir =
tensorflow::io::JoinPath(path, tensorflow::kSavedModelVariablesDirectory);
if (Env::Default()->FileExists(variables_dir).ok()) {
tf_package.has_checkpoint_ = true;
tf_package.variables_filepath_ = tensorflow::io::JoinPath(
variables_dir, tensorflow::kSavedModelVariablesFilename);
tf_package.variable_reader_ = std::make_unique<tensorflow::BundleReader>(
tensorflow::Env::Default(), tf_package.variables_filepath_);
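    // The serialized TrackableObjectGraph lives in the checkpoint under a
    // well-known key; look it up and parse it alongside the variable data.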
tensorflow::Tensor object_graph_tensor;
TF_RETURN_IF_ERROR(tf_package.variable_reader_->Lookup(
tensorflow::kObjectGraphProtoKey, &object_graph_tensor));
const auto* object_graph_string =
reinterpret_cast<const tensorflow::tstring*>(
object_graph_tensor.tensor_data().data());
tf_package.trackable_object_graph_.ParseFromString(*object_graph_string);
} else {
tf_package.has_checkpoint_ = false;
LOG(INFO)
<< "No checkpoint found, assuming this is a program-only SavedModel";
}
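  // Index GraphDef nodes by name so GetGraphDefNode can answer lookups in
  // constant time.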
const auto& nodes =
tf_package.saved_model_proto_.meta_graphs(0).graph_def().node();
for (const auto& node : nodes) {
tf_package.graph_def_nodes_by_name_[node.name()] = &node;
}
return tf_package;
}
absl::StatusOr<std::string> TFPackage::GetVariableCheckpointKey(int index) {
const auto& trackable_object = trackable_object_graph_.nodes(index);
const TrackableObjectGraph::TrackableObject::SerializedTensor*
serialized_tensor = nullptr;
for (auto& maybe_serialized_tensor : trackable_object.attributes()) {
if (maybe_serialized_tensor.name() == "VARIABLE_VALUE") {
serialized_tensor = &maybe_serialized_tensor;
}
}
if (serialized_tensor == nullptr) {
return tensorflow::Status(absl::StatusCode::kInternal,
"Failed to find variable value field.");
}
return serialized_tensor->checkpoint_key();
}
const SavedObjectGraph& TFPackage::GetObjectGraph() {
return saved_model_proto_.mutable_meta_graphs(0)->object_graph_def();
}
absl::StatusOr<const tensorflow::NodeDef*> TFPackage::GetGraphDefNode(
std::string name) {
const auto& iter = graph_def_nodes_by_name_.find(name);
if (iter == graph_def_nodes_by_name_.end()) {
return tensorflow::Status(absl::StatusCode::kInternal,
absl::StrCat("Failed to find node named ", name));
}
return iter->second;
}
const RepeatedPtrField<FunctionDef>& TFPackage::GetFunctionDefs() {
auto& function_library =
saved_model_proto_.mutable_meta_graphs(0)->graph_def().library();
return function_library.function();
}
}
} | #include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
TEST(LoadTest, TestDiskSavedModelLoad) {
absl::StatusOr<TFPackage> result = TFPackage::Load("test");
EXPECT_FALSE(result.status().ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/load.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/load_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a0d1112-f66c-4f43-aa09-131a59dde473 | cpp | tensorflow/tensorflow | save | tensorflow/cc/experimental/libexport/save.cc | tensorflow/cc/experimental/libexport/save_test.cc | #include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace libexport {
Status Save(const std::string& export_dir) {
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(export_dir));
return absl::OkStatus();
}
}
} | #include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
TEST(SaveTest, TestDirectoryStructure) {
const string base_dir = tensorflow::io::JoinPath(
tensorflow::testing::TmpDir(), "test_directory_structure");
TF_ASSERT_OK(Save(base_dir));
TF_ASSERT_OK(Env::Default()->IsDirectory(base_dir));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c19b9ed-d84d-4088-8f01-1a38749b11b0 | cpp | tensorflow/tensorflow | freeze_saved_model | tensorflow/cc/tools/freeze_saved_model.cc | tensorflow/cc/tools/freeze_saved_model_test.cc | #include "tensorflow/cc/tools/freeze_saved_model.h"
#include <iostream>
#include <queue>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
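// Expands a TensorInfo into the concrete tensor names it refers to: the three
// component tensors of a COO sparse tensor, every component of a composite
// tensor, or the plain tensor name otherwise.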
void GetTensorNamesFromTensorInfo(const TensorInfo& tensor_info,
std::unordered_set<string>* tensor_names) {
if (tensor_info.has_coo_sparse()) {
const TensorInfo_CooSparse& coo_sparse = tensor_info.coo_sparse();
tensor_names->insert(coo_sparse.values_tensor_name());
tensor_names->insert(coo_sparse.indices_tensor_name());
tensor_names->insert(coo_sparse.dense_shape_tensor_name());
} else if (tensor_info.has_composite_tensor()) {
for (const auto& component : tensor_info.composite_tensor().components()) {
tensor_names->insert(component.name());
}
} else {
tensor_names->insert(tensor_info.name());
}
}
void GetSignatureDefsInputsAndOutputs(
const SavedModelBundle& saved_model_bundle,
std::unordered_set<string>* inputs, std::unordered_set<string>* outputs) {
for (auto& sigdef_elem : saved_model_bundle.meta_graph_def.signature_def()) {
const SignatureDef& signature_def = sigdef_elem.second;
for (auto& input_elem : signature_def.inputs()) {
GetTensorNamesFromTensorInfo(input_elem.second, inputs);
}
for (auto& output_elem : signature_def.outputs()) {
GetTensorNamesFromTensorInfo(output_elem.second, outputs);
}
}
}
void GetNodeNameToNodeDefMap(
GraphDef* graph_def,
std::unordered_map<string, NodeDef*>* name_to_node_map) {
for (size_t i = 0; i < graph_def->node_size(); i++) {
NodeDef* node = graph_def->mutable_node(i);
(*name_to_node_map)[node->name()] = node;
}
}
const string GetNodeNameFromTensorName(string tensor_name) {
if (tensor_name[0] == '^') {
tensor_name.erase(0, 1);
}
std::vector<string> tensor_name_parts = str_util::Split(tensor_name, ':');
return tensor_name_parts[0];
}
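// Breadth-first traversal from the output tensors, collecting the names of all
// reachable nodes and recording which of them are variable ops.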
void GetReachableNodesAndVariables(
GraphDef* graph_def, const std::unordered_set<string>& outputs,
const std::unordered_map<string, NodeDef*>& name_to_node_map,
std::unordered_set<string>* reachable_node_names,
std::unordered_set<string>* variable_node_names) {
static const std::unordered_set<string>* kVariableTypes =
new std::unordered_set<string>({"Variable", "VariableV2", "VarHandleOp"});
std::queue<string> nodes_to_visit;
for (const string& output_tensor_name : outputs) {
nodes_to_visit.push(GetNodeNameFromTensorName(output_tensor_name));
}
while (!nodes_to_visit.empty()) {
const string node_name = nodes_to_visit.front();
nodes_to_visit.pop();
if (reachable_node_names->find(node_name) != reachable_node_names->end()) {
continue;
}
reachable_node_names->insert(node_name);
NodeDef* node = name_to_node_map.at(node_name);
if (kVariableTypes->find(node->op()) != kVariableTypes->end()) {
variable_node_names->insert(node->name());
}
for (const string& input_tensor_name : node->input()) {
nodes_to_visit.push(GetNodeNameFromTensorName(input_tensor_name));
}
}
}
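// Fetches the current value of every variable in variable_names_set by running
// the session; resource variables are read through their generated
// "/Read/ReadVariableOp" tensor.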
Status GetVariableNameToTensorMap(
Session* session,
const std::unordered_map<string, NodeDef*>& name_to_node_map,
std::unordered_set<string> variable_names_set,
std::unordered_map<string, Tensor>* variable_name_to_value_map) {
if (variable_names_set.empty()) {
return absl::OkStatus();
}
std::vector<string> variable_names;
variable_names.reserve(variable_names_set.size());
std::vector<string> tensor_names;
tensor_names.reserve(variable_names_set.size());
for (const string& node_name : variable_names_set) {
variable_names.push_back(node_name);
NodeDef* node_def = name_to_node_map.at(node_name);
if (node_def->op() == "VarHandleOp") {
tensor_names.push_back(node_name + "/Read/ReadVariableOp:0");
} else {
tensor_names.push_back(node_name + ":0");
}
}
std::vector<Tensor> outputs;
  TF_RETURN_IF_ERROR(session->Run(/*inputs=*/{},
                                  /*output_tensor_names=*/tensor_names,
                                  /*target_node_names=*/{}, &outputs));
for (size_t i = 0; i < variable_names.size(); i++) {
(*variable_name_to_value_map)[variable_names[i]] = outputs[i];
}
return absl::OkStatus();
}
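// Rewrites a variable node as a Const node holding the variable's current
// value, keeping the node name so existing inputs remain valid.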
void ConvertVariableToConstant(const NodeDef& variable_node,
const Tensor& variable_value,
NodeDef* const_node) {
const_node->set_name(variable_node.name());
const_node->set_op("Const");
(*const_node->mutable_attr())["dtype"] = variable_node.attr().at("dtype");
variable_value.AsProtoTensorContent(
(*const_node->mutable_attr())["value"].mutable_tensor());
}
void ConvertReadVariableOpToIdentity(const NodeDef& node,
NodeDef* identity_node) {
identity_node->set_name(node.name());
identity_node->set_op("Identity");
(*identity_node->mutable_attr())["T"] = node.attr().at("dtype");
identity_node->add_input(node.input(0));
}
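// Follows a chain of Identity nodes upward from node_name and returns the name
// of the VarHandleOp that ultimately feeds it, if one exists.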
StatusOr<string> GetVarHandleName(
const std::unordered_map<string, NodeDef*>& name_to_node_map,
string node_name) {
const NodeDef* node = name_to_node_map.at(node_name);
while (node->input_size() > 0) {
auto parent = name_to_node_map.find(node->input(0));
if (parent == name_to_node_map.end()) break;
node = parent->second;
if (node->op() != "Identity") {
VLOG(2) << "Stopping at non-identity node " << node->op();
break;
}
}
if (node->op() == "VarHandleOp") {
return node->name();
}
return absl::NotFoundError("No VarHandleOp ancestor found");
}
StatusOr<string> GetHandleNameIfNeedsToFreeze(
const std::unordered_map<string, NodeDef*>& name_to_node_map,
string node_name, const std::unordered_set<string>& variable_node_names) {
StatusOr<string> var_handle_name =
GetVarHandleName(name_to_node_map, node_name);
if (var_handle_name.ok() && variable_node_names.count(*var_handle_name)) {
return var_handle_name;
}
return absl::NotFoundError("No VarHandleOp ancestor found");
}
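// Copies the reachable part of the graph into frozen_graph_def, converting
// variables to constants, variable reads to Identity ops, and retyping
// Identity nodes that forwarded resource handles to the variable's dtype.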
Status FreezeGraphDef(const SavedModelBundle& saved_model_bundle,
const std::unordered_set<string>& outputs,
GraphDef* frozen_graph_def) {
GraphDef graph_def = saved_model_bundle.meta_graph_def.graph_def();
*frozen_graph_def->mutable_versions() = graph_def.versions();
*frozen_graph_def->mutable_library() = graph_def.library();
if (graph_def.node_size() == 0) {
return absl::OkStatus();
}
std::unordered_map<string, NodeDef*> name_to_node_map;
GetNodeNameToNodeDefMap(&graph_def, &name_to_node_map);
std::unordered_set<string> reachable_node_names;
std::unordered_set<string> variable_node_names;
GetReachableNodesAndVariables(&graph_def, outputs, name_to_node_map,
&reachable_node_names, &variable_node_names);
std::unordered_map<string, Tensor> variable_to_value_map;
TF_RETURN_IF_ERROR(GetVariableNameToTensorMap(
saved_model_bundle.session.get(), name_to_node_map, variable_node_names,
&variable_to_value_map));
for (const NodeDef& node : graph_def.node()) {
if (reachable_node_names.find(node.name()) == reachable_node_names.end()) {
continue;
}
if (variable_node_names.find(node.name()) != variable_node_names.end()) {
ConvertVariableToConstant(node, variable_to_value_map[node.name()],
frozen_graph_def->add_node());
continue;
} else if (node.op() == "ReadVariableOp" &&
GetHandleNameIfNeedsToFreeze(name_to_node_map, node.name(),
variable_node_names)
.ok()) {
ConvertReadVariableOpToIdentity(node, frozen_graph_def->add_node());
continue;
} else if (node.op() == "Identity") {
StatusOr<string> handle_name = GetHandleNameIfNeedsToFreeze(
name_to_node_map, node.name(), variable_node_names);
if (handle_name.ok()) {
NodeDef* new_node = frozen_graph_def->add_node();
*new_node = node;
(*new_node->mutable_attr())["T"] =
name_to_node_map.at(*handle_name)->attr().at("dtype");
continue;
}
}
*frozen_graph_def->add_node() = node;
}
return absl::OkStatus();
}
}
Status FreezeSavedModel(const SavedModelBundle& saved_model_bundle,
GraphDef* frozen_graph_def,
std::unordered_set<string>* inputs,
std::unordered_set<string>* outputs) {
GetSignatureDefsInputsAndOutputs(saved_model_bundle, inputs, outputs);
TF_RETURN_IF_ERROR(
FreezeGraphDef(saved_model_bundle, *outputs, frozen_graph_def));
return absl::OkStatus();
}
} | #include "tensorflow/cc/tools/freeze_saved_model.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/state_ops.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
class FreezeTest : public ::testing::Test {
protected:
void GraphDefEqual(const GraphDef& actual, const GraphDef& expected) {
EXPECT_EQ(actual.ShortDebugString(), expected.ShortDebugString());
}
SignatureDef BuildSignatureDef(const std::unordered_set<string>& inputs,
const std::unordered_set<string>& outputs) {
SignatureDef signature_def;
for (const string& input : inputs) {
(*signature_def.mutable_inputs())[input].set_name(input);
}
for (const string& output : outputs) {
(*signature_def.mutable_outputs())[output].set_name(output);
}
return signature_def;
}
void AddSignatureDefToSavedModelBundle(const SignatureDef& signature_def,
const string& key,
SavedModelBundle* saved_model_bundle) {
MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
(*meta_graph_def->mutable_signature_def())[key] = signature_def;
}
Status InitializeSavedModelBundleSession(
const GraphDef& graph_def, const string& init_node,
SavedModelBundle* saved_model_bundle) {
SessionOptions session_options;
saved_model_bundle->session.reset(NewSession(session_options));
TF_RETURN_IF_ERROR(saved_model_bundle->session->Create(graph_def));
if (!init_node.empty()) {
std::vector<Tensor> outputs;
      return saved_model_bundle->session->Run(
          /*inputs=*/{}, /*output_tensor_names=*/{},
          /*target_node_names=*/{init_node}, &outputs);
}
return absl::OkStatus();
}
Status AddGraphDefToSavedModelBundle(const GraphDef& graph_def,
const string& init_node,
SavedModelBundle* saved_model_bundle) {
MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
*meta_graph_def->mutable_graph_def() = graph_def;
return InitializeSavedModelBundleSession(graph_def, init_node,
saved_model_bundle);
}
Status AddGraphDefWithOutputsToSavedModelBundle(
const GraphDef& graph_def, const std::unordered_set<string>& outputs,
const string& init_node, SavedModelBundle* saved_model_bundle) {
SignatureDef signature_def =
BuildSignatureDef(std::unordered_set<string>(), outputs);
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
saved_model_bundle);
return AddGraphDefToSavedModelBundle(graph_def, init_node,
saved_model_bundle);
}
void RunAndCompareFrozenAndUnfrozenGraphs(Session* unfrozen_session,
const GraphDef& frozen_graph_def,
const string& tensor_name) {
std::vector<Tensor> unfrozen_outputs;
    TF_ASSERT_OK(unfrozen_session->Run(/*inputs=*/{},
                                       /*output_tensor_names=*/{tensor_name},
                                       /*target_node_names=*/{},
                                       &unfrozen_outputs));
SessionOptions session_options;
std::unique_ptr<Session> frozen_session(NewSession(session_options));
TF_ASSERT_OK(frozen_session->Create(frozen_graph_def));
std::vector<Tensor> frozen_outputs;
    TF_ASSERT_OK(frozen_session->Run(/*inputs=*/{},
                                     /*output_tensor_names=*/{tensor_name},
                                     /*target_node_names=*/{},
                                     &frozen_outputs));
test::ExpectTensorEqual<float>(unfrozen_outputs[0], frozen_outputs[0]);
}
void TestFreezeGraphWithoutDependentVariables(bool use_resource) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
Output read_var = ops::ReadVariableOp(
scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
} else {
Output var =
ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), var, a);
}
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
GraphDef expected_graph_def;
Scope expected_scope = Scope::NewRootScope();
Output expected_a = ops::Const(expected_scope.WithOpName("a"), 10.0f, {});
Output expected_b = ops::Const(expected_scope.WithOpName("b"), 10.0f, {});
Output expected_c =
ops::Mul(expected_scope.WithOpName("c"), expected_a, expected_b);
TF_ASSERT_OK(expected_scope.ToGraphDef(&expected_graph_def));
GraphDefEqual(frozen_graph_def, expected_graph_def);
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
void TestFreezeGraphWithDependentVariables(bool use_resource,
bool use_identity = false) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output read_var;
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
if (use_identity) {
Output identity = ops::Identity(scope.WithOpName("identity"), var);
read_var =
ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
identity, DataType::DT_FLOAT);
} else {
read_var =
ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
var, DataType::DT_FLOAT);
}
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
} else {
      read_var =
          ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
}
Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
size_t expected_nodes = use_resource ? (use_identity ? 5 : 4) : 3;
EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
for (const NodeDef& node : frozen_graph_def.node()) {
EXPECT_NE(node.op(), "Variable") << node.name();
EXPECT_NE(node.op(), "VariableV2") << node.name();
EXPECT_NE(node.op(), "VarHandleOp") << node.name();
EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
}
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
void TestFreezeGraphWithAndWithoutDependentVariables(bool use_resource) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output read_var;
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
read_var = ops::ReadVariableOp(
scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
Output var_1 =
ops::VarHandleOp(scope.WithOpName("var_1"), DataType::DT_FLOAT, {});
Output read_var_1 =
ops::ReadVariableOp(scope.WithOpName("var_1/Read/ReadVariableOp"),
var, DataType::DT_FLOAT);
auto assign_1 =
ops::AssignVariableOp(scope.WithOpName("assign_1"), var_1, a);
} else {
read_var = ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
Output var_1 =
ops::Variable(scope.WithOpName("var_1"), {}, DataType::DT_FLOAT);
Output assign_1 = ops::Assign(scope.WithOpName("assign_1"), var_1, a);
}
Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
size_t expected_nodes = use_resource ? 4 : 3;
EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
for (const NodeDef& node : frozen_graph_def.node()) {
EXPECT_NE(node.op(), "Variable") << node.name();
EXPECT_NE(node.op(), "VariableV2") << node.name();
EXPECT_NE(node.op(), "VarHandleOp") << node.name();
EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
}
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
};
TEST_F(FreezeTest, InputsAndOutputsSingleSignatureDef) {
SavedModelBundle saved_model_bundle;
std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
SignatureDef signature_def =
BuildSignatureDef(expected_inputs, expected_outputs);
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, InputsAndOutputsMultipleSignatureDefs) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def_0 = BuildSignatureDef({"input0:0"}, {"output0:0"});
SignatureDef signature_def_1 = BuildSignatureDef({"input1:0"}, {"output1:0"});
AddSignatureDefToSavedModelBundle(signature_def_0, "signature_def_0",
&saved_model_bundle);
AddSignatureDefToSavedModelBundle(signature_def_1, "signature_def_1",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, GraphDefVersionsAndLibrary) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(1234);
graph_def.mutable_versions()->set_min_consumer(1234);
*graph_def.mutable_library()->add_function() = test::function::NonZero();
TF_ASSERT_OK(
AddGraphDefToSavedModelBundle(graph_def, "", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithNoVariables) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithMultiOutputOperation) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), {10.0f, 10.0f}, {2});
Output axis = ops::Const(scope.WithOpName("axis"), 0, {});
OutputList split = ops::Split(scope.WithOpName("split"), axis, a, 2).output;
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), split[1], b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithControlDependency) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output source = ops::Const(scope.WithOpName("source"), 10.0f, {});
Output a = ops::Const(scope.WithOpName("a").WithControlDependencies(source),
{10.0f, 10.0f}, {2});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithoutDependentVariables) {
TestFreezeGraphWithoutDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithoutDependentResourceVariables) {
TestFreezeGraphWithoutDependentVariables(true);
}
TEST_F(FreezeTest, GraphDefWithDependentVariables) {
TestFreezeGraphWithDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithDependentResourceVariables) {
TestFreezeGraphWithDependentVariables(true);
}
TEST_F(FreezeTest, GraphDefWithDependentResourceVariablesAndIdentity) {
TestFreezeGraphWithDependentVariables(true, true);
}
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentVariables) {
TestFreezeGraphWithAndWithoutDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentResourceVariables) {
TestFreezeGraphWithAndWithoutDependentVariables(true);
}
TEST_F(FreezeTest, InputsAndOutputsCompositeTensorSignatureDef) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def;
TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
in.mutable_composite_tensor()->add_components()->set_name("input1:0");
in.mutable_composite_tensor()->add_components()->set_name("input2:0");
TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
out.mutable_composite_tensor()->add_components()->set_name("output2:0");
out.mutable_composite_tensor()->add_components()->set_name("output1:0");
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input1:0", "input2:0"};
std::unordered_set<string> expected_outputs = {"output1:0", "output2:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, InputsAndOutputsSparseCooSignatureDef) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def;
TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
in.mutable_coo_sparse()->set_values_tensor_name("input1:0");
in.mutable_coo_sparse()->set_indices_tensor_name("input2:0");
in.mutable_coo_sparse()->set_dense_shape_tensor_name("input3:0");
TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
out.mutable_coo_sparse()->set_values_tensor_name("output1:0");
out.mutable_coo_sparse()->set_indices_tensor_name("output2:0");
out.mutable_coo_sparse()->set_dense_shape_tensor_name("output3:0");
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input1:0", "input2:0",
"input3:0"};
std::unordered_set<string> expected_outputs = {"output1:0", "output2:0",
"output3:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/tools/freeze_saved_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/tools/freeze_saved_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ac1b344-4708-4ddf-93cf-5d8ed5df8d87 | cpp | tensorflow/tensorflow | reader | tensorflow/cc/saved_model/reader.cc | tensorflow/cc/saved_model/reader_test.cc | #include "tensorflow/cc/saved_model/reader.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#define IS_OSS true
namespace tensorflow {
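// Returns the MetaGraphDef whose tag set matches the supplied tags exactly,
// byte-swapping tensor content first on big-endian hosts.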
absl::StatusOr<MetaGraphDef*> FindMetaGraphDef(
const std::unordered_set<string>& tags, SavedModel* saved_model_proto) {
LOG(INFO) << "Reading meta graph with tags { " << absl::StrJoin(tags, " ")
<< " }";
for (MetaGraphDef& graph_def : *saved_model_proto->mutable_meta_graphs()) {
std::unordered_set<string> graph_tags;
for (const string& tag : graph_def.meta_info_def().tags()) {
graph_tags.insert(tag);
}
if (graph_tags == tags) {
MetaGraphDef* meta_graph_def = &graph_def;
if (!port::kLittleEndian) {
TF_RETURN_IF_ERROR(ByteSwapTensorContentInMetaGraphDef(meta_graph_def));
}
return meta_graph_def;
}
}
return Status(
absl::StatusCode::kNotFound,
strings::StrCat(
"Could not find meta graph def matching supplied tags: { ",
absl::StrJoin(tags, " "),
" }. To inspect available tag-sets in the SavedModel, please "
"use the SavedModel CLI: `saved_model_cli`"));
}
Status ReadSavedModel(absl::string_view export_dir,
SavedModel* saved_model_proto) {
LOG(INFO) << "Reading SavedModel from: " << export_dir;
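  // Prefer the binary saved_model.pb; fall back to the text-format
  // saved_model.pbtxt when the binary file is absent.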
if (IS_OSS) {
const std::string saved_model_pb_path =
io::JoinPath(export_dir, kSavedModelFilenamePb);
TF_ASSIGN_OR_RETURN(
bool saved_model_pb_exists,
internal::FileExists(Env::Default(), saved_model_pb_path));
if (saved_model_pb_exists) {
Status result = ReadBinaryProto(Env::Default(), saved_model_pb_path,
saved_model_proto);
if (result.ok()) {
metrics::SavedModelReadCount(
saved_model::GetWriteVersion(*saved_model_proto))
.IncrementBy(1);
}
return result;
}
}
const std::string saved_model_pbtxt_path =
io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
auto saved_model_pbtxt_exists =
internal::FileExists(Env::Default(), saved_model_pbtxt_path);
if (saved_model_pbtxt_exists.value_or(false)) {
Status result = ReadTextProto(Env::Default(), saved_model_pbtxt_path,
saved_model_proto);
if (result.ok()) {
metrics::SavedModelReadCount(
saved_model::GetWriteVersion(*saved_model_proto))
.IncrementBy(1);
}
return result;
}
if (!IS_OSS) {
}
return Status(
absl::StatusCode::kNotFound,
strings::StrCat("Could not find SavedModel .pb or .pbtxt at supplied "
"export directory path: ",
export_dir,
". Check that "
"the directory exists and that you have the right "
"permissions for accessing it."));
}
Status ReadMetaGraphDefFromSavedModel(absl::string_view export_dir,
const std::unordered_set<string>& tags,
MetaGraphDef* const meta_graph_def) {
SavedModel saved_model_proto;
TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
TF_ASSIGN_OR_RETURN(MetaGraphDef * m,
FindMetaGraphDef(tags, &saved_model_proto));
*meta_graph_def = std::move(*m);
return absl::OkStatus();
}
Status ReadSavedModelDebugInfoIfPresent(
absl::string_view export_dir,
std::unique_ptr<GraphDebugInfo>* debug_info_proto) {
LOG(INFO) << "Reading SavedModel debug info (if present) from: "
<< export_dir;
const string debug_info_pb_path =
io::JoinPath(export_dir, "debug", "saved_model_debug_info.pb");
TF_ASSIGN_OR_RETURN(bool debug_info_pb_exists,
internal::FileExists(Env::Default(), debug_info_pb_path));
if (debug_info_pb_exists) {
GraphDebugInfo debug_info;
TF_RETURN_IF_ERROR(
ReadBinaryProto(Env::Default(), debug_info_pb_path, &debug_info));
*debug_info_proto = std::make_unique<GraphDebugInfo>(std::move(debug_info));
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/saved_model/reader.h"
#include <gmock/gmock.h>
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
namespace tensorflow {
namespace {
string TestDataPbTxt() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"half_plus_two_pbtxt", "00000123");
}
string TestDataSharded() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"half_plus_two", "00000123");
}
string ChunkedSavedModel() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"chunked_saved_model", "chunked_model");
}
string NonChunkedSavedModel() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"chunked_saved_model", "non_chunked_model");
}
class ReaderTest : public ::testing::Test {
protected:
ReaderTest() {}
void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {
const auto& tags = meta_graph_def.meta_info_def().tags();
EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=
tags.end());
EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), "");
EXPECT_EQ(
meta_graph_def.signature_def().at("serving_default").method_name(),
"tensorflow/serving/predict");
}
};
TEST_F(ReaderTest, TagMatch) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
&meta_graph_def));
CheckMetaGraphDef(meta_graph_def);
}
TEST_F(ReaderTest, NoTagMatch) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"},
&meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.message(),
"Could not find meta graph def matching supplied tags: { missing-tag }"))
<< st.message();
}
TEST_F(ReaderTest, NoTagMatchMultiple) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st = ReadMetaGraphDefFromSavedModel(
export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.message(), "Could not find meta graph def matching supplied tags: "))
<< st.message();
}
TEST_F(ReaderTest, InvalidExportPath) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath("missing-path");
Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
&meta_graph_def);
EXPECT_FALSE(st.ok());
}
TEST_F(ReaderTest, ReadSavedModelDebugInfoIfPresent) {
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
std::unique_ptr<GraphDebugInfo> debug_info_proto;
TF_ASSERT_OK(ReadSavedModelDebugInfoIfPresent(export_dir, &debug_info_proto));
}
TEST_F(ReaderTest, MetricsNotUpdatedFailedRead) {
MetaGraphDef meta_graph_def;
const int read_count_v1 = metrics::SavedModelReadCount("1").value();
const int read_count_v2 = metrics::SavedModelReadCount("2").value();
const string export_dir = GetDataDependencyFilepath("missing-path");
Status st =
ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1);
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count_v2);
}
TEST_F(ReaderTest, MetricsUpdatedSuccessfulRead) {
MetaGraphDef meta_graph_def;
const int read_count_v1 = metrics::SavedModelReadCount("1").value();
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st =
ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def);
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1 + 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
15e3231c-49f1-4370-965b-db6587959c9c | cpp | tensorflow/tensorflow | fingerprinting_utils | tensorflow/cc/saved_model/fingerprinting_utils.cc | tensorflow/cc/saved_model/fingerprinting_utils_test.cc | #include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/merge.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using tools::proto_splitter::Field;
using tools::proto_splitter::FieldType;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetFieldTypes;
using tools::proto_splitter::GetMutableField;
using tools::proto_splitter::GetRiegeliReader;
using tools::proto_splitter::Merger;
using tools::proto_splitter::MutableFieldResult;
using tools::proto_splitter::ReadChunk;
namespace fingerprinting_utils_internal {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
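// Returns the length of the initial subsequence of `a` that matches `b`,
// comparing field numbers, repeated-field indices, and map keys. The
// `i == matches` loop condition stops the scan at the first mismatch.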
absl::StatusOr<int> fieldTagMatches(const RepeatedPtrField<FieldIndex>& a,
const RepeatedPtrField<FieldIndex>& b) {
int matches = 0;
for (int i = 0; i == matches && i < a.size() && i < b.size(); i++) {
switch (b[i].kind_case()) {
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kField:
if (a.at(i).has_field() && a.at(i).field() == b.at(i).field()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kIndex:
if (a.at(i).has_index() && a.at(i).index() == b.at(i).index()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kMapKey:
if (a.at(i).has_map_key()) {
const ::tensorflow::proto_splitter::FieldIndex_MapKey& key =
b.at(i).map_key();
const ::tensorflow::proto_splitter::FieldIndex_MapKey& chunked_key =
a.at(i).map_key();
switch (key.type_case()) {
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::kS:
if (chunked_key.has_s() && chunked_key.s() == key.s()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kBoolean:
if (chunked_key.has_boolean() &&
chunked_key.boolean() == key.boolean()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kUi32:
if (chunked_key.has_ui32() && chunked_key.ui32() == key.ui32()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kUi64:
if (chunked_key.has_ui64() && chunked_key.ui64() == key.ui64()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kI32:
if (chunked_key.has_i32() && chunked_key.i32() == key.i32()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kI64:
if (chunked_key.has_i64() && chunked_key.i64() == key.i64()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
TYPE_NOT_SET:
default:
return absl::FailedPreconditionError(
"Encountered unknown field_tag.map_key type.");
}
}
break;
case FieldIndex::KindCase::KIND_NOT_SET:
default:
return absl::FailedPreconditionError(
"Encountered unknown field_tag kind.");
}
}
return matches;
}
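// Recursively keeps only the chunked_fields whose entire field-tag path is a
// prefix of one of the paths in `target_fields_list`, yielding a
// ChunkedMessage that references just the chunks needed for fingerprinting.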
absl::StatusOr<::tensorflow::proto_splitter::ChunkedMessage>
PruneChunkedMessage(
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
std::vector<ChunkInfo> chunks_info,
std::vector<RepeatedPtrField<FieldIndex>> target_fields_list) {
::tensorflow::proto_splitter::ChunkedMessage pruned_chunked_message;
if (chunked_message.has_chunk_index()) {
pruned_chunked_message.set_chunk_index(chunked_message.chunk_index());
}
for (const ChunkedField& chunked_field : chunked_message.chunked_fields()) {
for (const auto& target_fields : target_fields_list) {
TF_ASSIGN_OR_RETURN(
int matches,
fieldTagMatches(chunked_field.field_tag(), target_fields));
if (matches == chunked_field.field_tag_size()) {
auto cf = std::make_unique<proto_splitter::ChunkedField>();
cf->mutable_field_tag()->CopyFrom(chunked_field.field_tag());
TF_ASSIGN_OR_RETURN(
*cf->mutable_message(),
PruneChunkedMessage(chunked_field.message(), reader, chunks_info,
target_fields_list));
pruned_chunked_message.mutable_chunked_fields()->AddAllocated(
cf.release());
}
}
}
return pruned_chunked_message;
}
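// Serializes with SetSerializationDeterministic(true) so map fields are
// emitted in a stable order and the resulting fingerprint is reproducible.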
std::string SerializeProto(const Message& message) {
std::string serialized_message;
{
StringOutputStream stream(&serialized_message);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
message.SerializeToCodedStream(&output);
}
return serialized_message;
}
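// Folds the chunks backing the fields selected by `field_tags` into a single
// FingerprintCat64 checksum, reading chunk payloads through `reader`. Note
// that the `chunked_message` declared in the loop body shadows the parameter
// of the same name.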
absl::StatusOr<uint64_t> HashFields(
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
const RepeatedPtrField<FieldIndex>& field_tags, Message* merged_message) {
uint64_t field_checksum = 0;
for (const ChunkedField& chunked_field : chunked_message.chunked_fields()) {
const RepeatedPtrField<FieldIndex> chunked_field_tags =
chunked_field.field_tag();
const ChunkedMessage& chunked_message = chunked_field.message();
TF_ASSIGN_OR_RETURN(int matches,
fieldTagMatches(chunked_field_tags, field_tags));
if (chunked_message.has_chunk_index() && matches == field_tags.size()) {
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
field_checksum = FingerprintCat64(field_checksum, Fingerprint64(chunk));
} else if (matches == field_tags.size()) {
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(chunked_message, reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
} else if (chunked_message.has_chunk_index() &&
matches == chunked_field_tags.size()) {
TF_ASSIGN_OR_RETURN(std::vector<Field> fields,
GetFieldTypes(chunked_field_tags));
for (const auto& field : fields) {
TF_ASSIGN_OR_RETURN(MutableFieldResult mfr,
GetMutableField(merged_message, field));
merged_message =
mfr.parent->GetReflection()->MutableMessage(mfr.parent, mfr.field);
}
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
merged_message->ParseFromString(chunk);
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(chunked_message, reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
} else if (matches == chunked_field_tags.size()) {
for (const ChunkedField& cf : chunked_message.chunked_fields()) {
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(cf.message(), reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
}
}
}
return field_checksum;
}
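// The helpers below hard-code proto field numbers: SavedModel.meta_graphs is
// field 2, and within MetaGraphDef, graph_def is field 2, signature_def is
// field 5, and object_graph_def is field 7. The index tag 0 selects the
// first (and only) MetaGraphDef.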
inline RepeatedPtrField<FieldIndex> GraphDefFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex graph_def_field_tag;
graph_def_field_tag.set_field(2);
RepeatedPtrField<FieldIndex> graph_def_field_tags;
graph_def_field_tags.Add(FieldIndex(meta_graph_field_tag));
graph_def_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
graph_def_field_tags.Add(FieldIndex(graph_def_field_tag));
return graph_def_field_tags;
}
inline RepeatedPtrField<FieldIndex> SignatureDefFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex signature_def_field_tag;
signature_def_field_tag.set_field(5);
RepeatedPtrField<FieldIndex> signature_def_field_tags;
signature_def_field_tags.Add(FieldIndex(meta_graph_field_tag));
signature_def_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
signature_def_field_tags.Add(FieldIndex(signature_def_field_tag));
return signature_def_field_tags;
}
inline RepeatedPtrField<FieldIndex> SavedObjectGraphFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex saved_object_graph_field_tag;
saved_object_graph_field_tag.set_field(7);
RepeatedPtrField<FieldIndex> saved_object_graph_field_tags;
saved_object_graph_field_tags.Add(FieldIndex(meta_graph_field_tag));
saved_object_graph_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
saved_object_graph_field_tags.Add(FieldIndex(saved_object_graph_field_tag));
return saved_object_graph_field_tags;
}
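// Rebuilds a SavedModel containing only the GraphDef, SignatureDef map, and
// SavedObjectGraph by pruning the chunk metadata and merging the surviving
// chunks back into a proto via Merger::ReadPartial.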
absl::StatusOr<SavedModel> PrunedSavedModel(
absl::string_view export_dir,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info, ChunkMetadata& chunk_metadata) {
SavedModel saved_model;
ChunkMetadata pruned_chunk_metadata;
pruned_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks());
TF_ASSIGN_OR_RETURN(
*pruned_chunk_metadata.mutable_message(),
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{GraphDefFieldTags(), SignatureDefFieldTags(),
SavedObjectGraphFieldTags()}));
TF_RETURN_IF_ERROR(
Merger::ReadPartial(io::JoinPath(export_dir, kSavedModelFilenamePrefix),
pruned_chunk_metadata, &saved_model));
return saved_model;
}
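// Combines the hash of the already-merged parts of `message` with the hashes
// of any still-chunked fields selected by `field_tags`.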
absl::StatusOr<uint64_t> HashMessage(
Message* message, const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
const RepeatedPtrField<FieldIndex>& field_tags) {
uint64_t total_message_hash = Fingerprint64(SerializeProto(*message));
TF_ASSIGN_OR_RETURN(
uint64_t message_hash,
HashFields(chunked_message, reader, chunks_info, field_tags, message));
return FingerprintCat64(total_message_hash, message_hash);
}
absl::StatusOr<uint64_t> HashGraphDef(
::tensorflow::GraphDef* graph_def, const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
return HashMessage(graph_def, chunked_message, reader, chunks_info,
GraphDefFieldTags());
}
absl::StatusOr<uint64_t> HashSignatureDef(
const Map<std::string, ::tensorflow::SignatureDef>& signature_def_map,
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
uint64_t signature_def_hash = 0;
std::vector<std::pair<std::string, ::tensorflow::SignatureDef>>
signature_def_sorted(signature_def_map.begin(), signature_def_map.end());
std::sort(signature_def_sorted.begin(), signature_def_sorted.end(),
[](const std::pair<std::string, ::tensorflow::SignatureDef>& a,
const std::pair<std::string, ::tensorflow::SignatureDef>& b) {
return a.first < b.first;
});
for (const auto& signature_def : signature_def_sorted) {
uint64_t signature_def_pair_hash =
FingerprintCat64(Fingerprint64(signature_def.first),
Fingerprint64(SerializeProto(signature_def.second)));
signature_def_hash =
FingerprintCat64(signature_def_hash, signature_def_pair_hash);
SignatureDef signature_def_val = signature_def.second;
TF_ASSIGN_OR_RETURN(
uint64_t signature_def_entry_hash,
HashFields(chunked_message, reader, chunks_info,
SignatureDefFieldTags(), &signature_def_val));
signature_def_hash =
FingerprintCat64(signature_def_hash, signature_def_entry_hash);
}
return signature_def_hash;
}
absl::StatusOr<uint64_t> HashSavedObjectGraph(
::tensorflow::SavedObjectGraph* saved_object_graph,
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
return HashMessage(saved_object_graph, chunked_message, reader, chunks_info,
SavedObjectGraphFieldTags());
}
}  // namespace fingerprinting_utils_internal
using fingerprinting_utils_internal::HashFields;
using fingerprinting_utils_internal::HashGraphDef;
using fingerprinting_utils_internal::HashSavedObjectGraph;
using fingerprinting_utils_internal::HashSignatureDef;
using fingerprinting_utils_internal::PrunedSavedModel;
using fingerprinting_utils_internal::SerializeProto;
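// Hashes the checkpoint .index file under variables/; returns 0 when it
// cannot be read, e.g. for a program-only SavedModel with no variables.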
uint64_t HashCheckpointIndexFile(absl::string_view model_dir) {
std::string meta_filename = MetaFilename(io::JoinPath(
model_dir, kSavedModelVariablesDirectory, kSavedModelVariablesFilename));
std::string data;
absl::Status read_status =
ReadFileToString(Env::Default(), meta_filename, &data);
if (read_status.ok()) {
return tensorflow::Fingerprint64(data);
} else {
return 0;
}
}
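// Builds a FingerprintDef for a chunked (.cpb) SavedModel. The producer
// version is 2 here, versus 1 in the non-chunked .pb path implemented in
// fingerprinting.cc.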
absl::StatusOr<FingerprintDef> CreateFingerprintDefCpb(
absl::string_view export_dir, std::string cpb_file) {
const int kFingerprintProducer = 2;
TF_ASSIGN_OR_RETURN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
return absl::FailedPreconditionError(
absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n",
read_metadata.status().ToString()));
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FingerprintDef fingerprint_def;
SavedModel saved_model;
TF_ASSIGN_OR_RETURN(uint64_t saved_model_hash,
HashFields(chunk_metadata.message(), reader, chunks_info,
{}, &saved_model));
saved_model_hash = FingerprintCat64(
saved_model_hash, Fingerprint64(SerializeProto(saved_model)));
fingerprint_def.set_saved_model_checksum(saved_model_hash);
TF_ASSIGN_OR_RETURN(
saved_model,
PrunedSavedModel(export_dir, reader, chunks_info, chunk_metadata));
TF_ASSIGN_OR_RETURN(
uint64_t graph_def_program_hash,
HashGraphDef(saved_model.mutable_meta_graphs(0)->mutable_graph_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_graph_def_program_hash(graph_def_program_hash);
TF_ASSIGN_OR_RETURN(
uint64_t signature_def_hash,
HashSignatureDef(saved_model.meta_graphs(0).signature_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_signature_def_hash(signature_def_hash);
TF_ASSIGN_OR_RETURN(
uint64_t saved_object_graph_hash,
HashSavedObjectGraph(
saved_model.mutable_meta_graphs(0)->mutable_object_graph_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_saved_object_graph_hash(saved_object_graph_hash);
fingerprint_def.set_checkpoint_hash(HashCheckpointIndexFile(export_dir));
reader.Close();
VersionDef* version = fingerprint_def.mutable_version();
version->set_producer(kFingerprintProducer);
return fingerprint_def;
}
} | #include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
using fingerprinting_utils_internal::fieldTagMatches;
using fingerprinting_utils_internal::HashFields;
using fingerprinting_utils_internal::HashGraphDef;
using fingerprinting_utils_internal::HashSavedObjectGraph;
using fingerprinting_utils_internal::HashSignatureDef;
using fingerprinting_utils_internal::PruneChunkedMessage;
using fingerprinting_utils_internal::SerializeProto;
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using ::tensorflow::proto_splitter_testdata::ManyFields;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
using ::tensorflow::protobuf::TextFormat;
using ::tensorflow::protobuf::io::ArrayInputStream;
using ::tensorflow::protobuf::util::MessageDifferencer;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetRiegeliReader;
using tsl::testing::IsOkAndHolds;
using tsl::testing::TensorFlowSrcRoot;
absl::Status ParseTextProto(absl::string_view text_proto,
Message* parsed_proto) {
TextFormat::Parser parser;
ArrayInputStream input_stream(text_proto.data(), text_proto.size());
if (parser.Parse(&input_stream, parsed_proto)) {
return absl::OkStatus();
}
parsed_proto->Clear();
return absl::InvalidArgumentError(
absl::StrCat("Could not parse text proto: ", text_proto));
}
absl::StatusOr<RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>>
ExtractFieldTags(absl::string_view chunked_field_text_proto) {
ChunkedField chunked_field;
TF_RETURN_IF_ERROR(ParseTextProto(chunked_field_text_proto, &chunked_field));
return chunked_field.field_tag();
}
TEST(FingerprintingTest, TestFieldTagMatchesInitialSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.DeleteSubrange(2, 2);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(2));
}
TEST(FingerprintingTest, TestFieldTagMatchesNoninitialSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.DeleteSubrange(0, 2);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(0));
}
TEST(FingerprintingTest, TestFieldTagMatchesIdenticalSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(4));
}
TEST(FingerprintingTest, TestFieldTagMatchesSuperSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.Add()->set_field(6);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(4));
}
TEST(FingerprintingTest, TestPruneChunkedMessageSingleTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FieldIndex field_one_field_tag;
field_one_field_tag.set_field(1);
FieldIndex repeated_field_field_tag;
repeated_field_field_tag.set_field(2);
FieldIndex repeated_field_index_field_tag;
repeated_field_index_field_tag.set_index(1);
RepeatedPtrField<FieldIndex> target_field_tags;
target_field_tags.Add(FieldIndex(field_one_field_tag));
target_field_tags.Add(FieldIndex(repeated_field_field_tag));
target_field_tags.Add(FieldIndex(repeated_field_index_field_tag));
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{target_field_tags}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
chunked_fields {
field_tag { field: 1 }
message { chunk_index: 1 }
}
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestPruneChunkedMessageMultiTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FieldIndex field_one_field_tag;
field_one_field_tag.set_field(1);
FieldIndex repeated_field_field_tag;
repeated_field_field_tag.set_field(2);
FieldIndex repeated_field_index_field_tag;
repeated_field_index_field_tag.set_index(1);
RepeatedPtrField<FieldIndex> target_one_field_tags;
target_one_field_tags.Add(FieldIndex(field_one_field_tag));
target_one_field_tags.Add(FieldIndex(repeated_field_field_tag));
target_one_field_tags.Add(FieldIndex(repeated_field_index_field_tag));
FieldIndex nested_map_bool_field_tag;
nested_map_bool_field_tag.set_field(7);
FieldIndex nested_map_bool_mapkey_field_tag;
nested_map_bool_mapkey_field_tag.mutable_map_key()->set_boolean(true);
FieldIndex string_field_field_tag;
string_field_field_tag.set_field(3);
RepeatedPtrField<FieldIndex> target_two_field_tags;
target_two_field_tags.Add(FieldIndex(nested_map_bool_field_tag));
target_two_field_tags.Add(FieldIndex(nested_map_bool_mapkey_field_tag));
target_two_field_tags.Add(FieldIndex(string_field_field_tag));
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{target_one_field_tags, target_two_field_tags}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
chunked_fields {
field_tag { field: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 7 }
field_tag { map_key { boolean: true } }
message { chunk_index: 2 }
}
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestPruneChunkedMessageNoTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info, {}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestSerializeProto) {
std::string many_fields_text_proto = R"pb(
string_field: "abc123"
)pb";
ManyFields many_fields;
TF_ASSERT_OK(ParseTextProto(many_fields_text_proto, &many_fields));
ASSERT_EQ(SerializeProto(many_fields), many_fields.SerializeAsString());
}
TEST(FingerprintingTest, TestHashFieldsV2) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
ManyFields many_fields;
TF_ASSERT_OK_AND_ASSIGN(uint64_t many_fields_hash,
HashFields(chunk_metadata.message(), reader,
chunks_info, {}, &many_fields));
ASSERT_EQ(many_fields_hash, 14850154939410192811U);
}
TEST(FingerprintingTest, TestHashGraphDef) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
GraphDef graph_def;
EXPECT_THAT(
HashGraphDef(&graph_def, chunk_metadata.message(), reader, chunks_info),
IsOkAndHolds(16782272393894422524U));
}
TEST(FingerprintingTest, TestHashSignatureDef) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
::tensorflow::protobuf::Map<std::string, SignatureDef> signature_def_map;
SignatureDef signature_def;
EXPECT_THAT(HashSignatureDef(signature_def_map, chunk_metadata.message(),
reader, chunks_info),
IsOkAndHolds(0));
}
TEST(FingerprintingTest, TestHashSavedObjectGraph) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
SavedObjectGraph saved_object_graph;
EXPECT_THAT(
HashSavedObjectGraph(&saved_object_graph, chunk_metadata.message(),
reader, chunks_info),
IsOkAndHolds(17454850744699451884U));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93707a59-8f29-468f-8a8b-35eca835d93e | cpp | tensorflow/tensorflow | bundle_v2 | tensorflow/cc/saved_model/bundle_v2.cc | tensorflow/cc/saved_model/bundle_v2_test.cc | #include "tensorflow/cc/saved_model/bundle_v2.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/fingerprinting.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/strcat.h"
namespace tensorflow {
namespace {
using strings::StrCat;
constexpr char kCCLoadBundleV2Label[] = "cc_load_bundle_v2";
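// Reads the TrackableObjectGraph proto stored as a scalar DT_STRING tensor
// under kObjectGraphProtoKey in the variables checkpoint.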
absl::Status ReadCheckpointObjectGraph(BundleReader* bundle_reader,
TrackableObjectGraph* object_graph) {
Tensor object_graph_tensor;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
bundle_reader->Lookup(kObjectGraphProtoKey, &object_graph_tensor),
"SavedModel checkpoint does not contain object graph.");
if (object_graph_tensor.dtype() != DT_STRING ||
object_graph_tensor.dims() != 0 ||
object_graph_tensor.NumElements() != 1) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"SavedModel checkpoint object graph was not the correct type.");
}
const tstring* object_graph_string = reinterpret_cast<const tstring*>(
object_graph_tensor.tensor_data().data());
if (!object_graph->ParseFromString(*object_graph_string)) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"SavedModel checkpoint object graph could not be deserialized.");
}
return absl::OkStatus();
}
}  // namespace
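// Loads the SavedModel proto, optional debug info, and (when a variables/
// directory exists) the checkpoint reader plus its trackable object graph,
// updating the SavedModel read metrics along the way.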
absl::Status SavedModelV2Bundle::Load(const std::string& export_dir,
SavedModelV2Bundle* const bundle) {
metrics::SavedModelReadApi(kCCLoadBundleV2Label).IncrementBy(1);
SavedModel saved_model_proto;
TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
metrics::SavedModelReadPath().Set(export_dir);
if (saved_model_proto.meta_graphs_size() != 1) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
strings::StrCat(
"SavedModelV2 should have exactly one MetaGraphDef but actually ",
"contains ", saved_model_proto.meta_graphs_size()));
}
bundle->meta_graph_def_ =
std::move(*saved_model_proto.mutable_meta_graphs(0));
if (!port::kLittleEndian) {
TF_RETURN_IF_ERROR(
ByteSwapTensorContentInMetaGraphDef(&(bundle->meta_graph_def_)));
}
TF_RETURN_IF_ERROR(
ReadSavedModelDebugInfoIfPresent(export_dir, &bundle->debug_info_));
const std::string variables_dir =
io::JoinPath(export_dir, kSavedModelVariablesDirectory);
if (!Env::Default()->FileExists(variables_dir).ok()) {
LOG(INFO)
<< "No checkpoint found, assuming this is a program-only SavedModel";
} else {
const std::string variables_prefix =
io::JoinPath(variables_dir, kSavedModelVariablesFilename);
bundle->variable_reader_ =
std::make_unique<BundleReader>(Env::Default(), variables_prefix);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
bundle->variable_reader_->status(),
"Unable to load SavedModel variables checkpoint from ",
variables_prefix);
TF_RETURN_IF_ERROR(ReadCheckpointObjectGraph(
bundle->variable_reader_.get(), &bundle->trackable_object_graph_));
}
auto fingerprint_proto =
saved_model::fingerprinting::ReadSavedModelFingerprint(export_dir);
if (fingerprint_proto.ok()) {
metrics::SavedModelReadFingerprint().Set(
metrics::MakeFingerprintJson(fingerprint_proto.value()));
TF_ASSIGN_OR_RETURN(
std::string path_and_singleprint,
metrics::MakeSavedModelPathAndSingleprint(
export_dir, saved_model::fingerprinting::Singleprint(
fingerprint_proto.value())));
metrics::SavedModelReadPathAndSingleprint().Set(path_and_singleprint);
}
return absl::OkStatus();
}
absl::Status SavedModelV2Bundle::VisitObjectsToRestore(
RestoreObjectsCallback callback) {
if (saved_object_graph().nodes_size() == 0 ||
trackable_object_graph().nodes_size() == 0) {
return absl::OkStatus();
}
const SavedObject* root_saved_object = &saved_object_graph().nodes(0);
const TrackableObjectGraph::TrackableObject* root_trackable_object =
&trackable_object_graph().nodes(0);
absl::flat_hash_set<int> trackable_node_ids;
return RecurseObjectsToRestore(root_saved_object, 0, root_trackable_object,
std::string(), &trackable_node_ids,
std::move(callback));
}
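// Depth-first walk over the trackable object graph: each trackable child is
// matched to the SavedObject with the same local name, and `callback` fires
// for nodes carrying attributes or slot variables. `seen_trackable_node_ids`
// prevents revisiting nodes, which also breaks cycles.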
absl::Status SavedModelV2Bundle::RecurseObjectsToRestore(
const SavedObject* saved_object, int saved_object_node_id,
const TrackableObjectGraph::TrackableObject* trackable_object,
std::string object_name, absl::flat_hash_set<int>* seen_trackable_node_ids,
RestoreObjectsCallback callback) {
if (saved_object_node_id != 0 &&
(trackable_object->attributes_size() > 0 ||
trackable_object->slot_variables_size() > 0)) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
callback(saved_object_node_id, *trackable_object), "Unable to restore ",
object_name);
}
for (const auto& trackable_child_ref : trackable_object->children()) {
const auto& local_name = trackable_child_ref.local_name();
std::string child_name;
if (object_name.empty()) {
child_name = local_name;
} else {
child_name = strings::StrCat(object_name, ".", local_name);
}
int trackable_child_node_id = trackable_child_ref.node_id();
if (!seen_trackable_node_ids->insert(trackable_child_node_id).second) {
continue;
}
if (trackable_child_node_id < 0 ||
trackable_child_node_id >= trackable_object_graph().nodes_size()) {
return errors::FailedPrecondition(
strings::StrCat("Illegal trackable child node id for ", child_name));
}
const auto* trackable_child =
&trackable_object_graph().nodes(trackable_child_node_id);
int saved_child_node_id = -1;
const SavedObject* saved_child = nullptr;
for (const auto& saved_child_ref : saved_object->children()) {
if (saved_child_ref.local_name() == local_name) {
saved_child_node_id = saved_child_ref.node_id();
if (saved_child_node_id >= 0 &&
saved_child_node_id < saved_object_graph().nodes_size()) {
saved_child = &saved_object_graph().nodes(saved_child_node_id);
}
break;
}
}
if (!saved_child) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Could not find saved object to restore for ",
child_name));
}
TF_RETURN_IF_ERROR(RecurseObjectsToRestore(
saved_child, saved_child_node_id, trackable_child, child_name,
seen_trackable_node_ids, callback));
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/saved_model/bundle_v2.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "json/reader.h"
#include "json/value.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kTestData[] = "cc/saved_model/testdata";
class BundleV2Test : public ::testing::Test {
protected:
BundleV2Test() {}
void RestoreVarsAndVerify(SavedModelV2Bundle* bundle,
std::vector<std::string> expected_names) {
using RestoredVarType = std::tuple<int, std::string, std::string>;
std::vector<RestoredVarType> restored_vars;
TF_ASSERT_OK(bundle->VisitObjectsToRestore(
[&](int saved_node_id,
const TrackableObjectGraph::TrackableObject& trackable_object)
-> absl::Status {
for (const auto& attr : trackable_object.attributes()) {
if (attr.name() == "VARIABLE_VALUE") {
restored_vars.emplace_back(saved_node_id, attr.full_name(),
attr.checkpoint_key());
}
}
return absl::OkStatus();
}));
for (const auto& expected_name : expected_names) {
EXPECT_EQ(1, std::count_if(restored_vars.begin(), restored_vars.end(),
[&](RestoredVarType t) {
return std::get<1>(t) == expected_name;
}));
}
for (const auto& restored_var : restored_vars) {
const auto& saved_node =
bundle->saved_object_graph().nodes(std::get<0>(restored_var));
EXPECT_EQ(std::get<1>(restored_var), saved_node.variable().name());
Tensor value;
TF_ASSERT_OK(
bundle->variable_reader()->Lookup(std::get<2>(restored_var), &value));
}
}
};
TEST_F(BundleV2Test, LoadsVarsAndArithmeticObjectGraph) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), kTestData, "VarsAndArithmeticObjectGraph");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_GT(bundle.trackable_object_graph().nodes_size(), 0);
RestoreVarsAndVerify(&bundle, {"variable_x", "variable_y", "child_variable"});
}
TEST_F(BundleV2Test, LoadsCyclicModule) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), kTestData, "CyclicModule");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_GT(bundle.trackable_object_graph().nodes_size(), 0);
RestoreVarsAndVerify(&bundle, {"MyVariable"});
}
TEST_F(BundleV2Test, UpdatesMetrics) {
const std::string kCCLoadBundleV2Label = "cc_load_bundle_v2";
const int read_count = metrics::SavedModelReadCount("2").value();
const int api_count =
metrics::SavedModelReadApi(kCCLoadBundleV2Label).value();
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), kTestData, "VarsAndArithmeticObjectGraph");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count + 1);
EXPECT_EQ(metrics::SavedModelReadApi(kCCLoadBundleV2Label).value(),
api_count + 1);
EXPECT_EQ(metrics::SavedModelReadPath().value(), export_dir);
Json::Value fingerprint = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(metrics::SavedModelReadFingerprint().value(), fingerprint);
EXPECT_EQ(fingerprint["saved_model_checksum"].asUInt64(),
15788619162413586750ULL);
EXPECT_EQ(fingerprint["graph_def_program_hash"].asUInt64(),
706963557435316516ULL);
EXPECT_EQ(fingerprint["signature_def_hash"].asUInt64(),
5693392539583495303ULL);
EXPECT_EQ(fingerprint["saved_object_graph_hash"].asUInt64(),
12074714563970609759ULL);
EXPECT_EQ(fingerprint["checkpoint_hash"].asUInt64(), 10788359570789890102ULL);
TF_ASSERT_OK_AND_ASSIGN(
auto path_and_singleprint,
metrics::ParseSavedModelPathAndSingleprint(
metrics::SavedModelReadPathAndSingleprint().value()));
auto [path, singleprint] = path_and_singleprint;
EXPECT_TRUE(absl::StrContains(
path, absl::StrCat(kTestData, "/VarsAndArithmeticObjectGraph")));
EXPECT_EQ(singleprint,
"706963557435316516/"
"5693392539583495303/"
"12074714563970609759/"
"10788359570789890102");
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/bundle_v2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/bundle_v2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75ef4a16-796c-4902-b0f9-b8a1804aa18c | cpp | tensorflow/tensorflow | fingerprinting | tensorflow/cc/saved_model/fingerprinting.cc | tensorflow/cc/saved_model/fingerprinting_test.cc | #include "tensorflow/cc/saved_model/fingerprinting.h"
#include <cstdint>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/regularization/simple_delete.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#if !defined(PLATFORM_WINDOWS) && !defined(__APPLE__)
#include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#endif
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
uint64_t HashCheckpointIndexFile(absl::string_view model_dir) {
std::string meta_filename = MetaFilename(io::JoinPath(
model_dir, kSavedModelVariablesDirectory, kSavedModelVariablesFilename));
std::string data;
absl::Status read_status =
ReadFileToString(Env::Default(), meta_filename, &data);
if (read_status.ok()) {
return tensorflow::Fingerprint64(data);
} else {
LOG(WARNING) << "Failed to read checkpoint file: " << read_status;
return 0;
}
}
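// Fingerprint of the entire serialized SavedModel, using deterministic
// serialization so map ordering cannot perturb the checksum.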
uint64_t HashSavedModel(const SavedModel& saved_model) {
std::string saved_model_serialized;
{
StringOutputStream stream(&saved_model_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
saved_model.SerializeToCodedStream(&output);
}
return tensorflow::Fingerprint64(saved_model_serialized);
}
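// Sorts SignatureDefs by key before hashing so the result does not depend on
// the proto map's iteration order.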
uint64_t RegularizeAndHashSignatureDefs(
const Map<std::string, SignatureDef>& signature_def_map) {
absl::btree_map<std::string, SignatureDef> sorted_signature_defs;
sorted_signature_defs.insert(signature_def_map.begin(),
signature_def_map.end());
uint64_t result_hash = 0;
for (const auto& item : sorted_signature_defs) {
result_hash =
FingerprintCat64(result_hash, tensorflow::Fingerprint64(item.first));
std::string signature_def_serialized;
{
StringOutputStream stream(&signature_def_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
item.second.SerializeToCodedStream(&output);
}
result_hash = FingerprintCat64(
result_hash, tensorflow::Fingerprint64(signature_def_serialized));
}
return result_hash;
}
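// Concrete function names end in a numeric UID suffix that can differ
// between exports; this strips the suffix and orders functions by UID so
// two saves of the same program hash identically.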
absl::StatusOr<uint64_t> RegularizeAndHashSavedObjectGraph(
const SavedObjectGraph& object_graph_def) {
absl::btree_map<int64_t, std::string> uid_to_function_names;
for (const auto& [name, concrete_function] :
object_graph_def.concrete_functions()) {
TF_ASSIGN_OR_RETURN(int64_t uid, graph_regularization::GetSuffixUID(name));
uid_to_function_names.insert({uid, name});
}
uint64_t result_hash = 0;
for (const auto& [uid, function_name] : uid_to_function_names) {
result_hash = FingerprintCat64(result_hash,
tensorflow::Fingerprint64(absl::StripSuffix(
function_name, std::to_string(uid))));
std::string concrete_function_serialized;
{
StringOutputStream stream(&concrete_function_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
object_graph_def.concrete_functions()
.at(function_name)
.SerializeToCodedStream(&output);
}
result_hash = FingerprintCat64(
result_hash, tensorflow::Fingerprint64(concrete_function_serialized));
}
return result_hash;
}
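// Builds a FingerprintDef for a regular saved_model.pb: a checksum of the
// whole proto, then hashes of the regularized GraphDef, the SignatureDefs,
// the SavedObjectGraph, and the checkpoint index, stamped with producer
// version 1.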
absl::StatusOr<FingerprintDef> CreateFingerprintDefPb(
absl::string_view export_dir, std::string pb_file) {
const int kFingerprintProducer = 1;
SavedModel saved_model;
TF_RETURN_IF_ERROR(ReadBinaryProto(Env::Default(), pb_file, &saved_model));
FingerprintDef fingerprint_def;
MetaGraphDef* metagraph = saved_model.mutable_meta_graphs(0);
fingerprint_def.set_saved_model_checksum(HashSavedModel(saved_model));
graph_regularization::SimpleDelete(*metagraph->mutable_graph_def());
fingerprint_def.set_graph_def_program_hash(
graph_regularization::ComputeHash(metagraph->graph_def()));
fingerprint_def.set_signature_def_hash(
RegularizeAndHashSignatureDefs(metagraph->signature_def()));
TF_ASSIGN_OR_RETURN(
uint64_t object_graph_hash,
RegularizeAndHashSavedObjectGraph(metagraph->object_graph_def()));
fingerprint_def.set_saved_object_graph_hash(object_graph_hash);
fingerprint_def.set_checkpoint_hash(HashCheckpointIndexFile(export_dir));
VersionDef* version = fingerprint_def.mutable_version();
version->set_producer(kFingerprintProducer);
return fingerprint_def;
}
}  // namespace
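// Dispatches to the .pb or .cpb implementation depending on which file is
// present next to the saved_model prefix; on Windows and Apple platforms
// only the .pb path is compiled in.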
absl::StatusOr<FingerprintDef> CreateFingerprintDef(
absl::string_view export_dir) {
std::string prefix = io::JoinPath(export_dir, kSavedModelFilenamePrefix);
#if !defined(PLATFORM_WINDOWS) && !defined(__APPLE__)
TF_ASSIGN_OR_RETURN(bool only_contains_pb,
tools::proto_splitter::OnlyContainsPb(prefix));
if (only_contains_pb) {
return CreateFingerprintDefPb(export_dir, absl::StrCat(prefix, ".pb"));
}
return CreateFingerprintDefCpb(export_dir, absl::StrCat(prefix, ".cpb"));
#else
return CreateFingerprintDefPb(export_dir, absl::StrCat(prefix, ".pb"));
#endif
}
absl::StatusOr<FingerprintDef> ReadSavedModelFingerprint(
absl::string_view export_dir) {
const std::string fingerprint_pb_path =
io::JoinPath(export_dir, kFingerprintFilenamePb);
TF_RETURN_IF_ERROR(Env::Default()->FileExists(fingerprint_pb_path));
FingerprintDef fingerprint_proto;
absl::Status result =
ReadBinaryProto(Env::Default(), fingerprint_pb_path, &fingerprint_proto);
if (!result.ok()) return result;
return fingerprint_proto;
}
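// A "singleprint" joins the four regularized hashes with '/'.
// Hypothetical usage sketch (export_dir and the StatusOr-returning caller
// are illustrative only):
//   TF_ASSIGN_OR_RETURN(FingerprintDef fp, CreateFingerprintDef(export_dir));
//   std::string id = Singleprint(fp);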
std::string Singleprint(uint64_t graph_def_program_hash,
uint64_t signature_def_hash,
uint64_t saved_object_graph_hash,
uint64_t checkpoint_hash) {
return std::to_string(graph_def_program_hash) + "/" +
std::to_string(signature_def_hash) + "/" +
std::to_string(saved_object_graph_hash) + "/" +
std::to_string(checkpoint_hash);
}
std::string Singleprint(const FingerprintDef& fingerprint) {
return Singleprint(
fingerprint.graph_def_program_hash(), fingerprint.signature_def_hash(),
fingerprint.saved_object_graph_hash(), fingerprint.checkpoint_hash());
}
absl::StatusOr<std::string> Singleprint(absl::string_view export_dir) {
TF_ASSIGN_OR_RETURN(const FingerprintDef fingerprint_def,
ReadSavedModelFingerprint(export_dir));
return Singleprint(fingerprint_def);
}
} | #include "tensorflow/cc/saved_model/fingerprinting.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
absl::StatusOr<SavedModel> ReadSavedModel(absl::string_view file_dir) {
std::string file_path = io::JoinPath(file_dir, "saved_model.pb");
std::string serialized_saved_model;
auto status =
ReadFileToString(Env::Default(), file_path, &serialized_saved_model);
if (!status.ok()) {
return status;
}
SavedModel saved_model_pb;
saved_model_pb.ParseFromString(serialized_saved_model);
return saved_model_pb;
}
TEST(FingerprintingTest, TestCreateFingerprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_GT(fingerprint_def.saved_model_checksum(), 0);
EXPECT_EQ(fingerprint_def.graph_def_program_hash(), 10127142238652115842U);
EXPECT_EQ(fingerprint_def.signature_def_hash(), 15570736222402453744U);
EXPECT_EQ(fingerprint_def.saved_object_graph_hash(), 3678101440349108924U);
EXPECT_GT(fingerprint_def.checkpoint_hash(), 0);
}
TEST(FingerprintingTest, TestCompareFingerprintForTwoModelSavedTwice) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
const std::string export_dir2 = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert2");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb2,
ReadSavedModel(export_dir2));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def2,
CreateFingerprintDef(export_dir2));
EXPECT_GT(fingerprint_def.saved_model_checksum(), 0);
EXPECT_GT(fingerprint_def2.saved_model_checksum(), 0);
EXPECT_EQ(fingerprint_def.graph_def_program_hash(),
fingerprint_def2.graph_def_program_hash());
EXPECT_EQ(fingerprint_def.signature_def_hash(),
fingerprint_def2.signature_def_hash());
EXPECT_EQ(fingerprint_def.saved_object_graph_hash(),
fingerprint_def2.saved_object_graph_hash());
}
TEST(FingerprintingTest, TestFingerprintComputationDoesNotMutateModel) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def2,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.saved_model_checksum(),
fingerprint_def2.saved_model_checksum());
}
TEST(FingerprintingTest, TestFingerprintHasVersion) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.version().producer(), 1);
}
TEST(FingerprintingTest, TestHashCheckpointForModelWithNoVariables) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.checkpoint_hash(), 0);
}
TEST(FingerprintingTest, TestReadValidFingerprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_pb,
ReadSavedModelFingerprint(export_dir));
EXPECT_EQ(fingerprint_pb.saved_model_checksum(), 15788619162413586750u);
}
TEST(FingerprintingTest, TestReadNonexistentFingerprint) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "AssetModule");
EXPECT_EQ(ReadSavedModelFingerprint(export_dir).status().code(),
absl::StatusCode::kNotFound);
}
TEST(FingerprintingTest, TestSingleprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
const std::string const_singleprint =
"706963557435316516/5693392539583495303/12074714563970609759/"
"10788359570789890102";
TF_ASSERT_OK_AND_ASSIGN(std::string singleprint, Singleprint(export_dir));
EXPECT_EQ(singleprint, const_singleprint);
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_pb,
ReadSavedModelFingerprint(export_dir));
EXPECT_EQ(Singleprint(fingerprint_pb), const_singleprint);
EXPECT_EQ(Singleprint(fingerprint_pb.graph_def_program_hash(),
fingerprint_pb.signature_def_hash(),
fingerprint_pb.saved_object_graph_hash(),
fingerprint_pb.checkpoint_hash()),
const_singleprint);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f478bb32-0f86-4fc7-a0f3-26f1314617d9 | cpp | tensorflow/tensorflow | scope | tensorflow/cc/framework/scope.cc | tensorflow/cc/framework/scope_test.cc | #include <algorithm>
#include <vector>
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
Scope::Scope(Impl* impl) : impl_(impl) {}
Scope::Scope(const Scope& other) : impl_(new Impl(*other.impl())) {}
Scope::~Scope() {}
Scope& Scope::operator=(const Scope& other) {
impl_.reset(new Impl(*other.impl_));
return *this;
}
namespace {
const char kScopeSeparator[] = "/";
const char kSuffixSeparator[] = "_";
}  // namespace
Scope::Impl::Impl(Graph* graph, Status* status, NameMap* name_map,
ShapeRefiner* refiner, bool disable_shape_inference)
: graph_(graph),
status_(status),
name_map_(name_map),
refiner_(refiner),
scope_used_(nullptr),
colocation_constraints_(),
disable_shape_inference_(disable_shape_inference) {}
Scope::Impl::Impl(const std::shared_ptr<Graph>& graph,
const std::shared_ptr<Status>& status,
const std::shared_ptr<NameMap>& name_map,
const std::shared_ptr<ShapeRefiner>& refiner)
: graph_(graph),
status_(status),
name_map_(name_map),
refiner_(refiner),
scope_used_(nullptr),
colocation_constraints_(),
disable_shape_inference_(refiner_ == nullptr) {}
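// A root scope owns a fresh Graph, Status, name map, and ShapeRefiner; child
// scopes made from it share those objects through the shared_ptr members
// above.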
Scope Scope::NewRootScope() {
Graph* graph = new Graph(OpRegistry::Global());
ShapeRefiner* refiner =
new ShapeRefiner(graph->versions(), graph->op_registry());
  return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner,
                        /* disable_shape_inference */ false));
}
Scope Scope::DisabledShapeInferenceScope() {
Graph* graph = new Graph(OpRegistry::Global());
ShapeRefiner* refiner =
new ShapeRefiner(graph->versions(), graph->op_registry());
  return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner,
                        /* disable_shape_inference */ true));
}
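// The tag-dispatched constructors below each build a child Impl identical to
// `other` except for the field(s) named by the tag type.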
Scope::Impl::Impl(const Scope& other, Tags::ScopeName, const string& name,
bool copy_names)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(copy_names ? other.impl()->name_map_
: std::shared_ptr<NameMap>(new NameMap)),
refiner_(other.impl()->refiner_),
scope_used_(nullptr),
control_deps_(other.impl()->control_deps_),
name_(name),
op_name_(""),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::OpName, const string& name,
const string& op_name)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(name),
op_name_(op_name),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::ControlDeps,
std::vector<Operation> control_deps, bool clear_control_deps)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(
clear_control_deps
? std::vector<Operation>()
: (control_deps.insert(control_deps.begin(),
other.impl()->control_deps_.begin(),
other.impl()->control_deps_.end()),
control_deps)),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::Device, const string& device)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(device),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::SingleUseScope,
const string& op_name)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(new bool(false)),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(op_name),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::ExitOnError)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(true),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::KernelLabel,
const string& kernel_label)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(kernel_label),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::Colocate,
const Operation& colocate_with_op, bool clear_colocations)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(
clear_colocations
? std::unordered_set<string>()
: other.impl()->GetColocationConstraints(colocate_with_op)),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::AssignedDevice,
const string& assigned_device)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(assigned_device),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::XlaCluster,
const string& xla_cluster)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(xla_cluster),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
std::unordered_set<string> Scope::Impl::GetColocationConstraints(
const Operation& colocate_with_op) const {
std::unordered_set<string> current_constraints(colocation_constraints_);
const AttrSlice attrs = colocate_with_op.node()->attrs();
std::vector<string> node_constraints;
if (TryGetNodeAttr(attrs, kColocationAttrName, &node_constraints)) {
for (const string& entry : node_constraints) {
StringPiece s(entry);
if (absl::ConsumePrefix(&s, kColocationGroupPrefix)) {
current_constraints.emplace(s);
}
}
} else {
current_constraints.insert(colocate_with_op.node()->name());
}
return current_constraints;
}
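// Illustrative note (not in the original source): a node that already carries
// a "_class" colocation attr such as ["loc:@v"] contributes "v" to the set,
// while a node without the attr contributes its own name. For example,
//
//   Scope colocated = scope.ColocateWith(op_a).ColocateWith(op_b);
//
// accumulates both ops' colocation groups on the resulting scope
// (op_a/op_b/scope are hypothetical names used only for illustration).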
bool Scope::ok() const { return impl()->status_->ok(); }
Graph* Scope::graph() const { return impl()->graph_.get(); }
std::shared_ptr<Graph> Scope::graph_as_shared_ptr() const {
return impl()->graph_;
}
Status Scope::status() const { return *impl()->status_; }
const std::vector<Operation>& Scope::control_deps() const {
return impl()->control_deps_;
}
void Scope::UpdateStatus(const Status& s) const {
impl()->status_->Update(s);
if (impl()->exit_on_error_ && !ok()) {
LOG(FATAL) << *impl()->status_;
}
}
Status Scope::ToGraphDef(GraphDef* gdef, bool include_debug_info) const {
if (!ok()) {
return *impl()->status_;
}
graph()->ToGraphDef(gdef, true, include_debug_info);
return absl::OkStatus();
}
Status Scope::ToGraph(Graph* g, GraphConstructorOptions opts) const {
if (ok()) {
GraphDef graph_def;
graph()->ToGraphDef(&graph_def);
UpdateStatus(ConvertGraphDefToGraph(opts, std::move(graph_def), g));
}
return *impl()->status_;
}
void Scope::UpdateBuilder(NodeBuilder* builder) const {
std::vector<Node*> control_inputs;
for (const auto& op : impl()->control_deps_) {
control_inputs.push_back(op.node());
}
builder->ControlInputs(control_inputs);
if (!impl()->kernel_label_.empty()) {
builder->Attr("_kernel", impl()->kernel_label_);
}
if (!impl()->colocation_constraints_.empty()) {
std::vector<string> constraints(impl()->colocation_constraints_.begin(),
impl()->colocation_constraints_.end());
std::sort(constraints.begin(), constraints.end());
std::transform(constraints.begin(), constraints.end(), constraints.begin(),
[](const string& s) {
return strings::StrCat(kColocationGroupPrefix, s);
});
builder->Attr(kColocationAttrName, constraints);
}
if (!impl()->device_.empty()) {
builder->Device(impl()->device_);
}
if (!impl()->assigned_device_.empty()) {
builder->AssignedDevice(impl()->assigned_device_);
}
if (!impl()->xla_cluster_.empty()) {
builder->XlaCluster(impl()->xla_cluster_);
}
}
string Scope::Impl::GetUniqueName(const string& prefix,
bool check_single_use) const {
if (check_single_use && single_use_scope()) {
if (*scope_used_) {
*status_ =
errors::AlreadyExists(prefix, " already exists in the current scope");
return "";
}
*scope_used_ = true;
return prefix;
}
auto entry = name_map_->find(prefix);
if (entry == name_map_->end()) {
name_map_->insert({prefix, 0});
return prefix;
}
string unique_name;
do {
unique_name = strings::StrCat(prefix, kSuffixSeparator, ++entry->second);
} while (name_map_->find(unique_name) != name_map_->end());
name_map_->insert({unique_name, 0});
return unique_name;
}
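// Example (mirrors ScopeTest.OpAndScopeNameCollision below): repeated requests
// for "foo" yield "foo", "foo_1", "foo_2", ...; the do/while also skips
// suffixed names that were claimed explicitly, so asking for "foo_1" after
// "foo_1" already exists yields "foo_1_1" rather than a duplicate.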
string Scope::Impl::GetNameForOp(const string& default_name) const {
const string unique_name =
      GetUniqueName(default_name, true /* check_single_use */);
const string sep =
name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return strings::StrCat(name_, sep, unique_name);
}
string Scope::GetUniqueNameForOp(const string& default_name) const {
if (impl()->single_use_scope()) {
if (impl()->op_name_.empty() || *impl()->scope_used_) {
*impl()->status_ =
errors::InvalidArgument("Cannot get a unique name in this scope");
return "";
}
*impl()->scope_used_ = true;
return impl()->op_name_;
}
return impl()->op_name_.empty() ? impl()->GetNameForOp(default_name)
: impl()->GetNameForOp(impl()->op_name_);
}
Scope Scope::NewSubScope(const string& child_scope_name) const {
if (child_scope_name.empty()) {
return Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->name_,
                          true /* copy_names */));
}
const string unique_name =
      impl()->GetUniqueName(child_scope_name, false /* check_single_use */);
const string sep =
impl()->name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return Scope(new Impl(*this, Impl::Tags::ScopeName(),
strings::StrCat(impl()->name_, sep, unique_name),
                        false /* copy_names */));
}
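// Example (see ScopeTest.HierarchicalNames below): calling
// root.NewSubScope("child") twice produces scopes named "child" and
// "child_1", so ops built in them become "child/add" and "child_1/add";
// an empty child_scope_name returns a scope that keeps sharing the parent's
// name map instead of opening a new naming level.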
Scope Scope::WithOpNameImpl(const string& op_name) const {
if (impl()->single_use_scope()) {
UpdateStatus(errors::InvalidArgument("Cannot set op name ", op_name,
" on this scope"));
return *this;
}
return Scope(new Impl(*this, Impl::Tags::OpName(), impl()->name_, op_name));
}
Scope Scope::WithControlDependencies(
const absl::Span<const Operation> control_deps) const {
return Scope(
new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(control_deps.begin(), control_deps.end()),
false));
}
Scope Scope::WithControlDependencies(const Output& control_dep) const {
return Scope(new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(1, control_dep.op()),
false));
}
Scope Scope::WithNoControlDependencies() const {
return Scope(new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(),
true));
}
Scope Scope::WithDevice(const string& device) const {
return Scope(new Impl(*this, Impl::Tags::Device(), device));
}
Scope Scope::WithAssignedDevice(const string& assigned_device) const {
return Scope(new Impl(*this, Impl::Tags::AssignedDevice(), assigned_device));
}
Scope Scope::WithXlaCluster(const string& xla_cluster) const {
return Scope(new Impl(*this, Impl::Tags::XlaCluster(), xla_cluster));
}
Scope Scope::ColocateWith(const Operation& op) const {
return Scope(new Impl(*this, Impl::Tags::Colocate(), op,
false));
}
Scope Scope::ClearColocation() const {
return Scope(new Impl(*this, Impl::Tags::Colocate(), Operation(),
true));
}
Scope Scope::ExitOnError() const {
return Scope(new Impl(*this, Impl::Tags::ExitOnError()));
}
Scope Scope::WithKernelLabel(const string& kernel_label) const {
return Scope(new Impl(*this, Impl::Tags::KernelLabel(), kernel_label));
}
CompositeOpScopes Scope::GetCompositeOpScopes(
const string& composite_op_name) const {
if (impl()->op_name_.empty() && composite_op_name.empty()) {
UpdateStatus(errors::InvalidArgument(
"Cannot create composite op scopes with empty name"));
return {*this, *this};
}
if (!impl()->single_use_scope()) {
Scope child = NewSubScope(impl()->op_name_.empty() ? composite_op_name
: impl()->op_name_);
const string child_op_sep = impl()->name_.empty() ? "" : kSuffixSeparator;
const string child_name =
strings::StrCat(impl()->name_, child_op_sep, child.impl()->name_);
return {child,
Scope(new Impl(child, Impl::Tags::SingleUseScope(), child_name))};
} else {
return {Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->op_name_,
                           true /* copy_names */)),
*this};
}
}
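// Example (see ScopeTest.CompositeOp below): for GetCompositeOpScopes("linear"),
// `child` names interior ops "linear/mul", "linear/bias_add", ..., while
// `last` is a single-use scope whose one op is named exactly "linear", so the
// composite op's externally visible output carries the composite name itself.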
Status Scope::DoShapeInference(Node* node) const {
if (impl_->disable_shape_inference_) return absl::OkStatus();
return impl_->refiner_->AddNode(node);
}
class InternalScope {
public:
static Scope NewScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
Scope::Impl::NameMap* name_map = new Scope::Impl::NameMap;
for (const Node* node : graph->nodes()) {
const string& name = node->name();
(*name_map)[name] = 0;
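      // Intentional wraparound: idx starts at size_t(-1) so that idx + 1 is 0
      // on the first find() below.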
size_t idx = -1;
while ((idx = name.find(kScopeSeparator, idx + 1)) != string::npos) {
(*name_map)[name.substr(0, idx)] = 0;
}
}
return Scope(new Scope::Impl(
std::shared_ptr<Graph>(graph, [](Graph*) {}),
std::shared_ptr<Status>(status, [](Status*) {}),
std::shared_ptr<Scope::Impl::NameMap>(name_map),
std::shared_ptr<ShapeRefiner>(refiner, [](ShapeRefiner*) {})));
}
};
Scope NewInternalScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
return InternalScope::NewScope(graph, status, refiner);
}
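// Note: the no-op deleters used in InternalScope::NewScope mean the returned
// scope only borrows `graph`, `status`, and `refiner`; the caller retains
// ownership and must keep them alive for the scope's lifetime.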
Status CreateOutputWithScope(string op_name,
absl::Span<const ::tensorflow::Input> inputs,
const Scope& scope, Output* output) {
TF_RETURN_IF_ERROR(scope.status());
const auto unique_name = scope.GetUniqueNameForOp(op_name);
auto builder = ::tensorflow::NodeBuilder(unique_name, op_name);
for (const auto& input : inputs) {
TF_RETURN_IF_ERROR(scope.status());
builder = builder.Input(input.node());
}
::tensorflow::Node* ret;
scope.UpdateBuilder(&builder);
TF_RETURN_IF_ERROR(scope.status());
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
TF_RETURN_IF_ERROR(scope.status());
*output = Output(ret, 0);
return absl::OkStatus();
}
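// Usage sketch (mirrors ScopeTest.CreateOutput below):
//
//   Scope root = Scope::NewRootScope();
//   Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
//   Output add;
//   TF_CHECK_OK(
//       CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add));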
} | #include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ScopeTest, BasicNames) {
Scope root = Scope::NewRootScope();
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add");
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_1");
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_2");
EXPECT_EQ(root.GetUniqueNameForOp("mul"), "mul");
}
TEST(ScopeTest, OpAndScopeNameCollision) {
Scope root = Scope::NewRootScope();
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo");
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_1");
EXPECT_EQ(root.GetUniqueNameForOp("foo_1"), "foo_1_1");
EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2");
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_3");
EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2_1");
}
TEST(ScopeTest, HierarchicalNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add_1");
EXPECT_EQ(child.GetUniqueNameForOp("mul"), "child/mul");
Scope child_1 = root.NewSubScope("child");
EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add");
EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add_1");
EXPECT_EQ(child_1.GetUniqueNameForOp("mul"), "child_1/mul");
Scope c_c = root.NewSubScope("c").NewSubScope("c");
EXPECT_EQ(c_c.GetUniqueNameForOp("add"), "c/c/add");
Scope c_1 = root.NewSubScope("c");
Scope c_1_c = c_1.NewSubScope("c");
EXPECT_EQ(c_1_c.GetUniqueNameForOp("add"), "c_1/c/add");
Scope c_1_c_1 = c_1.NewSubScope("c");
EXPECT_EQ(c_1_c_1.GetUniqueNameForOp("add"), "c_1/c_1/add");
EXPECT_EQ(root.NewSubScope("").NewSubScope("").GetUniqueNameForOp("d"), "d");
EXPECT_EQ(root.NewSubScope("").GetUniqueNameForOp("d"), "d_1");
EXPECT_EQ(root.GetUniqueNameForOp("d"), "d_2");
}
TEST(ScopeTest, ScopeAndOpNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
EXPECT_EQ(root.GetUniqueNameForOp("child"), "child_1");
EXPECT_EQ(root.NewSubScope("child").GetUniqueNameForOp("p"), "child_2/p");
}
namespace {
string LastOp(const Scope& scope) { return scope.GetUniqueNameForOp("Last"); }
std::vector<string> AnotherCompositeOp(const Scope& scope) {
auto cop_scopes = scope.GetCompositeOpScopes("another_cop");
const string c1 = cop_scopes.child.GetUniqueNameForOp("c1");
const string c2 = cop_scopes.child.GetUniqueNameForOp("mul");
return {c1, c2, LastOp(cop_scopes.last)};
}
std::vector<string> LinearOp(const Scope& scope) {
auto cop_scopes = scope.GetCompositeOpScopes("linear");
Scope linear = cop_scopes.child;
const string mul_op_name = linear.GetUniqueNameForOp("mul");
const string bias_add_op_name = linear.GetUniqueNameForOp("bias_add");
auto cop_names = AnotherCompositeOp(cop_scopes.last);
return {mul_op_name, bias_add_op_name, cop_names[0], cop_names[1],
cop_names[2]};
}
}
TEST(ScopeTest, CompositeOp) {
Scope root = Scope::NewRootScope();
const auto names1 = LinearOp(root);
EXPECT_EQ(names1[0], "linear/mul");
EXPECT_EQ(names1[1], "linear/bias_add");
EXPECT_EQ(names1[2], "linear/c1");
EXPECT_EQ(names1[3], "linear/mul_1");
EXPECT_EQ(names1[4], "linear");
EXPECT_EQ(root.GetUniqueNameForOp("linear"), "linear_1");
const auto names2 = LinearOp(root);
EXPECT_EQ(names2[0], "linear_2/mul");
EXPECT_EQ(names2[1], "linear_2/bias_add");
EXPECT_EQ(names2[2], "linear_2/c1");
EXPECT_EQ(names2[3], "linear_2/mul_1");
EXPECT_EQ(names2[4], "linear_2");
const auto names3 = LinearOp(root.WithOpName("c"));
EXPECT_EQ(names3[0], "c/mul");
EXPECT_EQ(names3[1], "c/bias_add");
EXPECT_EQ(names3[2], "c/c1");
EXPECT_EQ(names3[3], "c/mul_1");
EXPECT_EQ(names3[4], "c");
}
TEST(ScopeTest, SingleUseScope) {
Scope root = Scope::NewRootScope();
auto cop_scopes = root.GetCompositeOpScopes("cop");
EXPECT_EQ(cop_scopes.last.GetUniqueNameForOp("foo"), "cop");
cop_scopes.last.GetUniqueNameForOp("foo");
EXPECT_FALSE(cop_scopes.last.ok());
}
TEST(ScopeTest, ControlDeps) {
Scope root = Scope::NewRootScope();
auto c1 = Operation();
auto c2 = Operation();
Scope c = root.WithControlDependencies({c1, c2});
EXPECT_EQ(c.control_deps().size(), 2);
Scope c_c = c.WithControlDependencies({Operation()});
EXPECT_EQ(c_c.control_deps().size(), 3);
}
TEST(ScopeTest, CreateOutput) {
Scope root = Scope::NewRootScope();
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output add;
ASSERT_TRUE(
CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add).ok());
EXPECT_EQ(add.node()->name(), "add");
EXPECT_EQ(add.node()->type_string(), "Add");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/scope.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/scope_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6c61abf8-3990-4d88-87c1-76687b47cd1a | cpp | tensorflow/tensorflow | gradient_checker | tensorflow/c/eager/gradient_checker.cc | tensorflow/c/eager/gradient_checker_test.cc | #include "tensorflow/c/eager/gradient_checker.h"
#include <memory>
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/tf_tensor.h"
namespace tensorflow {
namespace gradients {
using std::vector;
void Range(vector<int32_t>* data, int32_t start, int32_t end,
int32_t step = 1) {
for (int32_t i = start; i < end; i += step) {
(*data)[i] = i;
}
}
void GetDims(const TF_Tensor* t, int64_t* out_dims) {
int num_dims = TF_NumDims(t);
for (int i = 0; i < num_dims; i++) {
out_dims[i] = TF_Dim(t, i);
}
}
Status RunAndMaybeSum(AbstractContext* ctx, Model forward,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs,
bool use_function) {
AbstractTensorHandle* model_outputs[1];
TF_RETURN_IF_ERROR(
RunModel(forward, ctx, inputs, model_outputs, use_function));
AbstractTensorHandlePtr model_out(model_outputs[0]);
TF_Tensor* model_out_tensor;
TF_RETURN_IF_ERROR(GetValue(model_out.get(), &model_out_tensor));
int num_dims_out = TF_NumDims(model_out_tensor);
TF_DeleteTensor(model_out_tensor);
if (num_dims_out == 0) {
outputs[0] = model_out.release();
return absl::OkStatus();
}
AbstractTensorHandlePtr sum_dims;
{
vector<int32_t> vals(num_dims_out);
int64_t vals_shape[] = {num_dims_out};
Range(&vals, 0, num_dims_out);
AbstractTensorHandle* sum_dims_raw = nullptr;
TF_RETURN_IF_ERROR(TestTensorHandleWithDims<int32_t, TF_INT32>(
ctx, vals.data(), vals_shape, 1, &sum_dims_raw));
sum_dims.reset(sum_dims_raw);
}
TF_RETURN_IF_ERROR(ops::Sum(ctx, model_out.get(), sum_dims.get(), &outputs[0],
false, "sum_output"));
return absl::OkStatus();
}
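// Note: reducing a non-scalar model output to a scalar with Sum lets the
// gradient checker treat every model as a scalar-valued f(theta), which is
// the quantity the central difference in CalcNumericalGrad perturbs.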
Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
absl::Span<AbstractTensorHandle* const> inputs,
int input_index, bool use_function,
AbstractTensorHandle** numerical_grad) {
vector<AbstractTensorHandle*> theta_inputs(inputs.size());
for (int i{}; i < inputs.size(); ++i) {
theta_inputs[i] = inputs[i];
}
AbstractTensorHandle* theta =
theta_inputs[input_index];
TF_Tensor* theta_tensor;
TF_RETURN_IF_ERROR(GetValue(theta, &theta_tensor));
int num_elems = TF_TensorElementCount(theta_tensor);
vector<float> theta_data(num_elems);
memcpy(theta_data.data(), TF_TensorData(theta_tensor),
TF_TensorByteSize(theta_tensor));
vector<float> dtheta_approx(num_elems);
int num_dims = TF_NumDims(theta_tensor);
vector<int64_t> theta_dims(num_dims);
GetDims(theta_tensor, theta_dims.data());
vector<float> thetaPlus_data(num_elems);
vector<float> thetaMinus_data(num_elems);
AbstractTensorHandle* f_outputs[1];
for (int i = 0; i < num_elems; i++) {
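    // Central difference for element i:
    //   df/dtheta_i ~= (f(theta + eps*e_i) - f(theta - eps*e_i)) / (2 * eps)
    // with eps scaled to the magnitude of theta_i to limit rounding error.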
float epsilon = theta_data[i] == 0 ? 1e-4 : std::abs(theta_data[i] * 1e-4);
AbstractTensorHandlePtr two_eps;
{
AbstractTensorHandle* two_eps_raw = nullptr;
TF_RETURN_IF_ERROR(TestScalarTensorHandle<float, TF_FLOAT>(
ctx, 2 * epsilon, &two_eps_raw));
two_eps.reset(two_eps_raw);
}
memcpy(thetaPlus_data.data(), TF_TensorData(theta_tensor),
TF_TensorByteSize(theta_tensor));
thetaPlus_data[i] += epsilon;
AbstractTensorHandlePtr thetaPlus;
{
AbstractTensorHandle* thetaPlus_raw = nullptr;
TF_RETURN_IF_ERROR(TestTensorHandleWithDims<float, TF_FLOAT>(
ctx, thetaPlus_data.data(), theta_dims.data(), num_dims,
&thetaPlus_raw));
thetaPlus.reset(thetaPlus_raw);
}
memcpy(&thetaMinus_data[0], TF_TensorData(theta_tensor),
TF_TensorByteSize(theta_tensor));
thetaMinus_data[i] -= epsilon;
AbstractTensorHandlePtr thetaMinus;
{
AbstractTensorHandle* thetaMinus_raw = nullptr;
TF_RETURN_IF_ERROR(TestTensorHandleWithDims<float, TF_FLOAT>(
ctx, thetaMinus_data.data(), theta_dims.data(), num_dims,
&thetaMinus_raw));
thetaMinus.reset(thetaMinus_raw);
}
theta_inputs[input_index] = thetaPlus.get();
TF_RETURN_IF_ERROR(
RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
AbstractTensorHandlePtr fPlus(f_outputs[0]);
theta_inputs[input_index] = thetaMinus.get();
TF_RETURN_IF_ERROR(
RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
AbstractTensorHandlePtr fMinus(f_outputs[0]);
TF_RETURN_IF_ERROR(
ops::Sub(ctx, fPlus.get(), fMinus.get(), f_outputs, "sub_top"));
AbstractTensorHandlePtr fDiff(f_outputs[0]);
TF_RETURN_IF_ERROR(
ops::Div(ctx, fDiff.get(), two_eps.get(), f_outputs, "diff_quotient"));
AbstractTensorHandlePtr diff_quotient(f_outputs[0]);
TF_Tensor* grad_tensor;
TF_RETURN_IF_ERROR(GetValue(diff_quotient.get(), &grad_tensor));
float grad_data[1];
memcpy(&grad_data[0], TF_TensorData(grad_tensor),
TF_TensorByteSize(grad_tensor));
TF_DeleteTensor(grad_tensor);
dtheta_approx[i] = grad_data[0];
}
TF_RETURN_IF_ERROR(TestTensorHandleWithDims<float, TF_FLOAT>(
ctx, dtheta_approx.data(), theta_dims.data(), num_dims, numerical_grad));
TF_DeleteTensor(theta_tensor);
return absl::OkStatus();
}
}
} | #include "tensorflow/c/eager/gradient_checker.h"
#include <memory>
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using tensorflow::TF_StatusPtr;
void CompareNumericalAndManualGradients(
Model model, AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs, int input_index,
float* expected_grad, int num_grad, bool use_function,
double abs_error = 1e-2) {
Status s;
AbstractTensorHandlePtr numerical_grad;
{
AbstractTensorHandle* numerical_grad_raw;
s = CalcNumericalGrad(ctx, model, inputs, input_index, use_function,
&numerical_grad_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
numerical_grad.reset(numerical_grad_raw);
}
TF_Tensor* numerical_tensor;
s = GetValue(numerical_grad.get(), &numerical_tensor);
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto num_elem_numerical = TF_TensorElementCount(numerical_tensor);
ASSERT_EQ(num_elem_numerical, num_grad);
float* dnumerical = new float[num_elem_numerical]{0};
memcpy(&dnumerical[0], TF_TensorData(numerical_tensor),
TF_TensorByteSize(numerical_tensor));
for (int j = 0; j < num_grad; j++) {
ASSERT_NEAR(dnumerical[j], expected_grad[j], abs_error);
}
delete[] dnumerical;
TF_DeleteTensor(numerical_tensor);
}
Status MatMulModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0],
                     /*transpose_a=*/false,
                     /*transpose_b=*/false, "MatMul");
}
Status MulModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Mul(ctx, inputs[0], inputs[1], &outputs[0], "Mul");
}
class GradientCheckerTest
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
{
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx_.reset(ctx_raw);
}
enable_tensor_float_32_execution(false);
}
AbstractContextPtr ctx_;
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
TEST_P(GradientCheckerTest, TestMatMul) {
float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f};
int64_t A_dims[] = {2, 2};
AbstractTensorHandlePtr A;
{
AbstractTensorHandle* A_raw;
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx_.get(), A_vals,
A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
A.reset(A_raw);
}
float B_vals[] = {.5f, -1.0f, 1.0f, 1.0f};
int64_t B_dims[] = {2, 2};
AbstractTensorHandlePtr B;
{
AbstractTensorHandle* B_raw;
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx_.get(), B_vals,
B_dims, 2, &B_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
B.reset(B_raw);
}
float expected_dA[4] = {-.5f, 2.0f, -.5f, 2.0f};
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndManualGradients(
MatMulModel, ctx_.get(), {A.get(), B.get()}, 0, expected_dA, 4,
UseFunction()));
}
TEST_P(GradientCheckerTest, TestMul) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
Status s =
TestScalarTensorHandle<float, TF_FLOAT>(ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
AbstractTensorHandlePtr y;
{
AbstractTensorHandle* y_raw = nullptr;
Status s =
TestScalarTensorHandle<float, TF_FLOAT>(ctx_.get(), 7.0f, &y_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
y.reset(y_raw);
}
float expected_dx[1] = {7.0f};
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndManualGradients(
MulModel, ctx_.get(), {x.get(), y.get()}, 0, expected_dx, 1,
UseFunction()));
}
#ifdef PLATFORM_GOOGLE
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, GradientCheckerTest,
::testing::Combine(::testing::Values("graphdef"),
                       /*tfrt*/ ::testing::Values(false),
                       /*use_function*/ ::testing::Values(true, false)));
#else
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, GradientCheckerTest,
::testing::Combine(::testing::Values("graphdef"),
                       /*tfrt*/ ::testing::Values(false),
                       /*use_function*/ ::testing::Values(true, false)));
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradient_checker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradient_checker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5771585-ded8-4367-ad73-ce5eaccac290 | cpp | tensorflow/tensorflow | while_gradients | tensorflow/cc/framework/while_gradients.cc | tensorflow/cc/framework/while_gradients_test.cc | #include "tensorflow/cc/framework/while_gradients.h"
#include <string>
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
namespace tensorflow {
namespace {
using ops::BodyGraphBuilderFn;
using ops::BuildWhileLoop;
using ops::CondGraphBuilderFn;
Output ToOutput(OutputTensor output_tensor) {
return Output(const_cast<Node*>(output_tensor.node), output_tensor.index);
}
std::vector<Output> ToOutputVector(
const std::vector<OutputTensor>& output_tensors) {
const int n = output_tensors.size();
std::vector<Output> result;
result.reserve(n);
for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i]));
return result;
}
string BackPropFrameName(const string& forward_frame_name) {
return strings::StrCat(forward_frame_name, "_backprop");
}
Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope,
Output* count) {
Output zero = ops::Const(scope, 0, {});
CondGraphBuilderFn cond_fn = [while_ctx](const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
*output = ToOutput(while_ctx->cond_output());
return absl::OkStatus();
};
BodyGraphBuilderFn body_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK_EQ(inputs.size(), 1);
outputs->emplace_back(ops::Add(scope, inputs[0], 1));
return scope.status();
};
std::vector<Output> outputs;
TF_RETURN_IF_ERROR(BuildWhileLoop(scope, {zero}, cond_fn, body_fn,
while_ctx->frame_name(), &outputs,
                                    /* create_while_ctx */ false));
*count = outputs[0];
return absl::OkStatus();
}
Status AddBackPropLoopCounter(WhileContext* while_ctx, const Output& loop_count,
const Scope& scope,
Output* backprop_execution_pred) {
CondGraphBuilderFn cond_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
DCHECK_EQ(inputs.size(), 1);
*output = ops::Greater(scope, inputs[0], 0);
return scope.status();
};
BodyGraphBuilderFn body_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK_EQ(inputs.size(), 1);
outputs->emplace_back(ops::Subtract(scope, inputs[0], 1));
return scope.status();
};
string frame_name = BackPropFrameName(while_ctx->frame_name());
std::vector<Output> outputs;
TF_RETURN_IF_ERROR(BuildWhileLoop(
scope, {loop_count}, cond_fn, body_fn, frame_name, &outputs,
      /* create_while_ctx */ false, backprop_execution_pred));
return absl::OkStatus();
}
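// Together the two counters replay the forward trip count: AddForwardLoopCounter
// records how many iterations the forward loop ran (it shares the forward
// loop's condition output), and AddBackPropLoopCounter counts that value back
// down to zero, emitting the predicate that drives the gradient loop below.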
Status AddWhileGradientLoop(WhileContext* while_ctx,
const std::vector<Output>& grad_inputs,
const Output& backprop_execution_pred,
const Scope& parent_scope,
std::vector<Output>* grad_outputs) {
DCHECK_EQ(grad_inputs.size(), while_ctx->body_outputs().size());
DCHECK_EQ(while_ctx->body_inputs().size(), while_ctx->body_outputs().size());
Scope scope = parent_scope.NewSubScope("while");
CondGraphBuilderFn cond_fn = [backprop_execution_pred](
const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
*output = backprop_execution_pred;
return absl::OkStatus();
};
BodyGraphBuilderFn body_fn = [while_ctx](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
std::vector<Output> body_outputs =
ToOutputVector(while_ctx->body_outputs());
std::vector<Output> body_inputs = ToOutputVector(while_ctx->body_inputs());
return AddSymbolicGradients(scope, body_outputs, body_inputs, inputs,
outputs);
};
string frame_name = BackPropFrameName(while_ctx->frame_name());
TF_RETURN_IF_ERROR(BuildWhileLoop(scope, grad_inputs, cond_fn, body_fn,
frame_name, grad_outputs,
                                    /* create_while_ctx */ false));
return absl::OkStatus();
}
}
Status AddWhileLoopGradient(WhileContext* while_ctx, const Scope& scope,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
Output forward_loop_count;
TF_RETURN_IF_ERROR(AddForwardLoopCounter(
while_ctx, scope.NewSubScope("ForwardLoopCounter"), &forward_loop_count));
Output backprop_counter_cond;
TF_RETURN_IF_ERROR(AddBackPropLoopCounter(
while_ctx, forward_loop_count, scope.NewSubScope("BackPropLoopCounter"),
&backprop_counter_cond));
return AddWhileGradientLoop(while_ctx, grad_inputs, backprop_counter_cond,
scope, grad_outputs);
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class WhileGradientsTest : public ::testing::Test {
protected:
WhileGradientsTest() : scope_(Scope::NewRootScope()) {}
void Init(int num_inputs, DataType dtype = DT_INT32) {
for (int i = 0; i < num_inputs; ++i) {
inputs_.push_back(ops::Placeholder(scope_, dtype));
}
}
void CreateLoop(const ops::CondGraphBuilderFn& cond,
const ops::BodyGraphBuilderFn& body,
const std::vector<Output>* inputs = nullptr) {
if (inputs == nullptr) inputs = &inputs_;
TF_ASSERT_OK(ops::BuildWhileLoop(scope_, *inputs, cond, body, "test_loop",
&outputs_));
}
void CreateBackprop() {
TF_ASSERT_OK(
AddSymbolicGradients(scope_, outputs_, inputs_, &grad_outputs_));
ASSERT_EQ(grad_outputs_.size(), inputs_.size());
}
template <typename T>
void Run(const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_grad_values) {
Run<T>(ClientSession(scope_), input_values, expected_grad_values);
}
template <typename T>
void Run(const ClientSession& session,
const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_grad_values,
const RunOptions& run_options = RunOptions(),
RunMetadata* run_metadata = nullptr) {
DCHECK_EQ(input_values.size(), inputs_.size());
ClientSession::FeedType feeds;
for (int i = 0; i < inputs_.size(); ++i) {
feeds.emplace(inputs_[i], input_values[i]);
}
std::vector<Operation> run_outputs;
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(run_options, feeds, grad_outputs_, run_outputs,
&out_tensors, run_metadata));
ASSERT_EQ(out_tensors.size(), grad_outputs_.size());
DCHECK_EQ(expected_grad_values.size(), out_tensors.size());
for (int i = 0; i < out_tensors.size(); ++i) {
test::ExpectTensorEqual<T>(
out_tensors[i], test::AsTensor<T>({expected_grad_values[i]}, {}));
}
}
Scope scope_;
std::vector<Output> inputs_;
std::vector<Output> outputs_;
std::vector<Output> grad_outputs_;
};
TEST_F(WhileGradientsTest, Basic) {
Init(1);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], 1}));
return s.status();
});
CreateBackprop();
Run<int>({1}, {1});
Run<int>({11}, {1});
}
TEST_F(WhileGradientsTest, MultipleLoopVars) {
Init(3);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], inputs[1]}));
outputs->push_back(ops::AddN(s, {inputs[1], 1}));
outputs->push_back(inputs[2]);
return s.status();
});
CreateBackprop();
Run<int>({0, 1, 2}, {1, 5, 1});
Run<int>({1, 1, 0}, {1, 5, 1});
Run<int>({0, 0, 0}, {1, 6, 1});
}
TEST_F(WhileGradientsTest, Chaining) {
Init(2, DT_DOUBLE);
std::vector<Output> loop_inputs = {ops::Multiply(scope_, inputs_[0], 2.0),
ops::Multiply(scope_, inputs_[1], 2.0)};
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::LogicalAnd(s, ops::Greater(s, inputs[0], 0.0),
ops::Greater(s, inputs[1], 0.0));
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], -1.0}));
outputs->push_back(inputs[1]);
return s.status();
},
&loop_inputs);
outputs_[0] = ops::Neg(scope_, outputs_[0]);
CreateBackprop();
Run<double>({1.0, 1.0}, {-2.0, 2.0});
Run<double>({0.0, 0.0}, {-2.0, 2.0});
}
TEST_F(WhileGradientsTest, MultipleDevices) {
scope_ = scope_.WithDevice("/cpu:0");
Init(2);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
Scope cpu1_scope = s.WithDevice("/cpu:1");
outputs->push_back(ops::AddN(cpu1_scope, {inputs[0], inputs[1]}));
outputs->push_back(inputs[1]);
return cpu1_scope.status();
});
Scope cpu1_scope = scope_.WithDevice("/cpu:1");
TF_ASSERT_OK(
AddSymbolicGradients(cpu1_scope, outputs_, inputs_, &grad_outputs_));
ASSERT_EQ(grad_outputs_.size(), inputs_.size());
SessionOptions session_options;
(*session_options.config.mutable_device_count())["CPU"] = 2;
RunOptions run_options;
run_options.set_output_partition_graphs(true);
RunMetadata run_metadata;
Run<int>(ClientSession(scope_, session_options), {0, 1}, {1, 11}, run_options,
&run_metadata);
ASSERT_EQ(run_metadata.partition_graphs().size(), 2);
for (const GraphDef& partition_graph : run_metadata.partition_graphs()) {
EXPECT_GE(partition_graph.node().size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/while_gradients.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/while_gradients_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91e75353-5a71-453d-bbaa-606e96f59b4f | cpp | tensorflow/tensorflow | cc_op_gen | tensorflow/cc/framework/cc_op_gen.cc | tensorflow/cc/framework/cc_op_gen_test.cc | #include "tensorflow/cc/framework/cc_op_gen.h"
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/cc/framework/cc_op_gen_util.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace cc_op {
namespace {
const int kRightMargin = 79;
string GetConstructorDecl(const OpInfo& op_info, StringPiece op_name_prefix,
bool include_attr) {
const string prefix = strings::StrCat(op_name_prefix, op_info.op_name, "(");
string c_decl;
for (int i = 0; i < op_info.arg_types.size(); ++i) {
if (i > 0) strings::StrAppend(&c_decl, ", ");
strings::StrAppend(&c_decl, op_info.arg_types[i], " ",
op_info.arg_names[i]);
}
if (include_attr && op_info.has_optional_attrs) {
strings::StrAppend(&c_decl, ", const ", op_info.op_name, "::Attrs& attrs");
}
strings::StrAppend(&c_decl, ")");
return WordWrap(prefix, c_decl, kRightMargin);
}
void WriteClassDecl(const OpInfo& op_info, WritableFile* h) {
string class_decl = op_info.comment;
strings::StrAppend(&class_decl, "class ", op_info.op_name, " {\n");
strings::StrAppend(&class_decl, " public:\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, op_info.GetOpAttrStruct());
}
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", false),
";\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", true),
";\n");
}
if (op_info.output_types.empty()) {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Operation() const { "
"return operation; }\n");
} else if (op_info.output_types.size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(&class_decl,
" ::tensorflow::Output operator[](size_t index) "
"const { return ",
op_info.output_names[0], "[index]; }\n\n");
} else {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Output() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" operator ::tensorflow::Input() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" ::tensorflow::Node* node() const { return ",
op_info.output_names[0], ".node(); }\n");
}
}
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, "\n");
for (int i = 0; i < op_info.graph_op_def.attr_size(); ++i) {
const auto& attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if ((op_info.inferred_input_attrs.find(attr.name()) !=
op_info.inferred_input_attrs.end()) ||
!api_def_attr.has_default_value()) {
continue;
}
const auto entry = AttrTypeName(attr.type());
const auto attr_type_name = entry.first;
const bool use_const = entry.second;
const string camel_case_name = ToCamelCase(api_def_attr.rename_to());
const string suffix =
(camel_case_name == op_info.op_name || camel_case_name == "Attrs")
? "_"
: "";
const string attr_func_def = strings::StrCat(
camel_case_name, suffix, "(", use_const ? "const " : "",
attr_type_name, use_const ? "&" : "");
strings::StrAppend(&class_decl, " static Attrs ", attr_func_def,
" x) {\n");
strings::StrAppend(&class_decl, " return Attrs().", camel_case_name,
suffix, "(x);\n");
strings::StrAppend(&class_decl, " }\n");
}
}
strings::StrAppend(&class_decl, "\n Operation operation;\n");
for (int i = 0; i < op_info.output_types.size(); ++i) {
strings::StrAppend(&class_decl, " ", op_info.output_types[i], " ",
op_info.output_names[i], ";\n");
}
strings::StrAppend(&class_decl, "};\n");
if (!op_info.aliases.empty()) {
for (const auto& alias : op_info.aliases) {
strings::StrAppend(&class_decl, "typedef ", op_info.op_name, " ", alias,
";\n");
}
}
strings::StrAppend(&class_decl, "\n");
TF_CHECK_OK(h->Append(class_decl));
}
void GetOutput(const OpInfo& op_info, string* out) {
const string scope_str = op_info.arg_names[0];
string return_on_error =
strings::StrCat("if (!", scope_str, ".ok()) return;");
strings::StrAppend(out, " this->operation = Operation(ret);\n");
if (op_info.graph_op_def.output_arg_size() == 0) {
strings::StrAppend(out, " return;\n");
return;
}
if (op_info.graph_op_def.output_arg_size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(out,
" for (int32 i = 0; i < ret->num_outputs(); ++i)\n");
strings::StrAppend(out, " this->", op_info.output_names[0],
".push_back(Output(ret, i));\n");
} else {
strings::StrAppend(out, " this->", op_info.output_names[0],
" = Output(ret, 0);\n");
}
return;
}
strings::StrAppend(out, " ::tensorflow::NameRangeMap _outputs_range;\n");
strings::StrAppend(out,
" ::tensorflow::Status _status_ = "
"::tensorflow::NameRangesForNode(*ret, ret->op_def(), "
"nullptr, &_outputs_range);\n");
strings::StrAppend(out, " if (!_status_.ok()) {\n", " ", scope_str,
".UpdateStatus(_status_);\n", " return;\n");
strings::StrAppend(out, " }\n\n");
for (int i = 0; i < op_info.graph_op_def.output_arg_size(); ++i) {
const string arg_range = strings::StrCat(
"_outputs_range[\"", op_info.graph_op_def.output_arg(i).name(), "\"]");
if (op_info.is_list_output[i]) {
strings::StrAppend(out, " for (int32 i = ", arg_range, ".first; i < ",
arg_range, ".second; ++i)\n");
strings::StrAppend(out, " this->", op_info.output_names[i],
".push_back(Output(ret, i));\n");
} else {
strings::StrAppend(out, " this->", op_info.output_names[i],
" = Output(ret, ", arg_range, ".first);\n");
}
}
}
string GetConstructorBody(const OpInfo& op_info) {
const string scope_str = op_info.arg_names[0];
string body;
string return_on_error =
strings::StrCat("if (!", scope_str, ".ok()) return;");
strings::StrAppend(&body, " ", return_on_error, "\n");
for (int i = 0; i < op_info.graph_op_def.input_arg_size(); ++i) {
const auto& arg(op_info.graph_op_def.input_arg(i));
const auto& api_def_arg(op_info.api_def.in_arg(i));
strings::StrAppend(
&body, " auto _", api_def_arg.rename_to(), " = ::tensorflow::ops::",
ArgIsList(arg) ? "AsNodeOutList" : "AsNodeOut", "(", scope_str, ", ",
AvoidCPPKeywords(api_def_arg.rename_to()), ");\n");
strings::StrAppend(&body, " ", return_on_error, "\n");
}
strings::StrAppend(&body, " ::tensorflow::Node* ret;\n");
strings::StrAppend(&body, " const auto unique_name = ", scope_str,
".GetUniqueNameForOp(\"", op_info.op_name, "\");\n");
strings::StrAppend(
&body, " auto builder = ::tensorflow::NodeBuilder(unique_name, \"",
op_info.graph_op_def.name(), "\")\n");
const string spaces = " ";
for (int i = 0; i < op_info.api_def.in_arg_size(); ++i) {
const auto& arg(op_info.api_def.in_arg(i));
strings::StrAppend(&body, spaces, ".Input(_", arg.rename_to(), ")\n");
}
for (int i = 0; i < op_info.api_def.attr_size(); ++i) {
const auto& graph_attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if (op_info.inferred_input_attrs.find(api_def_attr.name()) !=
op_info.inferred_input_attrs.end()) {
continue;
}
const string attr_name =
api_def_attr.has_default_value()
? strings::StrCat("attrs.", api_def_attr.rename_to(), "_")
: AvoidCPPKeywords(api_def_attr.rename_to());
strings::StrAppend(&body, spaces, ".Attr(\"", graph_attr.name(), "\", ",
attr_name, ")\n");
}
strings::StrAppend(&body, " ;\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateBuilder(&builder);\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateStatus(builder.Finalize(",
scope_str, ".graph(), &ret));\n");
strings::StrAppend(&body, " ", return_on_error, "\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateStatus(", scope_str,
".DoShapeInference(ret));\n");
GetOutput(op_info, &body);
return body;
}
void WriteClassDef(const OpInfo& op_info, WritableFile* cc) {
string class_def;
strings::StrAppend(
&class_def,
      GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
                         /* include_attr */ true),
" {\n");
strings::StrAppend(&class_def, GetConstructorBody(op_info));
strings::StrAppend(&class_def, "}\n\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(
&class_def,
        GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
                           /* include_attr */ false));
strings::StrAppend(&class_def, "\n : ", op_info.op_name, "(");
int i = 0;
for (; i < op_info.arg_names.size(); ++i) {
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.arg_names[i]);
}
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.op_name, "::Attrs()");
strings::StrAppend(&class_def, ") {}\n\n");
}
TF_CHECK_OK(cc->Append(class_def));
}
void WriteCCOp(const OpDef& graph_op_def, const ApiDef& api_def,
const std::vector<string>& aliases, WritableFile* h,
WritableFile* cc) {
OpInfo op_info(graph_op_def, api_def, aliases);
WriteClassDecl(op_info, h);
WriteClassDef(op_info, cc);
}
void StartFiles(bool internal, const string& dot_h_fname, WritableFile* h,
WritableFile* cc, string* op_header_guard) {
  const string header =
      R"header(// This file is MACHINE GENERATED! Do not edit.

#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
)header";
const string namespace_begin = internal ? R"namespace(
namespace tensorflow {
namespace ops {
namespace internal {
)namespace"
: R"namespace(
namespace tensorflow {
namespace ops {
)namespace";
const string op_header = GetPath(dot_h_fname);
*op_header_guard = ToGuard(op_header);
  const string cc_header = strings::StrCat(
      R"include(// This file is MACHINE GENERATED! Do not edit.

#include "tensorflow/cc/ops/const_op.h"
)include",
      "#include \"", op_header, "\"\n", namespace_begin);
const string filename = GetFilename(dot_h_fname);
const string doxygen = strings::StrCat("
ToTitle(filename), "\n", "
TF_CHECK_OK(h->Append(
strings::StrCat("
"#ifndef ",
*op_header_guard,
"\n"
"#define ",
*op_header_guard, "\n\n")));
TF_CHECK_OK(h->Append(header));
TF_CHECK_OK(h->Append(namespace_begin));
TF_CHECK_OK(h->Append(doxygen));
TF_CHECK_OK(cc->Append(cc_header));
}
void FinishFiles(bool internal, WritableFile* h, WritableFile* cc,
const string& op_header_guard) {
const string footer = internal ? R"footer(}
}
}
)footer"
:
R"footer(
}
}
)footer";
TF_CHECK_OK(h->Append(footer));
TF_CHECK_OK(
h->Append(strings::StrCat("\n#endif ", "
TF_CHECK_OK(cc->Append(footer));
TF_CHECK_OK(cc->Close());
TF_CHECK_OK(h->Close());
}
string MakeInternal(const string& fname) {
auto dot_pos = fname.rfind('.');
if (dot_pos == string::npos) {
return strings::StrCat(fname, "_internal");
} else {
return strings::StrCat(fname.substr(0, dot_pos), "_internal",
fname.substr(dot_pos));
}
}
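// e.g. MakeInternal("foo/ops.h") returns "foo/ops_internal.h", while a name
// with no extension simply gains the "_internal" suffix.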
}
void WriteCCOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& dot_h_fname, const string& dot_cc_fname) {
Env* env = Env::Default();
std::unique_ptr<WritableFile> h = nullptr;
std::unique_ptr<WritableFile> cc = nullptr;
TF_CHECK_OK(env->NewWritableFile(dot_h_fname, &h));
TF_CHECK_OK(env->NewWritableFile(dot_cc_fname, &cc));
string op_header_guard;
StartFiles(false, dot_h_fname, h.get(), cc.get(), &op_header_guard);
std::unique_ptr<WritableFile> internal_h = nullptr;
std::unique_ptr<WritableFile> internal_cc = nullptr;
const string internal_dot_h_fname = MakeInternal(dot_h_fname);
TF_CHECK_OK(env->NewWritableFile(internal_dot_h_fname, &internal_h));
TF_CHECK_OK(env->NewWritableFile(MakeInternal(dot_cc_fname), &internal_cc));
string internal_op_header_guard;
  StartFiles(true /* internal */, internal_dot_h_fname, internal_h.get(),
internal_cc.get(), &internal_op_header_guard);
for (const auto& graph_op_def : ops.op()) {
if (graph_op_def.has_deprecation() &&
graph_op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) {
continue;
}
if (graph_op_def.name() == "Const") continue;
const auto* api_def = api_def_map.GetApiDef(graph_op_def.name());
std::vector<string> aliases;
if (api_def->visibility() == ApiDef::SKIP) continue;
for (int endpoint_i = 1; endpoint_i < api_def->endpoint_size();
++endpoint_i) {
aliases.push_back(api_def->endpoint(endpoint_i).name());
}
if (api_def->visibility() == ApiDef::HIDDEN) {
WriteCCOp(graph_op_def, *api_def, aliases, internal_h.get(),
internal_cc.get());
continue;
}
WriteCCOp(graph_op_def, *api_def, aliases, h.get(), cc.get());
}
FinishFiles(false, h.get(), cc.get(), op_header_guard);
  FinishFiles(true /* internal */, internal_h.get(), internal_cc.get(),
internal_op_header_guard);
}
}
} | #include "tensorflow/cc/framework/cc_op_gen.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "images"
description: "Images to process."
}
input_arg {
name: "dim"
description: "Description for dim."
type: DT_FLOAT
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for images"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
default_value {
i: 1
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectDoesNotHaveSubstr(StringPiece s, StringPiece expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'" << s << "' contains '" << expected << "'";
}
void ExpectSubstrOrder(const string& s, const string& before,
const string& after) {
int before_pos = s.find(before);
int after_pos = s.find(after);
ASSERT_NE(std::string::npos, before_pos);
ASSERT_NE(std::string::npos, after_pos);
EXPECT_LT(before_pos, after_pos)
<< before << " is not before " << after << " in " << s;
}
void GenerateCcOpFiles(Env* env, const OpList& ops,
const ApiDefMap& api_def_map, string* h_file_text,
string* internal_h_file_text) {
const string& tmpdir = testing::TmpDir();
const auto h_file_path = io::JoinPath(tmpdir, "test.h");
const auto cc_file_path = io::JoinPath(tmpdir, "test.cc");
const auto internal_h_file_path = io::JoinPath(tmpdir, "test_internal.h");
const auto internal_cc_file_path = io::JoinPath(tmpdir, "test_internal.cc");
cc_op::WriteCCOps(ops, api_def_map, h_file_path, cc_file_path);
TF_ASSERT_OK(ReadFileToString(env, h_file_path, h_file_text));
TF_ASSERT_OK(
ReadFileToString(env, internal_h_file_path, internal_h_file_text));
}
TEST(CcOpGenTest, TestVisibilityChangedToHidden) {
const string api_def = R"(
op {
graph_op_name: "Foo"
visibility: HIDDEN
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string h_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo");
ExpectDoesNotHaveSubstr(internal_h_file_text, "class Foo");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(internal_h_file_text, "class Foo");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo");
}
TEST(CcOpGenTest, TestArgNameChanges) {
const string api_def = R"(
op {
graph_op_name: "Foo"
arg_order: "dim"
arg_order: "images"
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string cc_file_text, h_file_text;
string internal_cc_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectSubstrOrder(h_file_text, "Input images", "Input dim");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectSubstrOrder(h_file_text, "Input dim", "Input images");
}
TEST(CcOpGenTest, TestEndpoints) {
const string api_def = R"(
op {
graph_op_name: "Foo"
endpoint {
name: "Foo1"
}
endpoint {
name: "Foo2"
}
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string cc_file_text, h_file_text;
string internal_cc_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo {");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo1");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo2");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo1");
ExpectHasSubstr(h_file_text, "typedef Foo1 Foo2");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo {");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/cc_op_gen.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/cc_op_gen_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a3462daa-9223-4973-afc8-c1e4444e9482 | cpp | tensorflow/tensorflow | queue_runner | tensorflow/cc/training/queue_runner.cc | tensorflow/cc/training/queue_runner_test.cc | #include "tensorflow/cc/training/queue_runner.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
Status QueueRunner::New(const QueueRunnerDef& queue_runner_def,
std::unique_ptr<QueueRunner>* result) {
result->reset(new QueueRunner());
return (*result)->Init(queue_runner_def);
}
Status QueueRunner::New(const QueueRunnerDef& queue_runner_def,
Coordinator* coord,
std::unique_ptr<QueueRunner>* result) {
result->reset(new QueueRunner());
(*result)->coord_ = coord;
return (*result)->Init(queue_runner_def);
}
void QueueRunner::AddErrorCallback(const std::function<void(Status)>& cb) {
mutex_lock l(cb_mu_);
callbacks_.push_back(cb);
}
void QueueRunner::ClearErrorCallbacks() {
mutex_lock l(cb_mu_);
callbacks_.clear();
}
Status QueueRunner::Init(const QueueRunnerDef& queue_runner_def) {
queue_name_ = queue_runner_def.queue_name();
enqueue_op_names_.clear();
enqueue_op_names_.insert(enqueue_op_names_.end(),
queue_runner_def.enqueue_op_name().begin(),
queue_runner_def.enqueue_op_name().end());
size_t op_names_size = enqueue_op_names_.size();
if (op_names_size > kint32max) {
return Status(absl::StatusCode::kInvalidArgument,
"Enqueue ops to run cannot exceed kint32max");
}
runs_ = static_cast<int>(op_names_size);
if (runs_ == 0) {
return Status(absl::StatusCode::kInvalidArgument,
"Empty enqueue ops to run.");
}
close_op_name_ = queue_runner_def.close_op_name();
cancel_op_name_ = queue_runner_def.cancel_op_name();
if (queue_runner_def.queue_closed_exception_types_size() == 0) {
queue_closed_exception_types_.insert(error::OUT_OF_RANGE);
} else {
for (const auto& code : queue_runner_def.queue_closed_exception_types()) {
queue_closed_exception_types_.insert(static_cast<int>(code));
}
}
int nthreads = runs_;
if (coord_) {
nthreads++;
}
thread_pool_.reset(new thread::ThreadPool(
Env::Default(), SanitizeThreadSuffix(queue_name_), nthreads));
return absl::OkStatus();
}
QueueRunner::~QueueRunner() {
Join().IgnoreError();
}
Status QueueRunner::Start(Session* sess) { return Start(sess, 0); }
Status QueueRunner::StartAndCollectCostGraph(Session* sess,
const RunOptions& run_options) {
SetRunArgumentsAndCostGraph(run_options);
return Start(sess, 0);
}
Status QueueRunner::Start(Session* sess, int wait_for) {
counter_.reset(new BlockingCounter(runs_));
for (const string& enqueue_op : enqueue_op_names_) {
thread_pool_->Schedule(
std::bind(&QueueRunner::Run, this, sess, enqueue_op));
}
if (coord_) {
thread_pool_->Schedule(std::bind(&QueueRunner::Stop, this, sess));
}
if (wait_for > 0) {
if (!counter_->WaitFor(std::chrono::milliseconds(wait_for))) {
return Status(absl::StatusCode::kDeadlineExceeded,
"Queues not fed before the timeout");
}
mutex_lock l(mu_);
if (!enqueue_status_.ok()) {
return enqueue_status_;
} else {
return status_;
}
}
return absl::OkStatus();
}
Status QueueRunner::StartAndCollectCostGraph(Session* session, int wait_for_ms,
const RunOptions& run_options) {
SetRunArgumentsAndCostGraph(run_options);
return Start(session, wait_for_ms);
}
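// Blocks on the Coordinator (if any) until a stop is requested, then runs
// the cancel op so that enqueue threads blocked on a full queue are aborted
// instead of hanging.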
void QueueRunner::Stop(Session* sess) {
if (coord_ != nullptr) {
coord_->WaitForStop();
}
if (!cancel_op_name_.empty()) {
UpdateStatus(RealRun(sess, cancel_op_name_, false));
}
stopped_ = true;
}
Status QueueRunner::Join() {
thread_pool_.reset();
mutex_lock l(mu_);
return status_;
}
void QueueRunner::UpdateStatus(const Status& status) {
{
mutex_lock l(mu_);
if (!status_.ok() || status.ok() || IsQueueClosed(status)) {
return;
}
status_ = status;
}
if (coord_) {
coord_->ReportStatus(status);
}
mutex_lock l(cb_mu_);
for (auto& cb : callbacks_) {
cb(status);
}
}
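// Per-thread enqueue loop. The first iteration decrements counter_ so that
// Start(sess, wait_for) can tell every enqueue op has run at least once.
// On a queue-closed status, the last thread to finish runs the close op;
// any other failure is logged, recorded, and, with a Coordinator attached,
// triggers a stop request.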
void QueueRunner::Run(Session* sess, const string& enqueue_op) {
bool first_iteration = true;
Status status;
while (status.ok()) {
if (coord_ && coord_->ShouldStop()) {
break;
}
status = RealRun(sess, enqueue_op, true);
if (first_iteration) {
if (!status.ok()) {
mutex_lock l(mu_);
enqueue_status_ = status;
}
counter_->DecrementCount();
first_iteration = false;
}
}
bool last_run = false;
{
mutex_lock l(mu_);
runs_--;
last_run = (runs_ == 0);
}
if (IsQueueClosed(status) && (!coord_ || !coord_->ShouldStop())) {
if (last_run && !close_op_name_.empty()) {
UpdateStatus(RealRun(sess, close_op_name_, false));
}
} else if (!status.ok()) {
LOG(ERROR) << "Queue runner thread got a failure status: " << status;
UpdateStatus(status);
if (coord_) {
coord_->RequestStop().IgnoreError();
}
}
}
Status QueueRunner::GetStatus() {
mutex_lock l(mu_);
return status_;
}
Status QueueRunner::ExportCostGraph(CostGraphDef* cost_graph) const {
if (!cg_mu_) {
return Status(absl::StatusCode::kFailedPrecondition,
"This QueueRunner doesn't collect a cost graph.");
}
mutex_lock l(*cg_mu_);
cost_graph->MergeFrom(*cost_graph_);
return absl::OkStatus();
}
void QueueRunner::SetRunArgumentsAndCostGraph(const RunOptions& run_options) {
cg_mu_.reset(new mutex());
{
mutex_lock l(*cg_mu_);
cost_graph_.reset(new CostGraphDef());
}
run_options_ = run_options;
}
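// Runs a single op. When cost-graph collection is enabled, the cost graph
// from the run metadata is swapped into cost_graph_ under cg_mu_.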
Status QueueRunner::RealRun(Session* sess, const string& op,
bool update_costs) {
Status s;
if (update_costs && cg_mu_) {
RunMetadata metadata;
s = sess->Run(run_options_, {}, {}, {op}, nullptr, &metadata);
mutex_lock l(*cg_mu_);
cost_graph_->Swap(metadata.mutable_cost_graph());
} else {
s = sess->Run({}, {}, {op}, nullptr);
}
return s;
}
} | #include "tensorflow/cc/training/queue_runner.h"
#include <string>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/random_ops.h"
#include "tensorflow/cc/ops/state_ops.h"
#include "tensorflow/cc/training/coordinator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/status.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using error::Code;
using ops::Assign;
using ops::Const;
using ops::CountUpTo;
using ops::FIFOQueue;
using ops::QueueClose;
using ops::QueueDequeue;
using ops::QueueEnqueue;
using ops::RandomNormal;
using ops::Square;
using ops::Variable;
constexpr char kAssignOpName[] = "assign";
constexpr char kCancelOp0[] = "cancel0";
constexpr char kCancelOp1[] = "cancel1";
constexpr char kCloseOp0[] = "close0";
constexpr char kCloseOp1[] = "close1";
constexpr char kCountUpToOpName[] = "count";
constexpr char kDequeueOp0[] = "dequeue0";
constexpr char kDequeueOp1[] = "dequeue1";
constexpr char kEnqueueOp0[] = "enqueue0";
constexpr char kEnqueueOp1[] = "enqueue1";
constexpr char kIllegalOpName1[] = "would fail";
constexpr char kIllegalOpName2[] = "fail again";
constexpr char kQueueName[] = "unit_test";
constexpr char kQueueName0[] = "q0";
constexpr char kQueueName1[] = "q1";
constexpr char kSquareOpName[] = "square";
constexpr char kVarOpName[] = "var";
GraphDef BuildSimpleGraph() {
Scope root = Scope::NewRootScope();
auto init_value = Const(root, 0);
auto var = Variable(root.WithOpName(kVarOpName), TensorShape({}),
DataType::DT_INT32);
auto assign = Assign(root.WithOpName(kAssignOpName), var, init_value);
auto count = CountUpTo(root.WithOpName(kCountUpToOpName), var, 10);
Square(root.WithOpName(kSquareOpName), var);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
return graph_def;
}
QueueRunnerDef BuildQueueRunnerDef(
const std::string& queue_name, const std::vector<std::string>& enqueue_ops,
const std::string& close_op, const std::string& cancel_op,
const std::vector<Code>& queue_closed_error_codes) {
QueueRunnerDef queue_runner_def;
*queue_runner_def.mutable_queue_name() = queue_name;
for (const std::string& enqueue_op : enqueue_ops) {
*queue_runner_def.mutable_enqueue_op_name()->Add() = enqueue_op;
}
*queue_runner_def.mutable_close_op_name() = close_op;
*queue_runner_def.mutable_cancel_op_name() = cancel_op;
for (const auto& error_code : queue_closed_error_codes) {
*queue_runner_def.mutable_queue_closed_exception_types()->Add() =
error_code;
}
return queue_runner_def;
}
std::unique_ptr<Session> BuildSessionAndInitVariable(
const GraphDef& graph_def) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
TF_CHECK_OK(session->Run({}, {}, {kAssignOpName}, nullptr));
return session;
}
TEST(QueueRunnerTest, BasicTest) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName}, kSquareOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session->Run({}, {kSquareOpName}, {}, &outputs));
int square_value = *outputs[0].scalar<int>().data();
EXPECT_EQ(square_value, 100);
}
TEST(QueueRunnerTest, QueueClosedCode) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName, kCountUpToOpName}, kSquareOpName, "",
{Code::OUT_OF_RANGE, Code::CANCELLED});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session->Run({}, {kSquareOpName}, {}, &outputs));
int square_value = *outputs[0].scalar<int>().data();
EXPECT_EQ(square_value, 100);
}
TEST(QueueRunnerTest, QueueCloseFails) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kCountUpToOpName}, kIllegalOpName1, "",
{Code::OUT_OF_RANGE});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
auto status = qr->Join();
EXPECT_EQ(status.code(), Code::NOT_FOUND) << status;
}
TEST(QueueRunnerTest, CatchErrorInJoin) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kIllegalOpName1, kIllegalOpName2}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
EXPECT_EQ(qr->Join().code(), Code::NOT_FOUND);
}
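// Builds a two-stage pipeline: values enqueued into q0 are dequeued and
// re-enqueued into q1, so a QueueRunner driving kEnqueueOp1 pumps q0 into
// q1 until q0 is closed.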
GraphDef BuildDoubleQueueGraph() {
Scope root = Scope::NewRootScope();
auto q0 = FIFOQueue(root.WithOpName(kQueueName0), {DataType::DT_INT32});
auto ten = Const(root, 10);
auto enqueue0 = QueueEnqueue(root.WithOpName(kEnqueueOp0), q0, {ten});
auto close0 = QueueClose(root.WithOpName(kCloseOp0), q0);
auto cancel0 = QueueClose(root.WithOpName(kCancelOp0), q0,
QueueClose::CancelPendingEnqueues(true));
auto q1 = FIFOQueue(root.WithOpName(kQueueName1), {DataType::DT_INT32},
FIFOQueue::Capacity(3));
auto dequeue0 =
QueueDequeue(root.WithOpName(kDequeueOp0), q0, {DataType::DT_INT32});
auto enqueue1 = QueueEnqueue(root.WithOpName(kEnqueueOp1), q1, {dequeue0[0]});
auto dequeue1 =
QueueDequeue(root.WithOpName(kDequeueOp1), q1, {DataType::DT_INT32});
auto close1 = QueueClose(root.WithOpName(kCloseOp1), q1);
auto cancel1 = QueueClose(root.WithOpName(kCancelOp1), q1,
QueueClose::CancelPendingEnqueues(true));
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
return graph_def;
}
TEST(QueueRunnerTest, RealEnqueueDequeue) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kEnqueueOp1}, kCloseOp1, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
TF_EXPECT_OK(session->Run({}, {}, {kCloseOp0}, nullptr));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> dq1;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq1));
EXPECT_EQ(*dq1[0].scalar<int>().data(), 10);
std::vector<Tensor> dq2;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq2));
EXPECT_EQ(*dq2[0].scalar<int>().data(), 10);
EXPECT_EQ(session->Run({}, {kDequeueOp1}, {}, nullptr).code(),
Code::OUT_OF_RANGE);
}
void JoinThread(QueueRunner* queue_runner, bool* join_succeeded,
Notification* join_done) {
EXPECT_EQ(queue_runner->Join().code(), Code::CANCELLED);
*join_succeeded = true;
join_done->Notify();
}
TEST(QueueRunnerTest, SessionCloseCancelPendingEnqueue) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
std::vector<Tensor> dq1;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq1));
EXPECT_EQ(*dq1[0].scalar<int>().data(), 10);
bool join_succeeded = false;
Notification join_done;
Env::Default()->SchedClosure(
std::bind(&JoinThread, qr.get(), &join_succeeded, &join_done));
Env::Default()->SleepForMicroseconds(10000000);
EXPECT_EQ(join_succeeded, false);
TF_EXPECT_OK(session->Close());
join_done.WaitForNotification();
EXPECT_EQ(join_succeeded, true);
}
TEST(QueueRunnerTest, EmptyEnqueueOps) {
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
EXPECT_EQ(QueueRunner::New(queue_runner_def, &qr).code(),
Code::INVALID_ARGUMENT);
}
TEST(QueueRunnerTest, StartTimeout) {
GraphDef graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
EXPECT_EQ(qr->Start(session.get(), 1).code(), Code::DEADLINE_EXCEEDED);
TF_EXPECT_OK(session->Close());
}
TEST(QueueRunnerTest, TestCoordinatorStop) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner0 =
BuildQueueRunnerDef(kQueueName0, {kEnqueueOp0}, kCloseOp0, kCancelOp0,
{Code::OUT_OF_RANGE, Code::CANCELLED});
QueueRunnerDef queue_runner1 =
BuildQueueRunnerDef(kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1,
{Code::OUT_OF_RANGE, Code::CANCELLED});
Coordinator coord;
std::unique_ptr<QueueRunner> qr0;
TF_EXPECT_OK(QueueRunner::New(queue_runner0, &coord, &qr0));
TF_CHECK_OK(qr0->Start(session.get()));
std::unique_ptr<QueueRunner> qr1;
TF_EXPECT_OK(QueueRunner::New(queue_runner1, &coord, &qr1));
TF_CHECK_OK(qr1->Start(session.get()));
TF_EXPECT_OK(coord.RegisterRunner(std::move(qr0)));
TF_EXPECT_OK(coord.RegisterRunner(std::move(qr1)));
std::vector<Tensor> dq;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq));
EXPECT_EQ(*dq[0].scalar<int>().data(), 10);
TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join());
}
TEST(QueueRunnerTest, CallbackCalledOnError) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kIllegalOpName1, kIllegalOpName2}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
bool error_caught = false;
qr->AddErrorCallback([&error_caught](const Status&) { error_caught = true; });
TF_EXPECT_OK(qr->Start(session.get()));
EXPECT_FALSE(qr->Join().ok());
EXPECT_TRUE(error_caught);
}
TEST(QueueRunnerTest, RunMetaDataTest) {
Scope root = Scope::NewRootScope();
auto q0 = FIFOQueue(root.WithOpName(kQueueName), {DataType::DT_FLOAT});
Output rnd = RandomNormal(root.WithOpName("rnd"), {1, 1}, DataType::DT_FLOAT);
Output square = Square(root.WithOpName(kSquareOpName), rnd);
auto enqueue0 = QueueEnqueue(root.WithOpName(kEnqueueOp0), q0, {square});
auto close0 = QueueClose(root.WithOpName(kCloseOp0), q0);
auto cancel0 = QueueClose(root.WithOpName(kCancelOp0), q0,
QueueClose::CancelPendingEnqueues(true));
auto dequeue0 =
QueueDequeue(root.WithOpName(kDequeueOp0), q0, {DataType::DT_FLOAT});
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
for (auto& node : *graph_def.mutable_node()) {
node.set_device("/cpu:0");
}
SessionOptions sess_options;
sess_options.config.mutable_graph_options()->set_build_cost_model(1);
std::unique_ptr<Session> session(NewSession(sess_options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kEnqueueOp0}, kCloseOp0, kCancelOp0, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
RunOptions run_options;
TF_CHECK_OK(qr->StartAndCollectCostGraph(session.get(), run_options));
std::vector<Tensor> dq0;
TF_EXPECT_OK(session->Run({}, {kDequeueOp0}, {}, &dq0));
TF_EXPECT_OK(session->Run({}, {kDequeueOp0}, {}, &dq0));
CostGraphDef cost_graph;
TF_CHECK_OK(qr->ExportCostGraph(&cost_graph));
EXPECT_TRUE(cost_graph.node_size() > 0);
qr->Stop(session.get());
}
TEST(QueueRunnerTest, NoRunMetaDataTest) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName}, kSquareOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
CostGraphDef cost_graph;
EXPECT_EQ(qr->ExportCostGraph(&cost_graph).code(),
error::FAILED_PRECONDITION);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/queue_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/queue_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bdce681d-16ba-4605-99c8-21cd2cd7c581 | cpp | tensorflow/tensorflow | coordinator | tensorflow/cc/training/coordinator.cc | tensorflow/cc/training/coordinator_test.cc | #include "tensorflow/cc/training/coordinator.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
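// Coordinator fans a stop request out to a set of registered runners and
// aggregates their statuses, treating the configured "clean stop" error
// codes (OUT_OF_RANGE by default) as normal shutdown. A minimal usage
// sketch, mirroring the tests below (`runner` is assumed to implement
// RunnerInterface):
//
//   Coordinator coord;
//   TF_RETURN_IF_ERROR(coord.RegisterRunner(std::move(runner)));
//   TF_RETURN_IF_ERROR(coord.RequestStop());
//   TF_RETURN_IF_ERROR(coord.Join());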
Coordinator::Coordinator() : Coordinator(std::vector<error::Code>()) {}
Coordinator::Coordinator(const std::vector<error::Code>& clean_stop_errors)
: should_stop_(false) {
if (clean_stop_errors.empty()) {
clean_stop_errors_.insert(error::OUT_OF_RANGE);
} else {
for (const auto& code : clean_stop_errors) {
clean_stop_errors_.insert(static_cast<int>(code));
}
}
}
Coordinator::~Coordinator() {
RequestStop().IgnoreError();
Join().IgnoreError();
}
Status Coordinator::RegisterRunner(std::unique_ptr<RunnerInterface> runner) {
{
mutex_lock l(mu_);
if (should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"The coordinator has been stopped.");
}
}
mutex_lock l(runners_lock_);
runners_.push_back(std::move(runner));
return absl::OkStatus();
}
bool Coordinator::AllRunnersStopped() {
mutex_lock l(runners_lock_);
for (const auto& runner : runners_) {
if (runner->IsRunning()) {
return false;
}
}
return true;
}
Status Coordinator::RequestStop() {
mutex_lock l(mu_);
if (should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"The Coordinator is not running.");
}
should_stop_ = true;
wait_for_stop_.notify_all();
return absl::OkStatus();
}
bool Coordinator::ShouldStop() {
mutex_lock l(mu_);
return should_stop_;
}
Status Coordinator::Join() {
{
mutex_lock l(mu_);
if (!should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"Joining coordinator without requesting to stop.");
}
}
{
mutex_lock l(runners_lock_);
for (const auto& t : runners_) {
ReportStatus(t->Join());
}
runners_.clear();
}
return GetStatus();
}
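// Records only the first non-OK status that is not a clean-stop error;
// later errors are dropped so that Join() reports the root cause.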
void Coordinator::ReportStatus(const Status& status) {
mutex_lock l(status_lock_);
if (status.ok() || !status_.ok() ||
clean_stop_errors_.count(static_cast<int>(status.code())) > 0) {
return;
}
status_ = status;
}
Status Coordinator::GetStatus() {
mutex_lock l(status_lock_);
return status_;
}
void Coordinator::WaitForStop() {
mutex_lock l(mu_);
while (!should_stop_) {
wait_for_stop_.wait(l);
}
}
Status Coordinator::ExportCostGraph(CostGraphDef* cost_graph) const {
mutex_lock l(runners_lock_);
for (auto& t : runners_) {
Status s = t->ExportCostGraph(cost_graph);
if (!s.ok()) {
return s;
}
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/training/coordinator.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using error::Code;
void WaitForStopThread(Coordinator* coord, Notification* about_to_wait,
Notification* done) {
about_to_wait->Notify();
coord->WaitForStop();
done->Notify();
}
TEST(CoordinatorTest, TestStopAndWaitOnStop) {
Coordinator coord;
EXPECT_EQ(coord.ShouldStop(), false);
Notification about_to_wait;
Notification done;
Env::Default()->SchedClosure(
std::bind(&WaitForStopThread, &coord, &about_to_wait, &done));
about_to_wait.WaitForNotification();
Env::Default()->SleepForMicroseconds(1000 * 1000);
EXPECT_FALSE(done.HasBeenNotified());
TF_EXPECT_OK(coord.RequestStop());
done.WaitForNotification();
EXPECT_TRUE(coord.ShouldStop());
}
class MockQueueRunner : public RunnerInterface {
public:
explicit MockQueueRunner(Coordinator* coord) {
coord_ = coord;
join_counter_ = nullptr;
thread_pool_.reset(new thread::ThreadPool(Env::Default(), "test-pool", 10));
stopped_ = false;
}
MockQueueRunner(Coordinator* coord, int* join_counter)
: MockQueueRunner(coord) {
join_counter_ = join_counter;
}
void StartCounting(std::atomic<int>* counter, int until,
Notification* start = nullptr) {
thread_pool_->Schedule(
std::bind(&MockQueueRunner::CountThread, this, counter, until, start));
}
void StartSettingStatus(const Status& status, BlockingCounter* counter,
Notification* start) {
thread_pool_->Schedule(std::bind(&MockQueueRunner::SetStatusThread, this,
status, counter, start));
}
Status Join() override {
if (join_counter_ != nullptr) {
(*join_counter_)++;
}
thread_pool_.reset();
return status_;
}
Status GetStatus() { return status_; }
void SetStatus(const Status& status) { status_ = status; }
bool IsRunning() const override { return !stopped_; }
void Stop() { stopped_ = true; }
private:
void CountThread(std::atomic<int>* counter, int until, Notification* start) {
if (start != nullptr) start->WaitForNotification();
while (!coord_->ShouldStop() && counter->load() < until) {
(*counter)++;
Env::Default()->SleepForMicroseconds(10 * 1000);
}
coord_->RequestStop().IgnoreError();
}
void SetStatusThread(const Status& status, BlockingCounter* counter,
Notification* start) {
start->WaitForNotification();
SetStatus(status);
counter->DecrementCount();
}
std::unique_ptr<thread::ThreadPool> thread_pool_;
Status status_;
Coordinator* coord_;
int* join_counter_;
bool stopped_;
};
TEST(CoordinatorTest, TestRealStop) {
std::atomic<int> counter(0);
Coordinator coord;
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartCounting(&counter, 100);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartCounting(&counter, 100);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
while (counter.load() == 0)
;
TF_EXPECT_OK(coord.RequestStop());
int temp_counter = counter.load();
Env::Default()->SleepForMicroseconds(1000 * 1000);
EXPECT_EQ(temp_counter, counter.load());
TF_EXPECT_OK(coord.Join());
}
TEST(CoordinatorTest, TestRequestStop) {
Coordinator coord;
std::atomic<int> counter(0);
Notification start;
std::unique_ptr<MockQueueRunner> qr;
for (int i = 0; i < 10; i++) {
qr.reset(new MockQueueRunner(&coord));
qr->StartCounting(&counter, 10, &start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
}
start.Notify();
coord.WaitForStop();
EXPECT_EQ(coord.ShouldStop(), true);
EXPECT_EQ(counter.load(), 10);
TF_EXPECT_OK(coord.Join());
}
TEST(CoordinatorTest, TestJoin) {
Coordinator coord;
int join_counter = 0;
std::unique_ptr<MockQueueRunner> qr1(
new MockQueueRunner(&coord, &join_counter));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(
new MockQueueRunner(&coord, &join_counter));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join());
EXPECT_EQ(join_counter, 2);
}
TEST(CoordinatorTest, StatusReporting) {
Coordinator coord({Code::CANCELLED, Code::OUT_OF_RANGE});
Notification start;
BlockingCounter counter(3);
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartSettingStatus(Status(absl::StatusCode::kCancelled, ""), &counter,
&start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartSettingStatus(Status(absl::StatusCode::kInvalidArgument, ""),
&counter, &start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
std::unique_ptr<MockQueueRunner> qr3(new MockQueueRunner(&coord));
qr3->StartSettingStatus(Status(absl::StatusCode::kOutOfRange, ""), &counter,
&start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr3)));
start.Notify();
counter.Wait();
TF_EXPECT_OK(coord.RequestStop());
EXPECT_EQ(coord.Join().code(), absl::StatusCode::kInvalidArgument);
}
TEST(CoordinatorTest, JoinWithoutStop) {
Coordinator coord;
std::unique_ptr<MockQueueRunner> qr(new MockQueueRunner(&coord));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
EXPECT_EQ(coord.Join().code(), Code::FAILED_PRECONDITION);
}
TEST(CoordinatorTest, AllRunnersStopped) {
Coordinator coord;
MockQueueRunner* qr = new MockQueueRunner(&coord);
TF_ASSERT_OK(coord.RegisterRunner(std::unique_ptr<RunnerInterface>(qr)));
EXPECT_FALSE(coord.AllRunnersStopped());
qr->Stop();
EXPECT_TRUE(coord.AllRunnersStopped());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/coordinator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/coordinator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
711898fb-a9b8-4620-bf51-767c85a01c3c | cpp | tensorflow/tensorflow | const_op | tensorflow/compiler/tf2xla/kernels/const_op.cc | tensorflow/cc/ops/const_op_test.cc | #include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
template <typename DstT, typename SrcT>
DstT CastTo(SrcT src) {
return static_cast<DstT>(src);
}
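// For Eigen::half and bfloat16 the TensorProto stores the raw 16-bit
// pattern in an int32 field, so the conversion is a bit_cast of the low
// 16 bits rather than a numeric cast.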
template <typename DstT,
typename std::enable_if<std::is_same<DstT, Eigen::half>::value ||
std::is_same<DstT, bfloat16>::value>::type* =
nullptr>
DstT CastTo(int32_t src) {
return absl::bit_cast<DstT>(static_cast<uint16>(src));
}
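// Returns a scalar constant when `proto` encodes a splat: more than one
// element, an empty tensor_content, and zero or one repeated value (two
// for complex types). The caller broadcasts the scalar to the full shape,
// avoiding a large literal. Returns an invalid XlaOp otherwise.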
xla::XlaOp GetScalarConst(const TensorProto& proto, xla::XlaBuilder* b) {
if (!proto.tensor_content().empty()) return xla::XlaOp();
TensorShape shape(proto.tensor_shape());
if (shape.num_elements() > 1) {
switch (proto.dtype()) {
#define HANDLE_SPLAT(DTYPE, field_name, xla_type) \
case DTYPE: \
if (proto.field_name##_val_size() == 0) { \
return xla::ConstantR0(b, CastTo<xla_type>(0)); \
} else if (proto.field_name##_val_size() == 1) { \
return xla::ConstantR0(b, CastTo<xla_type>(proto.field_name##_val(0))); \
} \
break;
HANDLE_SPLAT(DT_BOOL, bool, bool);
HANDLE_SPLAT(DT_INT8, int, int8_t);
HANDLE_SPLAT(DT_INT16, int, int16_t);
HANDLE_SPLAT(DT_INT32, int, int32_t);
HANDLE_SPLAT(DT_INT64, int64, int64_t);
HANDLE_SPLAT(DT_UINT8, int, uint8_t);
HANDLE_SPLAT(DT_UINT16, int, uint16_t);
HANDLE_SPLAT(DT_UINT32, uint32, uint32_t);
HANDLE_SPLAT(DT_UINT64, uint64, uint64_t);
HANDLE_SPLAT(DT_FLOAT, float, float);
HANDLE_SPLAT(DT_DOUBLE, double, double);
HANDLE_SPLAT(DT_BFLOAT16, half, bfloat16);
HANDLE_SPLAT(DT_HALF, half, Eigen::half);
#undef HANDLE_SPLAT
#define HANDLE_COMPLEX_SPLAT(DTYPE, field_name, xla_type) \
case DTYPE: \
if (proto.field_name##_val_size() == 2) { \
return xla::ConstantR0<xla_type>( \
b, xla_type(proto.field_name##_val(0), proto.field_name##_val(1))); \
} \
break;
HANDLE_COMPLEX_SPLAT(DT_COMPLEX64, scomplex, xla::complex64);
HANDLE_COMPLEX_SPLAT(DT_COMPLEX128, dcomplex, xla::complex128);
#undef HANDLE_COMPLEX_SPLAT
default:
break;
}
}
return xla::XlaOp();
}
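// XLA kernel for Const: verifies the value's dtype against the declared
// output type, emits a Broadcast of a scalar for splat protos, and
// otherwise materializes the tensor and registers it as a compile-time
// constant output.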
class ConstOp : public XlaOpKernel {
public:
explicit ConstOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
const TensorProto* proto = nullptr;
OP_REQUIRES_OK(ctx, ctx->GetAttr("value", &proto));
proto_ = *proto;
OP_REQUIRES(
ctx, ctx->output_type(0) == proto_.dtype(),
errors::InvalidArgument("Type mismatch between value (",
DataTypeString(proto_.dtype()), ") and dtype (",
DataTypeString(ctx->output_type(0)), ")"));
OP_REQUIRES_OK(ctx, TensorShape::IsValidShape(proto_.tensor_shape()));
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaBuilder* b = ctx->builder();
TensorShape shape(proto_.tensor_shape());
if (shape.num_elements() > 1) {
xla::XlaOp value = GetScalarConst(proto_, b);
if (value.valid()) {
ctx->SetOutput(0, xla::Broadcast(value, shape.dim_sizes()));
return;
}
}
Tensor tensor(proto_.dtype());
OP_REQUIRES(ctx, tensor.FromProto(cpu_allocator(), proto_),
errors::InvalidArgument("Cannot parse tensor from proto: ",
proto_.DebugString()));
ctx->SetConstantOutput(0, tensor);
}
private:
TensorProto proto_;
ConstOp(const ConstOp&) = delete;
void operator=(const ConstOp&) = delete;
};
REGISTER_XLA_OP(Name("Const").CompilationOnly(), ConstOp);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
template <typename T>
void ExpectNodeEqual(const Node* n, gtl::ArraySlice<T> values,
TensorShape shape) {
EXPECT_TRUE(n->IsConstant());
Tensor tensor;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor));
DataType dtype;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype));
EXPECT_EQ(tensor.dtype(), dtype);
test::ExpectTensorEqual<T>(tensor, test::AsTensor(values, shape));
}
void ExpectTypeAndShape(const Node* n, DataType expected_dtype,
TensorShape expected_shape) {
EXPECT_TRUE(n->IsConstant());
Tensor tensor;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor));
DataType dtype;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype));
EXPECT_EQ(dtype, expected_dtype);
EXPECT_EQ(expected_shape, TensorShape(tensor.shape()));
}
}
TEST(ConstOpTest, Basic) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
TF_EXPECT_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_FLOAT);
ExpectNodeEqual<float>(c.node(), {42.0f}, {});
}
TEST(ConstOpTest, MultiDim) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, {{2.0}, {3.0}});
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {2.0, 3.0}, {2, 1});
}
TEST(ConstOpTest, Empty) {
Scope root = Scope::NewRootScope();
auto c1 = ops::Const(root, {});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c1.node(), DT_FLOAT, {0});
auto c2 = ops::Const(root, {{}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c2.node(), DT_FLOAT, {1, 0});
auto c3 = ops::Const(root, {{{}, {}}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c3.node(), DT_FLOAT, {1, 2, 0});
auto c4 = ops::Const<int>(root, {{{}}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c4.node(), DT_INT32, {1, 1, 0});
ops::Const(root, {{}, {{}}});
EXPECT_FALSE(root.status().ok());
}
TEST(ConstOpTest, WithExplicitShape) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0, {2, 2});
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {42.0, 42.0, 42.0, 42.0}, {2, 2});
auto d = ops::Const(root, {"1", "2", "3", "4", "5", "6"}, {2, 3});
TF_CHECK_OK(root.status());
EXPECT_EQ(d.op().output_type(0), DT_STRING);
ExpectNodeEqual<tstring>(d.node(), {"1", "2", "3", "4", "5", "6"}, {2, 3});
}
TEST(ConstOpTest, FromProto) {
Scope root = Scope::NewRootScope();
TensorProto proto;
proto.set_dtype(DT_DOUBLE);
TensorShape({2, 2}).AsProto(proto.mutable_tensor_shape());
for (int i = 0; i < 4; ++i) {
proto.add_double_val(static_cast<double>(i));
}
auto c = ops::ConstFromProto(root, proto);
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {0.0, 1.0, 2.0, 3.0}, {2, 2});
}
TEST(ConstOpTest, InvalidInitializer) {
Scope root = Scope::NewRootScope();
ops::Const(root, {{2.0}, {"df"}});
EXPECT_FALSE(root.status().ok());
}
TEST(ConstOpTest, Names) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, {{2.0}, {3.0}});
EXPECT_EQ(c.node()->name(), "Const");
auto c_1 = ops::Const(root, {{2.0}, {3.0}});
EXPECT_EQ(c_1.node()->name(), "Const_1");
auto x = ops::Const(root.WithOpName("x"), 1);
EXPECT_EQ(x.node()->name(), "x");
auto x_1 = ops::Const(root.WithOpName("x"), 1);
EXPECT_EQ(x_1.node()->name(), "x_1");
Scope child = root.NewSubScope("c");
auto c_y = ops::Const(child.WithOpName("y"), 1);
EXPECT_EQ(c_y.node()->name(), "c/y");
auto c_y_1 = ops::Const(child.WithOpName("y"), 1);
EXPECT_EQ(c_y_1.node()->name(), "c/y_1");
}
TEST(ConstOpTest, TemplatedConst) {
Scope root = Scope::NewRootScope();
auto c1 = ops::Const<int>(root, {1, 2});
ExpectTypeAndShape(c1.node(), DT_INT32, {2});
auto c2 = ops::Const<tstring>(root, {{"this"}, {"is"}, {"a"}, {"constant"}});
ExpectTypeAndShape(c2.node(), DT_STRING, {4, 1});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/const_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/ops/const_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
969c9ff4-67ad-4903-a965-e394e51a07a6 | cpp | tensorflow/tensorflow | while_loop | tensorflow/cc/ops/while_loop.cc | tensorflow/c/while_loop_test.cc | #include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
namespace ops {
namespace {
OutputTensor ToOutputTensor(const Output& output) {
return OutputTensor(output.node(), output.index());
}
std::vector<OutputTensor> ToOutputTensors(const std::vector<Output>& outputs) {
std::vector<OutputTensor> result(outputs.size());
for (size_t i = 0; i < outputs.size(); ++i) {
result[i] = ToOutputTensor(outputs[i]);
}
return result;
}
std::vector<Node*> ToNodes(const std::vector<Output>& outputs) {
std::vector<Node*> result(outputs.size());
for (size_t i = 0; i < outputs.size(); ++i) {
result[i] = outputs[i].node();
}
return result;
}
string NextIterationName(const Scope& scope, int loop_var_idx) {
string result;
const string& prefix = scope.impl()->name();
if (!prefix.empty()) strings::StrAppend(&result, prefix, "/");
strings::StrAppend(&result, "NextIteration");
if (loop_var_idx > 0) strings::StrAppend(&result, "_", loop_var_idx);
return result;
}
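// Builds the Merge node for one loop variable. The matching NextIteration
// node does not exist yet, so its input is declared by name (see
// NextIterationName); the real backedge is wired up later in
// BuildWhileLoop via AddEdge.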
Status CreateMerge(const Scope& scope, int loop_var_idx,
const Output& enter_output, Output* merge_output) {
NodeBuilder::NodeOut enter_input(enter_output.node(), enter_output.index());
const int next_output_index = 0;
DataType dtype = enter_output.node()->output_type(0);
NodeBuilder::NodeOut next_input(NextIterationName(scope, loop_var_idx),
next_output_index, dtype);
std::vector<NodeBuilder::NodeOut> input_list({enter_input, next_input});
const string unique_name = scope.GetUniqueNameForOp("Merge");
NodeBuilder builder = NodeBuilder(unique_name, "Merge").Input(input_list);
scope.UpdateBuilder(&builder);
Node* merge_node;
TF_RETURN_IF_ERROR(builder.Finalize(scope.graph(), &merge_node));
TF_RETURN_IF_ERROR(scope.DoShapeInference(merge_node));
*merge_output = Output(merge_node, 0);
return absl::OkStatus();
}
Status CreateCond(const Scope& scope, const CondGraphBuilderFn& cond,
const std::vector<Output>& inputs, Output* output) {
Scope cond_scope =
scope.NewSubScope("cond").WithControlDependencies(inputs[0]);
Output raw_cond_out;
TF_RETURN_IF_ERROR(cond(cond_scope, inputs, &raw_cond_out));
TF_RETURN_IF_ERROR(scope.graph()->IsValidOutputTensor(raw_cond_out.node(),
raw_cond_out.index()));
if (raw_cond_out.type() != DT_BOOL) {
return errors::InvalidArgument(
"BuildWhileLoop: 'cond' argument must return a boolean output, got ",
DataTypeString(raw_cond_out.type()));
}
*output = LoopCond(scope, raw_cond_out).output;
return absl::OkStatus();
}
Status CreateBody(const Scope& scope, const BodyGraphBuilderFn& body,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK(outputs != nullptr);
DCHECK(outputs->empty());
Scope body_scope =
scope.NewSubScope("body").WithControlDependencies(inputs[0]);
TF_RETURN_IF_ERROR(body(body_scope, inputs, outputs));
const size_t num_loop_vars = inputs.size();
if (outputs->size() != num_loop_vars) {
return errors::InvalidArgument(
"BuildWhileLoop: 'body' argument expected to return ", num_loop_vars,
" output(s), got ", outputs->size());
}
for (const Output& output : *outputs) {
TF_RETURN_IF_ERROR(
scope.graph()->IsValidOutputTensor(output.node(), output.index()));
}
return absl::OkStatus();
}
}
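// Wires up the standard while-loop subgraph for each loop variable: Enter
// feeds a Merge, the Merge feeds a Switch gated by a single LoopCond built
// from the user's cond graph; the true branch feeds the body, whose output
// flows through NextIteration back into the Merge, while the false branch
// feeds Exit, producing the loop's outputs. The NextIteration -> Merge
// backedges are added explicitly at the end because each Merge was created
// with only a by-name forward reference to its NextIteration node.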
Status BuildWhileLoop(const Scope& scope, const std::vector<Output>& inputs,
const CondGraphBuilderFn& cond,
const BodyGraphBuilderFn& body, const string& frame_name,
OutputList* outputs, bool create_while_ctx,
Output* cond_output) {
DCHECK(!inputs.empty());
DCHECK(outputs != nullptr);
DCHECK(outputs->empty());
TF_RETURN_IF_ERROR(scope.status());
const size_t num_loop_vars = inputs.size();
std::vector<Output> enter_outputs(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
enter_outputs[i] = internal::Enter(scope, inputs[i], frame_name);
}
TF_RETURN_IF_ERROR(scope.status());
std::vector<Output> merge_outputs(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
TF_RETURN_IF_ERROR(
CreateMerge(scope, i, enter_outputs[i], &merge_outputs[i]));
}
Output cond_out;
TF_RETURN_IF_ERROR(CreateCond(scope, cond, merge_outputs, &cond_out));
if (cond_output != nullptr) *cond_output = cond_out;
std::vector<Output> switch_trues(num_loop_vars);
std::vector<Output> switch_falses(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
auto switch_i = Switch(scope, merge_outputs[i], cond_out);
switch_trues[i] = switch_i.output_true;
switch_falses[i] = switch_i.output_false;
}
TF_RETURN_IF_ERROR(scope.status());
std::vector<Output> body_outputs;
TF_RETURN_IF_ERROR(CreateBody(scope, body, switch_trues, &body_outputs));
std::vector<Output> next_outputs(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
next_outputs[i] = NextIteration(scope, body_outputs[i]);
DCHECK_EQ(next_outputs[i].node()->name(), NextIterationName(scope, i));
}
TF_RETURN_IF_ERROR(scope.status());
for (size_t i = 0; i < num_loop_vars; ++i) {
const int merge_backedge_output_index = 1;
scope.graph()->AddEdge(next_outputs[i].node(), next_outputs[i].index(),
merge_outputs[i].node(),
merge_backedge_output_index);
}
outputs->resize(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
(*outputs)[i] = internal::Exit(scope, switch_falses[i]);
}
TF_RETURN_IF_ERROR(scope.status());
if (create_while_ctx) {
WhileContext* while_ctx;
TF_RETURN_IF_ERROR(scope.graph()->AddWhileContext(
frame_name, ToNodes(enter_outputs), ToNodes(*outputs),
ToOutputTensor(cond_out), ToOutputTensors(switch_trues),
ToOutputTensors(body_outputs), &while_ctx));
for (size_t i = 0; i < num_loop_vars; ++i) {
(*outputs)[i].node()->set_while_ctx(while_ctx);
}
}
return absl::OkStatus();
}
}
} | #include <memory>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::GraphDef;
namespace {
class CApiWhileLoopTest : public ::testing::Test {
protected:
CApiWhileLoopTest() : s_(TF_NewStatus()), graph_(TF_NewGraph()) {}
~CApiWhileLoopTest() override {
TF_DeleteGraph(graph_);
TF_DeleteStatus(s_);
}
void Init(int ninputs) {
DCHECK(inputs_.empty());
DCHECK_GT(ninputs, 0);
for (int i = 0; i < ninputs; ++i) {
TF_Operation* placeholder = Placeholder(
graph_, s_, ::tensorflow::strings::StrCat("p", i).c_str());
DCHECK_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
inputs_.push_back({placeholder, 0});
}
original_graph_description_ = GraphDebugString();
params_ = std::make_unique<TF_WhileParams>(
TF_NewWhile(graph_, &inputs_[0], inputs_.size(), s_));
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_EQ(original_graph_description_, GraphDebugString())
<< "TF_NewWhile() altered graph";
params_->name = "test_loop";
outputs_.resize(ninputs, {nullptr, -1});
}
void ExpectOK() {
TF_FinishWhile(params_.get(), s_, &outputs_[0]);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
void ExpectError(TF_Code expected_code, const string& expected_msg) {
TF_FinishWhile(params_.get(), s_, &outputs_[0]);
EXPECT_EQ(expected_code, TF_GetCode(s_));
EXPECT_EQ(expected_msg, TF_Message(s_));
}
void Run(std::initializer_list<int> input_values) {
Run(outputs_, input_values);
}
void Run(const std::vector<TF_Output>& run_outputs,
std::initializer_list<int> input_values) {
DCHECK_EQ(inputs_.size(), input_values.size());
std::vector<std::pair<TF_Operation*, TF_Tensor*>> inputs(inputs_.size());
int i = 0;
for (int v : input_values) {
inputs[i] = {inputs_[i].oper, Int32Tensor(v)};
++i;
}
csession_ = std::make_unique<CSession>(graph_, s_);
csession_->SetInputs(inputs);
csession_->SetOutputs(run_outputs);
csession_->Run(s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
void ExpectOutputValue(int idx, int expected_value) {
TF_Tensor* out = csession_->output_tensor(idx);
ASSERT_TRUE(out != nullptr);
EXPECT_EQ(TF_INT32, TF_TensorType(out));
EXPECT_EQ(0, TF_NumDims(out));
ASSERT_EQ(sizeof(int32_t), TF_TensorByteSize(out));
int32_t* data = static_cast<int32_t*>(TF_TensorData(out));
EXPECT_EQ(expected_value, *data);
}
void CreateCondGraph() {
TF_Operation* one = ScalarConst(1, params_->cond_graph, s_);
TF_Operation* less_than =
LessThan(params_->cond_inputs[0], {one, 0}, params_->cond_graph, s_);
DCHECK_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->cond_output = {less_than, 0};
}
string GraphDebugString() const {
TF_Buffer* buf = TF_NewBuffer();
TF_GraphToGraphDef(graph_, buf, s_);
DCHECK_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef def;
bool success = def.ParseFromArray(buf->data, buf->length);
DCHECK(success);
TF_DeleteBuffer(buf);
return def.DebugString();
}
TF_Status* s_;
TF_Graph* graph_;
std::vector<TF_Output> inputs_;
std::vector<TF_Output> outputs_;
std::unique_ptr<TF_WhileParams> params_;
std::unique_ptr<CSession> csession_;
private:
string original_graph_description_;
};
TEST_F(CApiWhileLoopTest, BasicLoop) {
Init(2);
EXPECT_TRUE(params_->body_graph != nullptr);
EXPECT_TRUE(params_->cond_graph != nullptr);
EXPECT_EQ(params_->ninputs, 2);
ASSERT_TRUE(params_->cond_inputs != nullptr);
ASSERT_TRUE(params_->cond_inputs[0].oper != nullptr);
EXPECT_TRUE(params_->cond_inputs[1].oper != nullptr);
ASSERT_TRUE(params_->body_inputs != nullptr);
EXPECT_TRUE(params_->body_inputs[0].oper != nullptr);
EXPECT_TRUE(params_->body_inputs[1].oper != nullptr);
ASSERT_TRUE(params_->body_outputs != nullptr);
TF_Operation* less_than =
LessThan(params_->cond_inputs[0], params_->cond_inputs[1],
params_->cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->cond_output = {less_than, 0};
TF_Operation* add1 = Add(params_->body_inputs[0], params_->body_inputs[1],
params_->body_graph, s_, "add1");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* one = ScalarConst(1, params_->body_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* add2 = Add(add1, one, params_->body_graph, s_, "add2");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->body_outputs[0] = {add2, 0};
params_->body_outputs[1] = params_->body_inputs[1];
ExpectOK();
EXPECT_TRUE(outputs_[0].oper != nullptr);
EXPECT_GE(outputs_[0].index, 0);
EXPECT_TRUE(outputs_[1].oper != nullptr);
EXPECT_GE(outputs_[1].index, 0);
for (int i = 0; i < params_->ninputs; ++i) {
string cond_name =
::tensorflow::strings::StrCat(params_->name, "/cond/cond_input", i);
string body_name =
::tensorflow::strings::StrCat(params_->name, "/body/body_input", i);
EXPECT_TRUE(TF_GraphOperationByName(graph_, cond_name.c_str()) == nullptr);
EXPECT_TRUE(TF_GraphOperationByName(graph_, body_name.c_str()) == nullptr);
}
Run({-9, 2});
ExpectOutputValue(0, 3);
ExpectOutputValue(1, 2);
}
TEST_F(CApiWhileLoopTest, NestedLoop) {
Init(2);
TF_Operation* six = ScalarConst(6, params_->cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* less_than =
LessThan(params_->cond_inputs[0], {six, 0}, params_->cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->cond_output = {less_than, 0};
TF_Output inner_inputs[] = {params_->body_inputs[0], params_->body_inputs[1]};
TF_WhileParams inner_params =
TF_NewWhile(params_->body_graph, inner_inputs, 2, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
inner_params.name = "inner_loop";
TF_Operation* three = ScalarConst(3, inner_params.cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* inner_less_than = LessThan(
inner_params.cond_inputs[0], {three, 0}, inner_params.cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
inner_params.cond_output = {inner_less_than, 0};
TF_Operation* one = ScalarConst(1, inner_params.body_graph, s_, "one");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* two = ScalarConst(2, inner_params.body_graph, s_, "two");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* input2_add =
Add(inner_params.body_inputs[1].oper, one, inner_params.body_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
inner_params.body_outputs[1] = {input2_add, 0};
TF_Operation* inner_input1_add = Add(inner_params.body_inputs[0].oper, two,
inner_params.body_graph, s_, "add2");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
inner_params.body_outputs[0] = {inner_input1_add, 0};
TF_Output inner_outputs[2] = {{nullptr, -1}, {nullptr, -1}};
TF_FinishWhile(&inner_params, s_, inner_outputs);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* input1_add =
Add(params_->body_inputs[0], inner_outputs[1], params_->body_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->body_outputs[0] = {input1_add, 0};
params_->body_outputs[1] = inner_outputs[1];
ExpectOK();
const char* node_name = "test_loop/cond/scalar";
EXPECT_TRUE(TF_GraphOperationByName(graph_, node_name) != nullptr);
node_name = "test_loop/body/add";
EXPECT_TRUE(TF_GraphOperationByName(graph_, node_name) != nullptr);
node_name = "test_loop/body/inner_loop/body/one";
EXPECT_TRUE(TF_GraphOperationByName(graph_, node_name) != nullptr);
node_name = "test_loop/body/inner_loop/cond/less_than";
EXPECT_TRUE(TF_GraphOperationByName(graph_, node_name) != nullptr);
Run({0, 0});
ExpectOutputValue(0, 8);
ExpectOutputValue(1, 3);
}
TEST_F(CApiWhileLoopTest, UnsetCondOutput) {
Init(1);
params_->body_outputs[0] = params_->body_inputs[0];
ExpectError(TF_INVALID_ARGUMENT,
"TF_WhileParams `cond_output` field isn't set");
}
TEST_F(CApiWhileLoopTest, WrongCondOutputType) {
Init(1);
params_->cond_output = params_->cond_inputs[0];
params_->body_outputs[0] = params_->body_inputs[0];
ExpectError(TF_INVALID_ARGUMENT,
"BuildWhileLoop: 'cond' argument must return a boolean output, "
"got int32");
}
TEST_F(CApiWhileLoopTest, InvalidCondOutputNode) {
Init(1);
params_->cond_output = inputs_[0];
params_->body_outputs[0] = params_->body_inputs[0];
ExpectError(TF_INVALID_ARGUMENT,
"Requested return tensor 'p0:0' not found in graph def");
}
TEST_F(CApiWhileLoopTest, InvalidCondOutputIndex) {
Init(1);
CreateCondGraph();
params_->cond_output.index = 100;
params_->body_outputs[0] = params_->body_inputs[0];
ExpectError(TF_INVALID_ARGUMENT,
"Invalid return output 100 of node 'less_than', which has 1 "
"output(s)");
}
TEST_F(CApiWhileLoopTest, UnsetBodyOutput) {
Init(1);
CreateCondGraph();
ExpectError(TF_INVALID_ARGUMENT,
"TF_WhileParams `body_outputs[0]` field isn't set");
}
TEST_F(CApiWhileLoopTest, InvalidBodyOutputNode) {
Init(1);
CreateCondGraph();
params_->body_outputs[0] = inputs_[0];
ExpectError(TF_INVALID_ARGUMENT,
"Requested return tensor 'p0:0' not found in graph def");
}
TEST_F(CApiWhileLoopTest, NullName) {
Init(1);
CreateCondGraph();
params_->body_outputs[0] = params_->body_inputs[0];
params_->name = nullptr;
ExpectError(TF_INVALID_ARGUMENT, "TF_WhileParams `name` field is null");
}
TEST_F(CApiWhileLoopTest, WrongGraph) {
Init(1);
CreateCondGraph();
params_->body_outputs[0] = inputs_[0];
ExpectError(TF_INVALID_ARGUMENT,
"Requested return tensor 'p0:0' not found in graph def");
}
TEST_F(CApiWhileLoopTest, BadTypes) {
Init(1);
CreateCondGraph();
TF_OperationDescription* desc = TF_NewOperation(
params_->body_graph, "FakeQuantWithMinMaxArgs", "float_op");
TF_AddInput(desc, params_->body_inputs[0]);
TF_FinishOperation(desc, s_);
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
string msg(TF_Message(s_));
EXPECT_NE(msg.find("Input 'inputs' passed int32 expected float while "
"building NodeDef 'float_op'"),
msg.npos);
TF_AbortWhile(params_.get());
}
TEST_F(CApiWhileLoopTest, Gradients) {
Init(1);
TF_Operation* ten = ScalarConst(10, params_->cond_graph, s_);
TF_Operation* less_than =
LessThan(params_->cond_inputs[0], {ten, 0}, params_->cond_graph, s_);
DCHECK_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->cond_output = {less_than, 0};
TF_Operation* one = ScalarConst(1, params_->body_graph, s_);
TF_Operation* add =
Add(params_->body_inputs[0], {one, 0}, params_->body_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params_->body_outputs[0] = {add, 0};
ExpectOK();
TF_Output grad_output;
TF_AddGradients(graph_, outputs_.data(), outputs_.size(), inputs_.data(), 1,
nullptr, s_, &grad_output);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Run({grad_output}, {0});
ExpectOutputValue(0, 1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/ops/while_loop.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/while_loop_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa4eaeed-d89a-44ac-8a40-cbeca40aa15a | cpp | tensorflow/tensorflow | resource_variable_grad | tensorflow/cc/gradients/resource_variable_grad.cc | tensorflow/cc/gradients/resource_variable_grad_test.cc | #include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/array_ops.h"
namespace tensorflow {
namespace ops {
namespace {
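// Reading a resource variable is the identity on its value, so the
// gradient is simply the incoming gradient passed through.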
Status ReadVariableOpGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("ReadVariableOp", ReadVariableOpGrad);
}
}
} | #include <iostream>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace ops {
namespace {
TEST(ResourceVariableGradTest, ReadVariableOpGrad) {
TensorShape shape({});
auto scope = Scope::NewRootScope();
auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
auto var = VarHandleOp(scope, DT_FLOAT, shape);
auto init = AssignVariableOp(scope, var, Const(scope, 2.0f, shape));
auto temp = ReadVariableOp(scope, var, DT_FLOAT);
auto y = Mul(scope, temp, x);
auto dy = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
OutputList dxs;
TF_ASSERT_OK(AddSymbolicGradients(scope, {y}, {var}, {dy}, &dxs));
ClientSession::FeedType feed_list;
feed_list.insert({x, 5.0f});
feed_list.insert({dy, 1.0f});
std::vector<Tensor> dxout;
ClientSession session(scope);
TF_ASSERT_OK(session.Run(feed_list, dxs, &dxout));
auto grad = dxout[0].scalar<float>()();
EXPECT_EQ(grad, 5.0f);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/resource_variable_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/resource_variable_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae9adf8a-7c8c-424b-ab3f-557b514e76f7 | cpp | tensorflow/tensorflow | manip_grad | tensorflow/cc/gradients/manip_grad.cc | tensorflow/cc/gradients/manip_grad_test.cc | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/manip_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
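// The gradient of Roll is a Roll by the negated shifts along the same
// axes; the integer shift and axis inputs receive no gradient.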
Status RollGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto shift = op.input(1);
auto axis = op.input(2);
auto grad_op = Roll(scope, grad_inputs[0], Neg(scope, shift), axis);
grad_outputs->push_back(grad_op);
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Roll", RollGrad);
}
}
} | #include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/manip_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using ops::Placeholder;
using ops::Roll;
class ManipGradTest : public ::testing::Test {
protected:
ManipGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
Scope scope_;
};
TEST_F(ManipGradTest, RollGrad) {
TensorShape shape({5, 4, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Roll(scope_, x, {2, 1}, {0, 1});
RunTest(x, shape, y, shape);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/manip_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/manip_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95a70d08-876e-43a6-95e7-3b610dc108ae | cpp | tensorflow/tensorflow | data_flow_grad | tensorflow/cc/gradients/data_flow_grad.cc | tensorflow/cc/gradients/data_flow_grad_test.cc | #include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/data_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
namespace tensorflow {
namespace ops {
namespace {
REGISTER_NO_GRADIENT_OP("Queue");
REGISTER_NO_GRADIENT_OP("QueueEnqueue");
REGISTER_NO_GRADIENT_OP("QueueEnqueueMany");
REGISTER_NO_GRADIENT_OP("QueueDequeue");
REGISTER_NO_GRADIENT_OP("QueueDequeueMany");
REGISTER_NO_GRADIENT_OP("QueueDequeueUpTo");
REGISTER_NO_GRADIENT_OP("QueueClose");
REGISTER_NO_GRADIENT_OP("QueueSize");
REGISTER_NO_GRADIENT_OP("Stack");
REGISTER_NO_GRADIENT_OP("StackPush");
REGISTER_NO_GRADIENT_OP("StackPop");
REGISTER_NO_GRADIENT_OP("StackClose");
REGISTER_NO_GRADIENT_OP("GetSessionHandle");
REGISTER_NO_GRADIENT_OP("GetSessionHandleV2");
REGISTER_NO_GRADIENT_OP("GetSessionTensor");
REGISTER_NO_GRADIENT_OP("DeleteSessionTensor");
Status DynamicPartitionGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto data = op.input(0);
auto partitions = op.input(1);
int32_t num_partitions;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "num_partitions", &num_partitions));
auto partitions_shape = Shape(scope, partitions);
auto zero = Const(scope, 0);
auto one = Const(scope, 1);
auto original_indices = Reshape(
scope, Range(scope, zero, Prod(scope, partitions_shape, zero), one),
partitions_shape);
auto partitioned_indices =
DynamicPartition(scope, original_indices, partitions, num_partitions);
auto reconstructed =
DynamicStitch(scope, partitioned_indices.outputs, grad_inputs);
grad_outputs->push_back(Reshape(scope, reconstructed, Shape(scope, data)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("DynamicPartition", DynamicPartitionGrad);
Status DynamicStitchGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int32_t num_values = op.num_inputs() / 2;
for (int32_t i = 0; i < num_values; i++) {
grad_outputs->push_back(NoGradient());
}
for (int32_t i = 0; i < num_values; i++) {
auto index = op.input(i);
if (index.type() != DT_INT32) {
index = Cast(scope, index, DT_INT32);
}
grad_outputs->push_back(Gather(scope, grad_inputs[0], index));
}
return scope.status();
}
REGISTER_GRADIENT_OP("DynamicStitch", DynamicStitchGrad);
REGISTER_GRADIENT_OP("ParallelDynamicStitch", DynamicStitchGrad);
}  // namespace
}  // namespace ops
}  // namespace tensorflow | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
namespace {
using ops::Const;
using ops::DynamicPartition;
using ops::DynamicStitch;
using ops::Placeholder;
class DataFlowGradTest : public ::testing::Test {
protected:
DataFlowGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
Scope scope_;
};
TEST_F(DataFlowGradTest, DynamicPartitionGrad) {
TensorShape data_shape({2, 3, 2});
auto data = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(data_shape));
auto partitions = Const(scope_, {{2, 1, 0}, {1, 2, 0}});
auto y = DynamicPartition(scope_, data, partitions, 3);
TensorShape partition_shape({2, 2});
RunTest({data}, {data_shape}, y.outputs,
{partition_shape, partition_shape, partition_shape});
}
TEST_F(DataFlowGradTest, DynamicStitchGrad) {
TensorShape d1_shape({2});
TensorShape d2_shape({2, 2});
std::vector<Output> indices = {Const(scope_, 2), Const(scope_, {1, 0})};
std::vector<Output> data = {
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(d1_shape)),
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(d2_shape))};
auto y = DynamicStitch(scope_, indices, data);
TensorShape y_shape({3, 2});
RunTest(data, {d1_shape, d2_shape}, {y}, {y_shape});
}
}  // namespace
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/data_flow_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/data_flow_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
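DynamicStitchGrad above is a pure routing gradient: each data input produced exactly the output rows named by its index tensor, so its gradient is a Gather of the upstream gradient at those indices (the index inputs themselves get NoGradient). A small sketch of that identity, assuming a standard TensorFlow C++ build:

// Illustrative sketch: for y = DynamicStitch({{2}, {1, 0}}, {d1, d2}), the
// gradient of d2 is Gather(dy, {1, 0}) -- rows 1 and 0 of the upstream grad.
#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  // Stand-in upstream gradient for a stitched output of three rows.
  auto dy = tensorflow::ops::Const(
      root, {{0.1f, 0.2f}, {0.3f, 0.4f}, {0.5f, 0.6f}});
  auto d2_indices = tensorflow::ops::Const(root, {1, 0});
  auto d2_grad = tensorflow::ops::Gather(root, dy, d2_indices);
  tensorflow::ClientSession session(root);
  std::vector<tensorflow::Tensor> outputs;
  // outputs[0] == {{0.3, 0.4}, {0.1, 0.2}}
  TF_CHECK_OK(session.Run({d2_grad}, &outputs));
  return 0;
}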
101e352c-b661-4e10-bbba-e7e6d5172d99 | cpp | tensorflow/tensorflow | image_grad | tensorflow/cc/gradients/image_grad.cc | tensorflow/cc/gradients/image_grad_test.cc | #include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/image_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
REGISTER_NO_GRADIENT_OP("NonMaxSuppression");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV2");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV3");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV4");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV5");
Status ResizeNearestNeighborGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "align_corners", &align_corners));
bool half_pixel_centers;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "half_pixel_centers",
&half_pixel_centers));
auto x_shape = Slice(scope, Shape(scope, op.input(0)), {1}, {2});
grad_outputs->push_back(internal::ResizeNearestNeighborGrad(
scope, grad_inputs[0], x_shape,
internal::ResizeNearestNeighborGrad::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ResizeNearestNeighbor", ResizeNearestNeighborGradHelper);
Status ResizeBilinearGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "align_corners", &align_corners));
bool half_pixel_centers;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "half_pixel_centers",
&half_pixel_centers));
grad_outputs->push_back(internal::ResizeBilinearGrad(
scope, grad_inputs[0], op.input(0),
internal::ResizeBilinearGrad::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ResizeBilinear", ResizeBilinearGradHelper);
Status ResizeBicubicGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "align_corners", &align_corners));
bool half_pixel_centers;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "half_pixel_centers",
&half_pixel_centers));
grad_outputs->push_back(internal::ResizeBicubicGrad(
scope, grad_inputs[0], op.input(0),
internal::ResizeBicubicGrad::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ResizeBicubic", ResizeBicubicGradHelper);
Status ScaleAndTranslateGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string kernel_type;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "kernel_type", &kernel_type));
bool antialias;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "antialias", &antialias));
grad_outputs->push_back(internal::ScaleAndTranslateGrad(
scope, grad_inputs[0], op.input(0), op.input(2), op.input(3),
internal::ScaleAndTranslateGrad::KernelType(kernel_type)
.Antialias(antialias)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ScaleAndTranslate", ScaleAndTranslateGradHelper);
Status CropAndResizeGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
DataType input_type;
string method;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "method", &method));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "T", &input_type));
auto image_shape = Shape(scope, op.input(0));
grad_outputs->push_back(CropAndResizeGradImage(
scope, grad_inputs[0], op.input(1), op.input(2), image_shape, input_type,
CropAndResizeGradImage::Method(method)));
grad_outputs->push_back(CropAndResizeGradBoxes(
scope, grad_inputs[0], op.input(0), op.input(1), op.input(2)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("CropAndResize", CropAndResizeGradHelper);
}  // namespace
}  // namespace ops
}  // namespace tensorflow | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using ops::Const;
using ops::CropAndResize;
using ops::ResizeBicubic;
using ops::ResizeBilinear;
using ops::ResizeNearestNeighbor;
using ops::ScaleAndTranslate;
class ImageGradTest : public ::testing::Test {
protected:
ImageGradTest() : scope_(Scope::NewRootScope()) {}
enum OpType { RESIZE_NEAREST, RESIZE_BILINEAR, RESIZE_BICUBIC };
template <typename T>
Tensor MakeData(const TensorShape& data_shape) {
DataType data_type = DataTypeToEnum<T>::v();
Tensor data(data_type, data_shape);
auto data_flat = data.flat<T>();
for (int i = 0; i < data_flat.size(); ++i) {
data_flat(i) = T(i);
}
return data;
}
template <typename T>
void MakeOp(const OpType op_type, const Tensor& x_data, const Input& y_shape,
const bool align_corners, const bool half_pixel_centers,
Output* x, Output* y) {
*x = Const<T>(scope_, x_data);
switch (op_type) {
case RESIZE_NEAREST:
*y = ResizeNearestNeighbor(
scope_, *x, y_shape,
ResizeNearestNeighbor::AlignCorners(align_corners));
return;
case RESIZE_BILINEAR:
*y = ResizeBilinear(scope_, *x, y_shape,
ResizeBilinear::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers));
return;
case RESIZE_BICUBIC:
*y = ResizeBicubic(scope_, *x, y_shape,
ResizeBicubic::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers));
return;
}
assert(false);
}
template <typename T>
void TestResizedShapeForType(const OpType op_type, const bool align_corners,
const bool half_pixel_centers) {
TensorShape x_shape({1, 2, 2, 1});
Tensor x_data = MakeData<T>(x_shape);
Output x, y;
MakeOp<T>(op_type, x_data, {4, 6}, align_corners, half_pixel_centers, &x,
&y);
ClientSession session(scope_);
std::vector<Tensor> outputs;
TF_ASSERT_OK(session.Run({y}, &outputs));
EXPECT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0].shape(), TensorShape({1, 4, 6, 1}));
}
void TestResizedShape(OpType op_type) {
for (const bool half_pixel_centers : {true, false}) {
for (const bool align_corners : {true, false}) {
if (half_pixel_centers && align_corners) {
continue;
}
TestResizedShapeForType<Eigen::half>(op_type, align_corners,
half_pixel_centers);
TestResizedShapeForType<float>(op_type, align_corners,
half_pixel_centers);
TestResizedShapeForType<double>(op_type, align_corners,
half_pixel_centers);
}
}
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestResizeToSmallerAndAlign(const OpType op_type,
const bool align_corners,
const bool half_pixel_centers) {
TensorShape x_shape({1, 4, 6, 1});
Tensor x_data = MakeData<X_T>(x_shape);
Output x, y;
MakeOp<X_T>(op_type, x_data, {2, 3}, align_corners, half_pixel_centers, &x,
&y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, 2, 3, 1}, &max_error)));
EXPECT_LT(max_error, 1.5e-3);
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestResizeToLargerAndAlign(const OpType op_type,
const bool align_corners,
const bool half_pixel_centers) {
TensorShape x_shape({1, 2, 3, 1});
Tensor x_data = MakeData<X_T>(x_shape);
Output x, y;
MakeOp<X_T>(op_type, x_data, {4, 6}, align_corners, half_pixel_centers, &x,
&y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, 4, 6, 1}, &max_error)));
EXPECT_LT(max_error, 1.5e-3);
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestResize(OpType op_type) {
for (const bool half_pixel_centers : {true, false}) {
for (const bool align_corners : {true, false}) {
if (half_pixel_centers && align_corners) {
continue;
}
TestResizeToSmallerAndAlign<X_T, Y_T, JAC_T>(op_type, align_corners,
half_pixel_centers);
TestResizeToLargerAndAlign<X_T, Y_T, JAC_T>(op_type, align_corners,
half_pixel_centers);
}
}
}
Scope scope_;
};
TEST_F(ImageGradTest, TestNearestNeighbor) {
TestResizedShape(RESIZE_NEAREST);
TestResize<float, float, float>(RESIZE_NEAREST);
TestResize<double, double, double>(RESIZE_NEAREST);
}
TEST_F(ImageGradTest, TestBilinear) {
TestResizedShape(RESIZE_BILINEAR);
TestResize<float, float, float>(RESIZE_BILINEAR);
TestResize<double, float, double>(RESIZE_BILINEAR);
}
TEST_F(ImageGradTest, TestBicubic) {
TestResizedShape(RESIZE_BICUBIC);
TestResize<float, float, float>(RESIZE_BICUBIC);
TestResize<double, float, double>(RESIZE_BICUBIC);
}
class ScaleAndTranslateGradTest : public ::testing::Test {
protected:
ScaleAndTranslateGradTest() : scope_(Scope::NewRootScope()) {}
template <typename T>
Tensor MakeData(const TensorShape& data_shape) {
DataType data_type = DataTypeToEnum<T>::v();
Tensor data(data_type, data_shape);
auto data_flat = data.flat<T>();
for (int i = 0; i < data_flat.size(); ++i) {
data_flat(i) = T(i);
}
return data;
}
template <typename T>
void MakeOp(const Tensor& x_data, const Input& y_shape, Input scale,
Input translation, const string& kernel_type, bool antialias,
Output* x, Output* y) {
*x = Const<T>(scope_, x_data);
*y = ScaleAndTranslate(scope_, *x, y_shape, scale, translation,
ScaleAndTranslate::KernelType(kernel_type)
                               .Antialias(antialias));
TF_ASSERT_OK(scope_.status());
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestScaleAndTranslate(const TensorShape x_shape, const int out_height,
const int out_width, Input scale,
Input translation, const string& kernel_type,
bool antialias) {
Tensor x_data = MakeData<X_T>(x_shape);
Output x, y;
MakeOp<X_T>(x_data, {out_height, out_width}, scale, translation,
kernel_type, antialias, &x, &y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, out_height, out_width, 1}, &max_error)));
EXPECT_LT(max_error, 2e-3);
}
const std::vector<Input> kScales = {Input{1.0f, 1.0f}, Input{0.37f, 0.47f},
Input{2.1f, 2.1f}};
const std::vector<Input> kTranslations = {
Input{0.0f, 0.0f}, Input{3.14f, 1.19f}, Input{2.1f, 3.1f},
Input{100.0f, 200.0f}};
Scope scope_;
};
TEST_F(ScaleAndTranslateGradTest, TestGrads) {
const std::vector<std::string> kKernelTypes = {"lanczos1", "lanczos3",
"lanczos5", "gaussian"};
constexpr int kOutHeight = 4;
constexpr int kOutWidth = 6;
const TensorShape kXShape = TensorShape({1, 2, 3, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
for (const std::string& kernel_type : kKernelTypes) {
TestScaleAndTranslate<float, float, float>(
kXShape, kOutHeight, kOutWidth, scale, translation, kernel_type,
true);
}
}
}
}
TEST_F(ScaleAndTranslateGradTest, TestGradsWithoutAntialias) {
constexpr int kOutHeight = 4;
constexpr int kOutWidth = 6;
const TensorShape kXShape = TensorShape({1, 2, 3, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
TestScaleAndTranslate<float, float, float>(kXShape, kOutHeight, kOutWidth,
scale, translation, "lanczos3",
false);
}
}
}
TEST_F(ScaleAndTranslateGradTest, TestGradsWithSameShape) {
const std::vector<std::string> kKernelTypes = {"lanczos3", "gaussian"};
constexpr int kOutHeight = 2;
constexpr int kOutWidth = 3;
const TensorShape kXShape = TensorShape({1, 2, 3, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
for (const std::string& kernel_type : kKernelTypes) {
TestScaleAndTranslate<float, float, float>(
kXShape, kOutHeight, kOutWidth, scale, translation, kernel_type,
true);
}
}
}
}
TEST_F(ScaleAndTranslateGradTest, TestGradsWithSmallerShape) {
const std::vector<std::string> kKernelTypes = {"lanczos3", "gaussian"};
constexpr int kOutHeight = 2;
constexpr int kOutWidth = 3;
const TensorShape kXShape = TensorShape({1, 4, 6, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
for (const std::string& kernel_type : kKernelTypes) {
TestScaleAndTranslate<float, float, float>(
kXShape, kOutHeight, kOutWidth, scale, translation, kernel_type,
true);
}
}
}
}
class CropAndResizeGradTest : public ::testing::Test {
protected:
CropAndResizeGradTest() : scope_(Scope::NewRootScope()) {}
template <typename T>
Tensor MakeData(const TensorShape& data_shape) {
DataType data_type = DataTypeToEnum<T>::v();
Tensor data(data_type, data_shape);
auto data_flat = data.flat<T>();
for (int i = 0; i < data_flat.size(); ++i) {
data_flat(i) = T(i);
}
return data;
}
template <typename T>
void MakeOp(const Tensor& x_data, const Input& boxes, const Input& box_ind,
const Input& crop_size, Output* x, Output* y) {
*x = Const<T>(scope_, x_data);
*y = CropAndResize(scope_, *x, boxes, box_ind, crop_size,
CropAndResize::Method("bilinear"));
TF_ASSERT_OK(scope_.status());
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestCropAndResize() {
TensorShape x_shape({1, 4, 2, 1});
Tensor x_data = MakeData<X_T>(x_shape);
TensorShape box_shape({1, 4});
Tensor boxes = MakeData<X_T>(box_shape);
Output x, y;
MakeOp<X_T>(x_data, boxes, {0}, {1, 1}, &x, &y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, 1, 1, 1}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(CropAndResizeGradTest, TestCrop) {
TestCropAndResize<float, float, float>();
}
}  // namespace
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/image_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/image_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
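A recurring detail in the resize gradient helpers above is recovering the original spatial size as Slice(Shape(x), {1}, {2}) — elements 1..2 of the NHWC shape vector — while forwarding align_corners / half_pixel_centers unchanged to the internal *Grad op. A sketch of just the shape arithmetic, assuming a standard TensorFlow C++ build:

// Illustrative sketch: pulling [height, width] out of an NHWC shape, as
// ResizeNearestNeighborGradHelper does before calling the internal grad op.
#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  tensorflow::Tensor image(tensorflow::DT_FLOAT,
                           tensorflow::TensorShape({1, 4, 6, 1}));
  image.flat<float>().setZero();
  auto x = tensorflow::ops::Const(root, image);
  // Shape(x) -> [1, 4, 6, 1]; Slice(begin={1}, size={2}) -> [4, 6].
  auto hw = tensorflow::ops::Slice(root, tensorflow::ops::Shape(root, x),
                                   {1}, {2});
  tensorflow::ClientSession session(root);
  std::vector<tensorflow::Tensor> outputs;
  TF_CHECK_OK(session.Run({hw}, &outputs));  // outputs[0] == [4, 6]
  return 0;
}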
4f271eb2-947f-476c-bd58-be3d6c7ffa56 | cpp | tensorflow/tensorflow | linalg_grad | tensorflow/cc/gradients/linalg_grad.cc | tensorflow/cc/gradients/linalg_grad_test.cc | #include <algorithm>
#include <cmath>
#include <string>
#include <tuple>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/gradients/grad_helper.h"
#include "tensorflow/cc/ops/array_ops_internal.h"
#include "tensorflow/cc/ops/math_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
constexpr absl::string_view kEllipsis = "...";
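// Returns the axis of `label` within `subscripts`: a non-negative index if
// the label sits before the ellipsis, a negative end-relative index if it
// sits after, and nullopt if the label is absent.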
absl::optional<int> EinsumGetAxisFromLabel(absl::string_view subscripts,
char label) {
std::vector<absl::string_view> splits = absl::StrSplit(subscripts, kEllipsis);
auto index = splits[0].find(label);
if (index != splits[0].npos) {
return index;
}
if (splits.size() < 2) {
return absl::nullopt;
}
index = splits[1].find(label);
if (index != splits[1].npos) {
return index - splits[1].length();
}
return absl::nullopt;
}
std::tuple<int, absl::optional<int>> EinsumGetBcastSubshape(
absl::string_view subscripts) {
int start = subscripts.find(kEllipsis);
if (start == subscripts.npos) {
return std::make_tuple(0, 0);
}
int remaining = subscripts.length() - (start + kEllipsis.length());
absl::optional<int> end;
if (remaining > 0) {
end = -remaining;
} else {
end = absl::nullopt;
}
return std::make_tuple(start, end);
}
Output Slice1dHelper(const Scope& scope, Output tensor, int start,
absl::optional<int> end) {
if (end.has_value() && *end > 0) {
return Slice(scope, tensor, Const(scope, start, TensorShape({1})),
Const(scope, *end - start, TensorShape({1})));
} else {
return Slice(scope, tensor, Const(scope, start, TensorShape({1})),
Add(scope, Shape(scope, tensor), end.value_or(0) - start));
}
}
std::tuple<std::string, Output, Output> EinsumGetReducedSubscripts(
const Scope& scope, const absl::btree_set<char>& reduced_label_set,
Output input_shape, absl::string_view subscripts) {
const std::string reduced_subs =
std::string(reduced_label_set.begin(), reduced_label_set.end());
std::vector<int> reduced_axes;
reduced_axes.reserve(reduced_subs.size());
for (const char s : reduced_subs) {
auto axis = EinsumGetAxisFromLabel(subscripts, s);
if (!axis.has_value()) {
scope.UpdateStatus(errors::Internal(
absl::StrCat("Missing axis", absl::string_view(&s, 1))));
} else {
reduced_axes.push_back(*axis);
}
}
std::vector<Output> reduced_dims_inputs;
reduced_dims_inputs.reserve(reduced_axes.size());
for (const int i : reduced_axes) {
if (i < 0) {
reduced_dims_inputs.push_back(
Gather(scope, input_shape, Add(scope, Size(scope, input_shape), i)));
} else {
reduced_dims_inputs.push_back(Gather(scope, input_shape, i));
}
}
const Output reduced_dims = Stack(scope, reduced_dims_inputs);
Tensor reduced_axes_tensor(
DataType::DT_INT32, TensorShape({static_cast<int>(reduced_axes.size())}));
std::copy_n(reduced_axes.begin(), reduced_axes.size(),
reduced_axes_tensor.flat<int>().data());
return std::make_tuple(reduced_subs, reduced_dims,
Const(scope, reduced_axes_tensor));
}
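// Rebuilds the gradient for labels that the forward Einsum summed away: the
// reduced dimensions are reinserted as size-1 axes, broadcast back to their
// original extents, and an inverse Einsum maps the result onto the input
// subscript layout.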
Output EinsumGradReducedHelper(const Scope& scope, const Output& output_grad,
absl::string_view output_subs,
absl::string_view input_subs,
const Output& input_shape,
const absl::btree_set<char>& reduced_label_set) {
std::string reduced_subs;
Output reduced_dims, reduced_axes;
std::tie(reduced_subs, reduced_dims, reduced_axes) =
EinsumGetReducedSubscripts(scope, reduced_label_set, input_shape,
input_subs);
const int distinct_input_labels =
absl::flat_hash_set<char>(input_subs.begin(), input_subs.end()).size();
const int distinct_output_labels =
absl::flat_hash_set<char>(output_subs.begin(), output_subs.end()).size();
const bool has_repeated_labels =
(distinct_input_labels + distinct_output_labels) <
input_subs.length() + output_subs.length();
std::string input_subs_without_reduced_labels;
for (const char s : input_subs) {
if (!absl::c_linear_search(reduced_label_set, s)) {
input_subs_without_reduced_labels.push_back(s);
}
}
if (!has_repeated_labels &&
input_subs_without_reduced_labels == output_subs) {
auto reduced_shape = ReducedShapeHelper(scope, input_shape, reduced_axes);
return BroadcastTo(scope, Reshape(scope, output_grad, reduced_shape),
input_shape);
}
Output output_grad_shape = Shape(scope, output_grad);
auto grad_shape_with_reduced_labels =
Concat(scope, {reduced_dims, output_grad_shape}, 0);
auto reduced_shape = Concat(
scope,
{Const(scope, 1, TensorShape{static_cast<int>(reduced_label_set.size())}),
output_grad_shape},
0);
Output broadcasted_grad =
BroadcastTo(scope, Reshape(scope, output_grad, reduced_shape),
grad_shape_with_reduced_labels);
return Einsum(scope, {broadcasted_grad},
absl::StrCat(reduced_subs, output_subs, "->", input_subs));
}
Output EinsumGradWrt(const Scope& scope, Output output_grad,
Output other_operand, Output input_shape,
absl::string_view input_subs, absl::string_view other_subs,
absl::string_view output_subs) {
absl::btree_set<char> reduced_label_set(input_subs.begin(), input_subs.end());
for (const char x : output_subs) {
reduced_label_set.erase(x);
}
for (const char x : other_subs) {
reduced_label_set.erase(x);
}
reduced_label_set.erase('.');
std::string left_subs;
for (const char s : input_subs) {
if (!reduced_label_set.contains(s)) {
left_subs.push_back(s);
}
}
Output grad_reduced =
Einsum(scope, {output_grad, other_operand},
absl::StrCat(output_subs, ",", other_subs, "->", left_subs));
if (reduced_label_set.empty()) {
return grad_reduced;
}
return EinsumGradReducedHelper(scope, grad_reduced, left_subs, input_subs,
input_shape, reduced_label_set);
}
Status EinsumGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (grad_inputs.size() != 1) {
return errors::InvalidArgument("Expect 1 grad input.");
}
const Output& grad = grad_inputs[0];
std::string equation;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "equation", &equation));
std::vector<absl::string_view> equation_split =
absl::StrSplit(equation, "->");
if (equation_split.size() != 2) {
return errors::InvalidArgument("Equation must contain a single ->");
}
const absl::string_view input_subs = equation_split[0];
const absl::string_view output_subs = equation_split[1];
if (op.num_inputs() == 1) {
auto input_shape = Shape(scope, op.input(0));
absl::btree_set<char> reduced_label_set(input_subs.begin(),
input_subs.end());
for (const char x : output_subs) {
reduced_label_set.erase(x);
}
reduced_label_set.erase('.');
if (reduced_label_set.empty()) {
grad_outputs->push_back(Einsum(
scope, grad_inputs, absl::StrCat(output_subs, "->", input_subs)));
return scope.status();
}
grad_outputs->push_back(EinsumGradReducedHelper(
scope, grad, output_subs, input_subs, input_shape, reduced_label_set));
return scope.status();
}
std::vector<absl::string_view> subs = absl::StrSplit(input_subs, ',');
if (subs.size() != 2) {
return errors::InvalidArgument("Only 2 inputs are supported");
}
std::string x_subs(subs[0]);
std::string y_subs(subs[1]);
if (absl::StrContains(output_subs, kEllipsis)) {
if (!absl::StrContains(x_subs, kEllipsis)) {
absl::StrAppend(&x_subs, kEllipsis);
}
if (!absl::StrContains(y_subs, kEllipsis)) {
absl::StrAppend(&y_subs, kEllipsis);
}
}
tensorflow::Output x = op.input(0);
tensorflow::Output y = op.input(1);
if (DataTypeIsComplex(grad.type())) {
x = Conj(scope, x);
y = Conj(scope, y);
}
const auto x_shape = Shape(scope, x);
const auto y_shape = Shape(scope, y);
Output grad_x =
EinsumGradWrt(scope, grad, y, x_shape, x_subs, y_subs, output_subs);
Output grad_y =
EinsumGradWrt(scope, grad, x, y_shape, y_subs, x_subs, output_subs);
if (!absl::StrContains(output_subs, kEllipsis)) {
grad_outputs->push_back(grad_x);
grad_outputs->push_back(grad_y);
return scope.status();
}
int bx_start, by_start;
absl::optional<int> bx_end, by_end;
std::tie(bx_start, bx_end) = EinsumGetBcastSubshape(x_subs);
std::tie(by_start, by_end) = EinsumGetBcastSubshape(y_subs);
auto args = internal::BroadcastGradientArgs(
scope, Slice1dHelper(scope, x_shape, bx_start, bx_end),
Slice1dHelper(scope, y_shape, by_start, by_end));
grad_x = Reshape(
scope, ReduceSum(scope, grad_x, Add(scope, bx_start, args.r0)), x_shape);
grad_y = Reshape(
scope, ReduceSum(scope, grad_y, Add(scope, by_start, args.r1)), y_shape);
grad_outputs->push_back(grad_x);
grad_outputs->push_back(grad_y);
return scope.status();
}
REGISTER_GRADIENT_OP("Einsum", EinsumGrad);
}  // namespace
}  // namespace ops
}  // namespace tensorflow | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using tensorflow::ops::Einsum;
using tensorflow::ops::Placeholder;
class LinalgGradTest : public ::testing::Test {
protected:
LinalgGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(LinalgGradTest, Einsum_Transpose) {
TensorShape x_shape({2, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Einsum(scope_, {x}, "ij->ji");
TensorShape y_shape({3, 2});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(LinalgGradTest, Einsum_TransposeBroadcast) {
TensorShape x_shape({3, 2, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Einsum(scope_, {x}, "...ij->...ji");
TensorShape y_shape({3, 3, 2});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(LinalgGradTest, Einsum_MatMul) {
TensorShape x_shape({2, 3});
TensorShape y_shape({3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "ij,jk->ik");
TensorShape z_shape({2, 3});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_MatMulComplex) {
TensorShape x_shape({2, 3});
TensorShape y_shape({3, 3});
Output x = Placeholder(scope_, DT_COMPLEX64, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_COMPLEX64, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "ij,jk->ik");
TensorShape z_shape({2, 3});
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<complex64, complex64, float>(
scope_, {x, y}, {x_shape, y_shape}, {z}, {z_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
TEST_F(LinalgGradTest, Einsum_MatMulBroadcast) {
TensorShape x_shape({3, 2, 3});
TensorShape y_shape({3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "...ij,...jk->...ik");
TensorShape z_shape({3, 2, 3});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_Trace) {
TensorShape x_shape({3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto z = Einsum(scope_, {x}, "ii->");
TensorShape z_shape({});
RunTest({x}, {x_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_TraceBroadcast) {
TensorShape x_shape({4, 3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto z = Einsum(scope_, {x}, "...ii->...");
TensorShape z_shape({4});
RunTest({x}, {x_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_DotProduct) {
TensorShape x_shape({3});
TensorShape y_shape({3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "i,i->");
TensorShape z_shape({});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_OuterProduct) {
TensorShape x_shape({3});
TensorShape y_shape({5});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "i,j->ij");
TensorShape z_shape({3, 5});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_TwoInputReduction) {
TensorShape x_shape({3, 2, 4});
TensorShape y_shape({4, 5});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "abc,cd->ad");
TensorShape z_shape({3, 5});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
}  // namespace
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/linalg_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/linalg_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
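The two-operand branch of EinsumGrad boils down to a subscript swap: for z = einsum("ij,jk->ik", x, y), EinsumGradWrt produces dx = einsum("ik,jk->ij", dz, y). A sketch of that identity with concrete numbers, assuming a standard TensorFlow C++ build:

// Illustrative sketch: the matmul case of EinsumGrad. With an all-ones
// upstream gradient, dx_ij = sum_k dz_ik * y_jk collapses to row sums of y.
#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  auto y = tensorflow::ops::Const(root, {{1.0f, 2.0f}, {3.0f, 4.0f}});
  auto dz = tensorflow::ops::Const(root, {{1.0f, 1.0f}, {1.0f, 1.0f}});
  // Gradient of z = einsum("ij,jk->ik", x, y) with respect to x.
  auto dx = tensorflow::ops::Einsum(root, {dz, y}, "ik,jk->ij");
  tensorflow::ClientSession session(root);
  std::vector<tensorflow::Tensor> outputs;
  TF_CHECK_OK(session.Run({dx}, &outputs));  // outputs[0] == [[3, 7], [3, 7]]
  return 0;
}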
b830844d-42c9-454e-ab53-41d3fc88bcaa | cpp | tensorflow/tensorflow | client_session | tensorflow/cc/client/client_session.cc | tensorflow/cc/client/client_session_test.cc | #include "tensorflow/cc/client/client_session.h"
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class ClientSession::Impl {
private:
friend class ClientSession;
Impl(Session* session, std::shared_ptr<Graph> graph)
: session_(session), graph_(std::move(graph)) {}
static SessionOptions MakeDefaultSessionOptions(const string& target);
Status MaybeExtendGraph() const;
std::unique_ptr<Session> session_;
std::shared_ptr<Graph> graph_;
mutable mutex mu_;
mutable int last_num_graph_nodes_ TF_GUARDED_BY(mu_) = 0;
};
ClientSession::ClientSession(const Scope& scope, const string& target)
: ClientSession(scope, Impl::MakeDefaultSessionOptions(target)) {}
ClientSession::ClientSession(const Scope& scope) : ClientSession(scope, "") {}
ClientSession::ClientSession(const Scope& scope,
const SessionOptions& session_options) {
Session* new_session;
Status status = NewSession(session_options, &new_session);
TF_CHECK_OK(status) << status;
impl_.reset(new Impl(new_session, scope.graph_as_shared_ptr()));
CHECK_NOTNULL(impl()->session_.get());
}
ClientSession::~ClientSession() {}
SessionOptions ClientSession::Impl::MakeDefaultSessionOptions(
const string& target) {
SessionOptions options;
options.env = Env::Default();
options.target = target;
return options;
}
Status ClientSession::Run(const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(FeedType{}, fetch_outputs, {}, outputs);
}
Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(inputs, fetch_outputs, {}, outputs);
}
Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs) const {
return Run(RunOptions(), inputs, fetch_outputs, run_outputs, outputs,
nullptr);
}
Status ClientSession::Impl::MaybeExtendGraph() const {
mutex_lock l(mu_);
int num_nodes = graph_->num_node_ids();
if (num_nodes > last_num_graph_nodes_) {
GraphDef graph_def;
graph_->ToGraphDefSubRange(&graph_def, last_num_graph_nodes_);
last_num_graph_nodes_ = num_nodes;
return session_->Extend(graph_def);
}
return absl::OkStatus();
}
Status ClientSession::Run(const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_options);
}
Status ClientSession::MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->MakeCallable(callable_options, out_handle);
}
Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata);
}
Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& options) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata, options);
}
Status ClientSession::ReleaseCallable(CallableHandle handle) {
return impl()->session_->ReleaseCallable(handle);
}
}  // namespace tensorflow | #define EIGEN_USE_THREADS
#include "tensorflow/cc/client/client_session.h"
#include <utility>
#include <vector>
#include "absl/synchronization/barrier.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/core/threadpool_options.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace {
using ops::Add;
using ops::BatchMatMul;
using ops::Const;
using ops::Mul;
using ops::Placeholder;
using ops::Sub;
tensorflow::SessionOptions GetSessionOptions() {
tensorflow::SessionOptions options;
options.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
return options;
}
class CustomThreadPoolImpl : public thread::ThreadPoolInterface {
public:
explicit CustomThreadPoolImpl(int numThreads) {
underlying_threadpool_.reset(new thread::ThreadPool(
tensorflow::Env::Default(), "custom_threadpool", numThreads));
num_schedule_called_ = 0;
}
void Schedule(std::function<void()> fn) override {
num_schedule_called_ += 1;
underlying_threadpool_->Schedule(std::move(fn));
}
void ScheduleWithHint(std::function<void()> fn, int start, int end) override {
num_schedule_called_ += 1;
underlying_threadpool_->ScheduleWithHint(std::move(fn), start, end);
}
void Cancel() override {}
int NumThreads() const override {
return underlying_threadpool_->NumThreads();
}
int CurrentThreadId() const override {
return underlying_threadpool_->CurrentThreadId();
}
int GetNumScheduleCalled() { return num_schedule_called_; }
private:
int num_schedule_called_;
std::unique_ptr<tensorflow::thread::ThreadPool> underlying_threadpool_;
};
TEST(ClientSessionTest, Basic) {
Scope root = Scope::NewRootScope();
auto c = Const(root, {{1, 1}});
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({1, 1}, {1, 2}));
}
TEST(ClientSessionTest, Feed) {
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32);
auto b = Placeholder(root, DT_INT32);
auto c = Add(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({{a, 1}, {b, 41}}, {c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({42}, {}));
}
TEST(ClientSessionTest, Extend) {
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32, Placeholder::Shape({2}));
auto c = Add(root, a, {2, 2});
ClientSession session(root, GetSessionOptions());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({{a, {1, 1}}}, {c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({3, 3}, {2}));
auto d = Add(root, c, {39, 39});
outputs.clear();
TF_EXPECT_OK(session.Run({{a, {-10, 1}}}, {d}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({31, 42}, {2}));
}
TEST(ClientSessionTest, MultiThreadedWithDefaultThreadpool) {
Scope root = Scope::NewRootScope();
auto a = Add(root, {1, 2}, {3, 4});
auto b = Mul(root, {1, 2}, {3, 4});
ClientSession session(root, GetSessionOptions());
{
thread::ThreadPool thread_pool(Env::Default(), "pool", 2);
thread_pool.Schedule([&session, a]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({a}, &outputs));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({4, 6}, {2}));
});
thread_pool.Schedule([&session, b]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({b}, &outputs));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({3, 8}, {2}));
});
}
auto c = Sub(root, b, a);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({-1, 2}, {2}));
}
TEST(ClientSessionTest, MultiThreadedWithCustomThreadpool) {
Scope root = Scope::NewRootScope();
int num_threads = 3;
auto a = Add(root, {1, 2}, {3, 4});
auto b = Mul(root, {1, 2}, {3, 4});
ClientSession session(root, GetSessionOptions());
auto inter_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(inter_op_threadpool->GetNumScheduleCalled(), 0);
auto intra_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(intra_op_threadpool->GetNumScheduleCalled(), 0);
tensorflow::thread::ThreadPoolOptions threadPoolOptions;
threadPoolOptions.inter_op_threadpool = inter_op_threadpool.get();
threadPoolOptions.intra_op_threadpool = intra_op_threadpool.get();
{
thread::ThreadPool thread_pool(Env::Default(), "pool", 2);
thread_pool.Schedule([&session, a]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(RunOptions(), ClientSession::FeedType{}, {a}, {},
&outputs, nullptr, thread::ThreadPoolOptions()));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({4, 6}, {2}));
});
thread_pool.Schedule([&session, b]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(RunOptions(), ClientSession::FeedType{}, {b}, {},
&outputs, nullptr, thread::ThreadPoolOptions()));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({3, 8}, {2}));
});
}
auto c = Sub(root, b, a);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(RunOptions(), ClientSession::FeedType{}, {c}, {},
&outputs, nullptr, thread::ThreadPoolOptions()));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({-1, 2}, {2}));
}
TEST(ClientSessionTest, CallableWithDefaultThreadPool) {
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32);
auto b = Placeholder(root, DT_INT32);
auto c = Add(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
CallableOptions options;
options.add_feed(a.node()->name());
options.add_feed(b.node()->name());
options.add_fetch(c.node()->name());
ClientSession::CallableHandle callable;
TF_CHECK_OK(session.MakeCallable(options, &callable));
TF_EXPECT_OK(session.RunCallable(
callable, {test::AsTensor<int>({1}, {}), test::AsTensor<int>({41}, {})},
&outputs, nullptr));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({42}, {}));
TF_EXPECT_OK(session.ReleaseCallable(callable));
}
TEST(ClientSessionTest, CallableWithCustomThreadPool) {
Scope root = Scope::NewRootScope();
int num_threads = 3;
TensorShape data_shape({1, 1});
auto a = Placeholder(root, DT_INT32, Placeholder::Shape(data_shape));
auto b = Placeholder(root, DT_INT32, Placeholder::Shape(data_shape));
auto c = BatchMatMul(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
auto inter_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(inter_op_threadpool->GetNumScheduleCalled(), 0);
auto intra_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(intra_op_threadpool->GetNumScheduleCalled(), 0);
tensorflow::thread::ThreadPoolOptions threadPoolOptions;
threadPoolOptions.inter_op_threadpool = inter_op_threadpool.get();
threadPoolOptions.intra_op_threadpool = intra_op_threadpool.get();
CallableOptions options;
options.add_feed(a.node()->name());
options.add_feed(b.node()->name());
options.add_fetch(c.node()->name());
ClientSession::CallableHandle callable;
TF_CHECK_OK(session.MakeCallable(options, &callable));
absl::Barrier barrier(num_threads + 1);
for (int i = 0; i < num_threads; i++) {
intra_op_threadpool->Schedule([&barrier, num_threads]() {
tensorflow::SetPerThreadMaxParallelism(num_threads - 1);
barrier.Block();
});
}
barrier.Block();
TF_EXPECT_OK(session.RunCallable(
callable,
{test::AsTensor<int>({2}, {1, 1}), test::AsTensor<int>({10}, {1, 1})},
&outputs, nullptr, threadPoolOptions));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({20}, {1, 1}));
TF_EXPECT_OK(session.ReleaseCallable(callable));
ASSERT_GT(inter_op_threadpool->GetNumScheduleCalled(), 0);
ASSERT_GT(intra_op_threadpool->GetNumScheduleCalled(), 0);
intra_op_threadpool.reset();
}
}  // namespace
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/client/client_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/client/client_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
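The impl's last_num_graph_nodes_ counter is what makes the Extend test above work: every Run (and MakeCallable) first calls MaybeExtendGraph, which ships only the nodes added since the previous call. A minimal sketch of the pattern, assuming a standard TensorFlow C++ build (the disable_optimize_for_static_graph option mirrors GetSessionOptions in the test):

// Illustrative sketch: ops added after a Run are picked up by the next Run
// because ClientSession extends the underlying Session lazily.
#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  tensorflow::SessionOptions opts;
  opts.config.mutable_experimental()->set_disable_optimize_for_static_graph(
      true);  // allow extending the graph after the first Run
  auto a = tensorflow::ops::Const(root, {1, 2});
  tensorflow::ClientSession session(root, opts);
  std::vector<tensorflow::Tensor> outputs;
  TF_CHECK_OK(session.Run({a}, &outputs));           // whole graph shipped
  auto b = tensorflow::ops::Add(root, a, {10, 20});  // added afterwards
  TF_CHECK_OK(session.Run({b}, &outputs));           // only `b` is shipped
  return 0;
}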
95425b08-8054-4f6b-a08d-0daf2092292b | cpp | tensorflow/tensorflow | c_api_function | tensorflow/c/c_api_function.cc | tensorflow/c/c_api_function_test.cc | #include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/base64.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/util/debug_data_dumper.h"
using tensorflow::errors::InvalidArgument;
namespace tensorflow {
namespace {
Status ValidateNonRefOutput(const Node* node, int idx) {
const DataType& dt = node->output_type(idx);
return IsRefType(dt)
? InvalidArgument("Output ", idx, " of node '", node->name(),
"' has a reference type ", DataTypeString(dt))
: absl::OkStatus();
}
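// Validates the `ninputs` TF_Outputs, converts them to OutputTensors, and
// records which (node, index) pairs serve as function arguments; listing the
// same tensor twice is an error.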
Status ProcessInputs(
const TF_Graph* fn_body, const char* fn_name, int ninputs,
const TF_Output* inputs, std::vector<OutputTensor>* input_tensors,
std::unordered_map<const Node*, std::vector<int>>* input_nodes)
TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
input_tensors->reserve(ninputs);
for (int i = 0; i < ninputs; ++i) {
Node* node = inputs[i].oper ? &inputs[i].oper->node : nullptr;
int idx = inputs[i].index;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
fn_body->graph.IsValidOutputTensor(node, idx),
"Encountered while processing input ", i, " into function '", fn_name,
"'");
TF_RETURN_WITH_CONTEXT_IF_ERROR(ValidateNonRefOutput(node, idx),
"Encountered while processing input ", i,
" into function '", fn_name, "'");
input_tensors->emplace_back(node, idx);
const auto& iter = input_nodes->find(node);
if (iter == input_nodes->end()) {
input_nodes->insert({node, {idx}});
} else {
auto& indices = iter->second;
if (std::find(indices.begin(), indices.end(), idx) != indices.end()) {
return InvalidArgument("TF_Output ", node->name(), ":", idx,
" appears more than once in the input list");
}
indices.push_back(idx);
}
}
return absl::OkStatus();
}
Status ProcessOutputs(const TF_Graph* fn_body, const char* fn_name,
int noutputs, const TF_Output* outputs,
std::vector<OutputTensor>* output_tensors)
TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
output_tensors->reserve(noutputs);
for (int i = 0; i < noutputs; ++i) {
Node* node = outputs[i].oper ? &outputs[i].oper->node : nullptr;
int idx = outputs[i].index;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
fn_body->graph.IsValidOutputTensor(node, idx),
"Encountered while processing output ", i, " from function '", fn_name,
"'");
TF_RETURN_WITH_CONTEXT_IF_ERROR(ValidateNonRefOutput(node, idx),
"Encountered while creating function '",
fn_name, "'");
output_tensors->emplace_back(node, idx);
}
return absl::OkStatus();
}
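// Collects the nodes forming the function body: every op node except the
// designated inputs when num_opers == -1, otherwise exactly the `opers`
// passed by the caller.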
Status ComputeBodyNodes(
const TF_Graph* fn_body, const char* fn_name, int num_opers,
const TF_Operation* const* opers,
const std::unordered_map<const Node*, std::vector<int>>& input_nodes,
std::vector<const Node*>* body_nodes)
TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
if (num_opers == -1) {
for (const Node* node : fn_body->graph.op_nodes()) {
const auto& iter = input_nodes.find(node);
if (iter == input_nodes.end()) {
body_nodes->push_back(node);
} else {
if (node->num_outputs() != 1) {
return InvalidArgument(
"When `num_opers` is set to -1, nodes referenced in `inputs` "
"must have a single output. Node ",
node->name(), " has ", node->num_outputs(),
" outputs. Encountered while creating function '", fn_name, "'");
}
}
}
} else {
body_nodes->reserve(num_opers);
for (int i = 0; i < num_opers; ++i) {
const Node* node = &opers[i]->node;
body_nodes->push_back(node);
}
}
return absl::OkStatus();
}
}  // namespace
}  // namespace tensorflow
using tensorflow::Node;
using tensorflow::string;
TF_Function* TF_GraphToFunctionWithControlOutputs(
const TF_Graph* fn_body, const char* fn_name,
unsigned char append_hash_to_fn_name, int num_opers,
const TF_Operation* const* opers, int ninputs, const TF_Output* inputs,
int noutputs, const TF_Output* outputs, const char* const* output_names,
int ncontrol_outputs, const TF_Operation* const* control_outputs,
const char* const* control_output_names, const TF_FunctionOptions* opts,
const char* description, TF_Status* status) {
tensorflow::mutex_lock l(fn_body->mu);
std::vector<tensorflow::OutputTensor> input_tensors;
std::unordered_map<const Node*, std::vector<int>> input_nodes;
status->status = tensorflow::ProcessInputs(fn_body, fn_name, ninputs, inputs,
&input_tensors, &input_nodes);
if (TF_GetCode(status) != TF_OK) return nullptr;
std::vector<tensorflow::OutputTensor> output_tensors;
status->status = tensorflow::ProcessOutputs(fn_body, fn_name, noutputs,
outputs, &output_tensors);
if (TF_GetCode(status) != TF_OK) return nullptr;
std::vector<string> output_names_vec;
if (output_names) {
output_names_vec.reserve(noutputs);
for (int i = 0; i < noutputs; ++i) {
output_names_vec.push_back(string(output_names[i]));
}
}
std::vector<string> control_output_names_vec;
if (control_output_names) {
control_output_names_vec.reserve(ncontrol_outputs);
for (int i = 0; i < ncontrol_outputs; ++i) {
control_output_names_vec.push_back(string(control_output_names[i]));
}
}
std::vector<const Node*> body_nodes;
status->status = tensorflow::ComputeBodyNodes(
fn_body, fn_name, num_opers, opers, input_nodes, &body_nodes);
if (TF_GetCode(status) != TF_OK) return nullptr;
std::vector<const Node*> control_output_nodes;
control_output_nodes.reserve(ncontrol_outputs);
for (int i = 0; i < ncontrol_outputs; ++i) {
control_output_nodes.push_back(&control_outputs[i]->node);
}
DCHECK(append_hash_to_fn_name <= 1);
tensorflow::FunctionDef fdef;
status->status = tensorflow::GraphToFunctionDef(
fn_body->graph, fn_name, append_hash_to_fn_name != 0,
      /*set_stateful_from_nodes=*/true,
      /*copy_placeholder_attrs_from_nodes=*/true, body_nodes, input_tensors,
output_tensors, output_names_vec, control_output_nodes,
control_output_names_vec, description, &fdef);
if (TF_GetCode(status) != TF_OK) {
return nullptr;
}
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
fn_name, kDebugGroupOpStacktrace, "initial", &fn_body->graph);
tensorflow::StackTracesMap stack_traces;
for (const Node* n : fn_body->graph.nodes()) {
stack_traces[n->name()] = n->GetStackTrace();
}
TF_Function* tf_function = new TF_Function();
tf_function->record = new tensorflow::FunctionRecord(
      std::move(fdef), std::move(stack_traces), /*finalized=*/false);
return tf_function;
}
TF_Function* TF_GraphToFunction(const TF_Graph* fn_body, const char* fn_name,
unsigned char append_hash_to_fn_name,
int num_opers, const TF_Operation* const* opers,
int ninputs, const TF_Output* inputs,
int noutputs, const TF_Output* outputs,
const char* const* output_names,
const TF_FunctionOptions* opts,
const char* description, TF_Status* status) {
return TF_GraphToFunctionWithControlOutputs(
fn_body, fn_name, append_hash_to_fn_name, num_opers, opers, ninputs,
inputs, noutputs, outputs, output_names, 0, nullptr, nullptr, opts,
description, status);
}
const char* TF_FunctionName(TF_Function* func) {
return func->record->fdef().signature().name().c_str();
}
void TF_GraphCopyFunction(TF_Graph* g, const TF_Function* func,
const TF_Function* grad, TF_Status* status) {
if (func == nullptr) {
status->status = InvalidArgument(
"'func' argument to TF_GraphCopyFunction cannot be null");
return;
}
tensorflow::mutex_lock l(g->mu);
status->status = g->graph.AddFunctionDef(func->record->fdef(),
func->record->stack_traces());
if (TF_GetCode(status) != TF_OK) return;
if (!grad) return;
status->status = g->graph.AddFunctionDef(grad->record->fdef(),
grad->record->stack_traces());
if (TF_GetCode(status) != TF_OK) return;
tensorflow::GradientDef gdef;
gdef.set_function_name(func->record->fdef().signature().name());
gdef.set_gradient_func(grad->record->fdef().signature().name());
status->status = g->graph.AddGradientDef(std::move(gdef));
}
int TF_GraphNumFunctions(TF_Graph* g) {
tensorflow::mutex_lock l(g->mu);
return g->graph.flib_def().num_functions();
}
int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs, int max_func,
TF_Status* status) {
tensorflow::FunctionDefLibrary lib;
{
tensorflow::mutex_lock l(g->mu);
lib = g->graph.flib_def().ToProto();
}
const auto len = std::min(max_func, static_cast<int>(lib.function_size()));
for (int i = 0; i < len; ++i) {
TF_Function* func = new TF_Function();
    func->record = new tensorflow::FunctionRecord(lib.function(i), {},
                                                  /*finalized=*/false);
funcs[i] = func;
}
status->status = absl::OkStatus();
return len;
}
void TF_FunctionToFunctionDef(TF_Function* func, TF_Buffer* output_func_def,
TF_Status* status) {
status->status = MessageToBuffer(func->record->fdef(), output_func_def);
}
TF_Function* TF_FunctionImportFunctionDef(const void* proto, size_t proto_len,
TF_Status* status) {
tensorflow::FunctionDef fdef;
bool success = fdef.ParseFromArray(proto, proto_len);
if (!success) {
status->status = InvalidArgument(
"Invalid FunctionDef given to TF_FunctionImportFunctionDef");
return nullptr;
}
TF_Function* func = new TF_Function();
  func->record = new tensorflow::FunctionRecord(std::move(fdef), {},
                                                /*finalized=*/false);
status->status = absl::OkStatus();
return func;
}
void TF_FunctionSetAttrValueProto(TF_Function* func, const char* attr_name,
const void* proto, size_t proto_len,
TF_Status* status) {
tensorflow::AttrValue attr_value;
if (!attr_value.ParseFromArray(proto, proto_len)) {
status->status = InvalidArgument(
"Unparseable AttrValue proto passed to "
"TF_FunctionSetAttrValueProto");
return;
}
auto fdef_or = func->record->mutable_fdef();
if (!fdef_or.ok()) {
status->status = fdef_or.status();
return;
}
(*(fdef_or.value()->mutable_attr()))[string(attr_name)] = attr_value;
status->status = absl::OkStatus();
}
void TF_FunctionGetAttrValueProto(TF_Function* func, const char* attr_name,
TF_Buffer* output_attr_value,
TF_Status* status) {
const auto& it = func->record->fdef().attr().find(attr_name);
if (it == func->record->fdef().attr().end()) {
status->status =
InvalidArgument("Function '", func->record->fdef().signature().name(),
"' has no attr named '", attr_name, "'.");
return;
}
status->status = MessageToBuffer(it->second, output_attr_value);
}
void TF_DeleteFunction(TF_Function* func) {
if (func == nullptr) {
return;
}
func->record->Unref();
func->record = nullptr;
delete func;
} | #include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/c_test_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
typedef std::pair<string, DataType> IOSpec;
const char* kFeedStackToString = "File \"feed.cc\", line 10, in alpha";
const char* kNegStackToString = "File \"neg.cc\", line 15, in beta";
std::vector<IOSpec> M(const std::initializer_list<string>& names) {
std::vector<IOSpec> v;
for (const string& name : names) {
v.push_back(IOSpec(name, DT_INVALID));
}
return v;
}
struct EdgeSpec : public std::pair<string, string> {
typedef std::pair<string, string> Base;
using Base::pair;
string ToString() const { return strings::StrCat(first, "->", second); }
};
class CApiFunctionTest : public ::testing::Test {
protected:
CApiFunctionTest()
: s_(TF_NewStatus()),
func_graph_(TF_NewGraph()),
host_graph_(TF_NewGraph()),
func_(nullptr) {}
void SetUp() override {}
~CApiFunctionTest() override {
TF_DeleteFunction(func_);
TF_DeleteGraph(host_graph_);
TF_DeleteGraph(func_graph_);
TF_DeleteStatus(s_);
}
void Run(const std::vector<std::pair<TF_Operation*, TF_Tensor*>>& inputs,
TF_Operation* output, int32_t expected_result) {
Run(inputs, {{output, 0}}, {expected_result});
}
void RunT(const std::vector<std::pair<TF_Operation*, TF_Tensor*>>& inputs,
std::initializer_list<TF_Output> outputs,
const std::vector<std::vector<int32_t>>& expected_results) {
CSession csession(host_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
csession.SetInputs(inputs);
csession.SetOutputs(outputs);
csession.Run(s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
for (int i = 0; i < expected_results.size(); ++i) {
TF_Tensor* out = csession.output_tensor(i);
ASSERT_TRUE(out != nullptr);
EXPECT_EQ(TF_INT32, TF_TensorType(out));
EXPECT_EQ(1, TF_NumDims(out));
CompareInt32Tensor(expected_results[i], out);
}
}
void Run(const std::vector<std::pair<TF_Operation*, TF_Tensor*>>& inputs,
std::initializer_list<TF_Output> outputs,
const std::vector<int32_t>& expected_results) {
CSession csession(host_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
csession.SetInputs(inputs);
csession.SetOutputs(outputs);
csession.Run(s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
for (int i = 0; i < expected_results.size(); ++i) {
TF_Tensor* out = csession.output_tensor(i);
ASSERT_TRUE(out != nullptr);
EXPECT_EQ(TF_INT32, TF_TensorType(out));
EXPECT_EQ(0, TF_NumDims(out));
ASSERT_EQ(sizeof(int32_t), TF_TensorByteSize(out));
int32_t* output_contents = static_cast<int32_t*>(TF_TensorData(out));
EXPECT_EQ(expected_results[i], *output_contents);
}
}
void CompareInt32Tensor(const std::vector<int32_t>& expected, TF_Tensor* t) {
int32_t* data = static_cast<int32_t*>(TF_TensorData(t));
size_t size = TF_TensorByteSize(t);
ASSERT_EQ(expected.size() * sizeof(int32_t), size);
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], data[i]) << "Different data at index " << i;
}
}
std::vector<TF_Output> ToOutput(const std::vector<TF_Operation*> ops) {
std::vector<TF_Output> out;
for (auto op : ops) {
out.push_back({op, 0});
}
return out;
}
void Define(int num_opers, const std::vector<TF_Operation*>& opers,
const std::vector<TF_Operation*>& inputs,
const std::vector<TF_Operation*>& outputs,
const std::vector<string>& output_names,
bool expect_failure = false) {
DefineT(num_opers, opers, ToOutput(inputs), ToOutput(outputs), output_names,
expect_failure);
}
static const char** ToArray(const std::vector<string>& strs) {
const char** ptr = nullptr;
if (!strs.empty()) {
ptr = new const char*[strs.size()];
for (size_t i = 0; i < strs.size(); ++i) {
ptr[i] = strs[i].c_str();
}
}
return ptr;
}
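// Core definition helper: runs TF_GraphToFunction over func_graph_ (num_opers
// of -1 selects the whole graph as the body) and, unless a failure is
// expected, copies the resulting function into host_graph_.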
void DefineT(int num_opers, const std::vector<TF_Operation*>& opers,
const std::vector<TF_Output>& inputs,
const std::vector<TF_Output>& outputs,
const std::vector<string>& output_names,
bool expect_failure = false) {
ASSERT_EQ(func_, nullptr);
const char** output_names_ptr = ToArray(output_names);
func_ = TF_GraphToFunction(func_graph_, func_name_, false, num_opers,
num_opers == -1 ? nullptr : opers.data(),
inputs.size(), inputs.data(), outputs.size(),
outputs.data(), output_names_ptr,
nullptr, nullptr, s_);
delete[] output_names_ptr;
if (expect_failure) {
ASSERT_EQ(func_, nullptr);
return;
}
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_NE(func_, nullptr);
ASSERT_EQ(std::string(func_name_), std::string(TF_FunctionName(func_)));
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
TF_Operation* Use(const std::vector<TF_Operation*>& inputs) {
return UseT(ToOutput(inputs));
}
TF_Operation* UseT(const std::vector<TF_Output>& inputs) {
TF_Operation* op;
UseHelper(inputs, &op);
return op;
}
void UseHelper(const std::vector<TF_Output>& inputs, TF_Operation** op) {
TF_OperationDescription* desc =
TF_NewOperation(host_graph_, func_name_, func_node_name_);
for (auto input : inputs) {
TF_AddInput(desc, input);
}
TF_SetDevice(desc, "/cpu:0");
*op = TF_FinishOperation(desc, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_NE(*op, nullptr);
}
FunctionDef fdef() {
tensorflow::FunctionDef fdef;
EXPECT_TRUE(GetFunctionDef(func_, &fdef));
return fdef;
}
template <class Container>
string ToString(const Container& v) {
std::stringstream ss;
ss << "{";
size_t i = 0;
for (const auto& e : v) {
if (i != 0) {
ss << ", ";
}
ss << e.ToString();
++i;
}
ss << "}";
return ss.str();
}
void VerifyFDefNodes(const tensorflow::FunctionDef& fdef,
const std::unordered_set<string>& nodes) {
ASSERT_EQ(nodes.size(), fdef.node_def_size())
<< "Got unexpected number of nodes. Expected: ["
<< absl::StrJoin(nodes, ", ")
<< "] Actual nodes in fdef: " << fdef.DebugString();
for (const NodeDef& node_def : fdef.node_def()) {
ASSERT_TRUE(nodes.find(node_def.name()) != nodes.end())
<< "Got unexpected node: " << node_def.name()
<< " in fdef: " << fdef.DebugString();
}
}
void VerifyFDefInputs(const tensorflow::FunctionDef& fdef,
const std::vector<IOSpec>& inputs) {
const OpDef& signature = fdef.signature();
ASSERT_EQ(inputs.size(), signature.input_arg_size());
for (int i = 0; i < inputs.size(); ++i) {
const OpDef::ArgDef& arg = signature.input_arg(i);
const IOSpec& in = inputs[i];
if (in.second != DT_INVALID) {
ASSERT_EQ(arg.type(), in.second)
<< "Got unexpected type for input " << i
<< ". fdef: " << fdef.DebugString();
}
ASSERT_EQ(arg.name(), in.first) << "Got unexpected name for input " << i
<< ". fdef: " << fdef.DebugString();
}
}
void VerifyFDefOutputs(const tensorflow::FunctionDef& fdef,
const std::vector<IOSpec>& outputs) {
const OpDef& signature = fdef.signature();
ASSERT_EQ(outputs.size(), signature.output_arg_size());
for (int i = 0; i < outputs.size(); ++i) {
const OpDef::ArgDef& arg = signature.output_arg(i);
const IOSpec& out = outputs[i];
if (out.second != DT_INVALID) {
ASSERT_EQ(arg.type(), out.second)
<< "Got unexpected type for output " << i
<< ". fdef: " << fdef.DebugString();
}
ASSERT_EQ(arg.name(), out.first) << "Got unexpected name for output " << i
<< ". fdef: " << fdef.DebugString();
}
}
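// Checks the function's data and control edges: actual edges are gathered from
// every node's inputs plus the ret-map entries that wire body outputs to
// signature outputs, then matched against the expected sets (exactly, unless
// is_exact_edges is false).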
void VerifyFDefEdges(
const tensorflow::FunctionDef& fdef,
const std::vector<EdgeSpec>& e_edges,
const std::vector<EdgeSpec>& c_edges,
bool is_exact_edges = true) {
std::set<EdgeSpec> a_edges;
for (const NodeDef& node_def : fdef.node_def()) {
for (int i = 0; i < node_def.input_size(); ++i) {
const string& in = node_def.input(i);
const auto& v =
a_edges.insert({in, strings::StrCat(node_def.name(), ":", i)});
ASSERT_TRUE(v.second) << "Duplicate edge " << in << " -> "
<< strings::StrCat(node_def.name(), ":", i)
<< ". fdef: " << fdef.DebugString();
}
}
for (const OpDef::ArgDef& arg : fdef.signature().output_arg()) {
const auto& iter = fdef.ret().find(arg.name());
if (iter != fdef.ret().end()) {
const auto& v = a_edges.insert({iter->second, arg.name()});
ASSERT_TRUE(v.second) << "Duplicate edge " << iter->second << " -> "
<< arg.name() << ". fdef: " << fdef.DebugString();
} else {
const auto& v = a_edges.insert({arg.name(), arg.name()});
ASSERT_TRUE(v.second) << "Duplicate edge " << arg.name() << " -> "
<< arg.name() << ". fdef: " << fdef.DebugString();
}
}
for (const EdgeSpec& e : e_edges) {
ASSERT_TRUE(a_edges.find(e) != a_edges.end())
<< "Failed to find expected edge " << e.ToString()
<< " in fdef: " << fdef.DebugString();
}
for (const EdgeSpec& e : c_edges) {
ASSERT_TRUE(a_edges.find(e) != a_edges.end())
<< "Failed to find expected control edge " << e.ToString()
<< " in fdef: " << fdef.DebugString();
}
if (is_exact_edges) {
ASSERT_EQ(e_edges.size() + c_edges.size(), a_edges.size())
<< "Expected edges: " << ToString(e_edges)
<< " Expected Control edges: " << ToString(c_edges)
<< " Actual edges: " << ToString(a_edges)
<< " in fdef: " << fdef.DebugString();
}
}
void VerifyFDef(const std::unordered_set<string>& nodes,
const std::vector<IOSpec>& inputs,
const std::vector<IOSpec>& outputs,
const std::vector<EdgeSpec>& e_edges,
const std::vector<EdgeSpec>& c_edges,
bool is_exact_edges = true) {
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
VerifyFDefNodes(fdef, nodes);
VerifyFDefInputs(fdef, inputs);
VerifyFDefOutputs(fdef, outputs);
VerifyFDefEdges(fdef, e_edges, c_edges, is_exact_edges);
}
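// Round-trips func_ through serialization: exports it to a FunctionDef,
// deletes the original, and re-imports it with TF_FunctionImportFunctionDef.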
void Reincarnate() {
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
TF_DeleteFunction(func_);
string buf;
ASSERT_TRUE(fdef.SerializeToString(&buf));
func_ = TF_FunctionImportFunctionDef(buf.data(), buf.size(), s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
void GetAttr(const char* attr_name, AttrValue* out_attr) {
TF_Buffer* attr_buf = TF_NewBuffer();
TF_FunctionGetAttrValueProto(func_, attr_name, attr_buf, s_);
ASSERT_TRUE(out_attr->ParseFromArray(attr_buf->data, attr_buf->length));
TF_DeleteBuffer(attr_buf);
}
const char* func_name_ = "MyFunc";
const char* func_node_name_ = "MyFunc_0";
TF_Status* s_;
TF_Graph* func_graph_;
TF_Graph* host_graph_;
TF_Function* func_;
std::unordered_set<string> empty_;
};
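// The tests below exercise TF_GraphToFunction on bodies of increasing
// complexity, verifying both runtime behavior and the generated FunctionDef.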
TEST_F(CApiFunctionTest, OneOp_ZeroInputs_OneOutput) {
TF_Operation* c = ScalarConst(10, func_graph_, s_, "scalar10");
Define(-1, {}, {}, {c}, {});
TF_Operation* func_op = Use({});
Run({}, func_op, 10);
VerifyFDef({"scalar10_0"}, {}, {{"scalar10", DT_INT32}},
{{"scalar10_0:output:0", "scalar10"}}, {});
}
TEST_F(CApiFunctionTest, OneOp_OneInput_OneOutput) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* neg = Neg(feed, func_graph_, s_);
Define(-1, {}, {feed}, {neg}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
VerifyFDef({"neg_0"}, {{"feed", DT_INT32}}, {{"neg", DT_INT32}},
{{"feed", "neg_0:0"}, {"neg_0:y:0", "neg"}}, {});
}
TEST_F(CApiFunctionTest, OneOutput_OutputNames) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* neg = Neg(feed, func_graph_, s_);
Define(-1, {}, {feed}, {neg}, {"negated_num"});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
VerifyFDef({"neg"}, {{"feed", DT_INT32}}, {{"negated_num", DT_INT32}},
{{"feed", "neg:0"}, {"neg:y:0", "negated_num"}}, {});
}
TEST_F(CApiFunctionTest, OutputNames_SameNameAsInput) {
TF_Operation* feed = Placeholder(func_graph_, s_, "negation");
TF_Operation* neg = Neg(feed, func_graph_, s_, "neg");
Define(-1, {}, {feed}, {neg}, {"negation"});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
VerifyFDef({"neg"}, {{"negation_0", DT_INT32}}, {{"negation", DT_INT32}},
{{"negation_0", "neg:0"}, {"neg:y:0", "negation"}}, {});
}
TEST_F(CApiFunctionTest, ZeroOps_Identity) {
TF_Operation* feed = Placeholder(func_graph_, s_);
Define(-1, {}, {feed}, {feed}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 3);
VerifyFDef(empty_, {{"feed_0", DT_INT32}}, {{"feed", DT_INT32}},
{{"feed_0", "feed"}}, {});
}
TEST_F(CApiFunctionTest, ZeroOps_Permutation) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Define(-1, {}, {feed1, feed2}, {feed2, feed1}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {3, 2});
VerifyFDef(empty_, M({{"feed1_0"}, {"feed2_0"}}), M({{"feed2"}, {"feed1"}}),
{{"feed1_0", "feed1"}, {"feed2_0", "feed2"}}, {});
}
TEST_F(CApiFunctionTest, ZeroOps_Permutation_OutputNames) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Define(-1, {}, {feed1, feed2}, {feed2, feed1}, {"first", "second"});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {3, 2});
VerifyFDef(empty_, M({{"feed1"}, {"feed2"}}), M({{"first"}, {"second"}}),
{{"feed1", "second"}, {"feed2", "first"}}, {});
}
TEST_F(CApiFunctionTest, OneOp_TwoInputs_OneOutput) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0"}, M({{"feed1"}, {"feed2"}}), M({{"add"}}),
{{"feed1", "add_0:0"}, {"feed2", "add_0:1"}, {"add_0:sum:0", "add"}}, {});
}
TEST_F(CApiFunctionTest, OneOp_TwoInputs_ZeroOutputs) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
Use({two, func_feed});
VerifyFDef({"add"}, M({{"feed1"}, {"feed2"}}), {},
{{"feed1", "add:0"}, {"feed2", "add:1"}}, {});
}
TEST_F(CApiFunctionTest, TwoOps_ThreeInputs_OneOutput) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add2}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* ten = ScalarConst(10, host_graph_, s_, "ten");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, ten, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 10 + 3);
VerifyFDef({"add1", "add2_0"}, M({{"feed1"}, {"feed2"}, {"feed3"}}),
M({{"add2"}}),
{{"feed1", "add1:0"},
{"feed2", "add1:1"},
{"add1:sum:0", "add2_0:0"},
{"feed3", "add2_0:1"},
{"add2_0:sum:0", "add2"}},
{});
}
TEST_F(CApiFunctionTest, OneOp_TwoInputs_TwoDuplicateOutputs) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {add, add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {5, 5});
VerifyFDef({"add_1"}, M({{"feed1"}, {"feed2"}}), M({{"add"}, {"add_0"}}),
{{"feed1", "add_1:0"},
{"feed2", "add_1:1"},
{"add_1:sum:0", "add"},
{"add_1:sum:0", "add_0"}},
{});
}
TEST_F(CApiFunctionTest, TwoDuplicateOutputs_OutputNames) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {add, add}, {"out1", "out2"});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {5, 5});
VerifyFDef({"add"}, M({{"feed1"}, {"feed2"}}), M({{"out1"}, {"out2"}}),
{{"feed1", "add:0"},
{"feed2", "add:1"},
{"add:sum:0", "out1"},
{"add:sum:0", "out2"}},
{});
}
TEST_F(CApiFunctionTest, TwoOps_ThreeInputs_TwoOutputs) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add1, add2}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* ten = ScalarConst(10, host_graph_, s_, "ten");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, ten, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {12, 15});
VerifyFDef({"add1_0", "add2_0"}, M({{"feed1"}, {"feed2"}, {"feed3"}}),
M({{"add1"}, {"add2"}}),
{{"feed1", "add1_0:0"},
{"feed2", "add1_0:1"},
{"add1_0:sum:0", "add2_0:0"},
{"feed3", "add2_0:1"},
{"add1_0:sum:0", "add1"},
{"add2_0:sum:0", "add2"}},
{});
}
TEST_F(CApiFunctionTest, FromSubsetOfOps) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(1, {add2}, {add1, feed3}, {add2}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add2_0"}, M({{"add1"}, {"feed3"}}), M({{"add2"}}),
{{"add1", "add2_0:0"}, {"feed3", "add2_0:1"}, {"add2_0:sum:0", "add2"}},
{});
}
TEST_F(CApiFunctionTest, UsingOneOutputOfSplit) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* split = Split3(feed, func_graph_, s_);
DefineT(-1, {}, {{feed, 0}}, {{split, 1}}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
RunT({{func_feed, Int32Tensor({1, 2, 3, 4, 5, 6})}}, {{func_op, 0}},
{{3, 4}});
VerifyFDef({"split3_const0", "split3_0"}, M({{"feed"}}), M({{"split3"}}),
{{"split3_const0:output:0", "split3_0:0"},
{"feed", "split3_0:1"},
{"split3_0:output:1", "split3"}},
{});
}
TEST_F(CApiFunctionTest, UsingTwoOutputsOfSplit) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* split = Split3(feed, func_graph_, s_);
DefineT(-1, {}, {{feed, 0}}, {{split, 0}, {split, 2}}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
RunT({{func_feed, Int32Tensor({1, 2, 3, 4, 5, 6})}},
{{func_op, 0}, {func_op, 1}}, {{1, 2}, {5, 6}});
VerifyFDef({"split3_const0", "split3_1"}, M({{"feed"}}),
M({{"split3"}, {"split3_0"}}),
{{"split3_const0:output:0", "split3_1:0"},
{"feed", "split3_1:1"},
{"split3_1:output:0", "split3"},
{"split3_1:output:2", "split3_0"}},
{});
}
TEST_F(CApiFunctionTest, UsingTwoOutputsOfSplitAsInputs) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* split = Split3(feed, func_graph_, s_);
TF_Operation* add = Add({split, 0}, {split, 2}, func_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
DefineT(1, {add}, {{split, 0}, {split, 2}}, {{add, 0}}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0"}, M({{"split3"}, {"split3_0"}}), M({{"add"}}),
{{"split3", "add_0:0"}, {"split3_0", "add_0:1"}, {"add_0:sum:0", "add"}},
{});
}
TEST_F(CApiFunctionTest, NodesUsedInInputsMustHaveSingleOutput) {
TF_Tensor* tensor_123 = Int32Tensor({1, 2, 3});
TF_Operation* c = Const(tensor_123, func_graph_, s_, "const_array");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* split = Split3(c, func_graph_, s_);
TF_Operation* add = Add({split, 0}, {split, 2}, func_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
DefineT(-1, {}, {{split, 0}, {split, 2}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("When `num_opers` is set to -1, nodes referenced in "
"`inputs` must have a single output. Node split3 has "
"3 outputs. Encountered while creating function 'MyFunc'"),
string(TF_Message(s_)));
TF_DeleteTensor(tensor_123);
}
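// A function body may contain a while loop built with TF_NewWhile; only a
// subset of the generated Enter/Merge/Switch/Exit edges is spot-checked here
// (is_exact_edges is false).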
TEST_F(CApiFunctionTest, FunctionWithWhileLoop) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
std::vector<TF_Output> outputs;
{
std::vector<TF_Output> inputs = {{feed1, 0}, {feed2, 0}};
std::unique_ptr<TF_WhileParams> params(new TF_WhileParams(
TF_NewWhile(func_graph_, &inputs[0], inputs.size(), s_)));
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params->name = "test_loop";
outputs.resize(2, {nullptr, -1});
TF_Operation* less_than = LessThan(
params->cond_inputs[0], params->cond_inputs[1], params->cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params->cond_output = {less_than, 0};
TF_Operation* add1 = Add(params->body_inputs[0], params->body_inputs[1],
params->body_graph, s_, "add1");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* one = ScalarConst(1, params->body_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* add2 = Add(add1, one, params->body_graph, s_, "add2");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params->body_outputs[0] = {add2, 0};
params->body_outputs[1] = params->body_inputs[1];
TF_FinishWhile(params.get(), s_, &outputs[0]);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
DefineT(-1, {}, {{feed1, 0}, {feed2, 0}}, {outputs[0]}, {});
TF_Operation* five = ScalarConst(5, host_graph_, s_, "five");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed, five});
Run({{func_feed, Int32Tensor(2)}}, func_op, 2 + 5 + 1);
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
VerifyFDefInputs(fdef, M({{"feed1"}, {"feed2"}}));
VerifyFDefOutputs(fdef, M({{"test_loop_exit"}}));
VerifyFDefEdges(fdef,
{{"feed1", "test_loop/Enter:0"},
{"test_loop/Enter:output:0", "test_loop/Merge:0"},
{"test_loop/Merge:output:0", "test_loop/Switch:0"},
{"test_loop/Switch:output_false:0", "test_loop/Exit:0"},
{"test_loop/Exit:output:0", "test_loop_exit"}},
{}, false);
}
TEST_F(CApiFunctionTest, ControlDependency) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* five = ScalarConst(5, func_graph_, s_);
TF_Operation* add =
AddWithCtrlDependency(feed1, feed2, func_graph_, five, s_);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Define(-1, {}, {feed1, feed2}, {add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0", "scalar"}, M({{"feed1"}, {"feed2"}}), M({{"add"}}),
{{"feed1", "add_0:0"}, {"feed2", "add_0:1"}, {"add_0:sum:0", "add"}},
{{"^scalar", "add_0:2"}});
}
TEST_F(CApiFunctionTest, ControlDependencyOutsideOfBody) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* five = ScalarConst(5, func_graph_, s_);
TF_Operation* add =
AddWithCtrlDependency(feed1, feed2, func_graph_, five, s_);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Define(1, {add}, {feed1, feed2}, {add}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("The source of control edge [id=3 scalar:-1 -> add:-1] "
"is not in the body. Encountered while creating "
"function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, ControlDependencyOutsideOfBody_FromInputNode) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add =
AddWithCtrlDependency(feed1, feed2, func_graph_, feed1, s_);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Define(-1, {}, {feed1, feed2}, {add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0"}, M({{"feed1"}, {"feed2"}}), M({{"add"}}),
{{"feed1", "add_0:0"}, {"feed2", "add_0:1"}, {"add_0:sum:0", "add"}},
{{"^feed1", "add_0:2"}});
}
TEST_F(CApiFunctionTest, DuplicateInputsAreNotAllowed) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* add = Add(feed1, feed1, func_graph_, s_);
Define(-1, {}, {feed1, feed1}, {add}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(
string("TF_Output feed1:0 appears more than once in the input list"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, DuplicateOutputNamesAreNotAllowed) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add1, add2}, {"my_out", "my_out"},
true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Cannot have duplicate output names. Name 'my_out' "
"appears more than once in 'output_names' array."),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidInputTensor_HighIndex) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {feed2, 2}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s_));
EXPECT_EQ(string("Node 'feed2' (type: 'Placeholder', num of outputs: 1) does "
"not have output 2\n\tEncountered while processing "
"input 1 into function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidInputTensor_BadNodePtr) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {nullptr, 0}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Node is null\n\tEncountered while processing input 1 "
"into function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidOutputTensor_HighIndex) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {feed2, 0}}, {{add, 3}}, {}, true);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s_));
EXPECT_EQ(string("Node 'add' (type: 'AddN', num of outputs: 1) does "
"not have output 3\n\tEncountered while processing "
"output 0 from function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidOutputTensor_BadNodePtr) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {feed2, 0}}, {{nullptr, 3}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Node is null\n\tEncountered while processing output 0 "
"from function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, NodeMissingInput) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(1, {add}, {{feed1, 0}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Input 1, 'feed2:0', of node 'add' in function 'MyFunc' "
"is not available. You might need to include it in inputs "
"or include its source node in the body"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, OutputOpNotInBody) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* scalar = ScalarConst(2, func_graph_, s_);
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(1, {add}, {feed1, feed2}, {add, scalar}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("TF_Output scalar:0 is neither in the function body nor "
"among function inputs. Encountered while creating "
"function 'MyFunc'"),
string(TF_Message(s_)));
}
void DefineFunction(const char* name, TF_Function** func,
const char* description = nullptr,
bool append_hash = false) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Operation* feed = Placeholder(func_graph.get(), s.get());
TF_Operation* neg = Neg(feed, func_graph.get(), s.get());
std::vector<StackFrame> feed_frames = {{"feed.cc", 10, "alpha"}};
std::vector<StackFrame> neg_frames = {{"neg.cc", 15, "beta"}};
feed->node.SetStackTrace(std::make_shared<FrozenStackTrace>(feed_frames));
neg->node.SetStackTrace(std::make_shared<FrozenStackTrace>(neg_frames));
TF_Output inputs[] = {{feed, 0}};
TF_Output outputs[] = {{neg, 0}};
*func = TF_GraphToFunction(func_graph.get(), name, append_hash, -1,
nullptr, 1, inputs, 1, outputs,
nullptr,
nullptr, description, s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(*func, nullptr);
}
REGISTER_OP("CustomOp")
.Output("output: float32")
.Attr("index: int")
.SetShapeFn(tensorflow::shape_inference::UnknownShape);
void NodeWithPlaceholderAttrHelper(TF_Graph* graph, TF_Status* s,
const char* name, const char* placeholder,
TF_Operation** op) {
TF_OperationDescription* desc = TF_NewOperation(graph, "CustomOp", name);
TF_SetAttrPlaceholder(desc, "index", placeholder);
*op = TF_FinishOperation(desc, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
ASSERT_NE(*op, nullptr);
}
TEST_F(CApiFunctionTest, GraphToFunctionDefWithPlaceholderAttr) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Operation *node1, *node2, *node3;
NodeWithPlaceholderAttrHelper(func_graph.get(), s.get(), "node1", "v1",
&node1);
NodeWithPlaceholderAttrHelper(func_graph.get(), s.get(), "node2", "v1",
&node2);
NodeWithPlaceholderAttrHelper(func_graph.get(), s.get(), "node3", "v2",
&node3);
TF_Output outputs[] = {{node1, 0}, {node2, 0}, {node3, 0}};
func_ = TF_GraphToFunction(
func_graph.get(), "func", false, -1,
nullptr, 0, nullptr, 3, outputs,
nullptr,
nullptr, nullptr, s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(func_, nullptr);
ASSERT_EQ(func_->record->fdef().signature().attr().size(), 2);
EXPECT_EQ(func_->record->fdef().signature().attr(0).name(), "v1");
EXPECT_EQ(func_->record->fdef().signature().attr(0).type(), "int");
EXPECT_EQ(func_->record->fdef().signature().attr(1).name(), "v2");
EXPECT_EQ(func_->record->fdef().signature().attr(1).type(), "int");
}
void NodeWithAttrHelper(TF_Graph* graph, TF_Status* s, const char* name,
const char* attr_name, const char* attr_value,
TF_Operation** op) {
TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", name);
TF_SetAttrType(desc, "dtype", TF_INT32);
TF_SetAttrString(desc, attr_name, attr_value, strlen(attr_value));
*op = TF_FinishOperation(desc, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
ASSERT_NE(*op, nullptr);
}
TEST_F(CApiFunctionTest, GraphToFunctionDefWithArgAttr) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Operation* node;
NodeWithAttrHelper(func_graph.get(), s.get(), "node", "_test_attr", "value",
&node);
TF_Output inputs[] = {{node, 0}};
func_ = TF_GraphToFunction(
func_graph.get(), "func", false, -1,
nullptr, 1, inputs, 0, nullptr,
nullptr,
nullptr, nullptr, s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(func_, nullptr);
ASSERT_EQ(func_->record->fdef().arg_attr_size(), 1);
auto arg_attrs = func_->record->fdef().arg_attr().find(0);
ASSERT_NE(arg_attrs, func_->record->fdef().arg_attr().end());
auto iter = arg_attrs->second.attr().find("_test_attr");
ASSERT_NE(iter, arg_attrs->second.attr().end());
EXPECT_EQ(iter->second.s(), "value");
}
TEST_F(CApiFunctionTest, TFGraphToFunctionWithStackTraces) {
DefineFunction(func_name_, &func_);
auto stack_traces = func_->record->stack_traces();
EXPECT_EQ(stack_traces.size(), 4);
EXPECT_EQ(stack_traces["neg"]->ToString({}), kNegStackToString);
EXPECT_EQ(stack_traces["feed"]->ToString({}), kFeedStackToString);
}
TEST_F(CApiFunctionTest, TFGraphCopyFunctionWithStackTraces) {
DefineFunction(func_name_, &func_);
TF_Function* grad_func;
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_DeleteFunction(grad_func);
const StackTracesMap* func_stack_traces;
const StackTracesMap* grad_stack_traces;
{
mutex_lock l(host_graph_->mu);
auto flib_def = host_graph_->graph.flib_def();
func_stack_traces = flib_def.GetStackTraces(func_name_);
grad_stack_traces = flib_def.GetStackTraces("MyGrad");
}
ASSERT_NE(func_stack_traces, nullptr);
EXPECT_EQ(func_stack_traces->size(), 4);
EXPECT_EQ(func_stack_traces->at("neg")->ToString({}), kNegStackToString);
EXPECT_EQ(func_stack_traces->at("feed")->ToString({}), kFeedStackToString);
ASSERT_NE(grad_stack_traces, nullptr);
EXPECT_EQ(grad_stack_traces->size(), 4);
EXPECT_EQ(grad_stack_traces->at("neg")->ToString({}), kNegStackToString);
EXPECT_EQ(grad_stack_traces->at("feed")->ToString({}), kFeedStackToString);
}
TEST_F(CApiFunctionTest, SetGradientAndRun) {
DefineFunction(func_name_, &func_);
TF_Function* grad_func;
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef gdef;
GetGraphDef(host_graph_, &gdef);
std::vector<string> func_names = GetFuncNames(gdef);
ASSERT_EQ(2, func_names.size());
ASSERT_EQ(func_name_, func_names[0]);
ASSERT_EQ("MyGrad", func_names[1]);
std::vector<std::pair<string, string>> grads = GetGradDefs(gdef);
ASSERT_EQ(1, grads.size());
ASSERT_EQ(func_name_, grads[0].first);
ASSERT_EQ("MyGrad", grads[0].second);
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_DeleteFunction(grad_func);
GraphDef gdef2;
GetGraphDef(host_graph_, &gdef2);
ASSERT_EQ(gdef.DebugString(), gdef2.DebugString());
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
}
TEST_F(CApiFunctionTest, SameGradForTwoFunctions) {
TF_Function* func1;
TF_Function* func2;
TF_Function* grad_func;
DefineFunction("FooFunc1", &func1);
DefineFunction("FooFunc2", &func2);
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func1, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, func2, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef gdef;
GetGraphDef(host_graph_, &gdef);
std::vector<std::pair<string, string>> grads = GetGradDefs(gdef);
ASSERT_EQ(2, grads.size());
ASSERT_EQ("FooFunc1", grads[0].first);
ASSERT_EQ("MyGrad", grads[0].second);
ASSERT_EQ("FooFunc2", grads[1].first);
ASSERT_EQ("MyGrad", grads[1].second);
TF_DeleteFunction(func1);
TF_DeleteFunction(func2);
TF_DeleteFunction(grad_func);
}
TEST_F(CApiFunctionTest, AddFunctionsThenMakeOneGradientOfAnother) {
TF_Function* func;
TF_Function* grad_func;
DefineFunction("FooFunc", &func);
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, grad_func, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef gdef;
GetGraphDef(host_graph_, &gdef);
std::vector<string> func_names = GetFuncNames(gdef);
ASSERT_EQ(2, func_names.size());
ASSERT_EQ("FooFunc", func_names[0]);
ASSERT_EQ("MyGrad", func_names[1]);
ASSERT_EQ(0, GetGradDefs(gdef).size());
TF_GraphCopyFunction(host_graph_, func, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
gdef.Clear();
GetGraphDef(host_graph_, &gdef);
std::vector<std::pair<string, string>> grads = GetGradDefs(gdef);
ASSERT_EQ(1, grads.size());
ASSERT_EQ("FooFunc", grads[0].first);
ASSERT_EQ("MyGrad", grads[0].second);
TF_DeleteFunction(func);
TF_DeleteFunction(grad_func);
}
TEST_F(CApiFunctionTest, GradientErrorCases) {
DefineFunction(func_name_, &func_);
TF_Function* grad_func1;
TF_Function* grad_func2;
DefineFunction("MyGrad1", &grad_func1);
DefineFunction("MyGrad2", &grad_func2);
TF_GraphCopyFunction(host_graph_, nullptr, func_, s_);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("'func' argument to TF_GraphCopyFunction cannot be null"),
string(TF_Message(s_)));
TF_GraphCopyFunction(host_graph_, func_, grad_func1, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, func_, grad_func2, s_);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Cannot assign gradient function 'MyGrad2' to 'MyFunc' "
"because it already has gradient function 'MyGrad1'"),
string(TF_Message(s_)));
TF_DeleteFunction(grad_func1);
TF_DeleteFunction(grad_func2);
}
TEST_F(CApiFunctionTest, ImportFunctionDef) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add1, add2},
{"internal_out", "final_out"});
Reincarnate();
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* ten = ScalarConst(10, host_graph_, s_, "ten");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, ten, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {12, 15});
VerifyFDef({"add1", "add2"}, M({{"feed1"}, {"feed2"}, {"feed3"}}),
M({{"internal_out"}, {"final_out"}}),
{{"feed1", "add1:0"},
{"feed2", "add1:1"},
{"add1:sum:0", "add2:0"},
{"feed3", "add2:1"},
{"add1:sum:0", "internal_out"},
{"add2:sum:0", "final_out"}},
{});
}
TEST_F(CApiFunctionTest, ImportFunctionDef_InvalidProto) {
char proto[] = {0x0, 0x0, 0x0, 0x0};
func_ = TF_FunctionImportFunctionDef(proto, sizeof(proto), s_);
EXPECT_TRUE(func_ == nullptr);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Invalid FunctionDef given to TF_FunctionImportFunctionDef"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, Attribute) {
DefineFunction(func_name_, &func_);
TF_Buffer* attr_buf = TF_NewBuffer();
TF_FunctionGetAttrValueProto(func_, "foo_attr", attr_buf, s_);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Function 'MyFunc' has no attr named 'foo_attr'."),
string(TF_Message(s_)));
TF_DeleteBuffer(attr_buf);
tensorflow::AttrValue attr;
attr.set_s("test_attr_value");
string bytes;
attr.SerializeToString(&bytes);
TF_FunctionSetAttrValueProto(func_, "test_attr_name", bytes.data(),
bytes.size(), s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
AttrValue read_attr;
GetAttr("test_attr_name", &read_attr);
ASSERT_EQ(attr.DebugString(), read_attr.DebugString());
Reincarnate();
AttrValue read_attr2;
GetAttr("test_attr_name", &read_attr2);
ASSERT_EQ(attr.DebugString(), read_attr2.DebugString());
}
TEST_F(CApiFunctionTest, Description) {
DefineFunction(func_name_, &func_, "Return something");
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
ASSERT_EQ(string("Return something"), fdef.signature().description());
}
TEST_F(CApiFunctionTest, Name) {
DefineFunction("long_func_name", &func_, "Return something",
false);
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
ASSERT_EQ(string("long_func_name"), fdef.signature().name());
}
TEST_F(CApiFunctionTest, AppendHash) {
DefineFunction("func_name_base", &func_, "Return something",
true);
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
ASSERT_EQ(string("func_name_base_ZpgUD4x8oqk"), fdef.signature().name());
#else
ASSERT_EQ(string("func_name_base_qaJ8jA8UmGY"), fdef.signature().name());
#endif
}
TEST_F(CApiFunctionTest, GetOpDef) {
DefineFunction(func_name_, &func_);
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Buffer* buffer = TF_NewBuffer();
TF_GraphGetOpDef(host_graph_, func_name_, buffer, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
string data(static_cast<const char*>(buffer->data), buffer->length);
OpDef op_def;
op_def.ParseFromString(data);
EXPECT_EQ(op_def.name(), func_name_);
EXPECT_EQ(op_def.input_arg_size(), 1);
EXPECT_EQ(op_def.output_arg_size(), 1);
EXPECT_FALSE(op_def.is_stateful());
TF_DeleteBuffer(buffer);
}
void DefineStatefulFunction(const char* name, TF_Function** func) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Tensor* tensor_shape = Int32Tensor({37, 1});
TF_Operation* shape = Const(tensor_shape, func_graph.get(), s.get(), "shape");
TF_Operation* random =
RandomUniform(shape, TF_FLOAT, func_graph.get(), s.get());
TF_Output outputs[] = {{random, 0}};
*func = TF_GraphToFunction(func_graph.get(), name,
false, -1,
nullptr, 0, nullptr, 1, outputs,
nullptr,
nullptr, "", s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(*func, nullptr);
TF_DeleteTensor(tensor_shape);
}
TEST_F(CApiFunctionTest, StatefulOpDef) {
DefineStatefulFunction(func_name_, &func_);
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Buffer* buffer = TF_NewBuffer();
TF_GraphGetOpDef(host_graph_, func_name_, buffer, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
string data(static_cast<const char*>(buffer->data), buffer->length);
OpDef op_def;
op_def.ParseFromString(data);
EXPECT_EQ(op_def.name(), func_name_);
EXPECT_EQ(op_def.input_arg_size(), 0);
EXPECT_EQ(op_def.output_arg_size(), 1);
EXPECT_TRUE(op_def.is_stateful());
TF_DeleteBuffer(buffer);
}
void AssertEqual(TF_Function* f1, TF_Function* f2) {
string s1, s2;
tensorflow::FunctionDef fdef1, fdef2;
ASSERT_TRUE(GetFunctionDef(f1, &fdef1));
ASSERT_TRUE(GetFunctionDef(f2, &fdef2));
SerializeToStringDeterministic(fdef1, &s1);
SerializeToStringDeterministic(fdef2, &s2);
ASSERT_EQ(s1, s2);
}
string GetName(TF_Function* func) {
tensorflow::FunctionDef fdef;
GetFunctionDef(func, &fdef);
return fdef.signature().name();
}
TEST_F(CApiFunctionTest, GetFunctionsFromGraph) {
TF_Function* funcs[2];
EXPECT_EQ(TF_GraphNumFunctions(host_graph_), 0);
TF_GraphGetFunctions(host_graph_, nullptr, 0, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Function* func0;
DefineFunction("FooFunc0", &func0);
TF_GraphCopyFunction(host_graph_, func0, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphNumFunctions(host_graph_), 1);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 0, s_), 0);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 1, s_), 1);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
AssertEqual(func0, funcs[0]);
TF_DeleteFunction(funcs[0]);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 2, s_), 1);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
AssertEqual(func0, funcs[0]);
TF_DeleteFunction(funcs[0]);
TF_Function* func1;
DefineFunction("FooFunc1", &func1);
TF_GraphCopyFunction(host_graph_, func1, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphNumFunctions(host_graph_), 2);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 0, s_), 0);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 2, s_), 2);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
if (GetName(funcs[0]) == GetName(func0)) {
AssertEqual(func0, funcs[0]);
AssertEqual(func1, funcs[1]);
} else {
AssertEqual(func0, funcs[1]);
AssertEqual(func1, funcs[0]);
}
TF_DeleteFunction(funcs[0]);
TF_DeleteFunction(funcs[1]);
TF_DeleteFunction(func0);
TF_DeleteFunction(func1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/c_api_function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/c_api_function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71508418-052b-485a-b417-d600f37f8823 | cpp | tensorflow/tensorflow | tf_status_helper | tensorflow/c/tf_status_helper.cc | tensorflow/c/tf_status_helper_test.cc | #include "tensorflow/c/tf_status_helper.h"
#include <string>
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/c/tsl_status_helper.h"
namespace tsl {
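// Copies an absl::Status into a TF_Status, translating the canonical error
// code and forwarding every payload as a string key/value pair.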
void Set_TF_Status_from_Status(TF_Status* tf_status,
const absl::Status& status) {
TF_SetStatus(tf_status, TSLCodeFromStatusCode(status.code()),
absl::StatusMessageAsCStr(status));
status.ForEachPayload(
[tf_status](absl::string_view key, const absl::Cord& value) {
std::string key_str(key);
std::string value_str(value);
TF_SetPayload(tf_status, key_str.c_str(), value_str.c_str());
});
}
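// The inverse conversion: rebuilds an absl::Status, including its payloads,
// from a TF_Status.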
absl::Status StatusFromTF_Status(const TF_Status* tf_status) {
absl::Status status(StatusCodeFromTSLCode(TF_GetCode(tf_status)),
TF_Message(tf_status));
TF_ForEachPayload(
tf_status,
[](const char* key, const char* value, void* capture) {
absl::Status* status = static_cast<absl::Status*>(capture);
status->SetPayload(key, absl::Cord(absl::string_view(value)));
},
&status);
return status;
}
} | #include "tensorflow/c/tf_status_helper.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(StatusHelper, TestStatusHelper) {
TSL_Status* s = TSL_NewStatus();
absl::Status cc_status(absl::InvalidArgumentError("some error"));
cc_status.SetPayload("key1", absl::Cord("value1"));
cc_status.SetPayload("key2", absl::Cord("value2"));
Set_TF_Status_from_Status(s, cc_status);
ASSERT_EQ(TSL_INVALID_ARGUMENT, TSL_GetCode(s));
ASSERT_EQ(std::string("some error"), TSL_Message(s));
absl::Status another_cc_status(StatusFromTF_Status(s));
ASSERT_FALSE(another_cc_status.ok());
ASSERT_EQ(std::string("some error"), another_cc_status.message());
ASSERT_EQ(error::INVALID_ARGUMENT, another_cc_status.code());
ASSERT_EQ(cc_status.GetPayload("key1"), another_cc_status.GetPayload("key1"));
ASSERT_EQ(cc_status.GetPayload("key2"), another_cc_status.GetPayload("key2"));
TSL_DeleteStatus(s);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/tf_status_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/tf_status_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6fd03f0-c227-443f-88a6-f4d89d5469bf | cpp | tensorflow/tensorflow | tensor_shape_utils | tensorflow/c/kernels/tensor_shape_utils.cc | tensorflow/c/kernels/tensor_shape_utils_test.cc | #include "tensorflow/c/kernels/tensor_shape_utils.h"
#include <string>
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
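// Renders the shape of `tensor` as "[d0,d1,...]", matching what
// TensorShape::DebugString prints for fully defined shapes; CHECK-fails on a
// negative rank or dimension.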
std::string ShapeDebugString(TF_Tensor* tensor) {
CHECK_GE(TF_NumDims(tensor), 0);
tensorflow::string s = "[";
for (int i = 0; i < TF_NumDims(tensor); ++i) {
if (i > 0) tensorflow::strings::StrAppend(&s, ",");
int64_t dim = TF_Dim(tensor, i);
CHECK_GE(dim, 0);
tensorflow::strings::StrAppend(&s, dim);
}
tensorflow::strings::StrAppend(&s, "]");
return s;
}
} | #include "tensorflow/c/kernels/tensor_shape_utils.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
struct TF_TensorWrapper {
TF_Tensor* tf_tensor;
explicit TF_TensorWrapper(TF_Tensor* tensor) { tf_tensor = tensor; }
~TF_TensorWrapper() { TF_DeleteTensor(tf_tensor); }
};
void TestShapeMatch(TensorShape shape) {
Tensor tensor(DT_FLOAT, shape);
Status status;
TF_Tensor* tf_tensor = TF_TensorFromTensor(tensor, &status);
TF_TensorWrapper tensor_wrapper = TF_TensorWrapper(tf_tensor);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(tensor.shape().DebugString(), ShapeDebugString(tf_tensor));
}
TEST(ShapeDebugString, RegularShape) { TestShapeMatch(TensorShape({5, 4, 7})); }
TEST(ShapeDebugString, ScalarShape) { TestShapeMatch(TensorShape({})); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/tensor_shape_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/tensor_shape_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4093cb77-9ef5-40ec-b488-76b0ae7aff0a | cpp | tensorflow/tensorflow | bitcast_op | tensorflow/c/kernels/bitcast_op.cc | tensorflow/c/kernels/bitcast_op_test.cc | #include <sstream>
#include "tensorflow/c/kernels.h"
#include "tensorflow/c/ops.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/platform/macros.h"
typedef struct BitcastOp {
TF_DataType input_data_type;
TF_DataType output_data_type;
size_t in_size;
size_t out_size;
} BitcastOp;
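// Reads the T and type attrs and caches their element sizes; construction
// fails with InvalidArgument unless the larger element size is an exact
// multiple of the smaller one.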
static void* BitcastOp_Create(TF_OpKernelConstruction* ctx) {
auto* kernel = new BitcastOp;
TF_Status* s = TF_NewStatus();
TF_OpKernelConstruction_GetAttrType(ctx, "T", &kernel->input_data_type, s);
if (TF_GetCode(s) == TF_OK) {
TF_OpKernelConstruction_GetAttrType(ctx, "type", &kernel->output_data_type,
s);
}
if (TF_GetCode(s) == TF_OK) {
kernel->in_size = TF_DataTypeSize(kernel->input_data_type);
kernel->out_size = TF_DataTypeSize(kernel->output_data_type);
size_t check_size = std::max(kernel->in_size, kernel->out_size) %
std::min(kernel->in_size, kernel->out_size);
if (check_size != 0) {
std::ostringstream err;
err << "cannot convert between datatype " << kernel->input_data_type
<< " and " << kernel->output_data_type;
TF_SetStatus(s, TF_INVALID_ARGUMENT, err.str().c_str());
}
}
if (TF_GetCode(s) != TF_OK) {
TF_OpKernelConstruction_Failure(ctx, s);
delete kernel;
kernel = nullptr;
}
TF_DeleteStatus(s);
return kernel;
}
static void BitcastOp_Delete(void* kernel) {
delete static_cast<BitcastOp*>(kernel);
}
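// Derives the output shape for the cast: narrowing (out_size < in_size)
// appends a trailing dimension of in_size/out_size, while widening drops the
// last input dimension, which the validation below requires to equal
// out_size/in_size. The output then aliases the input buffer through
// TF_TensorBitcastFrom rather than copying it.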
static void BitcastOp_Compute(void* kernel, TF_OpKernelContext* ctx) {
auto* k = static_cast<BitcastOp*>(kernel);
int dim_count = 0;
TF_Tensor* tensor;
TF_Status* status = TF_NewStatus();
TF_GetInput(ctx, 0, &tensor, status);
if (TF_GetCode(status) == TF_OK) {
dim_count = TF_NumDims(tensor);
if (!(k->in_size >= k->out_size ||
(dim_count > 0 &&
TF_Dim(tensor, dim_count - 1) == k->out_size / k->in_size))) {
std::ostringstream err;
err << "Cannot bitcast from " << k->input_data_type << " to "
<< k->output_data_type;
TF_SetStatus(status, TF_INVALID_ARGUMENT, err.str().c_str());
}
}
if (TF_GetCode(status) == TF_OK) {
auto* dims = new int64_t[dim_count + 1];
int new_dim_count = dim_count;
for (int dim = 0; dim < dim_count; ++dim) {
dims[dim] = TF_Dim(tensor, dim);
}
if (k->out_size < k->in_size) {
dims[new_dim_count++] = static_cast<int64_t>(k->in_size / k->out_size);
} else if (k->out_size > k->in_size) {
--new_dim_count;
}
TF_Tensor* output = TF_AllocateTensor(k->output_data_type, dims, 0,
TF_DataTypeSize(k->output_data_type));
TF_TensorBitcastFrom(tensor, k->output_data_type, output, dims,
new_dim_count, status);
if (TF_GetCode(status) == TF_OK) {
TF_SetOutput(ctx, 0, output, status);
}
delete[] dims;
TF_DeleteTensor(output);
}
if (TF_GetCode(status) != TF_OK) {
TF_OpKernelContext_Failure(ctx, status);
}
TF_DeleteStatus(status);
TF_DeleteTensor(tensor);
}
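// Registers the kernel for CPU and, when built with CUDA or ROCm support, for
// GPU; the static initializer below triggers registration at load time.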
void RegisterBitcastOpKernel() {
TF_Status* status = TF_NewStatus();
{
auto* builder = TF_NewKernelBuilder("Bitcast", tensorflow::DEVICE_CPU,
&BitcastOp_Create, &BitcastOp_Compute,
&BitcastOp_Delete);
TF_RegisterKernelBuilder("BitcastOp", builder, status);
CHECK_EQ(TF_OK, TF_GetCode(status))
<< "Error while registering bitcast kernel";
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
{
auto* builder = TF_NewKernelBuilder("Bitcast", tensorflow::DEVICE_GPU,
&BitcastOp_Create, &BitcastOp_Compute,
&BitcastOp_Delete);
TF_RegisterKernelBuilder("BitcastOp", builder, status);
CHECK_EQ(TF_OK, TF_GetCode(status))
<< "Error while registering CUDA bitcast kernel";
}
#endif
TF_DeleteStatus(status);
}
TF_ATTRIBUTE_UNUSED static bool IsBitcastOpKernelRegistered = []() {
if (SHOULD_REGISTER_OP_KERNEL("BitcastOp")) {
RegisterBitcastOpKernel();
}
return true;
}(); | #include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {}
Allocator* GetAllocator(AllocatorAttributes ) override {
return cpu_allocator();
}
};
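// Drives a single Bitcast kernel invocation end to end: builds a NodeDef with
// the given input/output dtypes, runs Compute on a CPU DummyDevice, and checks
// the resulting status code and output shape.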
void TestBitcastOp(Tensor* input_tensor, DataType out_type,
TensorShape expected_shape, error::Code expected_code) {
Status status;
NodeDef def;
def.set_op("Bitcast");
def.set_device(DEVICE_CPU);
AttrValue typeAttr;
SetAttrValue(input_tensor->dtype(), &typeAttr);
AttrValue outTypeAttr;
SetAttrValue(out_type, &outTypeAttr);
(*def.mutable_attr())["T"] = typeAttr;
(*def.mutable_attr())["type"] = outTypeAttr;
def.add_input(
strings::StrCat("input1: ", DataTypeString(input_tensor->dtype())));
std::unique_ptr<OpKernel> kernel =
CreateOpKernel(DeviceType(DEVICE_CPU), nullptr, nullptr, def, 1, &status);
ASSERT_TRUE(status.ok()) << status.ToString();
OpKernelContext::Params params;
DummyDevice dummy_device(nullptr);
params.device = &dummy_device;
params.op_kernel = kernel.get();
absl::InlinedVector<TensorValue, 4UL> inputs;
inputs.emplace_back(input_tensor);
params.inputs = inputs;
OpKernelContext ctx(¶ms);
kernel->Compute(&ctx);
ASSERT_EQ(expected_code, ctx.status().code());
if (expected_code == error::OK) {
ASSERT_EQ(expected_shape, ctx.mutable_output(0)->shape())
<< ctx.mutable_output(0)->shape().DebugString();
}
}
TEST(BitcastOpTest, TestUpcast) {
Tensor int8_input(DT_UINT8, {8});
for (int i = 0; i < 8; i++) {
int8_input.vec<uint8>()(i) = static_cast<uint8>(1);
}
TestBitcastOp(&int8_input, DT_UINT64, TensorShape(), error::OK);
}
TEST(BitcastOpTest, TestDowncast) {
Tensor int64_input(static_cast<uint64>(1));
TestBitcastOp(&int64_input, DT_UINT8, TensorShape({8}), error::OK);
}
TEST(BitcastOpTest, TestCastToSameSize) {
Tensor int32_input(DT_UINT32, {4, 6});
TestBitcastOp(&int32_input, DT_UINT8, TensorShape({4, 6, 4}), error::OK);
}
TEST(BitcastOpTest, TestImpossibleCast) {
Tensor int8_input(DT_UINT8, {1});
TestBitcastOp(&int8_input, DT_UINT32, TensorShape(), error::INVALID_ARGUMENT);
}
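// Shape-inference checks mirror the kernel's shape rules: casting to a
// narrower type appends the size ratio as a new trailing dimension, widening
// removes it, and same-width casts leave the shape unchanged.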
PartialTensorShape S(std::initializer_list<int64_t> dims) {
return PartialTensorShape(dims);
}
TEST(BitcastOpTest, TestShapeInference_LargerShape) {
const OpRegistrationData* reg;
TF_CHECK_OK(OpRegistry::Global()->LookUp("Bitcast", ®));
OpDef op_def = reg->op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("dummy", &op_def)
.Attr("type", DT_INT8)
.Attr("T", DT_INT64)
.Input(FakeInput(DT_INT64))
.Finalize(&def));
shape_inference::InferenceContext c(0, def, op_def, {S({3, 4})}, {}, {}, {});
std::vector<shape_inference::ShapeHandle> input_shapes;
TF_CHECK_OK(c.input("input", &input_shapes));
ASSERT_EQ("[3,4]", c.DebugString(input_shapes[0]));
TF_CHECK_OK(reg->shape_inference_fn(&c));
ASSERT_EQ("[3,4,8]", c.DebugString(c.output(0)));
}
TEST(BitcastOpTest, TestShapeInference_SmallerShape) {
const OpRegistrationData* reg;
TF_CHECK_OK(OpRegistry::Global()->LookUp("Bitcast", ®));
OpDef op_def = reg->op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("dummy", &op_def)
.Attr("type", DT_INT64)
.Attr("T", DT_INT8)
.Input(FakeInput(DT_INT8))
.Finalize(&def));
shape_inference::InferenceContext c(0, def, op_def, {S({3, 4, 8})}, {}, {},
{});
std::vector<shape_inference::ShapeHandle> input_shapes;
TF_CHECK_OK(c.input("input", &input_shapes));
ASSERT_EQ("[3,4,8]", c.DebugString(input_shapes[0]));
TF_CHECK_OK(reg->shape_inference_fn(&c));
ASSERT_EQ("[3,4]", c.DebugString(c.output(0)));
}
TEST(BitcastOpTest, TestShapeInference_SameShape) {
const OpRegistrationData* reg;
TF_CHECK_OK(OpRegistry::Global()->LookUp("Bitcast", ®));
OpDef op_def = reg->op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("dummy", &op_def)
.Attr("type", DT_INT32)
.Attr("T", DT_FLOAT)
.Input(FakeInput(DT_FLOAT))
.Finalize(&def));
shape_inference::InferenceContext c(0, def, op_def, {S({3, 4})}, {}, {}, {});
std::vector<shape_inference::ShapeHandle> input_shapes;
TF_CHECK_OK(c.input("input", &input_shapes));
ASSERT_EQ("[3,4]", c.DebugString(input_shapes[0]));
TF_CHECK_OK(reg->shape_inference_fn(&c));
ASSERT_EQ("[3,4]", c.DebugString(c.output(0)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/bitcast_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/bitcast_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f523235-915f-4e72-b771-0e58ab3b8a13 | cpp | tensorflow/tensorflow | summary_op | tensorflow/c/kernels/summary_op.cc | tensorflow/c/kernels/summary_op_test.cc | #include <sstream>
#include <string>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/c/kernels.h"
#include "tensorflow/c/kernels/tensor_shape_utils.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace {
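// RAII bundle for the kernel's two inputs plus a TF_Status; the destructor
// releases whatever was acquired, so early returns in Compute stay leak-free.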
struct Params {
TF_Tensor* tags;
TF_Tensor* values;
TF_Status* status;
explicit Params(TF_OpKernelContext* ctx)
: tags(nullptr), values(nullptr), status(nullptr) {
status = TF_NewStatus();
TF_GetInput(ctx, 0, &tags, status);
if (TF_GetCode(status) == TF_OK) {
TF_GetInput(ctx, 1, &values, status);
}
}
~Params() {
TF_DeleteStatus(status);
TF_DeleteTensor(tags);
TF_DeleteTensor(values);
}
};
void* ScalarSummaryOp_Create(TF_OpKernelConstruction* ctx) { return nullptr; }
void ScalarSummaryOp_Delete(void* kernel) {}
bool IsSameSize(TF_Tensor* tensor1, TF_Tensor* tensor2);
std::string SingleTag(TF_Tensor* tags);
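// Checks that tags and values share a shape, then serializes one
// Summary::Value per element into a scalar DT_STRING output tensor.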
template <typename T>
void ScalarSummaryOp_Compute(void* kernel, TF_OpKernelContext* ctx) {
Params params(ctx);
if (TF_GetCode(params.status) != TF_OK) {
TF_OpKernelContext_Failure(ctx, params.status);
return;
}
if (!IsSameSize(params.tags, params.values)) {
std::ostringstream err;
err << "tags and values are not the same shape: "
<< tensorflow::ShapeDebugString(params.tags)
<< " != " << tensorflow::ShapeDebugString(params.values)
<< SingleTag(params.tags);
TF_SetStatus(params.status, TF_INVALID_ARGUMENT, err.str().c_str());
TF_OpKernelContext_Failure(ctx, params.status);
return;
}
tensorflow::Summary s;
auto tags_array =
static_cast<tensorflow::tstring*>(TF_TensorData(params.tags));
auto values_array = static_cast<T*>(TF_TensorData(params.values));
for (int i = 0; i < TF_TensorElementCount(params.tags); ++i) {
tensorflow::Summary::Value* v = s.add_value();
const tensorflow::tstring& Ttags_i = tags_array[i];
v->set_tag(Ttags_i.data(), Ttags_i.size());
v->set_simple_value(static_cast<float>(values_array[i]));
}
TF_Tensor* summary_tensor =
TF_AllocateOutput(ctx, 0, TF_ExpectedOutputDataType(ctx, 0), nullptr, 0,
sizeof(tensorflow::tstring), params.status);
if (TF_GetCode(params.status) != TF_OK) {
TF_DeleteTensor(summary_tensor);
TF_OpKernelContext_Failure(ctx, params.status);
return;
}
tensorflow::tstring* output_tstring =
reinterpret_cast<tensorflow::tstring*>(TF_TensorData(summary_tensor));
CHECK(SerializeToTString(s, output_tstring));
TF_DeleteTensor(summary_tensor);
}
bool IsSameSize(TF_Tensor* tensor1, TF_Tensor* tensor2) {
if (TF_NumDims(tensor1) != TF_NumDims(tensor2)) {
return false;
}
for (int d = 0; d < TF_NumDims(tensor1); d++) {
if (TF_Dim(tensor1, d) != TF_Dim(tensor2, d)) {
return false;
}
}
return true;
}
std::string SingleTag(TF_Tensor* tags) {
if (TF_TensorElementCount(tags) == 1) {
const char* single_tag =
static_cast<tensorflow::tstring*>(TF_TensorData(tags))->c_str();
return tensorflow::strings::StrCat(" (tag '", single_tag, "')");
} else {
return "";
}
}
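// Registers the C-API ScalarSummary kernel for one value type T; invoked once
// per supported numeric type by the registration lambda below.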
template <typename T>
void RegisterScalarSummaryOpKernel() {
TF_Status* status = TF_NewStatus();
{
auto* builder = TF_NewKernelBuilder(
"ScalarSummary", tensorflow::DEVICE_CPU, &ScalarSummaryOp_Create,
&ScalarSummaryOp_Compute<T>, &ScalarSummaryOp_Delete);
TF_KernelBuilder_TypeConstraint(
builder, "T",
static_cast<TF_DataType>(tensorflow::DataTypeToEnum<T>::v()), status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << "Error while adding type constraint";
TF_RegisterKernelBuilder("ScalarSummary", builder, status);
CHECK_EQ(TF_OK, TF_GetCode(status))
<< "Error while registering Scalar Summmary kernel";
}
TF_DeleteStatus(status);
}
TF_ATTRIBUTE_UNUSED bool IsScalarSummaryOpKernelRegistered = []() {
if (SHOULD_REGISTER_OP_KERNEL("ScalarSummary")) {
RegisterScalarSummaryOpKernel<int64_t>();
RegisterScalarSummaryOpKernel<tensorflow::uint64>();
RegisterScalarSummaryOpKernel<tensorflow::int32>();
RegisterScalarSummaryOpKernel<tensorflow::uint32>();
RegisterScalarSummaryOpKernel<tensorflow::uint16>();
RegisterScalarSummaryOpKernel<tensorflow::int16>();
RegisterScalarSummaryOpKernel<tensorflow::int8>();
RegisterScalarSummaryOpKernel<tensorflow::uint8>();
RegisterScalarSummaryOpKernel<Eigen::half>();
RegisterScalarSummaryOpKernel<tensorflow::bfloat16>();
RegisterScalarSummaryOpKernel<float>();
RegisterScalarSummaryOpKernel<double>();
}
return true;
}();
} | #include "tensorflow/c/kernels.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {}
Allocator* GetAllocator(AllocatorAttributes /*attr*/) override {
return cpu_allocator();
}
};
void ExpectSummaryMatches(const Summary& actual, const string& expected_str) {
Summary expected;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(expected_str, &expected));
EXPECT_EQ(expected.DebugString(), actual.DebugString());
}
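// Test helper: runs the ScalarSummary kernel on (tags, values) and verifies
// either the serialized Summary proto (on success) or the error substring.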
void TestScalarSummaryOp(Tensor* tags, Tensor* values, string expected_output,
error::Code expected_code) {
Status status;
NodeDef def;
def.set_op("ScalarSummary");
def.set_device(DEVICE_CPU);
AttrValue valuesTypeAttr;
SetAttrValue(values->dtype(), &valuesTypeAttr);
(*def.mutable_attr())["T"] = valuesTypeAttr;
def.add_input(strings::StrCat("input1: ", DataTypeString(tags->dtype())));
def.add_input(strings::StrCat("input2: ", DataTypeString(values->dtype())));
std::unique_ptr<OpKernel> kernel =
CreateOpKernel(DeviceType(DEVICE_CPU), nullptr, nullptr, def, 1, &status);
ASSERT_TRUE(status.ok()) << status.ToString();
OpKernelContext::Params params;
DummyDevice dummy_device(nullptr);
params.device = &dummy_device;
params.op_kernel = kernel.get();
AllocatorAttributes alloc_attrs;
params.output_attr_array = &alloc_attrs;
absl::InlinedVector<TensorValue, 4UL> inputs;
inputs.emplace_back(tags);
inputs.emplace_back(values);
params.inputs = inputs;
OpKernelContext ctx(&params, 1);
kernel->Compute(&ctx);
ASSERT_EQ(expected_code, ctx.status().code());
if (expected_code == error::OK) {
Summary summary;
ASSERT_TRUE(ParseProtoUnlimited(
&summary, ctx.mutable_output(0)->scalar<tstring>()()));
ExpectSummaryMatches(summary, expected_output);
} else {
EXPECT_TRUE(absl::StrContains(ctx.status().ToString(), expected_output))
<< ctx.status();
}
}
TEST(ScalarSummaryOpTest, SimpleFloat) {
int vectorSize = 3;
Tensor tags(DT_STRING, {vectorSize});
Tensor values(DT_FLOAT, {vectorSize});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
tags.vec<tstring>()(2) = "tag3";
values.vec<float>()(0) = 1.0f;
values.vec<float>()(1) = -0.73f;
values.vec<float>()(2) = 10000.0f;
TestScalarSummaryOp(&tags, &values, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -0.73}
value { tag: 'tag3' simple_value: 10000.0})",
error::OK);
}
TEST(ScalarSummaryOpTest, SimpleDouble) {
int vectorSize = 3;
Tensor tags(DT_STRING, {vectorSize});
Tensor values(DT_DOUBLE, {vectorSize});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
tags.vec<tstring>()(2) = "tag3";
values.vec<double>()(0) = 1.0;
values.vec<double>()(1) = -0.73;
values.vec<double>()(2) = 10000.0;
TestScalarSummaryOp(&tags, &values, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -0.73}
value { tag: 'tag3' simple_value: 10000.0})",
error::OK);
}
TEST(ScalarSummaryOpTest, SimpleHalf) {
int vectorSize = 3;
Tensor tags(DT_STRING, {vectorSize});
Tensor values(DT_HALF, {vectorSize});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
tags.vec<tstring>()(2) = "tag3";
values.vec<Eigen::half>()(0) = Eigen::half(1.0);
values.vec<Eigen::half>()(1) = Eigen::half(-2.0);
values.vec<Eigen::half>()(2) = Eigen::half(10000.0);
TestScalarSummaryOp(&tags, &values, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -2.0}
value { tag: 'tag3' simple_value: 10000.0})",
error::OK);
}
TEST(ScalarSummaryOpTest, Error_WrongDimsTags) {
Tensor tags(DT_STRING, {2, 1});
Tensor values(DT_FLOAT, {2});
tags.matrix<tstring>()(0, 0) = "tag1";
tags.matrix<tstring>()(1, 0) = "tag2";
values.vec<float>()(0) = 1.0f;
values.vec<float>()(1) = -2.0f;
TestScalarSummaryOp(&tags, &values, "tags and values are not the same shape",
error::INVALID_ARGUMENT);
}
TEST(ScalarSummaryOpTest, Error_WrongValuesTags) {
Tensor tags(DT_STRING, {2});
Tensor values(DT_FLOAT, {2, 1});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
values.matrix<float>()(0, 0) = 1.0f;
values.matrix<float>()(1, 0) = -2.0f;
TestScalarSummaryOp(&tags, &values, "tags and values are not the same shape",
error::INVALID_ARGUMENT);
}
TEST(ScalarSummaryOpTest, Error_WrongWithSingleTag) {
Tensor tags(DT_STRING, {1});
Tensor values(DT_FLOAT, {2, 1});
tags.vec<tstring>()(0) = "tag1";
values.matrix<float>()(0, 0) = 1.0f;
values.matrix<float>()(1, 0) = -2.0f;
TestScalarSummaryOp(&tags, &values, "tags and values are not the same shape",
error::INVALID_ARGUMENT);
}
TEST(ScalarSummaryOpTest, IsRegistered) {
const OpRegistrationData* reg;
TF_CHECK_OK(OpRegistry::Global()->LookUp("ScalarSummary", ®));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/summary_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/summary_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bb73e85-1525-4a7e-9f33-cdadbf16edf8 | cpp | tensorflow/tensorflow | restore_ops | tensorflow/c/experimental/saved_model/core/ops/restore_ops.cc | tensorflow/c/experimental/saved_model/core/ops/restore_ops_test.cc | #include "tensorflow/c/experimental/saved_model/core/ops/restore_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace internal {
namespace {
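// Helpers that materialize the string inputs RestoreV2 expects: a scalar
// checkpoint prefix plus rank-1 string tensors for tensor_names and
// shape_and_slices.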
Status CreateStringScalarTensorHandle(ImmediateExecutionContext* ctx,
const std::string& s,
ImmediateTensorHandlePtr* out) {
AbstractTensorPtr tensor(ctx->CreateStringScalar(s));
if (tensor.get() == nullptr) {
return errors::Internal(
"Failed to create scalar string tensor for checkpoint restore");
}
out->reset(ctx->CreateLocalHandle(tensor.get()));
return Status();
}
Status CreateStringVectorTensorHandle(ImmediateExecutionContext* ctx,
const std::string& s,
ImmediateTensorHandlePtr* out) {
int64_t flat_shape[] = {1};
AbstractTensorPtr tensor(ctx->CreateTensor(DT_STRING, flat_shape));
if (tensor.get() == nullptr) {
return errors::Internal(
"Failed to create vector string tensor for checkpoint restore");
}
new (tensor->Data()) tstring(s);
out->reset(ctx->CreateLocalHandle(tensor.get()));
return Status();
}
}
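// Restores one tensor by executing RestoreV2 on /cpu:0 with three string
// inputs (prefix, tensor_names, shape_and_slices) and a single requested
// dtype, then downcasts the result to an ImmediateExecutionTensorHandle.
//
// A minimal caller sketch (the `ctx` and checkpoint path/key here are
// hypothetical, not part of this file):
//
//   ImmediateTensorHandlePtr restored;
//   TF_RETURN_IF_ERROR(internal::SingleRestore(
//       ctx, "/tmp/ckpt/variables/variables",
//       "x/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &restored));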
Status SingleRestore(ImmediateExecutionContext* ctx, const std::string& prefix,
const std::string& checkpoint_key, DataType dtype,
ImmediateTensorHandlePtr* out) {
ImmediateOpPtr restore_op(ctx->CreateOperation());
TF_RETURN_IF_ERROR(restore_op->Reset("RestoreV2", "/cpu:0"));
TF_RETURN_IF_ERROR(restore_op->SetAttrTypeList("dtypes", &dtype, 1));
ImmediateTensorHandlePtr prefix_handle;
TF_RETURN_IF_ERROR(
CreateStringScalarTensorHandle(ctx, prefix, &prefix_handle));
ImmediateTensorHandlePtr names_handle;
TF_RETURN_IF_ERROR(
CreateStringVectorTensorHandle(ctx, checkpoint_key, &names_handle));
ImmediateTensorHandlePtr shapes_and_slices_handle;
TF_RETURN_IF_ERROR(
CreateStringVectorTensorHandle(ctx, "", &shapes_and_slices_handle));
TF_RETURN_IF_ERROR(restore_op->AddInput(prefix_handle.get()));
TF_RETURN_IF_ERROR(restore_op->AddInput(names_handle.get()));
TF_RETURN_IF_ERROR(restore_op->AddInput(shapes_and_slices_handle.get()));
AbstractTensorHandle* restored_handle = nullptr;
int num_retvals = 1;
TF_RETURN_IF_ERROR(restore_op->Execute(
absl::MakeSpan(&restored_handle, num_retvals), &num_retvals));
AbstractTensorHandlePtr owned_restored_handle(restored_handle);
if (!tensorflow::isa<ImmediateExecutionTensorHandle>(
owned_restored_handle.get())) {
return errors::Internal("Unexpected tensor handle kind.");
}
out->reset(reinterpret_cast<ImmediateExecutionTensorHandle*>(
owned_restored_handle.release()));
return Status();
}
}
} | #include "tensorflow/c/experimental/saved_model/core/ops/restore_ops.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/saved_model/core/test_utils.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
std::string CheckpointPrefix(StringPiece saved_model_dir) {
return io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
saved_model_dir, kSavedModelVariablesDirectory,
kSavedModelVariablesFilename);
}
class RestoreOpsTest : public ::testing::Test {
public:
RestoreOpsTest()
: device_mgr_(testing::CreateTestingDeviceMgr()),
ctx_(testing::CreateTestingEagerContext(device_mgr_.get())) {}
EagerContext* context() { return ctx_.get(); }
private:
std::unique_ptr<StaticDeviceMgr> device_mgr_;
EagerContextPtr ctx_;
};
TEST_F(RestoreOpsTest, RestoreSuccessful) {
ImmediateTensorHandlePtr x_handle;
TF_EXPECT_OK(internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"x/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &x_handle));
AbstractTensorPtr x = testing::TensorHandleToTensor(x_handle.get());
EXPECT_EQ(x->Type(), DT_FLOAT);
EXPECT_EQ(x->NumElements(), 1);
EXPECT_EQ(x->NumDims(), 0);
EXPECT_FLOAT_EQ(*reinterpret_cast<float*>(x->Data()), 1.0f);
ImmediateTensorHandlePtr y_handle;
TF_EXPECT_OK(internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"y/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &y_handle));
AbstractTensorPtr y = testing::TensorHandleToTensor(y_handle.get());
EXPECT_EQ(y->Type(), DT_FLOAT);
EXPECT_EQ(y->NumElements(), 1);
EXPECT_EQ(y->NumDims(), 0);
EXPECT_FLOAT_EQ(*reinterpret_cast<float*>(y->Data()), 2.0f);
ImmediateTensorHandlePtr z_handle;
TF_EXPECT_OK(internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"child/z/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &z_handle));
AbstractTensorPtr z = testing::TensorHandleToTensor(z_handle.get());
EXPECT_EQ(z->Type(), DT_FLOAT);
EXPECT_EQ(z->NumElements(), 1);
EXPECT_EQ(z->NumDims(), 0);
EXPECT_FLOAT_EQ(*reinterpret_cast<float*>(z->Data()), 3.0f);
}
TEST_F(RestoreOpsTest, BadCheckpointPrefixShouldFail) {
ImmediateTensorHandlePtr x_handle;
Status status = internal::SingleRestore(
context(), CheckpointPrefix("unknown_bad_checkpoint_prefix"),
"x/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &x_handle);
EXPECT_FALSE(status.ok()) << status.message();
}
TEST_F(RestoreOpsTest, BadCheckpointKeyShouldFail) {
ImmediateTensorHandlePtr x_handle;
Status status = internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"bad_checkpoint_key", DT_FLOAT, &x_handle);
EXPECT_FALSE(status.ok()) << status.message();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/saved_model/core/ops/restore_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/saved_model/core/ops/restore_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa03ac41-f14a-4743-9eea-8dc4ec137c2e | cpp | tensorflow/tensorflow | saved_model_api | tensorflow/c/experimental/saved_model/internal/saved_model_api.cc | tensorflow/c/experimental/saved_model/internal/saved_model_api_test.cc | #include "tensorflow/c/experimental/saved_model/public/saved_model_api.h"
#include <memory>
#include <string>
#include <unordered_set>
#include "absl/types/optional.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/experimental/saved_model/core/saved_model_api.h"
#include "tensorflow/c/experimental/saved_model/core/tf_saved_model_api.h"
#include "tensorflow/c/experimental/saved_model/internal/concrete_function_list_type.h"
#include "tensorflow/c/experimental/saved_model/internal/concrete_function_type.h"
#include "tensorflow/c/experimental/saved_model/internal/saved_model_api_type.h"
#include "tensorflow/c/experimental/saved_model/internal/signature_def_function_type.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
extern "C" {
TF_SavedModel* TF_LoadSavedModel(const char* dirname, TFE_Context* ctx,
TF_Status* status) {
std::string saved_model_dir(dirname);
std::unique_ptr<tensorflow::SavedModelAPI> result;
if (tensorflow::unwrap(ctx)->UsesTFRT()) {
status->status = tensorflow::errors::Unimplemented(
"TFRT SavedModel implementation will be added in the future");
} else {
std::unique_ptr<tensorflow::TFSavedModelAPI> saved_model;
status->status = tensorflow::TFSavedModelAPI::Load(
dirname, absl::nullopt,
tensorflow::down_cast<tensorflow::EagerContext*>(
tensorflow::unwrap(ctx)),
&saved_model);
result = std::move(saved_model);
}
if (!status->status.ok()) {
return nullptr;
}
return tensorflow::wrap(result.release());
}
TF_SavedModel* TF_LoadSavedModelWithTags(const char* dirname, TFE_Context* ctx,
const char* const* tags, int tags_len,
TF_Status* status) {
std::string saved_model_dir(dirname);
std::unordered_set<std::string> tagset;
for (int i = 0; i < tags_len; ++i) {
tagset.insert(std::string(tags[i]));
}
std::unique_ptr<tensorflow::SavedModelAPI> result;
if (tensorflow::unwrap(ctx)->UsesTFRT()) {
status->status = tensorflow::errors::Unimplemented(
"TFRT SavedModel implementation will be added in the future");
} else {
std::unique_ptr<tensorflow::TFSavedModelAPI> saved_model;
status->status = tensorflow::TFSavedModelAPI::Load(
dirname, tagset,
tensorflow::down_cast<tensorflow::EagerContext*>(
tensorflow::unwrap(ctx)),
&saved_model);
result = std::move(saved_model);
}
if (!status->status.ok()) {
return nullptr;
}
return tensorflow::wrap(result.release());
}
void TF_DeleteSavedModel(TF_SavedModel* model) {
delete tensorflow::unwrap(model);
}
TF_ConcreteFunction* TF_GetSavedModelConcreteFunction(TF_SavedModel* model,
const char* function_path,
TF_Status* status) {
tensorflow::ConcreteFunction* result = nullptr;
tensorflow::Status get_function_status =
tensorflow::unwrap(model)->GetFunction(function_path, &result);
status->status.Update(get_function_status);
if (!get_function_status.ok()) {
return nullptr;
}
return tensorflow::wrap(result);
}
TF_CAPI_EXPORT extern TF_SignatureDefFunction*
TF_GetSavedModelSignatureDefFunction(TF_SavedModel* model,
const char* signature_def_key,
TF_Status* status) {
tensorflow::SignatureDefFunction* result = nullptr;
tensorflow::Status get_function_status =
tensorflow::unwrap(model)->GetSignatureDefFunction(signature_def_key,
&result);
status->status.Update(get_function_status);
if (!get_function_status.ok()) {
return nullptr;
}
return tensorflow::wrap(result);
}
} | #include "tensorflow/c/experimental/saved_model/public/saved_model_api.h"
#include <string>
#include <vector>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/experimental/saved_model/core/tf_saved_model_api.h"
#include "tensorflow/c/experimental/saved_model/internal/saved_model_api_type.h"
#include "tensorflow/c/experimental/saved_model/public/concrete_function.h"
#include "tensorflow/c/experimental/saved_model/public/signature_def_function.h"
#include "tensorflow/c/experimental/saved_model/public/signature_def_function_metadata.h"
#include "tensorflow/c/experimental/saved_model/public/signature_def_param.h"
#include "tensorflow/c/experimental/saved_model/public/signature_def_param_list.h"
#include "tensorflow/c/experimental/saved_model/public/tensor_spec.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_shape.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
namespace {
using tensorflow::tstring;
constexpr char kTestData[] = "cc/saved_model/testdata";
const char* kServeTag[] = {"serve"};
std::string SavedModelPath(tensorflow::StringPiece saved_model_dir) {
return tensorflow::io::JoinPath(tensorflow::testing::TensorFlowSrcRoot(),
kTestData, saved_model_dir);
}
class CSavedModelAPITest : public ::testing::TestWithParam<bool> {};
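// Every test below is parameterized on use_tfrt; the TFRT path is skipped
// until a TFRT SavedModel implementation exists (see TF_LoadSavedModel).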
TEST_P(CSavedModelAPITest, LoadsSavedModelWithTags) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = SavedModelPath("VarsAndArithmeticObjectGraph");
TF_SavedModel* saved_model =
TF_LoadSavedModelWithTags(model_dir.c_str(), ctx, kServeTag, 1, status);
EXPECT_EQ(TF_GetCode(status), TF_UNIMPLEMENTED);
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
TEST_P(CSavedModelAPITest, LoadsSavedModel) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = SavedModelPath("VarsAndArithmeticObjectGraph");
TF_SavedModel* saved_model =
TF_LoadSavedModel(model_dir.c_str(), ctx, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_ConcreteFunction* compute_fn =
TF_GetSavedModelConcreteFunction(saved_model, "compute", status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
std::vector<TFE_TensorHandle*> compute_fn_inputs;
TFE_TensorHandle* input_a = TestScalarTensorHandle(ctx, 2.0f);
TFE_TensorHandle* input_b = TestScalarTensorHandle(ctx, 1.0f);
compute_fn_inputs.push_back(input_a);
compute_fn_inputs.push_back(input_b);
TFE_Op* compute_fn_op = TF_ConcreteFunctionMakeCallOp(
compute_fn, compute_fn_inputs.data(), compute_fn_inputs.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_TensorHandle* compute_fn_outputs[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(compute_fn_op, &compute_fn_outputs[0], &num_retvals, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(compute_fn_outputs[0], status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
EXPECT_EQ(TF_NumDims(result), 0);
float output_value = *static_cast<float*>(TF_TensorData(result));
EXPECT_FLOAT_EQ(output_value, 8.0);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(compute_fn_outputs[0]);
TFE_DeleteTensorHandle(input_a);
TFE_DeleteTensorHandle(input_b);
TFE_DeleteOp(compute_fn_op);
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
TEST_P(CSavedModelAPITest, RunsSignatureDefFunction) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = SavedModelPath("VarsAndArithmeticObjectGraph");
TF_SavedModel* saved_model =
TF_LoadSavedModel(model_dir.c_str(), ctx, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_SignatureDefFunction* serving_default =
TF_GetSavedModelSignatureDefFunction(saved_model, "serving_default",
status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_SignatureDefFunctionMetadata* metadata =
TF_SignatureDefFunctionGetMetadata(serving_default);
const TF_SignatureDefParamList* args =
TF_SignatureDefFunctionMetadataArgs(metadata);
const TF_SignatureDefParamList* returns =
TF_SignatureDefFunctionMetadataReturns(metadata);
EXPECT_EQ(TF_SignatureDefParamListSize(args), 2);
const TF_SignatureDefParam* param_a = TF_SignatureDefParamListGet(args, 0);
const TF_TensorSpec* tensor_spec_a = TF_SignatureDefParamTensorSpec(param_a);
const TF_Shape* shape_a = TF_TensorSpecShape(tensor_spec_a);
EXPECT_EQ("a", std::string(TF_SignatureDefParamName(param_a)));
EXPECT_EQ(TF_FLOAT, TF_TensorSpecDataType(tensor_spec_a));
EXPECT_EQ(0, TF_ShapeDims(shape_a));
const TF_SignatureDefParam* param_b = TF_SignatureDefParamListGet(args, 1);
const TF_TensorSpec* tensor_spec_b = TF_SignatureDefParamTensorSpec(param_b);
const TF_Shape* shape_b = TF_TensorSpecShape(tensor_spec_b);
EXPECT_EQ("b", std::string(TF_SignatureDefParamName(param_b)));
EXPECT_EQ(TF_FLOAT, TF_TensorSpecDataType(tensor_spec_b));
EXPECT_EQ(0, TF_ShapeDims(shape_b));
EXPECT_EQ(TF_SignatureDefParamListSize(returns), 1);
const TF_SignatureDefParam* param_out =
TF_SignatureDefParamListGet(returns, 0);
const TF_TensorSpec* tensor_spec_out =
TF_SignatureDefParamTensorSpec(param_out);
const TF_Shape* shape_out = TF_TensorSpecShape(tensor_spec_out);
EXPECT_EQ("output_0", std::string(TF_SignatureDefParamName(param_out)));
EXPECT_EQ(TF_FLOAT, TF_TensorSpecDataType(tensor_spec_out));
EXPECT_EQ(0, TF_ShapeDims(shape_out));
std::vector<TFE_TensorHandle*> compute_fn_inputs;
TFE_TensorHandle* input_a = TestScalarTensorHandle(ctx, 2.0f);
TFE_TensorHandle* input_b = TestScalarTensorHandle(ctx, 1.0f);
compute_fn_inputs.push_back(input_a);
compute_fn_inputs.push_back(input_b);
TFE_Op* serving_default_op = TF_SignatureDefFunctionMakeCallOp(
serving_default, compute_fn_inputs.data(), compute_fn_inputs.size(),
status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
std::vector<TFE_TensorHandle*> compute_fn_outputs(
TF_SignatureDefParamListSize(returns));
int num_retvals = TF_SignatureDefParamListSize(returns);
TFE_Execute(serving_default_op, compute_fn_outputs.data(), &num_retvals,
status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(compute_fn_outputs[0], status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
EXPECT_EQ(TF_NumDims(result), 0);
float output_value = *static_cast<float*>(TF_TensorData(result));
EXPECT_FLOAT_EQ(output_value, 8.0);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(compute_fn_outputs[0]);
TFE_DeleteTensorHandle(input_a);
TFE_DeleteTensorHandle(input_b);
TFE_DeleteOp(serving_default_op);
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
TEST_P(CSavedModelAPITest, LoadsAssetSavedModel) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = SavedModelPath("AssetModule");
TF_SavedModel* saved_model =
TF_LoadSavedModel(model_dir.c_str(), ctx, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_ConcreteFunction* read_file_fn =
TF_GetSavedModelConcreteFunction(saved_model, "read_file", status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_Op* read_file_op =
TF_ConcreteFunctionMakeCallOp(read_file_fn, nullptr, 0, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_TensorHandle* read_file_fn_outputs[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(read_file_op, &read_file_fn_outputs[0], &num_retvals, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(read_file_fn_outputs[0], status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
EXPECT_EQ(TF_NumDims(result), 0);
tensorflow::tstring* output_value =
static_cast<tensorflow::tstring*>(TF_TensorData(result));
std::string file_contents(*output_value);
EXPECT_NE(file_contents.find("TEST ASSET FILE CONTENTS"), std::string::npos);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(read_file_fn_outputs[0]);
TFE_DeleteOp(read_file_op);
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
TEST_P(CSavedModelAPITest, LoadsStaticHashtableSavedModel) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = SavedModelPath("StaticHashTableModule");
TF_SavedModel* saved_model =
TF_LoadSavedModel(model_dir.c_str(), ctx, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_ConcreteFunction* lookup_fn =
TF_GetSavedModelConcreteFunction(saved_model, "lookup", status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
{
std::vector<TFE_TensorHandle*> lookup_fn_inputs;
TFE_TensorHandle* input_foo = TestScalarTensorHandle(ctx, tstring("foo"));
lookup_fn_inputs.push_back(input_foo);
TFE_Op* lookup_op = TF_ConcreteFunctionMakeCallOp(
lookup_fn, lookup_fn_inputs.data(), lookup_fn_inputs.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_TensorHandle* lookup_fn_outputs[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(lookup_op, &lookup_fn_outputs[0], &num_retvals, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(lookup_fn_outputs[0], status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
EXPECT_EQ(TF_NumDims(result), 0);
int64_t* output_value = static_cast<int64_t*>(TF_TensorData(result));
EXPECT_EQ(*output_value, 0);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(input_foo);
TFE_DeleteTensorHandle(lookup_fn_outputs[0]);
TFE_DeleteOp(lookup_op);
}
{
std::vector<TFE_TensorHandle*> lookup_fn_inputs;
TFE_TensorHandle* input_foo = TestScalarTensorHandle(ctx, tstring("baz"));
lookup_fn_inputs.push_back(input_foo);
TFE_Op* lookup_op = TF_ConcreteFunctionMakeCallOp(
lookup_fn, lookup_fn_inputs.data(), lookup_fn_inputs.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_TensorHandle* lookup_fn_outputs[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(lookup_op, &lookup_fn_outputs[0], &num_retvals, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(lookup_fn_outputs[0], status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
EXPECT_EQ(TF_NumDims(result), 0);
int64_t* output_value = static_cast<int64_t*>(TF_TensorData(result));
EXPECT_EQ(*output_value, 2);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(input_foo);
TFE_DeleteTensorHandle(lookup_fn_outputs[0]);
TFE_DeleteOp(lookup_op);
}
{
std::vector<TFE_TensorHandle*> lookup_fn_inputs;
TFE_TensorHandle* input_foo =
TestScalarTensorHandle(ctx, tstring("NON-EXISTENT-KEY"));
lookup_fn_inputs.push_back(input_foo);
TFE_Op* lookup_op = TF_ConcreteFunctionMakeCallOp(
lookup_fn, lookup_fn_inputs.data(), lookup_fn_inputs.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_TensorHandle* lookup_fn_outputs[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(lookup_op, &lookup_fn_outputs[0], &num_retvals, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(lookup_fn_outputs[0], status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
EXPECT_EQ(TF_NumDims(result), 0);
int64_t* output_value = static_cast<int64_t*>(TF_TensorData(result));
EXPECT_EQ(*output_value, -1);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(input_foo);
TFE_DeleteTensorHandle(lookup_fn_outputs[0]);
TFE_DeleteOp(lookup_op);
}
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
TEST_P(CSavedModelAPITest, LoadSavedModelWithUninitializedVariable) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = tensorflow::io::JoinPath(
tensorflow::testing::TensorFlowSrcRoot(),
"c/experimental/saved_model/internal/testdata/UninitializedVariable");
TF_SavedModel* saved_model =
TF_LoadSavedModel(model_dir.c_str(), ctx, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
tensorflow::TFSavedModelAPI* model_api =
tensorflow::down_cast<tensorflow::TFSavedModelAPI*>(
tensorflow::unwrap(saved_model));
tensorflow::Variable* uninitialized_variable;
ASSERT_EQ(absl::OkStatus(), model_api->GetVariable("uninitialized_variable",
&uninitialized_variable));
ASSERT_EQ(tensorflow::DT_FLOAT, uninitialized_variable->dtype());
ASSERT_EQ(absl::OkStatus(),
model_api->GetVariable("sub_module.uninitialized_variable",
&uninitialized_variable));
ASSERT_EQ(tensorflow::DT_INT64, uninitialized_variable->dtype());
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
TEST_P(CSavedModelAPITest, LoadSavedModelWithWhileLoop) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
bool use_tfrt = GetParam();
if (use_tfrt) {
TFE_DeleteContextOptions(opts);
TF_DeleteStatus(status);
GTEST_SKIP();
}
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::string model_dir = tensorflow::io::JoinPath(
tensorflow::testing::TensorFlowSrcRoot(),
"c/experimental/saved_model/internal/testdata/SimpleWhileLoop");
TF_SavedModel* saved_model =
TF_LoadSavedModel(model_dir.c_str(), ctx, status);
ASSERT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_ConcreteFunction* while_fn =
TF_GetSavedModelConcreteFunction(saved_model, "compute", status);
ASSERT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
std::vector<TFE_TensorHandle*> while_fn_inputs;
while_fn_inputs.push_back(TestScalarTensorHandle(ctx, 10.0f));
TFE_Op* while_fn_op = TF_ConcreteFunctionMakeCallOp(
while_fn, while_fn_inputs.data(), while_fn_inputs.size(), status);
ASSERT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_TensorHandle* while_fn_outputs[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(while_fn_op, &while_fn_outputs[0], &num_retvals, status);
ASSERT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TF_Tensor* result = TFE_TensorHandleResolve(while_fn_outputs[0], status);
ASSERT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
ASSERT_EQ(TF_NumDims(result), 0);
float output_value = *static_cast<float*>(TF_TensorData(result));
ASSERT_FLOAT_EQ(output_value, 55);
TF_DeleteTensor(result);
TFE_DeleteTensorHandle(while_fn_outputs[0]);
TFE_DeleteOp(while_fn_op);
TFE_DeleteTensorHandle(while_fn_inputs[0]);
TF_DeleteSavedModel(saved_model);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
INSTANTIATE_TEST_SUITE_P(RuntimeAgnosticSavedModelTests, CSavedModelAPITest,
::testing::Bool());
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/saved_model/internal/saved_model_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/saved_model/internal/saved_model_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b74a0183-94c8-4b66-a1f4-6699c8cbb84c | cpp | tensorflow/tensorflow | tensor_pjrt_buffer_util | tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc | tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc | #include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
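// Unwraps the PJRT_Buffer backing `tensor`. The tensor must be an
// AsyncValueTensor whose buffer is a PjRtCApiBuffer; anything else yields an
// internal error.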
absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor) {
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(tensor);
if (av_tensor == nullptr || av_tensor->GetBuffer() == nullptr) {
return absl::InternalError("Input tensor does not have PjRtBuffer.");
}
auto* c_api_buffer =
dynamic_cast<xla::PjRtCApiBuffer*>(av_tensor->GetBuffer().get());
if (c_api_buffer == nullptr) {
return absl::InternalError(
"The PjRtBuffer in the tensor is not type PjRtCApiBuffer.");
}
return c_api_buffer->c_buffer();
}
absl::Status SetPjRtCBufferToTensor(PJRT_Buffer* c_buffer,
xla::PjRtCApiClient* c_api_client,
Tensor* tensor) {
auto buffer = std::make_unique<xla::PjRtCApiBuffer>(c_api_client, c_buffer);
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(tensor);
if (av_tensor == nullptr) {
TF_ASSIGN_OR_RETURN(
*tensor, MakeTensorFromPjRtBuffer(tensor->dtype(), tensor->shape(),
std::move(buffer)));
} else {
av_tensor->SetBuffer(std::move(buffer));
}
return absl::OkStatus();
}
absl::StatusOr<xla::PjRtCApiClient*> GetPjRtCApiClient(
const DeviceType& device_type) {
TF_ASSIGN_OR_RETURN(absl::StatusOr<xla::PjRtClient*> pjrt_client,
tensorflow::GetPjRtClient(device_type));
auto* pjrt_c_api_client = dynamic_cast<xla::PjRtCApiClient*>(*pjrt_client);
if (pjrt_c_api_client == nullptr) {
return absl::InternalError(absl::StrCat("PjRtClient for ",
device_type.type_string(),
" is not type PjRtCApiClient"));
}
return pjrt_c_api_client;
}
absl::Status ResetPjRtClient(const DeviceType& device_type) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->Lookup(rmgr->default_container(),
kPjRtStateResourceName, &pjrt_state));
TF_RETURN_IF_ERROR(pjrt_state->MovePjRtClientToUnused(device_type));
return absl::OkStatus();
}
} | #include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_cpu.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::tsl::testing::StatusIs;
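// Builds a one-element S32 PJRT_Buffer through the C-API CPU client,
// registering the CPU PJRT API first if needed; the caller owns the returned
// PJRT_Buffer*.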
PJRT_Buffer* CreateCBuffer() {
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
CHECK_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
auto pjrt_client = xla::GetCApiClient(DEVICE_CPU);
CHECK_OK(pjrt_client.status());
auto c_api_client = down_cast<xla::PjRtCApiClient*>(pjrt_client->get());
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
auto buffer = c_api_client->pjrt_c_client()->client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
/*byte_strides=*/std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
c_api_client->pjrt_c_client()->client->addressable_devices()[0]);
CHECK_OK(buffer.status());
return new PJRT_Buffer{std::move(*buffer), c_api_client->pjrt_c_client()};
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorNoBuffer) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
EXPECT_THAT(
GetPjRtCBufferFromTensor(&tensor),
StatusIs(error::INTERNAL, HasSubstr(absl::StrCat(
"Input tensor does not have PjRtBuffer"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorIncorrectType) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1));
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
/*byte_strides=*/std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
av_tensor->SetBuffer(std::move(buffer));
EXPECT_THAT(
GetPjRtCBufferFromTensor(&tensor),
StatusIs(
error::INTERNAL,
HasSubstr(absl::StrCat(
"The PjRtBuffer in the tensor is not type PjRtCApiBuffer"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorSuccess) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
TF_ASSERT_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
/*byte_strides=*/std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
av_tensor->SetBuffer(std::move(buffer));
TF_ASSERT_OK_AND_ASSIGN(auto c_buffer, GetPjRtCBufferFromTensor(&tensor));
EXPECT_THAT(c_buffer, NotNull());
}
TEST(TensorPjRtBufferUtilTest, SetPjRtCBufferToTensorNotAsyncValueTensor) {
tensorflow::Tensor tensor(DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
PJRT_Buffer* c_buffer = CreateCBuffer();
TF_EXPECT_OK(SetPjRtCBufferToTensor(
c_buffer, down_cast<xla::PjRtCApiClient*>(pjrt_client.get()), &tensor));
}
TEST(TensorPjRtBufferUtilTest, SetPjRtCBufferToTensorSuccess) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
PJRT_Buffer* c_buffer = CreateCBuffer();
TF_EXPECT_OK(SetPjRtCBufferToTensor(
c_buffer, down_cast<xla::PjRtCApiClient*>(pjrt_client.get()), &tensor));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientNotFound) {
EXPECT_THAT(
GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)),
StatusIs(error::NOT_FOUND,
HasSubstr(absl::StrCat("PjRt client not found for device type ",
DEVICE_CPU))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientIncorrectType) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1));
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU,
std::move(pjrt_client)));
EXPECT_THAT(GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)),
StatusIs(error::INTERNAL,
HasSubstr(absl::StrCat("PjRtClient for ", DEVICE_CPU,
" is not type PjRtCApiClient"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientSuccess) {
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
TF_ASSERT_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)));
EXPECT_THAT(pjrt_client_get, NotNull());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ea855415-dd79-4602-bd93-5e2fe3e53737 | cpp | tensorflow/tensorflow | modular_filesystem | tensorflow/c/experimental/filesystem/modular_filesystem.cc | tensorflow/c/experimental/filesystem/modular_filesystem_test.cc | #include "tensorflow/c/experimental/filesystem/modular_filesystem.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "tensorflow/c/experimental/filesystem/filesystem_interface.h"
#include "tensorflow/c/experimental/filesystem/modular_filesystem_registration.h"
#include "tensorflow/c/tf_file_statistics.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_statistics.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
namespace tensorflow {
using UniquePtrTo_TF_Status =
::std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
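// Every wrapper below follows the same shape: return Unimplemented (or fall
// back to the FileSystem base implementation) when the plugin left the
// function-table entry null; otherwise translate the path, call through the
// table, and convert the plugin's TF_Status back into a tensorflow::Status.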
Status ModularFileSystem::NewRandomAccessFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
if (ops_->new_random_access_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewRandomAccessFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_RandomAccessFile>();
std::string translated_name = TranslateName(fname);
ops_->new_random_access_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularRandomAccessFile>(
translated_name, std::move(file), random_access_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewWritableFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
if (ops_->new_writable_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewWritableFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_WritableFile>();
std::string translated_name = TranslateName(fname);
ops_->new_writable_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularWritableFile>(
translated_name, std::move(file), writable_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewAppendableFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
if (ops_->new_appendable_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewAppendableFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_WritableFile>();
std::string translated_name = TranslateName(fname);
ops_->new_appendable_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularWritableFile>(
translated_name, std::move(file), writable_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewReadOnlyMemoryRegionFromFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
if (ops_->new_read_only_memory_region_from_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname,
" does not support NewReadOnlyMemoryRegionFromFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto region = std::make_unique<TF_ReadOnlyMemoryRegion>();
std::string translated_name = TranslateName(fname);
ops_->new_read_only_memory_region_from_file(
filesystem_.get(), translated_name.c_str(), region.get(),
plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularReadOnlyMemoryRegion>(
std::move(region), read_only_memory_region_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::FileExists(const std::string& fname,
TransactionToken* token) {
if (ops_->path_exists == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support FileExists()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
const std::string translated_name = TranslateName(fname);
ops_->path_exists(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
bool ModularFileSystem::FilesExist(const std::vector<std::string>& files,
TransactionToken* token,
std::vector<Status>* status) {
if (ops_->paths_exist == nullptr)
return FileSystem::FilesExist(files, token, status);
std::vector<char*> translated_names;
translated_names.reserve(files.size());
for (int i = 0; i < files.size(); i++)
translated_names.push_back(strdup(TranslateName(files[i]).c_str()));
bool result;
if (status == nullptr) {
result = ops_->paths_exist(filesystem_.get(), translated_names.data(),
files.size(), nullptr);
} else {
std::vector<TF_Status*> plugin_status;
plugin_status.reserve(files.size());
for (int i = 0; i < files.size(); i++)
plugin_status.push_back(TF_NewStatus());
result = ops_->paths_exist(filesystem_.get(), translated_names.data(),
files.size(), plugin_status.data());
for (int i = 0; i < files.size(); i++) {
status->push_back(StatusFromTF_Status(plugin_status[i]));
TF_DeleteStatus(plugin_status[i]);
}
}
for (int i = 0; i < files.size(); i++) free(translated_names[i]);
return result;
}
Status ModularFileSystem::GetChildren(const std::string& dir,
TransactionToken* token,
std::vector<std::string>* result) {
if (ops_->get_children == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dir, " does not support GetChildren()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dir);
char** children = nullptr;
const int num_children =
ops_->get_children(filesystem_.get(), translated_name.c_str(), &children,
plugin_status.get());
if (num_children >= 0) {
for (int i = 0; i < num_children; i++) {
result->push_back(std::string(children[i]));
plugin_memory_free_(children[i]);
}
plugin_memory_free_(children);
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::GetMatchingPaths(const std::string& pattern,
TransactionToken* token,
std::vector<std::string>* result) {
if (ops_->get_matching_paths == nullptr)
return internal::GetMatchingPaths(this, Env::Default(), pattern, result);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
char** matches = nullptr;
const int num_matches = ops_->get_matching_paths(
filesystem_.get(), pattern.c_str(), &matches, plugin_status.get());
if (num_matches >= 0) {
for (int i = 0; i < num_matches; i++) {
result->push_back(std::string(matches[i]));
plugin_memory_free_(matches[i]);
}
plugin_memory_free_(matches);
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteFile(const std::string& fname,
TransactionToken* token) {
if (ops_->delete_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support DeleteFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
ops_->delete_file(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteRecursively(const std::string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
if (undeleted_files == nullptr || undeleted_dirs == nullptr)
return errors::FailedPrecondition(
"DeleteRecursively must not be called with `undeleted_files` or "
"`undeleted_dirs` set to NULL");
if (ops_->delete_recursively == nullptr)
return FileSystem::DeleteRecursively(dirname, token, undeleted_files,
undeleted_dirs);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
uint64_t plugin_undeleted_files, plugin_undeleted_dirs;
ops_->delete_recursively(filesystem_.get(), translated_name.c_str(),
&plugin_undeleted_files, &plugin_undeleted_dirs,
plugin_status.get());
*undeleted_files = plugin_undeleted_files;
*undeleted_dirs = plugin_undeleted_dirs;
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->delete_dir == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dirname, " does not support DeleteDir()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->delete_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::RecursivelyCreateDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->recursively_create_dir == nullptr)
return FileSystem::RecursivelyCreateDir(dirname, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->recursively_create_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::CreateDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->create_dir == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dirname, " does not support CreateDir()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->create_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
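// Stat validates the output pointer, goes through the C-layout
// TF_FileStatistics struct, and copies the fields into the caller's
// FileStatistics only when the plugin call succeeded.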
Status ModularFileSystem::Stat(const std::string& fname,
TransactionToken* token, FileStatistics* stat) {
if (ops_->stat == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support Stat()"));
if (stat == nullptr)
return errors::InvalidArgument("FileStatistics pointer must not be NULL");
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
TF_FileStatistics stats;
ops_->stat(filesystem_.get(), translated_name.c_str(), &stats,
plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK) {
stat->length = stats.length;
stat->mtime_nsec = stats.mtime_nsec;
stat->is_directory = stats.is_directory;
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::IsDirectory(const std::string& name,
TransactionToken* token) {
if (ops_->is_directory == nullptr)
return FileSystem::IsDirectory(name, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(name);
ops_->is_directory(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
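// File size. Without a dedicated plugin hook this is emulated via Stat(),
// rejecting directories so the semantics match a native implementation.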
Status ModularFileSystem::GetFileSize(const std::string& fname,
TransactionToken* token,
uint64* file_size) {
if (ops_->get_file_size == nullptr) {
FileStatistics stat;
Status status = Stat(fname, &stat);
if (!status.ok()) return status;
if (stat.is_directory)
return errors::FailedPrecondition("Called GetFileSize on a directory");
*file_size = stat.length;
return status;
}
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
*file_size = ops_->get_file_size(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
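// Rename. Plugins that cannot rename in place get copy-then-delete
// semantics, which is not atomic; a native `rename_file` should be
// preferred where the filesystem supports it.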
Status ModularFileSystem::RenameFile(const std::string& src,
const std::string& target,
TransactionToken* token) {
if (ops_->rename_file == nullptr) {
Status status = CopyFile(src, target);
if (status.ok()) status = DeleteFile(src);
return status;
}
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_src = TranslateName(src);
std::string translated_target = TranslateName(target);
ops_->rename_file(filesystem_.get(), translated_src.c_str(),
translated_target.c_str(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::CopyFile(const std::string& src,
const std::string& target,
TransactionToken* token) {
if (ops_->copy_file == nullptr)
return FileSystem::CopyFile(src, target, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_src = TranslateName(src);
std::string translated_target = TranslateName(target);
ops_->copy_file(filesystem_.get(), translated_src.c_str(),
translated_target.c_str(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
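// Name translation is delegated to the plugin when available. A plugin
// returning nullptr here is a contract violation, hence the CHECK.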
std::string ModularFileSystem::TranslateName(const std::string& name) const {
if (ops_->translate_name == nullptr) return FileSystem::TranslateName(name);
char* p = ops_->translate_name(filesystem_.get(), name.c_str());
CHECK(p != nullptr) << "TranslateName(" << name << ") returned nullptr";
std::string ret(p);
plugin_memory_free_(p);
return ret;
}
void ModularFileSystem::FlushCaches(TransactionToken* token) {
if (ops_->flush_caches != nullptr) ops_->flush_caches(filesystem_.get());
}
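// The three SetOption overloads below marshal typed configuration values
// into the C API's TF_Filesystem_Option / TF_Filesystem_Option_Value
// structs: one overload each for buffer (string), int64 and real (double)
// options. Each packs the values into a TF_Filesystem_Option_Value_Union
// array that stays alive for the duration of the plugin call.
// Usage sketch (hypothetical option name; whether a given plugin accepts it
// depends on that plugin):
//   fs->SetOption("max_cache_size", std::vector<int64_t>{1 << 20});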
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<string>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Buffer;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].buffer_val.buf = const_cast<char*>(values[i].c_str());
option_values[i].buffer_val.buf_length = values[i].size();
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<int64_t>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Int;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].int_val = values[i];
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<double>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Real;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].real_val = values[i];
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
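// Reads up to `n` bytes at `offset` into `scratch`. The plugin returns the
// number of bytes actually read; `result` aliases `scratch` and is only
// populated when at least one byte was read.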
Status ModularRandomAccessFile::Read(uint64 offset, size_t n,
StringPiece* result, char* scratch) const {
if (ops_->read == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Read() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
int64_t read =
ops_->read(file_.get(), offset, n, scratch, plugin_status.get());
if (read > 0) *result = StringPiece(scratch, read);
return StatusFromTF_Status(plugin_status.get());
}
Status ModularRandomAccessFile::Name(StringPiece* result) const {
*result = filename_;
return OkStatus();
}
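// ModularWritableFile forwards each operation to the plugin's function
// table. Two deliberate fallbacks below: Flush() is a no-op when
// unimplemented, and Sync() degrades to Flush().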
Status ModularWritableFile::Append(StringPiece data) {
if (ops_->append == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Append() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->append(file_.get(), data.data(), data.size(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Close() {
if (ops_->close == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Close() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->close(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Flush() {
if (ops_->flush == nullptr) return OkStatus();
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->flush(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Sync() {
if (ops_->sync == nullptr) return Flush();
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->sync(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Name(StringPiece* result) const {
*result = filename_;
return OkStatus();
}
Status ModularWritableFile::Tell(int64_t* position) {
if (ops_->tell == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Tell() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
*position = ops_->tell(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
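// Entry point for dynamically loaded filesystem plugins: load the DSO,
// resolve its exported TF_InitPlugin symbol, let the plugin fill in a
// TF_FilesystemPluginInfo, then hand it to the registration machinery.
// Usage sketch (hypothetical DSO path):
//   TF_CHECK_OK(RegisterFilesystemPlugin("/path/to/libmy_fs_plugin.so"));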
Status RegisterFilesystemPlugin(const std::string& dso_path) {
Env* env = Env::Default();
void* dso_handle;
TF_RETURN_IF_ERROR(env->LoadDynamicLibrary(dso_path.c_str(), &dso_handle));
void* dso_symbol;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env->GetSymbolFromLibrary(dso_handle, "TF_InitPlugin", &dso_symbol),
"Failed to load TF_InitPlugin symbol for DSO: ", dso_path);
TF_FilesystemPluginInfo info;
memset(&info, 0, sizeof(info));
auto TF_InitPlugin =
reinterpret_cast<int (*)(TF_FilesystemPluginInfo*)>(dso_symbol);
TF_InitPlugin(&info);
return filesystem_registration::RegisterFilesystemPluginImpl(&info);
}
}  // namespace tensorflow

#include "tensorflow/c/experimental/filesystem/modular_filesystem.h"
#include <memory>
#include <random>
#include <string>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/command_line_flags.h"
#if defined(PLATFORM_WINDOWS)
#include <direct.h>
#define mkdir(name, mode) _mkdir(name)
#undef CopyFile
#undef DeleteFile
#undef TranslateName
#endif
namespace tensorflow {
namespace {
using ::tensorflow::error::Code;
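// Parameterized fixture: the parameter is the URI scheme under test (empty
// for the local filesystem). Every test gets a unique root directory built
// from a process-wide RNG value and the sanitized test name, so concurrent
// runs do not interfere.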
class ModularFileSystemTest : public ::testing::TestWithParam<std::string> {
public:
ModularFileSystemTest() {
const std::string test_name = tensorflow::str_util::StringReplace(
::testing::UnitTest::GetInstance()->current_test_info()->name(), "/",
"_", true);
if (!cloud_path_.empty()) {
root_dir_ = tensorflow::strings::StrCat(
"/", tmp_dir_,
tensorflow::strings::StrCat("tf_fs_", rng_val_, "_", test_name), "/");
} else {
root_dir_ = tensorflow::io::JoinPath(
tmp_dir_,
tensorflow::strings::StrCat("tf_fs_", rng_val_, "_", test_name));
}
if (!GetParam().empty()) {
      root_dir_ = tensorflow::strings::StrCat(GetParam(), "://", root_dir_);
}
env_ = Env::Default();
}
void SetUp() override {
FileSystem* fs = nullptr;
Status s = env_->GetFileSystemForFile(root_dir_, &fs);
if (fs == nullptr || !s.ok())
GTEST_SKIP() << "No filesystem registered: " << s;
s = fs->CreateDir(root_dir_);
if (!s.ok()) {
GTEST_SKIP() << "Cannot create working directory: " << s;
}
}
std::string GetURIForPath(StringPiece path) {
const std::string translated_name =
tensorflow::io::JoinPath(root_dir_, path);
return translated_name;
}
StringPiece GetRelativePath(StringPiece absolute_path) {
return tensorflow::str_util::StripPrefix(absolute_path, root_dir_);
}
static void InitializeTestRNG() {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distribution;
rng_val_ = distribution(gen);
}
static void SetCloudPath(const std::string& cloud_path) {
cloud_path_ = cloud_path;
    if (!cloud_path_.empty() && cloud_path_.back() == '/')
      cloud_path_.pop_back();
}
static void SetTmpDir(const std::string& tmp_dir) {
tmp_dir_ = tmp_dir.empty() ? ::testing::TempDir() : tmp_dir;
}
protected:
Env* env_;
private:
std::string root_dir_;
static int rng_val_;
static std::string cloud_path_;
static std::string tmp_dir_;
};
int ModularFileSystemTest::rng_val_;
std::string ModularFileSystemTest::cloud_path_;
std::string ModularFileSystemTest::tmp_dir_;
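// Test predicate: a plugin is allowed to leave an operation unimplemented,
// so UNIMPLEMENTED is accepted alongside the expected code.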
bool UnimplementedOrReturnsCode(Status actual_status, Code expected_code) {
Code actual_code = actual_status.code();
return (actual_code == Code::UNIMPLEMENTED) || (actual_code == expected_code);
}
TEST_P(ModularFileSystemTest, TestTranslateName) {
const std::string generic_path = GetURIForPath("some_path");
FileSystem* fs = nullptr;
Status s = env_->GetFileSystemForFile(generic_path, &fs);
if (fs == nullptr || !s.ok())
GTEST_SKIP() << "No filesystem registered: " << s;
if (GetParam().empty()) {
EXPECT_EQ(fs->TranslateName(""), "");
EXPECT_EQ(fs->TranslateName("/"), "/");
EXPECT_EQ(fs->TranslateName("
EXPECT_EQ(fs->TranslateName("a_file"), "a_file");
EXPECT_EQ(fs->TranslateName("a_dir/.."), ".");
} else {
    EXPECT_EQ(fs->TranslateName(tensorflow::strings::StrCat(GetParam(), "://")),
              "/");
    EXPECT_EQ(
        fs->TranslateName(tensorflow::strings::StrCat(GetParam(), ":///")),
        "/");
    EXPECT_EQ(
        fs->TranslateName(
            tensorflow::strings::StrCat(GetParam(), "://a_dir/..")),
        "/");
}
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("a_file"))),
"/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("a_dir/a_file"))),
"/a_dir/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("./a_file"))),
"/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(
GetURIForPath("a/convoluted/../path/./to/.
"/a/path/to/a/file");
}
TEST_P(ModularFileSystemTest, TestCreateFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCreateFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(new_path, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestAppendFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestAppendFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestAppendFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> new_file;
status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateThenAppendFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(filepath, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestAppendFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(new_path, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestReadFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<RandomAccessFile> new_file;
Status status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestReadFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<RandomAccessFile> new_file;
Status status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestReadFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<RandomAccessFile> new_file;
status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateThenReadFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<RandomAccessFile> same_file;
status = env_->NewRandomAccessFile(filepath, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestReadFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<RandomAccessFile> same_file;
status = env_->NewRandomAccessFile(new_path, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegion) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
  Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
  Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> new_file;
status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> region;
  status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::INVALID_ARGUMENT);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = new_file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = new_file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = new_file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> region;
  status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "NewReadOnlyMemoryRegionFromFile() not supported: "
<< status;
EXPECT_EQ(region->length(), test_data.size());
EXPECT_STREQ(reinterpret_cast<const char*>(region->data()),
test_data.c_str());
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
  status = env_->NewReadOnlyMemoryRegionFromFile(new_path, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateDir) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCreateDirNoParent) {
const std::string dirpath = GetURIForPath("dir_not_found/a_dir");
Status status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateDirWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->CreateDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
}
TEST_P(ModularFileSystemTest, TestCreateDirTwice) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
}
TEST_P(ModularFileSystemTest, TestCreateDirPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->CreateDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDir) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirInATree) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("a/path/to/a/another/dir");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->RecursivelyCreateDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirTwice) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
status = env_->RecursivelyCreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->RecursivelyCreateDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirFromNestedDir) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("some/path/that/is/extended");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirFromNestedFile) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("some/path/to_a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_dirpath = GetURIForPath("some/path/to_a_file/error");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteFileFromDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteFileDoesNotExist) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestDeleteFileWhichIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->DeleteFile(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_new_file");
status = env_->DeleteFile(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryFromDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string target_path = GetURIForPath("a_dir/another_dir");
EXPECT_EQ(env_->CreateDir(target_path).code(), Code::OK);
status = env_->DeleteDir(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryDoesNotExist) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryNotEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->DeleteDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyNotEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string some_path = GetURIForPath("a_dir/another_dir");
status = env_->CreateDir(some_path);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string another_path = GetURIForPath("a_dir/yet_another_dir");
status = env_->CreateDir(another_path);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyDoesNotExist) {
const std::string dirpath = GetURIForPath("a_dir");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
Status status =
env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 1);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyAFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(filepath, &undeleted_files, &undeleted_dirs);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
int64_t undeleted_files, undeleted_dirs;
status = env_->DeleteRecursively(new_path, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyANestedDir) {
const std::string parent_path = GetURIForPath("parent/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("parent/path/that/is/extended");
status = env_->RecursivelyCreateDir(new_dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string path = GetURIForPath("parent/path/that");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(path, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
status = env_->FileExists(parent_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyANestedFile) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("some/path/to_a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(filepath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
status = env_->FileExists(parent_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFileOverwrite) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
std::unique_ptr<WritableFile> new_file;
  status = env_->NewWritableFile(new_filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFileSourceNotFound) {
const std::string filepath = GetURIForPath("a_file");
const std::string new_filepath = GetURIForPath("a_new_file");
Status status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestRenameFileDestinationParentNotFound) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_dir/a_file");
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestRenameFileSourceIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(dirpath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileTargetIsDirectory) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string dirpath = GetURIForPath("a_dir");
status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->RenameFile(filepath, dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileSourcePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string old_filepath = GetURIForPath("a_file/x");
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileTargetPathIsInvalid) {
const std::string old_filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> old_file;
Status status = env_->NewWritableFile(old_filepath, &old_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_file/a_new_file");
status = env_->RenameFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileCompareContents) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
uint64 size;
status = env_->GetFileSize(new_filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
}
TEST_P(ModularFileSystemTest, TestCopyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "CopyFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCopyFileOverwrite) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
std::unique_ptr<WritableFile> new_file;
  status = env_->NewWritableFile(new_filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "CopyFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCopyFileSourceNotFound) {
const std::string filepath = GetURIForPath("a_file");
const std::string new_filepath = GetURIForPath("a_new_file");
Status status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCopyFileSourceIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(dirpath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileTargetIsDirectory) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string dirpath = GetURIForPath("a_dir");
status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->CopyFile(filepath, dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileSourcePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string old_filepath = GetURIForPath("a_file/x");
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileTargetPathIsInvalid) {
const std::string old_filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> old_file;
Status status = env_->NewWritableFile(old_filepath, &old_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_file/a_new_file");
status = env_->CopyFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileCompareContents) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
uint64 size;
status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
status = env_->GetFileSize(new_filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
}
TEST_P(ModularFileSystemTest, TestFileExists) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestFileExistsButIsDirectory) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestFileExistsNotFound) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestFileExistsPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
status = env_->FileExists(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestFilesExist) {
const std::vector<std::string> filenames = {GetURIForPath("a"),
GetURIForPath("b")};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
EXPECT_TRUE(env_->FilesExist(filenames, nullptr));
std::vector<Status> statuses;
EXPECT_TRUE(env_->FilesExist(filenames, &statuses));
EXPECT_EQ(statuses.size(), filenames.size());
for (const auto& status : statuses)
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestFilesExistAllFailureModes) {
const std::vector<std::string> filenames = {
GetURIForPath("a_dir"),
GetURIForPath("a_file"),
GetURIForPath("a_file/a_new_file"),
GetURIForPath("file_not_found"),
};
Status status = env_->CreateDir(filenames[0]);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filenames[1], &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::vector<Status> statuses;
EXPECT_FALSE(env_->FilesExist(filenames, &statuses));
EXPECT_EQ(statuses.size(), filenames.size());
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[0], Code::OK);
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[1], Code::OK);
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[2],
Code::FAILED_PRECONDITION);
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[3], Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestFilesExistsNoFiles) {
const std::vector<std::string> filenames = {};
EXPECT_TRUE(env_->FilesExist(filenames, nullptr));
std::vector<Status> statuses;
EXPECT_TRUE(env_->FilesExist(filenames, &statuses));
EXPECT_TRUE(statuses.empty());
}
TEST_P(ModularFileSystemTest, TestStatEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
FileStatistics stat;
status = env_->Stat(filepath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
EXPECT_FALSE(stat.is_directory);
EXPECT_EQ(stat.length, 0);
}
TEST_P(ModularFileSystemTest, TestStatNonEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
FileStatistics stat;
status = env_->Stat(filepath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
EXPECT_FALSE(stat.is_directory);
EXPECT_EQ(stat.length, test_data.size());
}
TEST_P(ModularFileSystemTest, TestStatDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
FileStatistics stat;
status = env_->Stat(dirpath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
EXPECT_TRUE(stat.is_directory);
}
TEST_P(ModularFileSystemTest, TestStatNotFound) {
const std::string dirpath = GetURIForPath("a_dir");
FileStatistics stat;
Status status = env_->Stat(dirpath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestStatPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
FileStatistics stat;
status = env_->Stat(target_path, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->IsDirectory(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestIsDirectoryFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->IsDirectory(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestIsDirectoryNotFound) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->IsDirectory(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestIsDirectoryPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
status = env_->IsDirectory(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetFileSizeEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
uint64 size;
status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, 0);
}
TEST_P(ModularFileSystemTest, TestGetFileSizeNonEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
uint64 size;
status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
}
TEST_P(ModularFileSystemTest, TestGetFileSizeDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
uint64 size;
status = env_->GetFileSize(dirpath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetFileSizeNotFound) {
const std::string filepath = GetURIForPath("a_dir");
uint64 size;
Status status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestGetFileSizePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
uint64 size;
status = env_->GetFileSize(target_path, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetChildren) {
const std::string dirpath = GetURIForPath("dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::vector<std::string> filenames = {
GetURIForPath("dir/a_file"),
GetURIForPath("dir/another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
const std::vector<std::string> dirnames = {
GetURIForPath("dir/a_dir"),
GetURIForPath("dir/another_dir"),
};
for (const auto& dirname : dirnames) {
status = env_->CreateDir(dirname);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
}
std::vector<std::string> children;
status = env_->GetChildren(dirpath, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetChildren() not supported: " << status;
const std::vector<std::string> expected_children = {"a_file", "another_file",
"a_dir", "another_dir"};
EXPECT_EQ(children.size(), filenames.size() + dirnames.size());
for (const auto& child : expected_children)
EXPECT_NE(std::find(children.begin(), children.end(), child),
children.end());
}
TEST_P(ModularFileSystemTest, TestGetChildrenEmpty) {
const std::string dirpath = GetURIForPath("dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::vector<std::string> children;
status = env_->GetChildren(dirpath, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(children.size(), 0);
}
TEST_P(ModularFileSystemTest, TestGetChildrenOfFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::vector<std::string> children;
status = env_->GetChildren(filepath, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetChildrenPathNotFound) {
const std::string target_path = GetURIForPath("a_dir");
std::vector<std::string> children;
Status status = env_->GetChildren(target_path, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestGetChildrenPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_dir");
std::vector<std::string> children;
status = env_->GetChildren(target_path, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetMatchingPaths) {
const std::vector<std::string> matching_filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
};
const std::vector<std::string> other_filenames = {
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : matching_filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
for (const auto& filename : other_filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath("/a*"), &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_EQ(results.size(), matching_filenames.size());
for (const auto& match : matching_filenames)
EXPECT_NE(std::find(results.begin(), results.end(), match), results.end());
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsEmptyFileSystem) {
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath("a*"), &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(results.size(), 0);
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsEmptyPattern) {
const std::vector<std::string> filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath(""), &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_EQ(results.size(), 1);
EXPECT_NE(std::find(results.begin(), results.end(), GetURIForPath("")),
results.end());
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsLiteralMatch) {
const std::vector<std::string> filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(filenames[0], &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_EQ(results.size(), 1);
EXPECT_NE(std::find(results.begin(), results.end(), filenames[0]),
results.end());
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsNoMatch) {
const std::vector<std::string> filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath("x?y*"), &results);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(results.size(), 0);
}
TEST_P(ModularFileSystemTest, TestAppendAndTell) {
const std::string filename = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t position;
status = file->Tell(&position);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Tell() not supported: " << status;
EXPECT_EQ(position, 0);
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Tell(&position);
EXPECT_EQ(status.code(), Code::OK);
EXPECT_EQ(position, test_data.size());
}
TEST_P(ModularFileSystemTest, TestClose) {
const std::string filename = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
}
TEST_P(ModularFileSystemTest, TestRoundTrip) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<RandomAccessFile> read_file;
status = env_->NewRandomAccessFile(filepath, &read_file);
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
  char scratch[64] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size(), &result, scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(test_data, result);
}
TEST_P(ModularFileSystemTest, TestRoundTripWithAppendableFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(filepath, &same_file);
if (!status.ok())
GTEST_SKIP() << "NewAppendableFile() not supported: " << status;
const std::string more_test_data("qwer");
EXPECT_EQ(same_file->Append(more_test_data).code(), Code::OK);
EXPECT_EQ(same_file->Flush().code(), Code::OK);
EXPECT_EQ(same_file->Close().code(), Code::OK);
std::unique_ptr<RandomAccessFile> read_file;
status = env_->NewRandomAccessFile(filepath, &read_file);
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
  char scratch[64] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size() + more_test_data.size(), &result,
scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(test_data + more_test_data, result);
EXPECT_EQ(
read_file->Read(test_data.size(), more_test_data.size(), &result, scratch)
.code(),
Code::OK);
EXPECT_EQ(more_test_data, result);
}
TEST_P(ModularFileSystemTest, TestReadOutOfRange) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<RandomAccessFile> read_file;
status = env_->NewRandomAccessFile(filepath, &read_file);
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
  char scratch[64] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size() + 1, &result, scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OUT_OF_RANGE);
}
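// Scheme selection for the parameterized suite below: SchemeVector() stores
// schemes requested via --scheme flags, and GetSchemesFromUserOrEnv() keeps
// only the registered schemes that were requested (or all registered schemes
// when none were requested), so an unregistered scheme is silently dropped.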
static std::vector<std::string>* SchemeVector() {
static std::vector<std::string>* schemes = new std::vector<std::string>;
return schemes;
}
static std::vector<std::string>* GetSchemesFromUserOrEnv() {
std::vector<std::string>* all_schemes = new std::vector<std::string>;
tensorflow::Status status =
tensorflow::Env::Default()->GetRegisteredFileSystemSchemes(all_schemes);
if (status.ok()) {
std::vector<std::string>* user_schemes = SchemeVector();
if (!user_schemes->empty()) {
auto is_requested_scheme = [user_schemes](const auto& scheme) {
return std::find(user_schemes->begin(), user_schemes->end(), scheme) ==
user_schemes->end();
};
auto end = std::remove_if(all_schemes->begin(), all_schemes->end(),
is_requested_scheme);
all_schemes->erase(end, all_schemes->end());
}
}
return all_schemes;
}
static std::vector<std::string> GetSchemes() {
static std::vector<std::string>* schemes = GetSchemesFromUserOrEnv();
return *schemes;
}
INSTANTIATE_TEST_SUITE_P(ModularFileSystem, ModularFileSystemTest,
::testing::ValuesIn(GetSchemes()));
static bool LoadDSO(const std::string& dso) {
tensorflow::Status status = RegisterFilesystemPlugin(dso);
if (!status.ok())
VLOG(0) << "Filesystems from '" << dso
<< "' could not be registered: " << status;
return status.ok();
}
static bool GetURIScheme(const std::string& scheme) {
tensorflow::SchemeVector()->push_back(scheme);
return true;
}
static bool SetCloudPath(const std::string& cloud_path_) {
ModularFileSystemTest::SetCloudPath(cloud_path_);
return true;
}
static bool SetTmpDir(const std::string& tmp_dir_) {
ModularFileSystemTest::SetTmpDir(tmp_dir_);
return true;
}
}
}
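// Flags are parsed before InitGoogleTest so that each flag's callback runs
// eagerly: --dso loads a plugin immediately, --scheme records a scheme to
// test. A hypothetical invocation (paths and names are illustrative only):
//   modular_filesystem_test --dso=/tmp/libposix_filesystem.so --scheme=file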
GTEST_API_ int main(int argc, char** argv) {
const std::vector<tensorflow::Flag> flag_list = {
tensorflow::Flag("dso", tensorflow::LoadDSO, "",
"Path to shared object to load"),
tensorflow::Flag("scheme", tensorflow::GetURIScheme, "",
"URI scheme to test"),
tensorflow::Flag("cloud_path", tensorflow::SetCloudPath, "",
"Path for cloud filesystem (namenode for hdfs, "
"bucketname for s3/gcs)"),
tensorflow::Flag("tmp_dir", tensorflow::SetTmpDir, "",
"Temporary directory to store test data.")};
if (!tensorflow::Flags::Parse(&argc, argv, flag_list)) {
std::cout << tensorflow::Flags::Usage(argv[0], flag_list);
return -1;
}
tensorflow::testing::InstallStacktraceHandler();
tensorflow::ModularFileSystemTest::InitializeTestRNG();
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/modular_filesystem.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5306fe2a-a64a-4862-978a-1a7bd74402f1 | cpp | tensorflow/tensorflow | gcs_filesystem | tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.cc | tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem_test.cc | #include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.h"
#include <stdlib.h>
#include <string.h>
#include <variant>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/types/variant.h"
#include "google/cloud/storage/client.h"
#include "tensorflow/c/env.h"
#include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_helper.h"
#include "tensorflow/c/logging.h"
#include "tensorflow/c/tf_status.h"
namespace gcs = google::cloud::storage;
constexpr char kBlockSize[] = "GCS_READ_CACHE_BLOCK_SIZE_MB";
constexpr size_t kDefaultBlockSize = 64 * 1024 * 1024;
constexpr char kMaxCacheSize[] = "GCS_READ_CACHE_MAX_SIZE_MB";
constexpr size_t kDefaultMaxCacheSize = 0;
constexpr char kMaxStaleness[] = "GCS_READ_CACHE_MAX_STALENESS";
constexpr uint64_t kDefaultMaxStaleness = 0;
constexpr char kStatCacheMaxAge[] = "GCS_STAT_CACHE_MAX_AGE";
constexpr uint64_t kStatCacheDefaultMaxAge = 5;
constexpr char kStatCacheMaxEntries[] = "GCS_STAT_CACHE_MAX_ENTRIES";
constexpr size_t kStatCacheDefaultMaxEntries = 1024;
constexpr char kAppendMode[] = "GCS_APPEND_MODE";
constexpr char kComposeAppend[] = "compose";
static inline void TF_SetStatusFromGCSStatus(
const google::cloud::Status& gcs_status, TF_Status* status) {
TF_SetStatus(status, static_cast<TF_Code>(gcs_status.code()),
gcs_status.message().c_str());
}
static void* plugin_memory_allocate(size_t size) { return calloc(1, size); }
static void plugin_memory_free(void* ptr) { free(ptr); }
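// Splits "gs://bucket/object" URIs into bucket and object parts. A minimal
// usage sketch (values are illustrative, not taken from the test suite):
//   std::string bucket, object;
//   ParseGCSPath("gs://my-bucket/dir/file.txt", /*object_empty_ok=*/false,
//                &bucket, &object, status);
//   // On success: bucket == "my-bucket", object == "dir/file.txt".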
void ParseGCSPath(const std::string& fname, bool object_empty_ok,
std::string* bucket, std::string* object, TF_Status* status) {
  size_t scheme_end = fname.find("://") + 2;
  if (fname.substr(0, scheme_end + 1) != "gs://") {
    TF_SetStatus(status, TF_INVALID_ARGUMENT,
                 "GCS path doesn't start with 'gs://'.");
return;
}
size_t bucket_end = fname.find('/', scheme_end + 1);
if (bucket_end == std::string::npos) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"GCS path doesn't contain a bucket name.");
return;
}
*bucket = fname.substr(scheme_end + 1, bucket_end - scheme_end - 1);
*object = fname.substr(bucket_end + 1);
if (object->empty() && !object_empty_ok) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"GCS path doesn't contain an object name.");
}
}
static void MaybeAppendSlash(std::string* name) {
if (name->empty())
*name = "/";
else if (name->back() != '/')
name->push_back('/');
}
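// Performs one ranged GCS read of up to `buffer_size` bytes at `offset`,
// returning the byte count actually read or -1 on error. When fewer bytes
// come back than requested, a cached stat that claims the object is longer
// marks the read TF_INTERNAL, catching objects that changed mid-read.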
static int64_t LoadBufferFromGCS(const std::string& path, size_t offset,
size_t buffer_size, char* buffer,
tf_gcs_filesystem::GCSFile* gcs_file,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return -1;
auto stream = gcs_file->gcs_client.ReadObject(
bucket, object, gcs::ReadRange(offset, offset + buffer_size));
TF_SetStatusFromGCSStatus(stream.status(), status);
if ((TF_GetCode(status) != TF_OK) &&
(TF_GetCode(status) != TF_OUT_OF_RANGE)) {
return -1;
}
int64_t read;
auto content_length = stream.headers().find("content-length");
if (content_length == stream.headers().end()) {
read = 0;
} else if (!absl::SimpleAtoi(content_length->second, &read)) {
TF_SetStatus(status, TF_UNKNOWN, "Could not get content-length header");
return -1;
}
TF_SetStatus(status, TF_OK, "");
TF_VLog(1, "Successful read of %s @ %u of size: %u", path.c_str(), offset,
read);
stream.read(buffer, read);
read = stream.gcount();
if (read < buffer_size) {
tf_gcs_filesystem::GcsFileStat stat;
if (gcs_file->stat_cache->Lookup(path, &stat)) {
if (offset + read < stat.base.length) {
TF_SetStatus(status, TF_INTERNAL,
absl::StrCat("File contents are inconsistent for file: ",
path, " @ ", offset)
.c_str());
}
TF_VLog(2, "Successful integrity check for: %s @ %u", path.c_str(),
offset);
}
}
return read;
}
namespace tf_random_access_file {
using ReadFn =
std::function<int64_t(const std::string& path, uint64_t offset, size_t n,
char* buffer, TF_Status* status)>;
typedef struct GCSFile {
const std::string path;
const bool is_cache_enable;
const uint64_t buffer_size;
ReadFn read_fn;
absl::Mutex buffer_mutex;
uint64_t buffer_start ABSL_GUARDED_BY(buffer_mutex);
bool buffer_end_is_past_eof ABSL_GUARDED_BY(buffer_mutex);
std::string buffer ABSL_GUARDED_BY(buffer_mutex);
GCSFile(std::string path, bool is_cache_enable, uint64_t buffer_size,
ReadFn read_fn)
: path(path),
is_cache_enable(is_cache_enable),
buffer_size(buffer_size),
read_fn(std::move(read_fn)),
buffer_mutex(),
buffer_start(0),
buffer_end_is_past_eof(false),
buffer() {}
} GCSFile;
void Cleanup(TF_RandomAccessFile* file) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
delete gcs_file;
}
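// With the block cache enabled, or for reads larger than the read-ahead
// buffer, Read() delegates straight to read_fn. Otherwise it serves bytes
// already buffered, refills the buffer from the first missing offset, and
// reports TF_OUT_OF_RANGE when the file ends before `n` bytes are copied.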
int64_t Read(const TF_RandomAccessFile* file, uint64_t offset, size_t n,
char* buffer, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (gcs_file->is_cache_enable || n > gcs_file->buffer_size) {
return gcs_file->read_fn(gcs_file->path, offset, n, buffer, status);
} else {
absl::MutexLock l(&gcs_file->buffer_mutex);
size_t buffer_end = gcs_file->buffer_start + gcs_file->buffer.size();
size_t copy_size = 0;
if (offset < buffer_end && gcs_file->buffer_start) {
copy_size = (std::min)(n, static_cast<size_t>(buffer_end - offset));
memcpy(buffer,
gcs_file->buffer.data() + (offset - gcs_file->buffer_start),
copy_size);
}
bool consumed_buffer_to_eof =
offset + copy_size >= buffer_end && gcs_file->buffer_end_is_past_eof;
if (copy_size < n && !consumed_buffer_to_eof) {
gcs_file->buffer_start = offset + copy_size;
gcs_file->buffer.resize(gcs_file->buffer_size);
auto read_fill_buffer = gcs_file->read_fn(
gcs_file->path, gcs_file->buffer_start, gcs_file->buffer_size,
&(gcs_file->buffer[0]), status);
gcs_file->buffer_end_is_past_eof =
(TF_GetCode(status) == TF_OUT_OF_RANGE);
if (read_fill_buffer >= 0) gcs_file->buffer.resize(read_fill_buffer);
if (TF_GetCode(status) != TF_OK &&
TF_GetCode(status) != TF_OUT_OF_RANGE) {
gcs_file->buffer.resize(0);
return -1;
}
size_t remaining_copy =
(std::min)(n - copy_size, gcs_file->buffer.size());
memcpy(buffer + copy_size, gcs_file->buffer.data(), remaining_copy);
copy_size += remaining_copy;
}
if (copy_size < n) {
gcs_file->buffer_end_is_past_eof = false;
TF_SetStatus(status, TF_OUT_OF_RANGE, "Read less bytes than requested");
return copy_size;
}
TF_SetStatus(status, TF_OK, "");
return copy_size;
}
}
}
namespace tf_writable_file {
typedef struct GCSFile {
const std::string bucket;
const std::string object;
gcs::Client* gcs_client;
TempFile outfile;
bool sync_need;
int64_t offset;
} GCSFile;
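// Flush strategy: with offset == -1 or 0 the whole local temp file simply
// replaces the GCS object. In compose-append mode (offset > 0) only the new
// bytes are uploaded, under a random temporary object name, then stitched
// onto the existing object via ComposeObject and the temporary is deleted.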
static void SyncImpl(const std::string& bucket, const std::string& object,
int64_t* offset, TempFile* outfile,
gcs::Client* gcs_client, TF_Status* status) {
outfile->flush();
if (*offset == -1 || *offset == 0) {
auto metadata = gcs_client->UploadFile(outfile->getName(), bucket, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
if (*offset == 0) {
if (!outfile->truncate()) {
TF_SetStatus(status, TF_INTERNAL,
"Could not truncate internal temporary file.");
return;
}
*offset = static_cast<int64_t>(metadata->size());
}
outfile->clear();
outfile->seekp(0, std::ios::end);
TF_SetStatus(status, TF_OK, "");
} else {
std::string temporary_object =
gcs::CreateRandomPrefixName("tf_writable_file_gcs");
auto metadata = gcs_client->UploadFile(outfile->getName(), bucket,
temporary_object, gcs::Fields(""));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
TF_VLog(3, "AppendObject: gs:
temporary_object.c_str(), bucket.c_str(), object.c_str());
const std::vector<gcs::ComposeSourceObject> source_objects = {
{object, {}, {}}, {temporary_object, {}, {}}};
metadata = gcs_client->ComposeObject(bucket, source_objects, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
auto delete_status = gcs_client->DeleteObject(bucket, temporary_object);
if (!delete_status.ok()) {
TF_SetStatusFromGCSStatus(delete_status, status);
return;
}
if (!outfile->truncate()) {
TF_SetStatus(status, TF_INTERNAL,
"Could not truncate internal temporary file.");
return;
}
*offset = static_cast<int64_t>(metadata->size());
TF_SetStatus(status, TF_OK, "");
}
}
void Cleanup(TF_WritableFile* file) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
delete gcs_file;
}
void Append(const TF_WritableFile* file, const char* buffer, size_t n,
TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (!gcs_file->outfile.is_open()) {
TF_SetStatus(status, TF_FAILED_PRECONDITION,
"The internal temporary file is not writable.");
return;
}
TF_VLog(3, "Append: gs:
gcs_file->object.c_str(), n);
gcs_file->sync_need = true;
gcs_file->outfile.write(buffer, n);
if (!gcs_file->outfile)
TF_SetStatus(status, TF_INTERNAL,
"Could not append to the internal temporary file.");
else
TF_SetStatus(status, TF_OK, "");
}
int64_t Tell(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
int64_t position = int64_t(gcs_file->outfile.tellp());
if (position == -1)
TF_SetStatus(status, TF_INTERNAL,
"tellp on the internal temporary file failed");
else
TF_SetStatus(status, TF_OK, "");
return position == -1
? -1
: position + (gcs_file->offset == -1 ? 0 : gcs_file->offset);
}
void Flush(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (gcs_file->sync_need) {
TF_VLog(3, "Flush started: gs:
gcs_file->object.c_str());
if (!gcs_file->outfile) {
TF_SetStatus(status, TF_INTERNAL,
"Could not append to the internal temporary file.");
return;
}
SyncImpl(gcs_file->bucket, gcs_file->object, &gcs_file->offset,
&gcs_file->outfile, gcs_file->gcs_client, status);
TF_VLog(3, "Flush finished: gs:
gcs_file->object.c_str());
if (TF_GetCode(status) != TF_OK) return;
gcs_file->sync_need = false;
} else {
TF_SetStatus(status, TF_OK, "");
}
}
void Sync(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
TF_VLog(3, "Sync: gs:
gcs_file->object.c_str());
Flush(file, status);
}
void Close(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
TF_VLog(3, "Close: gs:
gcs_file->object.c_str());
if (gcs_file->sync_need) {
Flush(file, status);
}
gcs_file->outfile.close();
}
}
namespace tf_read_only_memory_region {
typedef struct GCSMemoryRegion {
const void* const address;
const uint64_t length;
} GCSMemoryRegion;
void Cleanup(TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
plugin_memory_free(const_cast<void*>(r->address));
delete r;
}
const void* Data(const TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
return r->address;
}
uint64_t Length(const TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
return r->length;
}
}
namespace tf_gcs_filesystem {
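// Cache behaviour is tuned via the environment variables named by the
// constants above. An illustrative configuration (example values only):
//   export GCS_READ_CACHE_BLOCK_SIZE_MB=16   # block size for ranged reads
//   export GCS_READ_CACHE_MAX_SIZE_MB=160    # nonzero enables the RAM cache
//   export GCS_STAT_CACHE_MAX_AGE=5          # stat entries expire (seconds)
//   export GCS_APPEND_MODE=compose           # use compose-based appends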
GCSFile::GCSFile(google::cloud::storage::Client&& gcs_client)
: gcs_client(gcs_client), block_cache_lock() {
const char* append_mode = std::getenv(kAppendMode);
compose = (append_mode != nullptr) && (!strcmp(kAppendMode, append_mode));
uint64_t value;
block_size = kDefaultBlockSize;
size_t max_bytes = kDefaultMaxCacheSize;
uint64_t max_staleness = kDefaultMaxStaleness;
const char* block_size_env = std::getenv(kBlockSize);
if (block_size_env && absl::SimpleAtoi(block_size_env, &value)) {
block_size = value * 1024 * 1024;
}
const char* max_bytes_env = std::getenv(kMaxCacheSize);
if (max_bytes_env && absl::SimpleAtoi(max_bytes_env, &value)) {
max_bytes = static_cast<size_t>(value * 1024 * 1024);
}
const char* max_staleness_env = std::getenv(kMaxStaleness);
if (max_staleness_env && absl::SimpleAtoi(max_staleness_env, &value)) {
max_staleness = value;
}
TF_VLog(1, "GCS cache max size = %u ; block size = %u ; max staleness = %u",
max_bytes, block_size, max_staleness);
file_block_cache = std::make_unique<RamFileBlockCache>(
block_size, max_bytes, max_staleness,
[this](const std::string& filename, size_t offset, size_t buffer_size,
char* buffer, TF_Status* status) {
return LoadBufferFromGCS(filename, offset, buffer_size, buffer, this,
status);
});
uint64_t stat_cache_max_age = kStatCacheDefaultMaxAge;
size_t stat_cache_max_entries = kStatCacheDefaultMaxEntries;
const char* stat_cache_max_age_env = std::getenv(kStatCacheMaxAge);
if (stat_cache_max_age_env &&
absl::SimpleAtoi(stat_cache_max_age_env, &value)) {
stat_cache_max_age = value;
}
const char* stat_cache_max_entries_env = std::getenv(kStatCacheMaxEntries);
if (stat_cache_max_entries_env &&
absl::SimpleAtoi(stat_cache_max_entries_env, &value)) {
stat_cache_max_entries = static_cast<size_t>(value);
}
stat_cache = std::make_unique<ExpiringLRUCache<GcsFileStat>>(
stat_cache_max_age, stat_cache_max_entries);
}
GCSFile::GCSFile(google::cloud::storage::Client&& gcs_client, bool compose,
uint64_t block_size, size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries)
: gcs_client(gcs_client),
compose(compose),
block_cache_lock(),
block_size(block_size) {
file_block_cache = std::make_unique<RamFileBlockCache>(
block_size, max_bytes, max_staleness,
[this](const std::string& filename, size_t offset, size_t buffer_size,
char* buffer, TF_Status* status) {
return LoadBufferFromGCS(filename, offset, buffer_size, buffer, this,
status);
});
stat_cache = std::make_unique<ExpiringLRUCache<GcsFileStat>>(
stat_cache_max_age, stat_cache_max_entries);
}
void InitTest(TF_Filesystem* filesystem, bool compose, uint64_t block_size,
size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries,
TF_Status* status) {
google::cloud::StatusOr<gcs::Client> client =
gcs::Client::CreateDefaultClient();
if (!client) {
TF_SetStatusFromGCSStatus(client.status(), status);
return;
}
filesystem->plugin_filesystem =
new GCSFile(std::move(client.value()), compose, block_size, max_bytes,
max_staleness, stat_cache_max_age, stat_cache_max_entries);
TF_SetStatus(status, TF_OK, "");
}
void Init(TF_Filesystem* filesystem, TF_Status* status) {
google::cloud::StatusOr<gcs::Client> client =
gcs::Client::CreateDefaultClient();
if (!client) {
TF_SetStatusFromGCSStatus(client.status(), status);
return;
}
filesystem->plugin_filesystem = new GCSFile(std::move(client.value()));
TF_SetStatus(status, TF_OK, "");
}
void Cleanup(TF_Filesystem* filesystem) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
delete gcs_file;
}
static void UncachedStatForObject(const std::string& bucket,
const std::string& object, GcsFileStat* stat,
gcs::Client* gcs_client, TF_Status* status) {
auto metadata = gcs_client->GetObjectMetadata(
bucket, object, gcs::Fields("generation,size,timeStorageClassUpdated"));
if (!metadata) return TF_SetStatusFromGCSStatus(metadata.status(), status);
stat->generation_number = metadata->generation();
stat->base.length = metadata->size();
stat->base.mtime_nsec =
metadata->time_storage_class_updated().time_since_epoch().count();
stat->base.is_directory = object.back() == '/';
TF_VLog(1,
"Stat of: gs:
bucket.c_str(), object.c_str(), stat->base.length,
stat->generation_number, stat->base.mtime_nsec);
return TF_SetStatus(status, TF_OK, "");
}
void NewRandomAccessFile(const TF_Filesystem* filesystem, const char* path,
TF_RandomAccessFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
bool is_cache_enabled;
{
absl::MutexLock l(&gcs_file->block_cache_lock);
is_cache_enabled = gcs_file->file_block_cache->IsCacheEnabled();
}
auto read_fn = [gcs_file, is_cache_enabled, bucket, object](
const std::string& path, uint64_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
int64_t read = 0;
if (is_cache_enabled) {
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
GcsFileStat stat;
gcs_file->stat_cache->LookupOrCompute(
path, &stat,
[gcs_file, bucket, object](const std::string& path, GcsFileStat* stat,
TF_Status* status) {
UncachedStatForObject(bucket, object, stat, &gcs_file->gcs_client,
status);
},
status);
if (TF_GetCode(status) != TF_OK) return -1;
if (!gcs_file->file_block_cache->ValidateAndUpdateFileSignature(
path, stat.generation_number)) {
TF_VLog(
1,
"File signature has been changed. Refreshing the cache. Path: %s",
path.c_str());
}
read = gcs_file->file_block_cache->Read(path, offset, n, buffer, status);
} else {
read = LoadBufferFromGCS(path, offset, n, buffer, gcs_file, status);
}
if (TF_GetCode(status) != TF_OK) return -1;
if (read < n)
TF_SetStatus(status, TF_OUT_OF_RANGE, "Read less bytes than requested");
else
TF_SetStatus(status, TF_OK, "");
return read;
};
file->plugin_file = new tf_random_access_file::GCSFile(
std::move(path), is_cache_enabled, gcs_file->block_size, read_fn);
TF_SetStatus(status, TF_OK, "");
}
void NewWritableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
char* temp_file_name = TF_GetTempFileName("");
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::out), true,
(gcs_file->compose ? 0 : -1)});
free(temp_file_name);
TF_VLog(3, "GcsWritableFile: %s", path);
TF_SetStatus(status, TF_OK, "");
}
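// Appendable files have two modes. Without compose mode, the existing object
// is downloaded into the temp file and every sync re-uploads the whole file.
// In compose mode only the object's current size is fetched; appended bytes
// are later composed onto the object server-side (see SyncImpl above).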
void NewAppendableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
char* temp_file_name_c_str = TF_GetTempFileName("");
std::string temp_file_name(temp_file_name_c_str);
free(temp_file_name_c_str);
if (!gcs_file->compose) {
auto gcs_status =
gcs_file->gcs_client.DownloadToFile(bucket, object, temp_file_name);
TF_SetStatusFromGCSStatus(gcs_status, status);
auto status_code = TF_GetCode(status);
if (status_code != TF_OK && status_code != TF_NOT_FOUND) return;
bool sync_need = (status_code == TF_NOT_FOUND);
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::app), sync_need,
-1});
} else {
auto metadata = gcs_file->gcs_client.GetObjectMetadata(bucket, object,
gcs::Fields("size"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) == TF_OK) {
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::trunc), false,
static_cast<int64_t>(metadata->size())});
} else if (TF_GetCode(status) == TF_NOT_FOUND) {
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::trunc), true,
0});
} else {
return;
}
}
TF_VLog(3, "GcsWritableFile: %s with existing file %s", path,
temp_file_name.c_str());
TF_SetStatus(status, TF_OK, "");
}
void NewReadOnlyMemoryRegionFromFile(const TF_Filesystem* filesystem,
const char* path,
TF_ReadOnlyMemoryRegion* region,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto metadata = gcs_file->gcs_client.GetObjectMetadata(bucket, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
TF_RandomAccessFile reader;
NewRandomAccessFile(filesystem, path, &reader, status);
if (TF_GetCode(status) != TF_OK) return;
char* buffer = static_cast<char*>(plugin_memory_allocate(metadata->size()));
int64_t read =
tf_random_access_file::Read(&reader, 0, metadata->size(), buffer, status);
tf_random_access_file::Cleanup(&reader);
if (TF_GetCode(status) != TF_OK) return;
if (read > 0 && buffer) {
region->plugin_memory_region =
new tf_read_only_memory_region::GCSMemoryRegion(
{buffer, static_cast<uint64_t>(read)});
TF_SetStatus(status, TF_OK, "");
} else if (read == 0) {
TF_SetStatus(status, TF_INVALID_ARGUMENT, "File is empty");
}
}
static void StatForObject(GCSFile* gcs_file, const std::string& path,
const std::string& bucket, const std::string& object,
GcsFileStat* stat, TF_Status* status) {
if (object.empty())
return TF_SetStatus(
status, TF_INVALID_ARGUMENT,
absl::StrCat("'object' must be a non-empty string. (File: ", path, ")")
.c_str());
TF_SetStatus(status, TF_OK, "");
gcs_file->stat_cache->LookupOrCompute(
path, stat,
[gcs_file, bucket, object](const std::string& path, GcsFileStat* stat,
TF_Status* status) {
UncachedStatForObject(bucket, object, stat, &gcs_file->gcs_client,
status);
},
status);
}
static bool ObjectExists(GCSFile* gcs_file, const std::string& path,
const std::string& bucket, const std::string& object,
TF_Status* status) {
GcsFileStat stat;
StatForObject(gcs_file, path, bucket, object, &stat, status);
if (TF_GetCode(status) != TF_OK && TF_GetCode(status) != TF_NOT_FOUND)
return false;
if (TF_GetCode(status) == TF_NOT_FOUND) {
TF_SetStatus(status, TF_OK, "");
return false;
}
return !stat.base.is_directory;
}
static bool BucketExists(GCSFile* gcs_file, const std::string& bucket,
TF_Status* status) {
auto metadata =
gcs_file->gcs_client.GetBucketMetadata(bucket, gcs::Fields(""));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) != TF_OK && TF_GetCode(status) != TF_NOT_FOUND)
return false;
if (TF_GetCode(status) == TF_NOT_FOUND) {
TF_SetStatus(status, TF_OK, "");
return false;
}
return true;
}
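// Lists at most `max_results` entries under `dir`. A "/" delimiter (the
// non-recursive case) makes GCS fold nested objects into prefixes, yielding
// one level of children, each returned relative to `dir`; e.g. listing
// gs://b/dir/ might yield {"file", "subdir/"} (illustrative values).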
static std::vector<std::string> GetChildrenBounded(
GCSFile* gcs_file, std::string dir, uint64_t max_results, bool recursive,
bool include_self_directory_marker, TF_Status* status) {
std::string bucket, prefix;
MaybeAppendSlash(&dir);
ParseGCSPath(dir, true, &bucket, &prefix, status);
std::vector<std::string> result;
uint64_t count = 0;
std::string delimiter = recursive ? "" : "/";
for (auto&& item : gcs_file->gcs_client.ListObjectsAndPrefixes(
bucket, gcs::Prefix(prefix), gcs::Delimiter(delimiter),
gcs::Fields("items(name),prefixes"))) {
if (count == max_results) {
TF_SetStatus(status, TF_OK, "");
return result;
}
if (!item) {
TF_SetStatusFromGCSStatus(item.status(), status);
return result;
}
auto value = *std::move(item);
std::string children = std::holds_alternative<std::string>(value)
? std::get<std::string>(value)
: std::get<gcs::ObjectMetadata>(value).name();
auto pos = children.find(prefix);
if (pos != 0) {
TF_SetStatus(status, TF_INTERNAL,
absl::StrCat("Unexpected response: the returned file name ",
children, " doesn't match the prefix ", prefix)
.c_str());
return result;
}
children.erase(0, prefix.length());
if (!children.empty() || include_self_directory_marker) {
result.emplace_back(children);
}
++count;
}
return result;
}
static bool FolderExists(GCSFile* gcs_file, std::string dir,
TF_Status* status) {
ExpiringLRUCache<GcsFileStat>::ComputeFunc compute_func =
[gcs_file](const std::string& dir, GcsFileStat* stat, TF_Status* status) {
auto children =
GetChildrenBounded(gcs_file, dir, 1, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
if (!children.empty()) {
stat->base = {0, 0, true};
return TF_SetStatus(status, TF_OK, "");
} else {
return TF_SetStatus(status, TF_INVALID_ARGUMENT, "Not a directory!");
}
};
GcsFileStat stat;
MaybeAppendSlash(&dir);
gcs_file->stat_cache->LookupOrCompute(dir, &stat, compute_func, status);
if (TF_GetCode(status) != TF_OK && TF_GetCode(status) != TF_INVALID_ARGUMENT)
return false;
if (TF_GetCode(status) == TF_INVALID_ARGUMENT) {
TF_SetStatus(status, TF_OK, "");
return false;
}
return true;
}
static void ClearFileCaches(GCSFile* gcs_file, const std::string& path) {
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
gcs_file->file_block_cache->RemoveFile(path);
gcs_file->stat_cache->Delete(path);
}
void PathExists(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
bool result = BucketExists(gcs_file, bucket, status);
if (result) return TF_SetStatus(status, TF_OK, "");
}
GcsFileStat stat;
StatForObject(gcs_file, path, bucket, object, &stat, status);
if (TF_GetCode(status) != TF_NOT_FOUND) return;
bool result = FolderExists(gcs_file, path, status);
if (TF_GetCode(status) != TF_OK || (TF_GetCode(status) == TF_OK && result))
return;
return TF_SetStatus(
status, TF_NOT_FOUND,
absl::StrCat("The path ", path, " does not exist.").c_str());
}
void CreateDir(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string dir = path;
MaybeAppendSlash(&dir);
TF_VLog(3,
"CreateDir: creating directory with path: %s and "
"path_with_slash: %s",
path, dir.c_str());
std::string bucket, object;
ParseGCSPath(dir, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
bool is_directory = BucketExists(gcs_file, bucket, status);
if (TF_GetCode(status) != TF_OK) return;
if (!is_directory)
TF_SetStatus(status, TF_NOT_FOUND,
absl::StrCat("The specified bucket ", dir, " was not found.")
.c_str());
return;
}
PathExists(filesystem, dir.c_str(), status);
if (TF_GetCode(status) == TF_OK) {
TF_VLog(3, "CreateDir: directory already exists, not uploading %s", path);
return TF_SetStatus(status, TF_ALREADY_EXISTS, path);
}
  auto metadata = gcs_file->gcs_client.InsertObject(
      bucket, object, "",
      // With IfGenerationMatch(0) the insert fails with FAILED_PRECONDITION
      // when the object already exists; this is mapped to ALREADY_EXISTS below.
      gcs::IfGenerationMatch(0), gcs::Fields(""));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) == TF_FAILED_PRECONDITION)
TF_SetStatus(status, TF_ALREADY_EXISTS, path);
}
void DeleteFile(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto gcs_status = gcs_file->gcs_client.DeleteObject(bucket, object);
TF_SetStatusFromGCSStatus(gcs_status, status);
if (TF_GetCode(status) == TF_OK) ClearFileCaches(gcs_file, path);
}
void DeleteDir(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto childrens = GetChildrenBounded(gcs_file, path, 2, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
if (childrens.size() > 1 || (childrens.size() == 1 && !childrens[0].empty()))
return TF_SetStatus(status, TF_FAILED_PRECONDITION,
"Cannot delete a non-empty directory.");
if (childrens.size() == 1 && childrens[0].empty()) {
std::string dir = path;
MaybeAppendSlash(&dir);
DeleteFile(filesystem, dir.c_str(), status);
return;
}
TF_SetStatus(status, TF_OK, "");
}
void CopyFile(const TF_Filesystem* filesystem, const char* src, const char* dst,
TF_Status* status) {
std::string bucket_src, object_src;
ParseGCSPath(src, false, &bucket_src, &object_src, status);
if (TF_GetCode(status) != TF_OK) return;
std::string bucket_dst, object_dst;
ParseGCSPath(dst, false, &bucket_dst, &object_dst, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto metadata = gcs_file->gcs_client.RewriteObjectBlocking(
bucket_src, object_src, bucket_dst, object_dst,
gcs::Fields("done,rewriteToken"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
}
bool IsDirectory(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return false;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
bool result = BucketExists(gcs_file, bucket, status);
if (TF_GetCode(status) != TF_OK) return false;
if (!result)
TF_SetStatus(
status, TF_NOT_FOUND,
absl::StrCat("The specified bucket gs:
.c_str());
return result;
}
bool is_folder = FolderExists(gcs_file, path, status);
if (TF_GetCode(status) != TF_OK) return false;
if (is_folder) return true;
bool is_object = ObjectExists(gcs_file, path, bucket, object, status);
if (TF_GetCode(status) != TF_OK) return false;
if (is_object) {
TF_SetStatus(
status, TF_FAILED_PRECONDITION,
absl::StrCat("The specified path ", path, " is not a directory.")
.c_str());
return false;
}
TF_SetStatus(status, TF_NOT_FOUND,
absl::StrCat("The path ", path, " does not exist.").c_str());
return false;
}
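// GCS offers no atomic rename, so a single object is "renamed" by a
// server-side RewriteObjectBlocking to the destination followed by deleting
// the source; RenameFile below applies this per child when `src` is a
// directory prefix.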
static void RenameObject(const TF_Filesystem* filesystem,
const std::string& src, const std::string& dst,
TF_Status* status) {
TF_VLog(3, "RenameObject: started %s to %s", src.c_str(), dst.c_str());
std::string bucket_src, object_src;
ParseGCSPath(src, false, &bucket_src, &object_src, status);
if (TF_GetCode(status) != TF_OK) return;
std::string bucket_dst, object_dst;
ParseGCSPath(dst, false, &bucket_dst, &object_dst, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto metadata = gcs_file->gcs_client.RewriteObjectBlocking(
bucket_src, object_src, bucket_dst, object_dst,
gcs::Fields("done,rewriteToken"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) != TF_OK) return;
TF_VLog(3, "RenameObject: finished %s to %s", src.c_str(), dst.c_str());
ClearFileCaches(gcs_file, dst);
DeleteFile(filesystem, src.c_str(), status);
}
void RenameFile(const TF_Filesystem* filesystem, const char* src,
const char* dst, TF_Status* status) {
if (!IsDirectory(filesystem, src, status)) {
if (TF_GetCode(status) == TF_FAILED_PRECONDITION) {
TF_SetStatus(status, TF_OK, "");
RenameObject(filesystem, src, dst, status);
}
return;
}
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
std::vector<std::string> childrens =
GetChildrenBounded(gcs_file, src, UINT64_MAX, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
std::string src_dir = src;
std::string dst_dir = dst;
MaybeAppendSlash(&src_dir);
MaybeAppendSlash(&dst_dir);
for (const std::string& children : childrens) {
RenameObject(filesystem, src_dir + children, dst_dir + children, status);
if (TF_GetCode(status) != TF_OK) return;
}
TF_SetStatus(status, TF_OK, "");
}
void DeleteRecursively(const TF_Filesystem* filesystem, const char* path,
uint64_t* undeleted_files, uint64_t* undeleted_dirs,
TF_Status* status) {
if (!undeleted_files || !undeleted_dirs)
return TF_SetStatus(
status, TF_INTERNAL,
"'undeleted_files' and 'undeleted_dirs' cannot be nullptr.");
*undeleted_files = 0;
*undeleted_dirs = 0;
if (!IsDirectory(filesystem, path, status)) {
*undeleted_dirs = 1;
return;
}
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
std::vector<std::string> childrens =
GetChildrenBounded(gcs_file, path, UINT64_MAX, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
std::string dir = path;
MaybeAppendSlash(&dir);
for (const std::string& children : childrens) {
const std::string& full_path = dir + children;
DeleteFile(filesystem, full_path.c_str(), status);
if (TF_GetCode(status) != TF_OK) {
if (IsDirectory(filesystem, full_path.c_str(), status))
(*undeleted_dirs)++;
else
(*undeleted_files)++;
}
}
}
int GetChildren(const TF_Filesystem* filesystem, const char* path,
char*** entries, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
std::vector<std::string> childrens =
GetChildrenBounded(gcs_file, path, UINT64_MAX, false, false, status);
if (TF_GetCode(status) != TF_OK) return -1;
int num_entries = childrens.size();
*entries = static_cast<char**>(
plugin_memory_allocate(num_entries * sizeof((*entries)[0])));
for (int i = 0; i < num_entries; i++)
(*entries)[i] = strdup(childrens[i].c_str());
TF_SetStatus(status, TF_OK, "");
return num_entries;
}
void Stat(const TF_Filesystem* filesystem, const char* path,
TF_FileStatistics* stats, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
auto bucket_metadata =
gcs_file->gcs_client.GetBucketMetadata(bucket, gcs::Fields(""));
TF_SetStatusFromGCSStatus(bucket_metadata.status(), status);
if (TF_GetCode(status) == TF_OK) {
stats->is_directory = true;
stats->length = 0;
stats->mtime_nsec = 0;
}
return;
}
if (IsDirectory(filesystem, path, status)) {
stats->is_directory = true;
stats->length = 0;
stats->mtime_nsec = 0;
return TF_SetStatus(status, TF_OK, "");
}
if (TF_GetCode(status) == TF_FAILED_PRECONDITION) {
auto metadata = gcs_file->gcs_client.GetObjectMetadata(
bucket, object, gcs::Fields("size,timeStorageClassUpdated"));
if (metadata) {
stats->is_directory = false;
stats->length = metadata.value().size();
stats->mtime_nsec = metadata.value()
.time_storage_class_updated()
.time_since_epoch()
.count();
}
TF_SetStatusFromGCSStatus(metadata.status(), status);
}
}
int64_t GetFileSize(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return -1;
TF_FileStatistics stat;
Stat(filesystem, path, &stat, status);
return stat.length;
}
static char* TranslateName(const TF_Filesystem* filesystem, const char* uri) {
return strdup(uri);
}
static void FlushCaches(const TF_Filesystem* filesystem) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
gcs_file->file_block_cache->Flush();
gcs_file->stat_cache->Clear();
}
}
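// Fills the plugin's operation tables for one URI scheme. Every table is
// allocated with plugin_memory_allocate (not static storage) because core
// TensorFlow takes ownership and releases it through the plugin_memory_free
// callback registered in TF_InitPlugin below.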
static void ProvideFilesystemSupportFor(TF_FilesystemPluginOps* ops,
const char* uri) {
TF_SetFilesystemVersionMetadata(ops);
ops->scheme = strdup(uri);
ops->random_access_file_ops = static_cast<TF_RandomAccessFileOps*>(
plugin_memory_allocate(TF_RANDOM_ACCESS_FILE_OPS_SIZE));
ops->random_access_file_ops->cleanup = tf_random_access_file::Cleanup;
ops->random_access_file_ops->read = tf_random_access_file::Read;
ops->writable_file_ops = static_cast<TF_WritableFileOps*>(
plugin_memory_allocate(TF_WRITABLE_FILE_OPS_SIZE));
ops->writable_file_ops->cleanup = tf_writable_file::Cleanup;
ops->read_only_memory_region_ops = static_cast<TF_ReadOnlyMemoryRegionOps*>(
plugin_memory_allocate(TF_READ_ONLY_MEMORY_REGION_OPS_SIZE));
ops->read_only_memory_region_ops->cleanup =
tf_read_only_memory_region::Cleanup;
ops->read_only_memory_region_ops->data = tf_read_only_memory_region::Data;
ops->read_only_memory_region_ops->length = tf_read_only_memory_region::Length;
ops->filesystem_ops = static_cast<TF_FilesystemOps*>(
plugin_memory_allocate(TF_FILESYSTEM_OPS_SIZE));
ops->filesystem_ops->init = tf_gcs_filesystem::Init;
ops->filesystem_ops->cleanup = tf_gcs_filesystem::Cleanup;
ops->filesystem_ops->new_random_access_file =
tf_gcs_filesystem::NewRandomAccessFile;
ops->filesystem_ops->new_writable_file = tf_gcs_filesystem::NewWritableFile;
ops->filesystem_ops->new_appendable_file =
tf_gcs_filesystem::NewAppendableFile;
ops->filesystem_ops->new_read_only_memory_region_from_file =
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile;
ops->filesystem_ops->create_dir = tf_gcs_filesystem::CreateDir;
ops->filesystem_ops->delete_file = tf_gcs_filesystem::DeleteFile;
ops->filesystem_ops->delete_dir = tf_gcs_filesystem::DeleteDir;
ops->filesystem_ops->delete_recursively =
tf_gcs_filesystem::DeleteRecursively;
ops->filesystem_ops->copy_file = tf_gcs_filesystem::CopyFile;
ops->filesystem_ops->path_exists = tf_gcs_filesystem::PathExists;
ops->filesystem_ops->is_directory = tf_gcs_filesystem::IsDirectory;
ops->filesystem_ops->stat = tf_gcs_filesystem::Stat;
ops->filesystem_ops->get_children = tf_gcs_filesystem::GetChildren;
ops->filesystem_ops->translate_name = tf_gcs_filesystem::TranslateName;
ops->filesystem_ops->flush_caches = tf_gcs_filesystem::FlushCaches;
}
void TF_InitPlugin(TF_FilesystemPluginInfo* info) {
info->plugin_memory_allocate = plugin_memory_allocate;
info->plugin_memory_free = plugin_memory_free;
info->num_schemes = 1;
info->ops = static_cast<TF_FilesystemPluginOps*>(
plugin_memory_allocate(info->num_schemes * sizeof(info->ops[0])));
ProvideFilesystemSupportFor(&info->ops[0], "gs");
} | #include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.h"
#include <random>
#include "absl/strings/string_view.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/test.h"
#define ASSERT_TF_OK(x) ASSERT_EQ(TF_OK, TF_GetCode(x)) << TF_Message(x)
#define EXPECT_TF_OK(x) EXPECT_EQ(TF_OK, TF_GetCode(x)) << TF_Message(x)
static const char* content = "abcdefghijklmnopqrstuvwxyz1234567890";
static const absl::string_view content_view = content;
namespace gcs = google::cloud::storage;
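// The tests target a real bucket chosen through GCS_TEST_TMPDIR, e.g.
// (hypothetical bucket name):
//   export GCS_TEST_TMPDIR=gs://my-test-bucket/some/prefix
// A random per-process suffix is appended so concurrent runs don't collide;
// if the variable is unset or unparsable, GetTmpDir() returns nullptr.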
static std::string InitializeTmpDir() {
const char* test_dir = getenv("GCS_TEST_TMPDIR");
if (test_dir != nullptr) {
std::string bucket, object;
TF_Status* status = TF_NewStatus();
ParseGCSPath(test_dir, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) {
TF_DeleteStatus(status);
return "";
}
TF_DeleteStatus(status);
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distribution;
std::string rng_val = std::to_string(distribution(gen));
return tensorflow::io::JoinPath(std::string(test_dir), rng_val);
} else {
return "";
}
}
static std::string* GetTmpDir() {
static std::string tmp_dir = InitializeTmpDir();
if (tmp_dir == "")
return nullptr;
else
return &tmp_dir;
}
namespace tensorflow {
namespace {
class GCSFilesystemTest : public ::testing::Test {
public:
void SetUp() override {
root_dir_ = io::JoinPath(
*GetTmpDir(),
::testing::UnitTest::GetInstance()->current_test_info()->name());
status_ = TF_NewStatus();
filesystem_ = new TF_Filesystem;
filesystem_->plugin_filesystem = nullptr;
}
void TearDown() override {
TF_DeleteStatus(status_);
if (filesystem_->plugin_filesystem != nullptr)
tf_gcs_filesystem::Cleanup(filesystem_);
delete filesystem_;
}
std::string GetURIForPath(absl::string_view path) {
const std::string translated_name =
tensorflow::io::JoinPath(root_dir_, path);
return translated_name;
}
std::unique_ptr<TF_WritableFile, void (*)(TF_WritableFile* file)>
GetWriter() {
std::unique_ptr<TF_WritableFile, void (*)(TF_WritableFile * file)> writer(
new TF_WritableFile, [](TF_WritableFile* file) {
if (file != nullptr) {
if (file->plugin_file != nullptr) tf_writable_file::Cleanup(file);
delete file;
}
});
writer->plugin_file = nullptr;
return writer;
}
std::unique_ptr<TF_RandomAccessFile, void (*)(TF_RandomAccessFile* file)>
GetReader() {
std::unique_ptr<TF_RandomAccessFile, void (*)(TF_RandomAccessFile * file)>
reader(new TF_RandomAccessFile, [](TF_RandomAccessFile* file) {
if (file != nullptr) {
if (file->plugin_file != nullptr)
tf_random_access_file::Cleanup(file);
delete file;
}
});
reader->plugin_file = nullptr;
return reader;
}
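  // Helper that round-trips through the plugin's writable-file path: creates
  // the file, appends `content`, and closes it, leaving any error in status_.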
void WriteString(const std::string& path, const std::string& content) {
auto writer = GetWriter();
tf_gcs_filesystem::NewWritableFile(filesystem_, path.c_str(), writer.get(),
status_);
if (TF_GetCode(status_) != TF_OK) return;
tf_writable_file::Append(writer.get(), content.c_str(), content.length(),
status_);
if (TF_GetCode(status_) != TF_OK) return;
tf_writable_file::Close(writer.get(), status_);
if (TF_GetCode(status_) != TF_OK) return;
}
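  // Helper that reads the whole object back through the plugin's
  // random-access-file path, flagging TF_DATA_LOSS on a short read.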
std::string ReadAll(const std::string& path) {
auto reader = GetReader();
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(),
reader.get(), status_);
if (TF_GetCode(status_) != TF_OK) return "";
auto file_size =
tf_gcs_filesystem::GetFileSize(filesystem_, path.c_str(), status_);
if (TF_GetCode(status_) != TF_OK) return "";
std::string content;
content.resize(file_size);
auto read = tf_random_access_file::Read(reader.get(), 0, file_size,
&content[0], status_);
if (TF_GetCode(status_) != TF_OK) return "";
if (read >= 0) content.resize(read);
if (file_size != content.size())
TF_SetStatus(
status_, TF_DATA_LOSS,
std::string("expected " + std::to_string(file_size) + " got " +
std::to_string(content.size()) + " bytes")
.c_str());
return content;
}
protected:
TF_Filesystem* filesystem_;
TF_Status* status_;
private:
std::string root_dir_;
};
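// The helpers below go through the GCS client library directly, bypassing
// the plugin under test, so reads and writes can be verified against the
// server independently of the code being exercised.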
::testing::AssertionResult WriteToServer(const std::string& path, size_t offset,
size_t length, gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto writer = gcs_client->WriteObject(bucket, object);
writer.write(content + offset, length);
writer.Close();
if (writer.metadata()) {
return ::testing::AssertionSuccess();
} else {
return ::testing::AssertionFailure()
<< writer.metadata().status().message();
}
}
::testing::AssertionResult InsertObject(const std::string& path,
const std::string& content,
gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto metadata = gcs_client->InsertObject(bucket, object, content);
if (metadata)
return ::testing::AssertionSuccess();
else
return ::testing::AssertionFailure() << metadata.status().message();
}
::testing::AssertionResult CompareSubString(int64_t offset, size_t length,
absl::string_view result,
size_t read) {
if (length == read && content_view.substr(offset, length) ==
absl::string_view(result).substr(0, read))
return ::testing::AssertionSuccess();
else
return ::testing::AssertionFailure()
<< "Result: " << absl::string_view(result).substr(0, read)
<< " Read: " << read;
}
::testing::AssertionResult CompareWithServer(const std::string& path,
size_t offset, size_t length,
gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto reader = gcs_client->ReadObject(bucket, object);
if (!reader) {
return ::testing::AssertionFailure() << reader.status().message();
} else {
std::string content{std::istreambuf_iterator<char>{reader}, {}};
return CompareSubString(offset, length, content, content.length());
}
}
TEST_F(GCSFilesystemTest, ParseGCSPath) {
std::string bucket, object;
ParseGCSPath("gs:
ASSERT_TF_OK(status_);
ASSERT_EQ(bucket, "bucket");
ASSERT_EQ(object, "path/to/object");
ParseGCSPath("gs:
ASSERT_TF_OK(status_);
ASSERT_EQ(bucket, "bucket");
ParseGCSPath("bucket/path/to/object", false, &bucket, &object, status_);
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
ParseGCSPath("gs:
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
ParseGCSPath("gs:
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
}
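// A read before the object exists must fail with TF_NOT_FOUND; once the
// object is written server-side, ranged reads succeed and a read straddling
// EOF returns TF_OUT_OF_RANGE along with the bytes that were available.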
TEST_F(GCSFilesystemTest, RandomAccessFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string filepath = GetURIForPath("a_file");
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
char* result = new char[content_view.length()];
int64_t read = tf_random_access_file::Read(file, 0, 1, result, status_);
ASSERT_EQ(read, -1) << "Read: " << read;
ASSERT_EQ(TF_GetCode(status_), TF_NOT_FOUND) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(WriteToServer(filepath, 0, content_view.length(),
&gcs_file->gcs_client, status_));
read = tf_random_access_file::Read(file, 0, content_view.length(), result,
status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(CompareSubString(0, content_view.length(), result, read));
read = tf_random_access_file::Read(file, 0, 4, result, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(CompareSubString(0, 4, result, read));
read = tf_random_access_file::Read(file, content_view.length() - 2, 4, result,
status_);
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
ASSERT_TRUE(CompareSubString(content_view.length() - 2, 2, result, read));
delete[] result;
tf_random_access_file::Cleanup(file);
delete file;
}
TEST_F(GCSFilesystemTest, WritableFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string filepath = GetURIForPath("a_file");
TF_WritableFile* file = new TF_WritableFile;
tf_gcs_filesystem::NewWritableFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
tf_writable_file::Append(file, content, 4, status_);
ASSERT_TF_OK(status_);
auto length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 4);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 4, &gcs_file->gcs_client, status_));
tf_writable_file::Append(file, content + 4, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 8);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 8, &gcs_file->gcs_client, status_));
tf_writable_file::Close(file, status_);
ASSERT_TF_OK(status_);
tf_writable_file::Cleanup(file);
gcs_file->compose = true;
filepath = GetURIForPath("b_file");
tf_gcs_filesystem::NewWritableFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
tf_writable_file::Append(file, content, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 4);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 4, &gcs_file->gcs_client, status_));
tf_writable_file::Append(file, content + 4, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 8);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 8, &gcs_file->gcs_client, status_));
tf_writable_file::Close(file, status_);
ASSERT_TF_OK(status_);
tf_writable_file::Cleanup(file);
delete file;
}
TEST_F(GCSFilesystemTest, ReadOnlyMemoryRegion) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(WriteToServer(path, 0, 0, &gcs_file->gcs_client, status_));
TF_ReadOnlyMemoryRegion* region = new TF_ReadOnlyMemoryRegion;
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile(filesystem_, path.c_str(),
region, status_);
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
ASSERT_TRUE(WriteToServer(path, 0, content_view.length(),
&gcs_file->gcs_client, status_));
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile(filesystem_, path.c_str(),
region, status_);
ASSERT_TF_OK(status_);
auto length = tf_read_only_memory_region::Length(region);
ASSERT_EQ(length, content_view.length());
auto data =
static_cast<const char*>(tf_read_only_memory_region::Data(region));
ASSERT_TRUE(CompareSubString(0, content_view.length(), data, length));
tf_read_only_memory_region::Cleanup(region);
delete region;
}
TEST_F(GCSFilesystemTest, PathExists) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("PathExists");
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_EQ(TF_NOT_FOUND, TF_GetCode(status_)) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
WriteString(path, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_TF_OK(status_);
}
TEST_F(GCSFilesystemTest, GetChildren) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string base = GetURIForPath("GetChildren");
tf_gcs_filesystem::CreateDir(filesystem_, base.c_str(), status_);
EXPECT_TF_OK(status_);
const std::string file = io::JoinPath(base, "TestFile.csv");
WriteString(file, "test");
EXPECT_TF_OK(status_);
const std::string subdir = io::JoinPath(base, "SubDir");
tf_gcs_filesystem::CreateDir(filesystem_, subdir.c_str(), status_);
EXPECT_TF_OK(status_);
const std::string subfile = io::JoinPath(subdir, "TestSubFile.csv");
WriteString(subfile, "test");
EXPECT_TF_OK(status_);
char** entries;
auto num_entries = tf_gcs_filesystem::GetChildren(filesystem_, base.c_str(),
&entries, status_);
EXPECT_TF_OK(status_);
  std::vector<std::string> children;
  for (int i = 0; i < num_entries; ++i) {
    children.push_back(entries[i]);
  }
  std::sort(children.begin(), children.end());
  EXPECT_EQ(std::vector<string>({"SubDir/", "TestFile.csv"}), children);
}
TEST_F(GCSFilesystemTest, DeleteFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("DeleteFile");
WriteString(path, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::DeleteFile(filesystem_, path.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_EQ(TF_GetCode(status_), TF_NOT_FOUND);
}
TEST_F(GCSFilesystemTest, CreateDir) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string dir = GetURIForPath("CreateDir");
tf_gcs_filesystem::CreateDir(filesystem_, dir.c_str(), status_);
EXPECT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, dir.c_str(), &stat, status_);
EXPECT_TF_OK(status_);
EXPECT_TRUE(stat.is_directory);
}
TEST_F(GCSFilesystemTest, DeleteDir) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string dir = GetURIForPath("DeleteDir");
const std::string file = io::JoinPath(dir, "DeleteDirFile.csv");
WriteString(file, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::DeleteDir(filesystem_, dir.c_str(), status_);
EXPECT_EQ(TF_GetCode(status_), TF_FAILED_PRECONDITION);
TF_SetStatus(status_, TF_OK, "");
tf_gcs_filesystem::DeleteFile(filesystem_, file.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::DeleteDir(filesystem_, dir.c_str(), status_);
EXPECT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, dir.c_str(), &stat, status_);
EXPECT_EQ(TF_GetCode(status_), TF_NOT_FOUND) << TF_Message(status_);
}
TEST_F(GCSFilesystemTest, StatFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("StatFile");
WriteString(path, "test");
ASSERT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, path.c_str(), &stat, status_);
EXPECT_TF_OK(status_);
EXPECT_EQ(4, stat.length);
EXPECT_FALSE(stat.is_directory);
}
TEST_F(GCSFilesystemTest, RenameFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string src = GetURIForPath("RenameFileSrc");
const std::string dst = GetURIForPath("RenameFileDst");
WriteString(src, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::RenameFile(filesystem_, src.c_str(), dst.c_str(), status_);
EXPECT_TF_OK(status_);
auto result = ReadAll(dst);
EXPECT_TF_OK(status_);
EXPECT_EQ("test", result);
}
TEST_F(GCSFilesystemTest, RenameFileOverwrite) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string src = GetURIForPath("RenameFileOverwriteSrc");
const std::string dst = GetURIForPath("RenameFileOverwriteDst");
WriteString(src, "test_old");
ASSERT_TF_OK(status_);
WriteString(dst, "test_new");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, dst.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::RenameFile(filesystem_, src.c_str(), dst.c_str(), status_);
EXPECT_TF_OK(status_);
auto result = ReadAll(dst);
EXPECT_TF_OK(status_);
EXPECT_EQ("test_old", result);
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_NoBlockCache) {
tf_gcs_filesystem::InitTest(filesystem_, false, 0, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(6);
int64_t read = tf_random_access_file::Read(file, 0, 6, &result[0], status_);
ASSERT_EQ(read, 6) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "012345") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 6, 6, &result[0], status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "6789") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(6);
int64_t read = tf_random_access_file::Read(file, 0, 6, &result[0], status_);
ASSERT_EQ(read, 6) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "012345") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 6, 6, &result[0], status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "6789") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered_ReadAtEOF) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(10);
int64_t read = tf_random_access_file::Read(file, 0, result.length(),
&result[0], status_);
ASSERT_EQ(read, 10) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "0123456789") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, result.length(), result.length(),
&result[0], status_);
ASSERT_EQ(read, 0) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered_CachedOutOfRange) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "012345678", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(5);
int64_t read = tf_random_access_file::Read(file, 0, result.length(),
&result[0], status_);
ASSERT_EQ(read, 5) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "01234") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 4, result.length(), &result[0],
status_);
ASSERT_EQ(read, 5) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
result.resize(read);
ASSERT_EQ(result, "45678") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 5, result.length(), &result[0],
status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "5678") << "Result: " << result << "\n";
}
}
}
GTEST_API_ int main(int argc, char** argv) {
tensorflow::testing::InstallStacktraceHandler();
if (!GetTmpDir()) {
std::cerr << "Could not read GCS_TEST_TMPDIR env";
return -1;
}
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c98917b6-8532-4f36-95e6-51113e57d6e4 | cpp | tensorflow/tensorflow | grappler | tensorflow/c/experimental/grappler/grappler.cc | tensorflow/c/experimental/grappler/grappler_test.cc | #include "tensorflow/c/experimental/grappler/grappler.h"
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/experimental/grappler/grappler_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
namespace {
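// Plugins fill in versioned structs; a zero struct_size means the plugin
// never populated the struct, so registration is rejected up front.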
#define VALIDATE_STRUCT_SIZE(STRUCT_NAME, STRUCT_OBJ, SIZE_VALUE_NAME) \
do { \
if (STRUCT_OBJ.struct_size == 0) { \
return absl::Status(absl::StatusCode::kFailedPrecondition, \
"struct_size field in " #STRUCT_NAME \
" must be set to " #SIZE_VALUE_NAME "."); \
} \
} while (0)
#define VALIDATE_MEMBER(STRUCT_NAME, STRUCT_OBJ, NAME) \
do { \
if (STRUCT_OBJ.NAME == 0) { \
return absl::Status(absl::StatusCode::kFailedPrecondition, \
"'" #NAME "' field in " #STRUCT_NAME \
" must be set."); \
} \
} while (0)
absl::Status ValidateTPOptimizerRegistrationParams(
const TP_OptimizerRegistrationParams& params) {
VALIDATE_STRUCT_SIZE(TP_OptimizerRegistrationParams, params,
TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE);
VALIDATE_MEMBER(TP_OptimizerRegistrationParams, params, device_type);
return absl::OkStatus();
}
absl::Status ValidateTPOptimizer(const TP_Optimizer& optimizer) {
VALIDATE_STRUCT_SIZE(TP_Optimizer, optimizer, TP_OPTIMIZER_STRUCT_SIZE);
VALIDATE_MEMBER(TP_Optimizer, optimizer, optimize_func);
return absl::OkStatus();
}
absl::Status ValidateTPOptimizerConfigs(const TP_OptimizerConfigs& configs) {
VALIDATE_STRUCT_SIZE(TP_OptimizerConfigs, configs,
TP_OPTIMIZER_CONFIGS_STRUCT_SIZE);
return absl::OkStatus();
}
#undef VALIDATE_MEMBER
#undef VALIDATE_STRUCT_SIZE
}
namespace tensorflow {
namespace grappler {
Status CGraphOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph_def) {
OwnedTFStatus c_status(TF_NewStatus());
OwnedTFBuffer graph_buf(TF_NewBuffer());
OwnedTFBuffer optimized_graph_buf(TF_NewBuffer());
TF_RETURN_IF_ERROR(MessageToBuffer(item.graph, graph_buf.get()));
optimizer_.optimize_func(c_optimizer_, graph_buf.get(),
reinterpret_cast<const TF_GrapplerItem*>(&item),
optimized_graph_buf.get(), c_status.get());
TF_RETURN_IF_ERROR(tsl::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(
BufferToMessage(optimized_graph_buf.get(), optimized_graph_def));
return absl::OkStatus();
}
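// Maps a plugin-supplied TF_TriState flag onto the corresponding
// RewriterConfig toggle; anything other than an explicit "off" leaves the
// optimizer enabled.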
#define CONFIG_TOGGLE(optimizer) \
if (tp_configs.optimizer == TF_TriState_Off) \
configs.toggle_config[#optimizer] = RewriterConfig::OFF; \
else \
configs.toggle_config[#optimizer] = RewriterConfig::ON;
void CGraphOptimizerRegister(
const PluginGraphOptimizerRegistry::Creator& creator,
const TP_OptimizerConfigs tp_configs, const char* device_type) {
ConfigList configs;
if (tp_configs.disable_model_pruning == TF_TriState_On)
configs.disable_model_pruning = true;
else
configs.disable_model_pruning = false;
CONFIG_TOGGLE(implementation_selector);
CONFIG_TOGGLE(function_optimization);
CONFIG_TOGGLE(common_subgraph_elimination);
CONFIG_TOGGLE(arithmetic_optimization);
CONFIG_TOGGLE(debug_stripper);
CONFIG_TOGGLE(constant_folding);
CONFIG_TOGGLE(shape_optimization);
CONFIG_TOGGLE(auto_mixed_precision);
CONFIG_TOGGLE(auto_mixed_precision_onednn_bfloat16);
CONFIG_TOGGLE(auto_mixed_precision_mkl);
CONFIG_TOGGLE(pin_to_host_optimization);
CONFIG_TOGGLE(layout_optimizer);
CONFIG_TOGGLE(remapping);
CONFIG_TOGGLE(loop_optimization);
CONFIG_TOGGLE(dependency_optimization);
CONFIG_TOGGLE(auto_parallel);
CONFIG_TOGGLE(memory_optimization);
CONFIG_TOGGLE(scoped_allocator_optimization);
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
creator, device_type, configs);
}
#undef CONFIG_TOGGLE
absl::Status InitGraphPlugin(void* dso_handle) {
tsl::Env* env = tsl::Env::Default();
void* dso_symbol;
TF_RETURN_IF_ERROR(
env->GetSymbolFromLibrary(dso_handle, "TF_InitGraph", &dso_symbol));
auto init_fn = reinterpret_cast<TFInitGraphPluginFn>(dso_symbol);
return InitGraphPlugin(init_fn);
}
absl::Status InitGraphPlugin(TFInitGraphPluginFn init_fn) {
TP_OptimizerRegistrationParams params{
TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE};
TP_Optimizer optimizer{TP_OPTIMIZER_STRUCT_SIZE};
TP_OptimizerConfigs optimizer_configs{TP_OPTIMIZER_CONFIGS_STRUCT_SIZE};
params.major_version = GO_MAJOR;
params.minor_version = GO_MINOR;
params.patch_version = GO_PATCH;
params.optimizer = &optimizer;
params.optimizer_configs = &optimizer_configs;
OwnedTFStatus c_status(TF_NewStatus());
init_fn(¶ms, c_status.get());
TF_RETURN_IF_ERROR(tsl::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(ValidateTPOptimizerRegistrationParams(params));
TF_RETURN_IF_ERROR(ValidateTPOptimizer(optimizer));
TF_RETURN_IF_ERROR(ValidateTPOptimizerConfigs(optimizer_configs));
CGraphOptimizerRegister(
[=]() { return new CGraphOptimizer(optimizer, params.device_type); },
optimizer_configs, params.device_type);
return absl::OkStatus();
}
}
}
void TF_GetNodesToPreserveListSize(const TF_GrapplerItem* item, int* num_values,
size_t* storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::unordered_set<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)
->NodesToPreserve();
*num_values = nodes.size();
*storage_size = 0;
for (const std::string& str : nodes) {
*storage_size += str.size();
}
}
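// Second half of the two-call C API pattern: the caller sizes its buffers
// with TF_GetNodesToPreserveListSize, then this call fills them in.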
void TF_GetNodesToPreserveList(const TF_GrapplerItem* item, char** values,
size_t* lengths, int num_values, void* storage,
size_t storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::unordered_set<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)
->NodesToPreserve();
char* p = static_cast<char*>(storage);
int index = 0;
for (const std::string& s : nodes) {
if (index >= num_values) break;
values[index] = p;
lengths[index] = s.size();
if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) {
tsl::Set_TF_Status_from_Status(
status,
absl::InvalidArgumentError(
"Not enough storage to hold the requested list of nodes"));
return;
}
memcpy(values[index], s.data(), s.size());
p += s.size();
index++;
}
}
void TF_GetFetchNodesListSize(const TF_GrapplerItem* item, int* num_values,
size_t* storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)->fetch;
*num_values = nodes.size();
*storage_size = 0;
for (const std::string& str : nodes) {
*storage_size += str.size();
}
}
void TF_GetFetchNodesList(const TF_GrapplerItem* item, char** values,
size_t* lengths, int num_values, void* storage,
size_t storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)->fetch;
const int len = std::min(num_values, static_cast<int>(nodes.size()));
char* p = static_cast<char*>(storage);
for (int index = 0; index < len; ++index) {
const std::string& s = nodes[index];
values[index] = p;
lengths[index] = s.size();
if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) {
tsl::Set_TF_Status_from_Status(
status,
absl::InvalidArgumentError(
"Not enough storage to hold the requested list of nodes"));
return;
}
memcpy(values[index], s.data(), s.size());
p += s.size();
}
}
TF_GraphProperties* TF_NewGraphProperties(const TF_GrapplerItem* item) {
return reinterpret_cast<TF_GraphProperties*>(
new tensorflow::grappler::GraphProperties(
*reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)));
}
void TF_DeleteGraphProperties(TF_GraphProperties* graph_properties) {
if (graph_properties == nullptr) return;
delete reinterpret_cast<tensorflow::grappler::GraphProperties*>(
graph_properties);
}
void TF_InferStatically(TF_GraphProperties* graph_properties,
TF_Bool assume_valid_feeds,
TF_Bool aggressive_shape_inference,
TF_Bool include_input_tensor_values,
TF_Bool include_output_tensor_values,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
absl::Status s =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->InferStatically(assume_valid_feeds, aggressive_shape_inference,
include_input_tensor_values,
include_output_tensor_values);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
}
}
void TF_GetInputPropertiesListSize(TF_GraphProperties* graph_properties,
const char* name, int* num_values,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
*num_values =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetInputProperties(name)
.size();
}
void TF_GetOutputPropertiesListSize(TF_GraphProperties* graph_properties,
const char* name, int* num_values,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
*num_values =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetOutputProperties(name)
.size();
}
void TF_GetInputPropertiesList(TF_GraphProperties* graph_properties,
const char* name, TF_Buffer** properties,
int num_values, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<tensorflow::OpInfo::TensorProperties>& tensor_properties =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetInputProperties(name);
const int len =
std::min(num_values, static_cast<int>(tensor_properties.size()));
for (int i = 0; i < len; ++i) {
absl::Status s =
tensorflow::MessageToBuffer(tensor_properties[i], properties[i]);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
}
}
void TF_GetOutputPropertiesList(TF_GraphProperties* graph_properties,
const char* name, TF_Buffer** properties,
int num_values, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<tensorflow::OpInfo::TensorProperties>& tensor_properties =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetOutputProperties(name);
const int len =
std::min(num_values, static_cast<int>(tensor_properties.size()));
for (int i = 0; i < len; ++i) {
absl::Status s =
tensorflow::MessageToBuffer(tensor_properties[i], properties[i]);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
}
}
TF_FunctionLibraryDefinition* TF_NewFunctionLibraryDefinition(
const TF_Buffer* graph_buf, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
tensorflow::GraphDef graph_def;
absl::Status s = tensorflow::BufferToMessage(graph_buf, &graph_def);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return nullptr;
}
return reinterpret_cast<TF_FunctionLibraryDefinition*>(
new tensorflow::FunctionLibraryDefinition(
tensorflow::OpRegistry::Global(), graph_def.library()));
}
void TF_DeleteFunctionLibraryDefinition(TF_FunctionLibraryDefinition* fn_lib) {
if (fn_lib == nullptr) return;
delete reinterpret_cast<tensorflow::FunctionLibraryDefinition*>(fn_lib);
}
void TF_LookUpOpDef(TF_FunctionLibraryDefinition* fn_lib, const char* name,
TF_Buffer* buf, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const tensorflow::OpDef* op_def_ptr = nullptr;
absl::Status s =
reinterpret_cast<tensorflow::FunctionLibraryDefinition*>(fn_lib)
->LookUpOpDef(name, &op_def_ptr);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
s = tensorflow::MessageToBuffer(*op_def_ptr, buf);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
} | #include "tensorflow/c/experimental/grappler/grappler.h"
#include "absl/log/check.h"
#include "tensorflow/c/experimental/grappler/grappler_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
void optimize_func(void* optimizer, const TF_Buffer* graph_buf,
const TF_GrapplerItem* item, TF_Buffer* optimized_graph_buf,
TF_Status* tf_status) {}
void PopulateDefaultParam(TP_OptimizerRegistrationParams* params) {
params->struct_size = TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE;
params->optimizer_configs->struct_size = TP_OPTIMIZER_CONFIGS_STRUCT_SIZE;
params->optimizer->struct_size = TP_OPTIMIZER_STRUCT_SIZE;
params->optimizer->create_func = nullptr;
params->optimizer->optimize_func = optimize_func;
params->optimizer->destroy_func = nullptr;
}
TEST(Grappler, SuccessfulRegistration) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Success";
params->optimizer_configs->remapping = TF_TriState_Off;
};
TF_ASSERT_OK(InitGraphPlugin(plugin_init));
ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers(
std::set<string>{"Success"})
.size(),
1);
ConfigList config = PluginGraphOptimizerRegistry::GetPluginConfigs(
true, std::set<string>{"Success"});
ASSERT_EQ(config.toggle_config["remapping"], RewriterConfig::OFF);
}
TEST(Grappler, MultiplePluginRegistration) {
auto plugin_init_0 = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Device0";
};
auto plugin_init_1 = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Device1";
};
TF_ASSERT_OK(InitGraphPlugin(plugin_init_0));
TF_ASSERT_OK(InitGraphPlugin(plugin_init_1));
ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers(
std::set<string>{"Device0", "Device1"})
.size(),
2);
}
TEST(Grappler, DeviceTypeNotSet) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = nullptr;
};
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(
status.message(),
"'device_type' field in TP_OptimizerRegistrationParams must be set.");
}
TEST(Grappler, OptimizeFuncNotSet) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "FuncNotSet";
params->optimizer->optimize_func = nullptr;
};
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.message(),
"'optimize_func' field in TP_Optimizer must be set.");
}
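// Exercises the size-then-fill contract of the C API: query the element
// count and total storage first, then populate caller-allocated buffers.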
TEST(TF_GrapplerItem, NodesToPreserve) {
GrapplerItem item;
item.fetch = std::vector<string>{"Conv", "BiasAdd"};
std::unordered_set<string> nodes_preserved = item.NodesToPreserve();
TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item);
int list_total_size = 0;
for (const string& s : nodes_preserved) {
list_total_size += s.size();
}
size_t storage_size = 0;
int num_values = 0;
TF_Status* status = TF_NewStatus();
TF_GetNodesToPreserveListSize(c_item, &num_values, &storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(nodes_preserved.size(), num_values);
EXPECT_EQ(list_total_size, storage_size);
std::unique_ptr<char*[]> values(new char*[nodes_preserved.size()]);
std::unique_ptr<size_t[]> lens(new size_t[nodes_preserved.size()]);
std::unique_ptr<char[]> storage(new char[storage_size]);
TF_GetNodesToPreserveList(c_item, values.get(), lens.get(),
nodes_preserved.size(), storage.get(), storage_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < nodes_preserved.size(); ++i) {
EXPECT_EQ(nodes_preserved.find(string(static_cast<const char*>(values[i]),
lens[i])) != nodes_preserved.end(),
true);
}
TF_DeleteStatus(status);
}
TEST(TF_GrapplerItem, FetchNodes) {
GrapplerItem item;
item.fetch = std::vector<string>{"Conv", "BiasAdd"};
TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item);
int list_total_size = 0;
for (const string& s : item.fetch) {
list_total_size += s.size();
}
size_t storage_size = 0;
int num_values = 0;
TF_Status* status = TF_NewStatus();
TF_GetFetchNodesListSize(c_item, &num_values, &storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(item.fetch.size(), num_values);
EXPECT_EQ(list_total_size, storage_size);
std::unique_ptr<char*[]> values(new char*[item.fetch.size()]);
std::unique_ptr<size_t[]> lens(new size_t[item.fetch.size()]);
std::unique_ptr<char[]> storage(new char[storage_size]);
TF_GetFetchNodesList(c_item, values.get(), lens.get(), item.fetch.size(),
storage.get(), storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < item.fetch.size(); ++i) {
EXPECT_EQ(item.fetch[i].size(), lens[i]) << i;
EXPECT_EQ(item.fetch[i],
string(static_cast<const char*>(values[i]), lens[i]))
<< i;
}
TF_DeleteStatus(status);
}
TEST(TF_GraphProperties, InputProperties) {
std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster->Provision());
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_Status* status = TF_NewStatus();
TF_GraphProperties* graph_properties =
TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item));
TF_InferStatically(graph_properties, true, false, false, false, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (const NodeDef& node : item.graph.node()) {
if (node.op() == "AddN") {
int num_values = 0;
TF_GetInputPropertiesListSize(graph_properties, node.name().c_str(),
&num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(num_values, 1);
std::vector<TF_Buffer*> in_props_buf(num_values, TF_NewBuffer());
TF_GetInputPropertiesList(graph_properties, node.name().c_str(),
in_props_buf.data(), num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::OpInfo::TensorProperties in_props;
Status s = tensorflow::BufferToMessage(in_props_buf[0], &in_props);
TF_ASSERT_OK(s);
EXPECT_EQ(DT_FLOAT, in_props.dtype());
EXPECT_FALSE(in_props.shape().unknown_rank());
EXPECT_EQ(2, in_props.shape().dim_size());
EXPECT_EQ(10, in_props.shape().dim(0).size());
EXPECT_EQ(1, in_props.shape().dim(1).size());
for (int i = 0; i < in_props_buf.size(); i++)
TF_DeleteBuffer(in_props_buf[i]);
}
}
TF_DeleteGraphProperties(graph_properties);
TF_DeleteStatus(status);
TF_ASSERT_OK(cluster->Shutdown());
}
TEST(TF_GraphProperties, OutputProperties) {
std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster->Provision());
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_Status* status = TF_NewStatus();
TF_GraphProperties* graph_properties =
TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item));
TF_InferStatically(graph_properties, true, false, false, false, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (const NodeDef& node : item.graph.node()) {
if (node.op() == "AddN") {
int num_values = 0;
TF_GetOutputPropertiesListSize(graph_properties, node.name().c_str(),
&num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(num_values, 1);
std::vector<TF_Buffer*> out_props_buf(num_values, TF_NewBuffer());
TF_GetOutputPropertiesList(graph_properties, node.name().c_str(),
out_props_buf.data(), num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::OpInfo::TensorProperties out_props;
Status s = tensorflow::BufferToMessage(out_props_buf[0], &out_props);
TF_ASSERT_OK(s);
EXPECT_EQ(DT_FLOAT, out_props.dtype());
EXPECT_FALSE(out_props.shape().unknown_rank());
EXPECT_EQ(2, out_props.shape().dim_size());
EXPECT_EQ(10, out_props.shape().dim(0).size());
EXPECT_EQ(1, out_props.shape().dim(1).size());
for (int i = 0; i < out_props_buf.size(); i++)
TF_DeleteBuffer(out_props_buf[i]);
}
}
TF_DeleteStatus(status);
TF_DeleteGraphProperties(graph_properties);
TF_ASSERT_OK(cluster->Shutdown());
}
TEST(TF_FunctionLibraryDefinition, LookUpOpDef) {
TF_Buffer* g_buf = TF_NewBuffer();
TF_Buffer* op_buf = TF_NewBuffer();
TF_Status* status = TF_NewStatus();
GraphDef g_def;
Status s = MessageToBuffer(g_def, g_buf);
TF_ASSERT_OK(s);
TF_FunctionLibraryDefinition* func =
TF_NewFunctionLibraryDefinition(g_buf, status);
TF_LookUpOpDef(func, "Add", op_buf, status);
string actual_string(reinterpret_cast<const char*>(op_buf->data),
op_buf->length);
ASSERT_EQ(TF_OK, TF_GetCode(status));
const OpDef* expected_op_def;
TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef("Add", &expected_op_def));
string expected_serialized;
expected_op_def->SerializeToString(&expected_serialized);
EXPECT_EQ(expected_serialized, actual_string);
TF_DeleteBuffer(g_buf);
TF_DeleteBuffer(op_buf);
TF_DeleteStatus(status);
TF_DeleteFunctionLibraryDefinition(func);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/grappler/grappler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/grappler/grappler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ee979de-8d4e-44bc-bbbc-6a20fdeda327 | cpp | tensorflow/tensorflow | case_format | tensorflow/c/experimental/ops/gen/common/case_format.cc | tensorflow/c/experimental/ops/gen/common/case_format_test.cc | #include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "absl/strings/ascii.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
enum CaseFormatType {
LOWER_CAMEL,
UPPER_CAMEL,
LOWER_SNAKE,
UPPER_SNAKE,
};
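// Core conversion: input is treated as snake_case when it is uniformly
// upper- or lower-case, otherwise as camelCase; word boundaries are the
// delimiter (snake) or an upper-case letter (camel).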
string FormatStringCase(const string &str, CaseFormatType to,
const char delimiter = '_') {
const bool from_snake = (str == absl::AsciiStrToUpper(str)) ||
(str == absl::AsciiStrToLower(str));
const bool toUpper = (to == UPPER_CAMEL || to == UPPER_SNAKE);
const bool toSnake = (to == LOWER_SNAKE || to == UPPER_SNAKE);
string result;
bool inputStart = true;
bool wordStart = true;
for (const char c : str) {
if (c == delimiter) {
if (wordStart) {
result.push_back(delimiter);
}
wordStart = true;
continue;
}
if (!from_snake && isupper(c)) {
wordStart = true;
}
if (wordStart && toSnake && !inputStart) {
result.push_back(delimiter);
}
const bool shouldCapIfSnake = toUpper;
const bool shouldCapIfCamel = wordStart && (toUpper || !inputStart);
if ((toSnake && shouldCapIfSnake) || (!toSnake && shouldCapIfCamel)) {
result += toupper(c);
} else {
result += tolower(c);
}
wordStart = false;
inputStart = false;
}
if (wordStart) {
result.push_back(delimiter);
}
return result;
}
}
string toLowerCamel(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_CAMEL, delimiter);
}
string toLowerSnake(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_SNAKE, delimiter);
}
string toUpperCamel(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_CAMEL, delimiter);
}
string toUpperSnake(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_SNAKE, delimiter);
}
}
} | #include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
struct Variations {
string lower_camel;
string lower_snake;
string upper_camel;
string upper_snake;
};
void TestSingleVariation(const string &str, Variations expected,
char delimiter = '_') {
EXPECT_EQ(expected.lower_camel, toLowerCamel(str, delimiter));
EXPECT_EQ(expected.lower_snake, toLowerSnake(str, delimiter));
EXPECT_EQ(expected.upper_camel, toUpperCamel(str, delimiter));
EXPECT_EQ(expected.upper_snake, toUpperSnake(str, delimiter));
}
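// Converts each of the four representations into every other one, checking
// that the mapping between variants is consistent in both directions.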
void TestAllVariations(Variations variations, char delimiter = '_') {
TestSingleVariation(variations.lower_camel, variations, delimiter);
TestSingleVariation(variations.lower_snake, variations, delimiter);
TestSingleVariation(variations.upper_camel, variations, delimiter);
TestSingleVariation(variations.upper_snake, variations, delimiter);
}
TEST(CppOpGenCaseFormat, test_single_word) {
TestAllVariations(Variations{
"three",
"three",
"Three",
"THREE",
});
}
TEST(CppOpGenCaseFormat, test_complex_string) {
TestAllVariations(Variations{
"threeNTest33Words",
"three_n_test33_words",
"ThreeNTest33Words",
"THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_hyphen_delimiter) {
TestAllVariations(
Variations{
"threeNTest33Words",
"three-n-test33-words",
"ThreeNTest33Words",
"THREE-N-TEST33-WORDS",
},
'-');
}
TEST(CppOpGenCaseFormat, test_trailing_underscore) {
TestAllVariations(Variations{
"threeNTest33Words_",
"three_n_test33_words_",
"ThreeNTest33Words_",
"THREE_N_TEST33_WORDS_",
});
}
TEST(CppOpGenCaseFormat, test_double_trailing_underscores) {
TestAllVariations(Variations{
"xxY__",
"xx_y__",
"XxY__",
"XX_Y__",
});
}
TEST(CppOpGenCaseFormat, test_leading_underscore) {
TestAllVariations(Variations{
"_threeNTest33Words",
"_three_n_test33_words",
"_ThreeNTest33Words",
"_THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_double_leading_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words",
"__three_n_test33_words",
"__ThreeNTest33Words",
"__THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_leading_and_trailing_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words____",
"__three_n_test33_words____",
"__ThreeNTest33Words____",
"__THREE_N_TEST33_WORDS____",
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/common/case_format.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/common/case_format_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e91a7dc9-1c90-4876-9a47-8825aed4fb9a | cpp | tensorflow/tensorflow | cpp_generator | tensorflow/c/experimental/ops/gen/cpp/cpp_generator.cc | tensorflow/c/experimental/ops/gen/cpp/cpp_generator_test.cc | #include "tensorflow/c/experimental/ops/gen/cpp/cpp_generator.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_file_renderer.h"
#include "tensorflow/core/lib/io/path.h"
namespace tensorflow {
namespace generator {
CppGenerator::CppGenerator(cpp::CppConfig cpp_config, PathConfig path_config)
: controller_(path_config),
cpp_config_(cpp_config),
path_config_(path_config) {}
SourceCode CppGenerator::GenerateOneFile(
cpp::RendererContext::Mode mode) const {
SourceCode generated_code;
const std::vector<OpSpec> ops(controller_.GetModelOps());
std::vector<cpp::OpView> views(ops.begin(), ops.end());
cpp::RendererContext context{mode, generated_code, cpp_config_, path_config_};
cpp::CppFileRenderer(context, views).Render();
return generated_code;
}
SourceCode CppGenerator::HeaderFileContents() const {
return GenerateOneFile(cpp::RendererContext::kHeader);
}
SourceCode CppGenerator::SourceFileContents() const {
return GenerateOneFile(cpp::RendererContext::kSource);
}
string CppGenerator::HeaderFileName() const {
return io::JoinPath(path_config_.output_path, cpp_config_.unit + "_ops.h");
}
string CppGenerator::SourceFileName() const {
return io::JoinPath(path_config_.output_path, cpp_config_.unit + "_ops.cc");
}
void CppGenerator::WriteHeaderFile() const {
controller_.WriteFile(HeaderFileName(), HeaderFileContents());
}
void CppGenerator::WriteSourceFile() const {
controller_.WriteFile(SourceFileName(), SourceFileContents());
}
}
} | #include "tensorflow/c/experimental/ops/gen/cpp/cpp_generator.h"
#include <algorithm>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace generator {
namespace {
TEST(CppGeneratorTest, typical_usage) {
string category = "testing";
string name_space = "tensorflow::ops";
string output_dir = "tensorflow/c/experimental/ops/gen/cpp/golden";
string source_dir = "tensorflow";
string api_dirs = "";
std::vector<string> ops = {
"Neg",
"MatMul",
"IdentityN",
"SparseSoftmaxCrossEntropyWithLogits",
"AccumulatorApplyGradient",
"VarHandleOp",
"RestoreV2",
};
cpp::CppConfig cpp_config(category, name_space);
PathConfig controller_config(output_dir, source_dir, api_dirs, ops);
CppGenerator generator(cpp_config, controller_config);
Env *env = Env::Default();
string golden_dir = io::JoinPath(testing::TensorFlowSrcRoot(),
controller_config.tf_output_dir);
string generated_header = generator.HeaderFileContents().Render();
string generated_source = generator.SourceFileContents().Render();
string expected_header;
string header_file_name = io::JoinPath(golden_dir, "testing_ops.h.golden");
TF_CHECK_OK(ReadFileToString(env, header_file_name, &expected_header));
string expected_source;
string source_file_name = io::JoinPath(golden_dir, "testing_ops.cc.golden");
TF_CHECK_OK(ReadFileToString(env, source_file_name, &expected_source));
expected_header.erase(
std::remove(expected_header.begin(), expected_header.end(), '\r'),
expected_header.end());
expected_source.erase(
std::remove(expected_source.begin(), expected_source.end(), '\r'),
expected_source.end());
EXPECT_EQ(expected_header, generated_header);
EXPECT_EQ(expected_source, generated_source);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/cpp_generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/cpp_generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f81a9f1c-f14a-46bb-95cb-3879e36651fc | cpp | tensorflow/tensorflow | renderer | tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.cc | tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_test.cc | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace generator {
namespace cpp {
Renderer::Renderer(RendererContext context) : context_(context) {}
Renderer& Renderer::BlankLine() {
context_.code.AddLineWithoutIndent("");
return *this;
}
Renderer& Renderer::CodeLine(const string& text) {
context_.code.AddLineWithoutIndent(text);
return *this;
}
Renderer& Renderer::CodeLines(const string& text) {
StringPiece trimmed_text(text);
str_util::RemoveWhitespaceContext(&trimmed_text);
for (const string& line : str_util::Split(trimmed_text, '\n')) {
context_.code.AddLineWithoutIndent(line);
}
return *this;
}
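// Statement() supplies the terminating ';' itself; text that already ends
// in ';' is still emitted, but logged as superfluous.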
Renderer& Renderer::Statement(const string& text) {
if (absl::EndsWith(text, ";")) {
LOG(WARNING) << "Superfluous terminating ';' in '" << text << "'";
context_.code.AddLineWithIndent(text);
} else {
context_.code.AddLineWithIndent(absl::StrCat(text, ";"));
}
return *this;
}
Renderer& Renderer::TFStatement(const string& text) {
return Statement(absl::Substitute("TF_RETURN_IF_ERROR($0)", text));
}
Renderer& Renderer::CommentLine(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat("
return *this;
}
Renderer& Renderer::BlockOpen(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat(text, " {"));
context_.code.IncreaseIndent();
return *this;
}
Renderer& Renderer::BlockClose(const string& text) {
context_.code.DecreaseIndent();
context_.code.AddLineWithIndent(absl::StrCat("}", text));
return *this;
}
}
}
} | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "tensorflow/c/experimental/ops/gen/common/path_config.h"
#include "tensorflow/c/experimental/ops/gen/common/source_code.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
namespace {
TEST(Renderer, typical_usage) {
class TestRenderer : Renderer {
public:
explicit TestRenderer(SourceCode& code)
: Renderer(
{RendererContext::kSource, code, CppConfig(), PathConfig()}) {}
void Render() {
CommentLine("File level comment.");
CodeLine("#include \"header.h\"");
BlankLine();
BlockOpen("void TestFunction()");
{
Statement("int i = 1");
BlankLine();
BlockOpen("while (i == 1)");
{
CommentLine("Do nothing, really....");
CodeLine("#if 0");
Statement("call()");
CodeLine("#endif");
BlockClose();
}
BlockClose("
}
}
};
SourceCode code;
TestRenderer(code).Render();
string expected = R"(
#include "header.h"
void TestFunction() {
int i = 1;
while (i == 1) {
#if 0
call();
#endif
}
}
)";
code.SetSpacesPerIndent(3);
EXPECT_EQ(expected, code.Render());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21fc9f49-2069-4de4-bd1e-e847fc05d67d | cpp | tensorflow/tensorflow | stream_executor | tensorflow/c/experimental/stream_executor/stream_executor.cc | tensorflow/c/experimental/stream_executor/stream_executor_test.cc | #include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/c_api_macros_internal.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "tensorflow/c/tf_status_helper.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/executor_cache.h"
#include "xla/stream_executor/host_memory_allocation.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_common.h"
#include "tensorflow/core/common_runtime/device/device_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tsl/platform/status.h"
using tensorflow::StatusFromTF_Status;
namespace stream_executor {
using tensorflow::StringPiece;
using OwnedTFStatus = tensorflow::TF_StatusPtr;
namespace {
absl::Status ValidateSPPlatform(const SP_Platform& platform) {
TF_VALIDATE_STRUCT_SIZE(SP_Platform, platform, SP_PLATFORM_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SP_Platform, platform, name);
TF_VALIDATE_NOT_NULL(SP_Platform, platform, type);
TF_RETURN_IF_ERROR(
tensorflow::device_utils::ValidateDeviceType(platform.name));
TF_RETURN_IF_ERROR(
tensorflow::device_utils::ValidateDeviceType(platform.type));
return absl::OkStatus();
}
absl::Status ValidateSPPlatformFns(const SP_PlatformFns& platform_fns) {
TF_VALIDATE_STRUCT_SIZE(SP_PlatformFns, platform_fns,
SP_PLATFORM_FNS_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, create_device);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, destroy_device);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, create_stream_executor);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, destroy_stream_executor);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, create_device_fns);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, destroy_device_fns);
return absl::OkStatus();
}
absl::Status ValidateSPAllocatorStats(const SP_AllocatorStats& stats) {
TF_VALIDATE_STRUCT_SIZE(SP_AllocatorStats, stats,
SP_ALLOCATORSTATS_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPDeviceMemoryBase(const SP_DeviceMemoryBase& mem) {
TF_VALIDATE_STRUCT_SIZE(SP_DeviceMemoryBase, mem,
SP_DEVICE_MEMORY_BASE_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPDevice(const SP_Device& device) {
TF_VALIDATE_STRUCT_SIZE(SP_Device, device, SP_DEVICE_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPDeviceFns(const SP_DeviceFns& device_fns) {
TF_VALIDATE_STRUCT_SIZE(SP_DeviceFns, device_fns, SP_DEVICE_FNS_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPStreamExecutor(const SP_StreamExecutor& se,
const SP_Platform& platform) {
TF_VALIDATE_STRUCT_SIZE(SP_StreamExecutor, se,
SP_STREAM_EXECUTOR_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, allocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, deallocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, get_allocator_stats);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, host_memory_allocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, host_memory_deallocate);
if (platform.supports_unified_memory) {
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, unified_memory_allocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, unified_memory_deallocate);
}
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, device_memory_usage);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, create_stream);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, destroy_stream);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, create_stream_dependency);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, get_stream_status);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, create_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, destroy_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, get_event_status);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, record_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, wait_for_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memcpy_dtoh);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memcpy_htod);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, sync_memcpy_dtoh);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, sync_memcpy_htod);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, block_host_for_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, synchronize_all_activity);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, host_callback);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, mem_zero);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memset);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memset32);
return absl::OkStatus();
}
absl::Status ValidateSEPlatformRegistrationParams(
const SE_PlatformRegistrationParams& params) {
TF_VALIDATE_STRUCT_SIZE(SE_PlatformRegistrationParams, params,
SE_PLATFORM_REGISTRATION_PARAMS_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SE_PlatformRegistrationParams, params, destroy_platform);
TF_VALIDATE_NOT_NULL(SE_PlatformRegistrationParams, params,
destroy_platform_fns);
return absl::OkStatus();
}
#undef TF_VALIDATE_NOT_NULL
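// The ValidateSP*/ValidateSE* helpers above check that each plugin-supplied
// struct declares the expected struct_size and that every required function
// pointer is non-null before TensorFlow dereferences it;
// InitStreamExecutorPlugin below gates plugin registration on these checks.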
DeviceMemoryBase DeviceMemoryBaseFromC(const SP_DeviceMemoryBase& mem) {
DeviceMemoryBase base(mem.opaque, mem.size);
base.SetPayload(mem.payload);
return base;
}
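// DeviceMemoryBaseFromC maps the plugin-facing SP_DeviceMemoryBase struct to
// the C++ DeviceMemoryBase used by the StreamExecutor interface, preserving
// the opaque pointer, size, and payload. CStreamExecutor below relies on
// DeviceMemoryBaseToC (from the internal header) for the reverse direction.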
struct HostCallbackContext {
absl::AnyInvocable<absl::Status() &&> callback;
};
void HostCallbackTrampoline(void* ctx, TF_Status* status) {
HostCallbackContext* host_ctx = static_cast<HostCallbackContext*>(ctx);
absl::Status s = std::move(host_ctx->callback)();
tsl::Set_TF_Status_from_Status(status, s);
delete host_ctx;
}
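// HostCallbackTrampoline adapts an owning C++ callback to the C plugin API:
// the heap-allocated HostCallbackContext carries the AnyInvocable across the
// void* boundary, and the trampoline deletes it after a single invocation.
// A minimal enqueue sketch (assuming a valid SP_Stream `stream_handle`):
//   auto* ctx = new HostCallbackContext{std::move(callback)};
//   stream_executor_->host_callback(&device_, stream_handle,
//                                   &HostCallbackTrampoline, ctx);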
class CStreamExecutor : public StreamExecutorCommon {
public:
explicit CStreamExecutor(Platform* se_platform, SP_Device device,
SP_DeviceFns* device_fns,
SP_StreamExecutor* stream_executor,
SP_Platform* platform, SP_PlatformFns* platform_fns,
SP_TimerFns* timer_fns, const std::string& name,
int visible_device_count)
: StreamExecutorCommon(se_platform),
device_(std::move(device)),
device_fns_(device_fns),
stream_executor_(stream_executor),
platform_(platform),
platform_fns_(platform_fns),
timer_fns_(timer_fns),
platform_name_(name),
visible_device_count_(visible_device_count) {}
~CStreamExecutor() override {
platform_fns_->destroy_device(platform_, &device_);
}
absl::Status Init() override { return absl::OkStatus(); }
DeviceMemoryBase Allocate(uint64_t size, int64_t memory_space) override {
SP_DeviceMemoryBase mem = {SP_DEVICE_MEMORY_BASE_STRUCT_SIZE};
stream_executor_->allocate(&device_, size, memory_space, &mem);
absl::Status status = ValidateSPDeviceMemoryBase(mem);
if (!status.ok()) {
LOG(ERROR) << status.message();
}
return DeviceMemoryBaseFromC(mem);
}
  DeviceMemoryBase Allocate(uint64_t size) {
    return Allocate(size, /*memory_space=*/0);
  }
void Deallocate(DeviceMemoryBase* mem) override {
SP_DeviceMemoryBase device_memory_base = DeviceMemoryBaseToC(mem);
stream_executor_->deallocate(&device_, &device_memory_base);
}
absl::StatusOr<std::unique_ptr<MemoryAllocation>> HostMemoryAllocate(
uint64_t size) override {
auto* buffer = stream_executor_->host_memory_allocate(&device_, size);
if (buffer == nullptr && size > 0) {
return absl::InternalError(
absl::StrFormat("Failed to allocate HostMemory of size %d", size));
}
return std::make_unique<HostMemoryAllocation>(buffer, size, this);
}
void HostMemoryDeallocate(void* mem) override {
stream_executor_->host_memory_deallocate(&device_, mem);
}
void* UnifiedMemoryAllocate(uint64_t size) override {
CHECK(stream_executor_->unified_memory_allocate);
return stream_executor_->unified_memory_allocate(&device_, size);
}
void UnifiedMemoryDeallocate(void* mem) override {
CHECK(stream_executor_->unified_memory_deallocate);
stream_executor_->unified_memory_deallocate(&device_, mem);
}
absl::optional<AllocatorStats> GetAllocatorStats() override {
SP_AllocatorStats c_stats{SP_ALLOCATORSTATS_STRUCT_SIZE};
TF_Bool has_stats =
stream_executor_->get_allocator_stats(&device_, &c_stats);
if (!has_stats) {
return absl::nullopt;
}
absl::Status status = ValidateSPAllocatorStats(c_stats);
if (!status.ok()) {
LOG(ERROR) << status.message();
return absl::nullopt;
}
::stream_executor::AllocatorStats stats;
stats.num_allocs = c_stats.num_allocs;
stats.bytes_in_use = c_stats.bytes_in_use;
stats.peak_bytes_in_use = c_stats.peak_bytes_in_use;
stats.largest_alloc_size = c_stats.largest_alloc_size;
if (c_stats.has_bytes_limit) {
stats.bytes_limit = c_stats.bytes_limit;
}
stats.bytes_reserved = c_stats.bytes_reserved;
stats.peak_bytes_reserved = c_stats.peak_bytes_reserved;
if (c_stats.has_bytes_reservable_limit) {
stats.bytes_reservable_limit = c_stats.bytes_reservable_limit;
}
stats.largest_free_block_bytes = c_stats.largest_free_block_bytes;
return stats;
}
bool SynchronizeAllActivity() override {
OwnedTFStatus c_status(TF_NewStatus());
stream_executor_->synchronize_all_activity(&device_, c_status.get());
if (TF_GetCode(c_status.get()) != TF_OK) {
LOG(ERROR) << TF_Message(c_status.get());
return false;
}
return true;
}
absl::Status SynchronousMemZero(DeviceMemoryBase* location,
uint64_t size) override {
return tsl::errors::Unimplemented(
"SynchronousMemZero is not supported by pluggable device.");
}
absl::Status SynchronousMemcpy(DeviceMemoryBase* gpu_dst,
const void* host_src, uint64_t size) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_DeviceMemoryBase device_memory_base = DeviceMemoryBaseToC(gpu_dst);
stream_executor_->sync_memcpy_htod(&device_, &device_memory_base, host_src,
size, c_status.get());
return StatusFromTF_Status(c_status.get());
}
absl::Status SynchronousMemcpy(void* host_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_DeviceMemoryBase device_memory_base = DeviceMemoryBaseToC(&gpu_src);
stream_executor_->sync_memcpy_dtoh(&device_, host_dst, &device_memory_base,
size, c_status.get());
return StatusFromTF_Status(c_status.get());
}
void DeallocateStream(Stream* stream) override {
static_cast<CStream*>(stream)->Destroy();
}
absl::Status BlockHostForEvent(Stream* stream, Event* event) {
OwnedTFStatus c_status(TF_NewStatus());
SP_Event event_handle = static_cast<CEvent*>(event)->Handle();
stream_executor_->block_host_for_event(&device_, event_handle,
c_status.get());
return StatusFromTF_Status(c_status.get());
}
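  // BlockHostUntilDone prefers the plugin's optional block_host_until_done
  // hook; when the plugin does not provide one, it falls back to recording a
  // fresh event on the stream and blocking the host on that event instead.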
absl::Status BlockHostUntilDone(Stream* stream) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_Stream stream_handle = static_cast<CStream*>(stream)->Handle();
if (stream_executor_->block_host_until_done != nullptr) {
stream_executor_->block_host_until_done(&device_, stream_handle,
c_status.get());
return StatusFromTF_Status(c_status.get());
}
SP_Event event_handle;
stream_executor_->create_event(&device_, &event_handle, c_status.get());
TF_RETURN_IF_ERROR(StatusFromTF_Status(c_status.get()));
stream_executor_->record_event(&device_, stream_handle, event_handle,
c_status.get());
absl::Status s = StatusFromTF_Status(c_status.get());
if (!s.ok()) {
stream_executor_->destroy_event(&device_, event_handle);
return s;
}
stream_executor_->block_host_for_event(&device_, event_handle,
c_status.get());
stream_executor_->destroy_event(&device_, event_handle);
return StatusFromTF_Status(c_status.get());
}
absl::Status EnablePeerAccessTo(StreamExecutor* other) override {
return tsl::errors::Unimplemented(
"EnablePeerAccessTo is not supported by pluggable device.");
}
bool CanEnablePeerAccessTo(StreamExecutor* other) override { return false; }
  bool DeviceMemoryUsage(int64_t* free, int64_t* total) const override {
    // The C API takes int64_t* directly, so the pointers pass through as-is.
    return stream_executor_->device_memory_usage(&device_, free, total);
  }
absl::StatusOr<std::unique_ptr<DeviceDescription>> CreateDeviceDescription()
const override {
OwnedTFStatus c_status(TF_NewStatus());
DeviceDescription desc;
if (device_.hardware_name != nullptr) {
desc.set_name(device_.hardware_name);
}
if (device_.device_vendor != nullptr) {
desc.set_device_vendor(device_.device_vendor);
}
if (device_.pci_bus_id != nullptr) {
desc.set_pci_bus_id(device_.pci_bus_id);
}
if (device_fns_->get_numa_node != nullptr) {
int32_t numa_node = device_fns_->get_numa_node(&device_);
if (numa_node >= 0) {
desc.set_numa_node(numa_node);
}
}
if (device_fns_->get_memory_bandwidth != nullptr) {
int64_t memory_bandwidth = device_fns_->get_memory_bandwidth(&device_);
if (memory_bandwidth >= 0) {
desc.set_memory_bandwidth(memory_bandwidth);
}
}
return std::make_unique<DeviceDescription>(std::move(desc));
}
absl::StatusOr<std::unique_ptr<Event>> CreateEvent() override {
auto c_event = std::make_unique<CEvent>(&device_, stream_executor_);
TF_RETURN_IF_ERROR(c_event->Create());
return std::move(c_event);
}
absl::StatusOr<std::unique_ptr<Stream>> CreateStream(
std::optional<std::variant<StreamPriority, int>> priority) override {
auto stream = std::make_unique<CStream>(&device_, stream_executor_, this);
TF_RETURN_IF_ERROR(stream->Create());
return std::move(stream);
}
private:
SP_Device device_;
SP_DeviceFns* device_fns_;
SP_StreamExecutor* stream_executor_;
SP_Platform* platform_;
SP_PlatformFns* platform_fns_;
SP_TimerFns* timer_fns_;
std::string platform_name_;
int visible_device_count_;
};
}  // namespace
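// CPlatform stores the plugin-supplied structs by value. SP_Platform and the
// other SP_* types are plain C structs, so the std::move calls in the
// constructor below are effectively copies; in particular, reading
// platform.name to initialize name_ after std::move(platform) is well
// defined.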
CPlatform::CPlatform(SP_Platform platform,
void (*destroy_platform)(SP_Platform*),
SP_PlatformFns platform_fns,
void (*destroy_platform_fns)(SP_PlatformFns*),
SP_DeviceFns device_fns, SP_StreamExecutor stream_executor,
SP_TimerFns timer_fns)
: platform_(std::move(platform)),
destroy_platform_(destroy_platform),
platform_fns_(std::move(platform_fns)),
destroy_platform_fns_(destroy_platform_fns),
device_fns_(std::move(device_fns)),
stream_executor_(std::move(stream_executor)),
timer_fns_(std::move(timer_fns)),
name_(platform.name) {}
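// Tear-down order in the destructor matters: the device, stream-executor,
// and timer function tables are destroyed through platform_fns_ while the
// platform is still alive, and the plugin-provided destructors for the
// platform and platform_fns structs run last.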
CPlatform::~CPlatform() {
platform_fns_.destroy_device_fns(&platform_, &device_fns_);
platform_fns_.destroy_stream_executor(&platform_, &stream_executor_);
platform_fns_.destroy_timer_fns(&platform_, &timer_fns_);
destroy_platform_(&platform_);
destroy_platform_fns_(&platform_fns_);
}
absl::StatusOr<std::unique_ptr<DeviceDescription>>
CPlatform::DescriptionForDevice(int ordinal) const {
DeviceDescription desc;
desc.set_name(name_);
return std::make_unique<DeviceDescription>(std::move(desc));
}
absl::StatusOr<StreamExecutor*> CPlatform::FindExisting(int ordinal) {
return executor_cache_.Get(ordinal);
}
absl::StatusOr<StreamExecutor*> CPlatform::ExecutorForDevice(int ordinal) {
return executor_cache_.GetOrCreate(
ordinal, [this, ordinal]() { return GetUncachedExecutor(ordinal); });
}
absl::StatusOr<std::unique_ptr<StreamExecutor>> CPlatform::GetUncachedExecutor(
int ordinal) {
SE_CreateDeviceParams device_params{SE_CREATE_DEVICE_PARAMS_STRUCT_SIZE};
SP_Device device{SP_DEVICE_STRUCT_SIZE};
device_params.device = &device;
device_params.ext = nullptr;
device_params.ordinal = ordinal;
OwnedTFStatus c_status(TF_NewStatus());
platform_fns_.create_device(&platform_, &device_params, c_status.get());
TF_RETURN_IF_ERROR(StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(ValidateSPDevice(device));
int visible_device_count = 0;
platform_fns_.get_device_count(&platform_, &visible_device_count,
c_status.get());
TF_RETURN_IF_ERROR(StatusFromTF_Status(c_status.get()));
return std::make_unique<CStreamExecutor>(
this, std::move(device), &device_fns_, &stream_executor_, &platform_,
&platform_fns_, &timer_fns_, name_, visible_device_count);
}
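// Callers normally reach a CStreamExecutor through the cache, e.g. (a
// minimal sketch, assuming a registered platform and device ordinal 0):
//   absl::StatusOr<StreamExecutor*> executor =
//       platform->ExecutorForDevice(0);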
absl::Status InitStreamExecutorPlugin(void* dso_handle,
std::string* device_type,
std::string* platform_name) {
tensorflow::Env* env = tensorflow::Env::Default();
void* dso_symbol;
TF_RETURN_IF_ERROR(
env->GetSymbolFromLibrary(dso_handle, "SE_InitPlugin", &dso_symbol));
auto init_fn = reinterpret_cast<SEInitPluginFn>(dso_symbol);
return InitStreamExecutorPlugin(init_fn, device_type, platform_name);
}
absl::Status InitStreamExecutorPlugin(SEInitPluginFn init_fn,
std::string* device_type,
std::string* platform_name) {
SE_PlatformRegistrationParams params{
SE_PLATFORM_REGISTRATION_PARAMS_STRUCT_SIZE};
SP_Platform platform{SP_PLATFORM_STRUCT_SIZE};
SP_PlatformFns platform_fns{SP_PLATFORM_FNS_STRUCT_SIZE};
params.major_version = SE_MAJOR;
params.minor_version = SE_MINOR;
params.patch_version = SE_PATCH;
params.platform = &platform;
params.platform_fns = &platform_fns;
OwnedTFStatus c_status(TF_NewStatus());
init_fn(¶ms, c_status.get());
TF_RETURN_IF_ERROR(tensorflow::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(ValidateSEPlatformRegistrationParams(params));
TF_RETURN_IF_ERROR(ValidateSPPlatform(platform));
TF_RETURN_IF_ERROR(ValidateSPPlatformFns(platform_fns));
SE_CreateDeviceFnsParams device_fns_params{
SE_CREATE_DEVICE_FNS_PARAMS_STRUCT_SIZE};
SP_DeviceFns device_fns{SP_DEVICE_FNS_STRUCT_SIZE};
device_fns_params.device_fns = &device_fns;
platform_fns.create_device_fns(&platform, &device_fns_params, c_status.get());
TF_RETURN_IF_ERROR(tensorflow::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(ValidateSPDeviceFns(device_fns));
SE_CreateStreamExecutorParams se_params{
SE_CREATE_STREAM_EXECUTOR_PARAMS_STRUCT_SIZE};
SP_StreamExecutor se{SP_STREAMEXECUTOR_STRUCT_SIZE};
se_params.stream_executor = &se;
platform_fns.create_stream_executor(&platform, &se_params, c_status.get());
TF_RETURN_IF_ERROR(tensorflow::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(ValidateSPStreamExecutor(se, platform));
SP_TimerFns timer_fns{SP_TIMER_FNS_STRUCT_SIZE};
TF_RETURN_IF_ERROR(tensorflow::StatusFromTF_Status(c_status.get()));
*device_type = std::string(platform.type);
*platform_name = std::string(platform.name);
std::unique_ptr<stream_executor::CPlatform> cplatform(
new stream_executor::CPlatform(
std::move(platform), params.destroy_platform, std::move(platform_fns),
params.destroy_platform_fns, std::move(device_fns), std::move(se),
std::move(timer_fns)));
TF_CHECK_OK(
stream_executor::PlatformManager::RegisterPlatform(std::move(cplatform)));
return absl::OkStatus();
}
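// A minimal end-to-end sketch (assuming `dso_handle` points at a loaded
// plugin shared object that exports SE_InitPlugin):
//   std::string device_type, platform_name;
//   TF_RETURN_IF_ERROR(
//       InitStreamExecutorPlugin(dso_handle, &device_type, &platform_name));
// The accompanying unit tests exercise the same flow with an in-process init
// function instead of a shared object.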
} | #include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include <functional>
#include <utility>
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
namespace {
TEST(StreamExecutor, SuccessfulRegistration) {
auto plugin_init = [](SE_PlatformRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
test_util::PopulateDefaultPlatformRegistrationParams(params);
};
std::string device_type, platform_name;
absl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
TF_ASSERT_OK(status);
absl::StatusOr<Platform*> maybe_platform =
PlatformManager::PlatformWithName("MY_DEVICE");
TF_ASSERT_OK(maybe_platform.status());
Platform* platform = std::move(maybe_platform).value();
ASSERT_EQ(platform->Name(), test_util::kDeviceName);
ASSERT_EQ(platform->VisibleDeviceCount(), test_util::kDeviceCount);
absl::StatusOr<StreamExecutor*> maybe_executor =
platform->ExecutorForDevice(0);
TF_ASSERT_OK(maybe_executor.status());
}
TEST(StreamExecutor, NameNotSet) {
auto plugin_init = [](SE_PlatformRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
test_util::PopulateDefaultPlatformRegistrationParams(params);
params->platform->name = nullptr;
};
std::string device_type, platform_name;
absl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.message(), "'name' field in SP_Platform must be set.");
}
TEST(StreamExecutor, InvalidNameWithSemicolon) {
auto plugin_init = [](SE_PlatformRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
test_util::PopulateDefaultPlatformRegistrationParams(params);
params->platform->name = "INVALID:NAME";
};
std::string device_type, platform_name;
absl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
EXPECT_THAT(
status.message(),
testing::ContainsRegex("Device name/type 'INVALID:NAME' must match"));
}
TEST(StreamExecutor, InvalidNameWithSlash) {
auto plugin_init = [](SE_PlatformRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
test_util::PopulateDefaultPlatformRegistrationParams(params);
params->platform->name = "INVALID/";
};
std::string device_type, platform_name;
absl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
EXPECT_THAT(status.message(),
testing::ContainsRegex("Device name/type 'INVALID/' must match"));
}
TEST(StreamExecutor, CreateDeviceNotSet) {
auto plugin_init = [](SE_PlatformRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
test_util::PopulateDefaultPlatformRegistrationParams(params);
params->platform_fns->create_device = nullptr;
};
std::string device_type, platform_name;
absl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.message(),
"'create_device' field in SP_PlatformFns must be set.");
}
TEST(StreamExecutor, UnifiedMemoryAllocateNotSet) {
auto plugin_init = [](SE_PlatformRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
test_util::PopulateDefaultPlatformRegistrationParams(params);
params->platform->supports_unified_memory = true;
};
std::string device_type, platform_name;
absl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(
status.message(),
"'unified_memory_allocate' field in SP_StreamExecutor must be set.");
}
class StreamExecutorTest : public ::testing::Test {
protected:
StreamExecutorTest() {}
void SetUp() override {
test_util::PopulateDefaultPlatform(&platform_, &platform_fns_);
test_util::PopulateDefaultDeviceFns(&device_fns_);
test_util::PopulateDefaultStreamExecutor(&se_);
test_util::PopulateDefaultTimerFns(&timer_fns_);
}
void TearDown() override {}
StreamExecutor* GetExecutor(int ordinal) {
if (!cplatform_) {
cplatform_ = absl::make_unique<CPlatform>(
platform_, test_util::DestroyPlatform, platform_fns_,
test_util::DestroyPlatformFns, device_fns_, se_, timer_fns_);
}
absl::StatusOr<StreamExecutor*> maybe_executor =
cplatform_->ExecutorForDevice(ordinal);
TF_CHECK_OK(maybe_executor.status());
return std::move(maybe_executor).value();
}
SP_Platform platform_;
SP_PlatformFns platform_fns_;
SP_DeviceFns device_fns_;
SP_StreamExecutor se_;
SP_TimerFns timer_fns_;
std::unique_ptr<CPlatform> cplatform_;
};
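// Each TEST_F below overrides only the SP_StreamExecutor (or SP_DeviceFns /
// SP_PlatformFns) callbacks it needs, using captureless lambdas, before
// calling GetExecutor(), which lazily builds the CPlatform from the
// populated defaults.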
TEST_F(StreamExecutorTest, Allocate) {
se_.allocate = [](const SP_Device* const device, uint64_t size,
int64_t memory_space, SP_DeviceMemoryBase* const mem) {
mem->struct_size = SP_DEVICE_MEMORY_BASE_STRUCT_SIZE;
mem->opaque = malloc(size);
mem->size = size;
};
se_.deallocate = [](const SP_Device* const device,
SP_DeviceMemoryBase* const mem) {
EXPECT_EQ(mem->size, 2 * sizeof(int));
free(mem->opaque);
mem->opaque = nullptr;
mem->size = 0;
};
StreamExecutor* executor = GetExecutor(0);
DeviceMemory<int> mem = executor->AllocateArray<int>(2);
ASSERT_NE(mem.opaque(), nullptr);
ASSERT_EQ(mem.size(), 2 * sizeof(int));
executor->Deallocate(&mem);
}
TEST_F(StreamExecutorTest, HostMemoryAllocate) {
static bool allocate_called = false;
static bool deallocate_called = false;
se_.host_memory_allocate = [](const SP_Device* const device, uint64_t size) {
allocate_called = true;
return malloc(size);
};
se_.host_memory_deallocate = [](const SP_Device* const device, void* mem) {
free(mem);
deallocate_called = true;
};
StreamExecutor* executor = GetExecutor(0);
ASSERT_FALSE(allocate_called);
TF_ASSERT_OK_AND_ASSIGN(auto mem, executor->HostMemoryAllocate(8));
ASSERT_NE(mem->opaque(), nullptr);
ASSERT_TRUE(allocate_called);
ASSERT_FALSE(deallocate_called);
mem.reset();
ASSERT_TRUE(deallocate_called);
}
TEST_F(StreamExecutorTest, UnifiedMemoryAllocate) {
static bool allocate_called = false;
static bool deallocate_called = false;
se_.unified_memory_allocate = [](const SP_Device* const device,
uint64_t size) {
allocate_called = true;
return malloc(size);
};
se_.unified_memory_deallocate = [](const SP_Device* const device, void* mem) {
free(mem);
deallocate_called = true;
};
StreamExecutor* executor = GetExecutor(0);
ASSERT_FALSE(allocate_called);
void* mem = executor->UnifiedMemoryAllocate(8);
ASSERT_NE(mem, nullptr);
ASSERT_TRUE(allocate_called);
ASSERT_FALSE(deallocate_called);
executor->UnifiedMemoryDeallocate(mem);
ASSERT_TRUE(deallocate_called);
}
TEST_F(StreamExecutorTest, GetAllocatorStats) {
se_.get_allocator_stats = [](const SP_Device* const device,
SP_AllocatorStats* const stat) -> TF_Bool {
stat->struct_size = SP_ALLOCATORSTATS_STRUCT_SIZE;
stat->bytes_in_use = 123;
return true;
};
StreamExecutor* executor = GetExecutor(0);
absl::optional<AllocatorStats> optional_stats = executor->GetAllocatorStats();
ASSERT_TRUE(optional_stats.has_value());
AllocatorStats stats = optional_stats.value();
ASSERT_EQ(stats.bytes_in_use, 123);
}
TEST_F(StreamExecutorTest, DeviceMemoryUsage) {
se_.device_memory_usage = [](const SP_Device* const device,
int64_t* const free,
int64_t* const total) -> TF_Bool {
*free = 45;
*total = 7;
return true;
};
StreamExecutor* executor = GetExecutor(0);
int64_t free = 0;
int64_t total = 0;
executor->DeviceMemoryUsage(&free, &total);
ASSERT_EQ(free, 45);
ASSERT_EQ(total, 7);
}
TEST_F(StreamExecutorTest, CreateStream) {
static bool stream_created = false;
static bool stream_deleted = false;
se_.create_stream = [](const SP_Device* const device, SP_Stream* stream,
TF_Status* const status) -> void {
*stream = new SP_Stream_st(14);
stream_created = true;
};
se_.destroy_stream = [](const SP_Device* const device,
SP_Stream stream) -> void {
auto custom_stream = static_cast<SP_Stream_st*>(stream);
ASSERT_EQ(custom_stream->stream_id, 14);
delete custom_stream;
stream_deleted = true;
};
StreamExecutor* executor = GetExecutor(0);
ASSERT_FALSE(stream_created);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
ASSERT_TRUE(stream_created);
ASSERT_FALSE(stream_deleted);
stream.reset();
ASSERT_TRUE(stream_deleted);
}
TEST_F(StreamExecutorTest, CreateStreamDependency) {
static bool create_stream_dependency_called = false;
se_.create_stream_dependency = [](const SP_Device* const device,
SP_Stream dependent, SP_Stream other,
TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
create_stream_dependency_called = true;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto dependent, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto other, executor->CreateStream());
ASSERT_FALSE(create_stream_dependency_called);
TF_ASSERT_OK(dependent->WaitFor(other.get()));
ASSERT_TRUE(create_stream_dependency_called);
}
TEST_F(StreamExecutorTest, StreamStatus) {
static bool status_ok = true;
se_.get_stream_status = [](const SP_Device* const device, SP_Stream stream,
TF_Status* const status) -> void {
if (status_ok) {
TF_SetStatus(status, TF_OK, "");
} else {
TF_SetStatus(status, TF_INTERNAL, "Test error");
}
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->RefreshStatus());
status_ok = false;
auto updated_status = stream->RefreshStatus();
ASSERT_FALSE(stream->ok());
ASSERT_EQ(updated_status.message(), "Test error");
}
TEST_F(StreamExecutorTest, CreateEvent) {
static bool event_created = false;
static bool event_deleted = false;
se_.create_event = [](const SP_Device* const device, SP_Event* event,
TF_Status* const status) -> void {
*event = new SP_Event_st(123);
event_created = true;
};
se_.destroy_event = [](const SP_Device* const device,
SP_Event event) -> void {
auto custom_event = static_cast<SP_Event_st*>(event);
ASSERT_EQ(custom_event->event_id, 123);
delete custom_event;
event_deleted = true;
};
StreamExecutor* executor = GetExecutor(0);
ASSERT_FALSE(event_created);
TF_ASSERT_OK_AND_ASSIGN(auto event, executor->CreateEvent());
ASSERT_TRUE(event_created);
ASSERT_FALSE(event_deleted);
event.reset();
ASSERT_TRUE(event_deleted);
}
TEST_F(StreamExecutorTest, PollForEventStatus) {
static SE_EventStatus event_status = SE_EVENT_COMPLETE;
se_.create_event = [](const SP_Device* const device, SP_Event* event,
TF_Status* const status) -> void {
*event = new SP_Event_st(123);
};
se_.destroy_event = [](const SP_Device* const device,
SP_Event event) -> void { delete event; };
se_.get_event_status = [](const SP_Device* const device,
SP_Event event) -> SE_EventStatus {
EXPECT_EQ(event->event_id, 123);
return event_status;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto event, executor->CreateEvent());
ASSERT_EQ(event->PollForStatus(), Event::Status::kComplete);
event_status = SE_EVENT_ERROR;
ASSERT_EQ(event->PollForStatus(), Event::Status::kError);
}
TEST_F(StreamExecutorTest, RecordAndWaitForEvent) {
static bool record_called = false;
static bool wait_called = false;
se_.create_stream = [](const SP_Device* const device, SP_Stream* stream,
TF_Status* const status) -> void {
*stream = new SP_Stream_st(1);
};
se_.destroy_stream = [](const SP_Device* const device,
SP_Stream stream) -> void { delete stream; };
se_.create_event = [](const SP_Device* const device, SP_Event* event,
TF_Status* const status) -> void {
*event = new SP_Event_st(2);
};
se_.destroy_event = [](const SP_Device* const device,
SP_Event event) -> void { delete event; };
se_.record_event = [](const SP_Device* const device, SP_Stream stream,
SP_Event event, TF_Status* const status) {
EXPECT_EQ(stream->stream_id, 1);
EXPECT_EQ(event->event_id, 2);
TF_SetStatus(status, TF_OK, "");
record_called = true;
};
se_.wait_for_event = [](const SP_Device* const device, SP_Stream stream,
SP_Event event, TF_Status* const status) {
EXPECT_EQ(stream->stream_id, 1);
EXPECT_EQ(event->event_id, 2);
TF_SetStatus(status, TF_OK, "");
wait_called = true;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto event, executor->CreateEvent());
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
ASSERT_FALSE(record_called);
TF_ASSERT_OK(stream->RecordEvent(event.get()));
ASSERT_TRUE(record_called);
ASSERT_FALSE(wait_called);
TF_ASSERT_OK(stream->WaitFor(event.get()));
ASSERT_TRUE(wait_called);
}
TEST_F(StreamExecutorTest, MemcpyToHost) {
se_.create_stream = [](const SP_Device* const device, SP_Stream* stream,
TF_Status* const status) -> void {
*stream = new SP_Stream_st(14);
};
se_.destroy_stream = [](const SP_Device* const device,
SP_Stream stream) -> void { delete stream; };
se_.memcpy_dtoh = [](const SP_Device* const device, SP_Stream stream,
void* host_dst,
const SP_DeviceMemoryBase* const device_src,
uint64_t size, TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(stream->stream_id, 14);
std::memcpy(host_dst, device_src->opaque, size);
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
size_t size = sizeof(int);
int src_data = 34;
int dst_data = 2;
DeviceMemoryBase device_src(&src_data, size);
TF_ASSERT_OK(stream->Memcpy(&dst_data, device_src, size));
ASSERT_EQ(dst_data, 34);
}
TEST_F(StreamExecutorTest, MemcpyFromHost) {
se_.memcpy_htod = [](const SP_Device* const device, SP_Stream stream,
SP_DeviceMemoryBase* const device_dst,
const void* host_src, uint64_t size,
TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
std::memcpy(device_dst->opaque, host_src, size);
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
size_t size = sizeof(int);
int src_data = 18;
int dst_data = 0;
DeviceMemoryBase device_dst(&dst_data, size);
TF_ASSERT_OK(stream->Memcpy(&device_dst, &src_data, size));
ASSERT_EQ(dst_data, 18);
}
TEST_F(StreamExecutorTest, MemcpyDeviceToDevice) {
se_.memcpy_dtod = [](const SP_Device* const device, SP_Stream stream,
SP_DeviceMemoryBase* const device_dst,
const SP_DeviceMemoryBase* const device_src,
uint64_t size, TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
std::memcpy(device_dst->opaque, device_src->opaque, size);
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
size_t size = sizeof(int);
int src_data = 18;
int dst_data = 0;
DeviceMemoryBase device_dst(&dst_data, size);
DeviceMemoryBase device_src(&src_data, size);
TF_ASSERT_OK(stream->Memcpy(&device_dst, device_src, size));
ASSERT_EQ(dst_data, 18);
}
TEST_F(StreamExecutorTest, SyncMemcpyToHost) {
se_.sync_memcpy_dtoh = [](const SP_Device* const device, void* host_dst,
const SP_DeviceMemoryBase* const device_src,
uint64_t size, TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
std::memcpy(host_dst, device_src->opaque, size);
};
StreamExecutor* executor = GetExecutor(0);
size_t size = sizeof(int);
int src_data = 34;
int dst_data = 2;
DeviceMemoryBase device_src(&src_data, size);
TF_ASSERT_OK(executor->SynchronousMemcpyD2H(device_src, size, &dst_data));
ASSERT_EQ(dst_data, 34);
}
TEST_F(StreamExecutorTest, SyncMemcpyFromHost) {
se_.sync_memcpy_htod =
[](const SP_Device* const device, SP_DeviceMemoryBase* const device_dst,
const void* host_src, uint64_t size, TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
std::memcpy(device_dst->opaque, host_src, size);
};
StreamExecutor* executor = GetExecutor(0);
size_t size = sizeof(int);
int src_data = 18;
int dst_data = 0;
DeviceMemoryBase device_dst(&dst_data, size);
TF_ASSERT_OK(executor->SynchronousMemcpyH2D(&src_data, size, &device_dst));
ASSERT_EQ(dst_data, 18);
}
TEST_F(StreamExecutorTest, BlockHostForEvent) {
static bool block_host_for_event_called = false;
se_.create_event = [](const SP_Device* const device, SP_Event* event,
TF_Status* const status) {
*event = new SP_Event_st(357);
};
se_.destroy_event = [](const SP_Device* const device, SP_Event event) {
delete event;
};
se_.block_host_for_event = [](const SP_Device* const device, SP_Event event,
TF_Status* const status) -> void {
ASSERT_EQ(event->event_id, 357);
TF_SetStatus(status, TF_OK, "");
block_host_for_event_called = true;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
ASSERT_FALSE(block_host_for_event_called);
TF_ASSERT_OK(stream->BlockHostUntilDone());
ASSERT_TRUE(block_host_for_event_called);
}
TEST_F(StreamExecutorTest, BlockHostUntilDone) {
static bool block_host_until_done_called = false;
se_.create_stream = [](const SP_Device* const device, SP_Stream* stream,
TF_Status* const status) {
*stream = new SP_Stream_st(58);
};
se_.destroy_stream = [](const SP_Device* const device, SP_Stream stream) {
delete stream;
};
se_.block_host_until_done = [](const SP_Device* const device,
SP_Stream stream,
TF_Status* const status) -> void {
ASSERT_EQ(stream->stream_id, 58);
TF_SetStatus(status, TF_OK, "");
block_host_until_done_called = true;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
ASSERT_FALSE(block_host_until_done_called);
TF_ASSERT_OK(stream->BlockHostUntilDone());
ASSERT_TRUE(block_host_until_done_called);
}
TEST_F(StreamExecutorTest, SynchronizeAllActivity) {
static bool synchronize_all_called = false;
se_.synchronize_all_activity = [](const SP_Device* const device,
TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
synchronize_all_called = true;
};
StreamExecutor* executor = GetExecutor(0);
ASSERT_FALSE(synchronize_all_called);
ASSERT_TRUE(executor->SynchronizeAllActivity());
ASSERT_TRUE(synchronize_all_called);
}
TEST_F(StreamExecutorTest, HostCallbackOk) {
se_.host_callback = [](const SP_Device* const device, SP_Stream stream,
SE_StatusCallbackFn const callback_fn,
void* const callback_arg) -> TF_Bool {
TF_Status* status = TF_NewStatus();
callback_fn(callback_arg, status);
bool ok = TF_GetCode(status) == TF_OK;
TF_DeleteStatus(status);
return ok;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
std::function<absl::Status()> callback = []() -> absl::Status {
return absl::OkStatus();
};
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(callback));
}
TEST_F(StreamExecutorTest, HostCallbackError) {
se_.host_callback = [](const SP_Device* const device, SP_Stream stream,
SE_StatusCallbackFn const callback_fn,
void* const callback_arg) -> TF_Bool {
TF_Status* status = TF_NewStatus();
callback_fn(callback_arg, status);
bool ok = TF_GetCode(status) == TF_OK;
TF_DeleteStatus(status);
return ok;
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
std::function<absl::Status()> callback = []() -> absl::Status {
return tsl::errors::Unimplemented("Unimplemented");
};
ASSERT_FALSE(stream->DoHostCallbackWithStatus(callback).ok());
}
TEST_F(StreamExecutorTest, DeviceDescription) {
static const char* hardware_name = "TestName";
static const char* vendor = "TestVendor";
static const char* pci_bus_id = "TestPCIBusId";
platform_fns_.create_device = [](const SP_Platform* platform,
SE_CreateDeviceParams* params,
TF_Status* status) {
params->device->hardware_name = hardware_name;
params->device->device_vendor = vendor;
params->device->pci_bus_id = pci_bus_id;
};
device_fns_.get_numa_node = [](const SP_Device* device) { return 123; };
device_fns_.get_memory_bandwidth = [](const SP_Device* device) -> int64_t {
return 54;
};
device_fns_.get_gflops = [](const SP_Device* device) -> double { return 32; };
StreamExecutor* executor = GetExecutor(0);
const DeviceDescription& description = executor->GetDeviceDescription();
ASSERT_EQ(description.name(), "TestName");
ASSERT_EQ(description.device_vendor(), "TestVendor");
ASSERT_EQ(description.pci_bus_id(), "TestPCIBusId");
ASSERT_EQ(description.numa_node(), 123);
ASSERT_EQ(description.memory_bandwidth(), 54);
}
TEST_F(StreamExecutorTest, DeviceDescriptionNumaNodeNotSet) {
static const char* hardware_name = "TestName";
static const char* vendor = "TestVendor";
static const char* pci_bus_id = "TestPCIBusId";
platform_fns_.create_device = [](const SP_Platform* platform,
SE_CreateDeviceParams* params,
TF_Status* status) {
params->device->hardware_name = hardware_name;
params->device->device_vendor = vendor;
params->device->pci_bus_id = pci_bus_id;
};
device_fns_.get_memory_bandwidth = [](const SP_Device* device) -> int64_t {
return 54;
};
device_fns_.get_gflops = [](const SP_Device* device) -> double { return 32; };
StreamExecutor* executor = GetExecutor(0);
const DeviceDescription& description = executor->GetDeviceDescription();
ASSERT_EQ(description.name(), "TestName");
ASSERT_EQ(description.device_vendor(), "TestVendor");
ASSERT_EQ(description.pci_bus_id(), "TestPCIBusId");
ASSERT_EQ(description.numa_node(), -1);
ASSERT_EQ(description.memory_bandwidth(), 54);
}
TEST_F(StreamExecutorTest, MemZero) {
se_.create_stream = [](const SP_Device* const device, SP_Stream* stream,
TF_Status* const status) -> void {
*stream = new SP_Stream_st(14);
};
se_.destroy_stream = [](const SP_Device* const device,
SP_Stream stream) -> void { delete stream; };
se_.mem_zero = [](const SP_Device* device, SP_Stream stream,
SP_DeviceMemoryBase* location, uint64_t size,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(stream->stream_id, 14);
std::memset(location->opaque, 0, size);
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
size_t size = sizeof(int);
int data = 2;
DeviceMemoryBase device_data(&data, size);
TF_ASSERT_OK(stream->MemZero(&device_data, size));
ASSERT_EQ(data, 0);
}
TEST_F(StreamExecutorTest, Memset32) {
se_.create_stream = [](const SP_Device* const device, SP_Stream* stream,
TF_Status* const status) -> void {
*stream = new SP_Stream_st(14);
};
se_.destroy_stream = [](const SP_Device* const device,
SP_Stream stream) -> void { delete stream; };
se_.memset32 = [](const SP_Device* device, SP_Stream stream,
SP_DeviceMemoryBase* location, uint32_t pattern,
uint64_t size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(stream->stream_id, 14);
EXPECT_EQ(size % 4, 0);
auto ptr = static_cast<uint32_t*>(location->opaque);
    for (uint64_t i = 0; i < size / 4; i++) {
*(ptr + i) = pattern;
}
};
StreamExecutor* executor = GetExecutor(0);
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
size_t size = sizeof(int);
int data = 2;
DeviceMemoryBase device_data(&data, size);
TF_ASSERT_OK(stream->Memset32(&device_data, 18, size));
ASSERT_EQ(data, 18);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/stream_executor/stream_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/stream_executor/stream_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a27f2e13-ab33-42ea-891a-e76f26a75708 | cpp | tensorflow/tensorflow | c_api_unified_experimental | tensorflow/c/eager/c_api_unified_experimental.cc | tensorflow/c/eager/c_api_unified_experimental_test.cc | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
using tensorflow::string;
namespace tensorflow {
namespace tracing {
typedef absl::flat_hash_map<std::string, tracing::FactoryFunction> FactoriesMap;
static FactoriesMap& GetFactories() {
static FactoriesMap* factories = new FactoriesMap;
return *factories;
}
static tracing::FactoryFunction default_factory;
void RegisterTracingEngineFactory(const string& name, FactoryFunction factory) {
  assert(((!GetFactories().count(name)) ||
          (GetFactories()[name] == factory)) &&
         "Duplicate tracing factory registration");
GetFactories()[name] = factory;
}
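// A minimal registration sketch, with MyGraphContext standing in as a
// hypothetical TracingContext subclass:
//   RegisterTracingEngineFactory(
//       "mytracing",
//       [](const char* fn_name, TF_Status* s) -> TracingContext* {
//         return new MyGraphContext(fn_name);
//       });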
Status SetDefaultTracingEngine(const char* name) {
auto entry = GetFactories().find(name);
if (entry != GetFactories().end()) {
    default_factory = entry->second;
return absl::OkStatus();
}
string msg = absl::StrCat(
"No tracing engine factory has been registered with the key '", name,
"' (available: ");
std::set<string> factories_sorted;
for (const auto& factory : GetFactories())
factories_sorted.insert(factory.first);
const char* comma = "";
for (const string& factory : factories_sorted) {
msg += comma + factory;
comma = ", ";
}
msg += ")";
return errors::InvalidArgument(msg.c_str());
}
static TracingContext* CreateTracingExecutionContext(const char* fn_name,
TF_Status* s) {
if (default_factory) {
return default_factory(fn_name, s);
}
tsl::Set_TF_Status_from_Status(
s, errors::FailedPrecondition("default_factory is nullptr"));
return nullptr;
}
}  // namespace tracing
}  // namespace tensorflow
using tensorflow::AbstractFunction;
using tensorflow::AbstractTensorHandle;
using tensorflow::DataType;
using tensorflow::dyn_cast;
using tensorflow::OutputList;
using tensorflow::Status;
using tensorflow::unwrap;
using tensorflow::wrap;
using tensorflow::tracing::CreateTracingExecutionContext;
using tensorflow::tracing::SetDefaultTracingEngine;
using tensorflow::tracing::TracingContext;
using tensorflow::tracing::TracingOperation;
using tensorflow::tracing::TracingTensorHandle;
void TF_SetTracingImplementation(const char* name, TF_Status* s) {
tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
}
TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* s) {
return wrap(CreateTracingExecutionContext(fn_name, s));
}
TF_AbstractFunction* TF_FinalizeFunction(TF_ExecutionContext* ctx,
TF_OutputList* outputs, TF_Status* s) {
AbstractFunction* func;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(ctx));
if (!tracing_ctx) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"Only TracingContext can be converted into a function."));
return nullptr;
}
tsl::Set_TF_Status_from_Status(s,
tracing_ctx->Finalize(unwrap(outputs), &func));
TF_DeleteExecutionContext(ctx);
return wrap(func);
}
TF_AbstractTensor* TF_AddFunctionParameter(TF_ExecutionContext* func,
TF_DataType dtype, TF_Shape shape,
TF_Status* s) {
DCHECK_GE(shape.num_dims, -1);
TracingTensorHandle* t;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(func));
if (!tracing_ctx) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AddFunctionParameter must be called on a TracingContext."));
return nullptr;
}
tensorflow::PartialTensorShape partial_shape;
if (shape.num_dims != -1) {
DCHECK(shape.dim_sizes != nullptr);
Status status = tensorflow::PartialTensorShape::MakePartialShape(
reinterpret_cast<int64_t*>(shape.dim_sizes), shape.num_dims,
&partial_shape);
if (!status.ok()) {
tsl::Set_TF_Status_from_Status(s, status);
return nullptr;
}
}
tsl::Set_TF_Status_from_Status(
s, tracing_ctx->AddParameter(static_cast<DataType>(dtype), partial_shape,
&t));
return wrap(t);
}
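// TF_Shape uses num_dims == -1 for an unknown rank, in which case dim_sizes
// may be null. A minimal sketch adding an unknown-rank float parameter:
//   TF_AbstractTensor* p =
//       TF_AddFunctionParameter(func_ctx, TF_FLOAT, {-1, nullptr}, s);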
void TF_DeleteExecutionContext(TF_ExecutionContext* c) { unwrap(c)->Release(); }
TF_AbstractOp* TF_NewAbstractOp(TF_ExecutionContext* c) {
return wrap((unwrap(c)->CreateOperation()));
}
void TF_DeleteAbstractOp(TF_AbstractOp* op) { unwrap(op)->Release(); }
void TF_DeleteAbstractTensor(TF_AbstractTensor* t) { unwrap(t)->Unref(); }
TF_OutputList* TF_NewOutputList() { return wrap(new OutputList); }
void TF_DeleteOutputList(TF_OutputList* o) { delete unwrap(o); }
void TF_OutputListSetNumOutputs(TF_OutputList* o, int num_outputs,
TF_Status* s) {
unwrap(o)->expected_num_outputs = num_outputs;
unwrap(o)->outputs.clear();
unwrap(o)->outputs.resize(num_outputs);
}
int TF_OutputListNumOutputs(TF_OutputList* o) {
return unwrap(o)->outputs.size();
}
TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i) {
return wrap(unwrap(o)->outputs[i]);
}
void TF_OutputListPushBack(TF_OutputList* o, TF_AbstractTensor* tensor,
TF_Status* s) {
unwrap(o)->outputs.push_back(unwrap(tensor));
}
void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type,
TF_Status* s) {
tsl::Set_TF_Status_from_Status(
      s, unwrap(op)->Reset(op_type,
                           /*raw_device_name=*/nullptr));
}
void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name,
TF_Status* s) {
TracingOperation* tracing_op = dyn_cast<TracingOperation>(unwrap(op));
if (!tracing_op) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AbstractOpSetOpName must be called on a TracingOperation."));
return;
}
tsl::Set_TF_Status_from_Status(s, tracing_op->SetOpName(op_name));
}
void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
TF_DataType value, TF_Status* s) {
Status status =
unwrap(op)->SetAttrType(attr_name, static_cast<DataType>(value));
TF_SetStatus(s, static_cast<TF_Code>(status.code()),
absl::StatusMessageAsCStr(status));
}
void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
TF_AbstractTensor* const* inputs, TF_OutputList* o,
TF_Status* s) {
for (int i = 0; i < num_inputs; i++) {
tsl::Set_TF_Status_from_Status(s, unwrap(op)->AddInput(unwrap(inputs[i])));
if (TF_GetCode(s) != TF_OK) {
return;
}
}
int num_outputs = unwrap(o)->expected_num_outputs;
tsl::Set_TF_Status_from_Status(
s, unwrap(op)->Execute(
absl::MakeSpan(reinterpret_cast<AbstractTensorHandle**>(
unwrap(o)->outputs.data()),
unwrap(o)->outputs.size()),
&num_outputs));
}
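// Typical call sequence for a single op (a condensed sketch of what the
// accompanying unit tests do): create the op, set its type (and, when
// tracing, a name), size the output list, then execute.
//   TF_AbstractOp* op = TF_NewAbstractOp(ctx);
//   TF_AbstractOpSetOpType(op, "Add", s);
//   TF_OutputList* o = TF_NewOutputList();
//   TF_OutputListSetNumOutputs(o, 1, s);
//   TF_ExecuteOperation(op, 2, inputs, o, s);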
void TF_DeleteAbstractFunction(TF_AbstractFunction* func) {
unwrap(func)->Unref();
}
void TF_ExecutionContextRegisterFunction(TF_ExecutionContext* ctx,
TF_AbstractFunction* func,
TF_Status* s) {
tsl::Set_TF_Status_from_Status(s,
unwrap(ctx)->RegisterFunction(unwrap(func)));
} | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include <memory>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::Status;
using tensorflow::string;
using tensorflow::TF_StatusPtr;
namespace tensorflow {
namespace {
class UnifiedCAPI
: public ::testing::TestWithParam<std::tuple<const char*, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
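// UnifiedCAPI is parameterized over (tracing implementation name, use_tfrt):
// std::get<0> selects the tracing engine used for graph construction, and
// std::get<1> toggles TFRT in the eager context options.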
TEST_P(UnifiedCAPI, TestBasicEager) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* t = TestScalarTensorHandle(eager_ctx, 2.0f);
TF_AbstractTensor* at =
TF_CreateAbstractTensorFromEagerTensor(t, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at, at};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float* result_value = static_cast<float*>(TF_TensorData(result_tensor));
EXPECT_EQ(*result_value, 4.0);
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals[] = {0.0f, 0.0f, 0.0f, 0.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t =
TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
TF_AbstractTensor* at = TF_CreateAbstractTensorFromEagerTensor(
t, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at, at};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], 0);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatMul2) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t1 =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
t1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
TFE_TensorHandle* t2 =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
t2, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at1, at2};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at1);
TF_DeleteAbstractTensor(at2);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
float e_vals[] = {19.0f, 22.0f, 43.0f, 50.0f};
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], e_vals[i]);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatAdd) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t1 =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
t1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
TFE_TensorHandle* t2 =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
t2, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at1, at2};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at1);
TF_DeleteAbstractTensor(at2);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
float e_vals[] = {6.0f, 8.0f, 10.0f, 12.0f};
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], e_vals[i]);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicGraph) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
string fn_name = "double";
TF_ExecutionContext* graph_ctx =
TF_CreateFunction(fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(add_op, "my_add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto outs = unwrap(add_outputs);
auto h = outs->outputs[0];
ASSERT_NE(h, nullptr);
ASSERT_EQ(h->FullType().type_id(), TFT_UNSET);
ASSERT_EQ(unwrap(inputs[0])->FullType().type_id(), TFT_UNSET);
TF_DeleteAbstractOp(add_op);
TF_AbstractFunction* func =
TF_FinalizeFunction(graph_ctx, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractTensor(TF_OutputListGet(add_outputs, 0));
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
TF_AbstractTensor* input_t =
TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(fn_op, 1, &input_t, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(1, TF_OutputListNumOutputs(add_outputs));
TF_AbstractTensor* final_result = TF_OutputListGet(add_outputs, 0);
TFE_TensorHandle* final =
TF_AbstractTensorGetEagerTensor(final_result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float* f_value = static_cast<float*>(TF_TensorData(f_t));
ASSERT_EQ(*f_value, 4.0);
TF_DeleteOutputList(add_outputs);
TF_DeleteAbstractOp(fn_op);
TF_DeleteAbstractTensor(input_t);
TF_DeleteAbstractTensor(final_result);
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteTensor(f_t);
TF_DeleteAbstractFunction(func);
TF_DeleteExecutionContext(eager_execution_ctx);
}
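// Same flow as TestBasicGraph, but the traced op is a MatMul: an all-ones
// 2x2 matrix times itself yields 2.0 in every position.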
TEST_P(UnifiedCAPI, TestBasicGraphMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
string fn_name = "matrix_multiply";
TF_ExecutionContext* graph_ctx =
TF_CreateFunction(fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* matmul_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(matmul_op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(matmul_op, "my_matmul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
TF_OutputList* mm_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(matmul_op, 2, inputs, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(matmul_op);
TF_AbstractFunction* func =
TF_FinalizeFunction(graph_ctx, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
float vals[] = {1.0f, 1.0f, 1.0f, 1.0f};
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
TFE_TensorHandle* input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
TF_AbstractTensor* input_t =
TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(fn_op, 1, &input_t, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(1, TF_OutputListNumOutputs(mm_outputs));
TF_AbstractTensor* final_result = TF_OutputListGet(mm_outputs, 0);
TFE_TensorHandle* final =
TF_AbstractTensorGetEagerTensor(final_result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
int data_len = 4;
for (int i = 0; i < data_len; i++) {
ASSERT_EQ(result_data[i], 2.0f);
}
TF_DeleteAbstractTensor(final_result);
TF_DeleteOutputList(mm_outputs);
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteAbstractOp(fn_op);
TF_DeleteAbstractTensor(input_t);
TF_DeleteTensor(f_t);
TF_DeleteAbstractFunction(func);
TF_DeleteExecutionContext(eager_execution_ctx);
}
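// Traces a function with two outputs (arg0 + arg1 and arg1 + arg1), then
// runs it eagerly with 2.0 and 3.0, expecting 5.0 and 6.0.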
TEST_P(UnifiedCAPI, TestMultiOutputGraph) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Status* s = status.get();
string fn_name = "two_adds";
TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* add_output1;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg0, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(add_outputs, 1, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output1 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* add_output2;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg1, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(add_outputs, 1, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output2 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_DeleteAbstractTensor(arg0);
TF_DeleteAbstractTensor(arg1);
TF_AbstractFunction* func;
{
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListPushBack(func_outputs, add_output1, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, add_output2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractTensor(add_output1);
TF_DeleteAbstractTensor(add_output2);
TF_DeleteOutputList(func_outputs);
}
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
std::vector<TF_AbstractTensor*> func_args;
{
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
input_eager = TestScalarTensorHandle(eager_ctx, 3.0f);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
}
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(func_outputs, 2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(fn_op);
for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);
ASSERT_EQ(2, TF_OutputListNumOutputs(func_outputs));
float results[2];
for (int idx = 0; idx < 2; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
results[idx] = *static_cast<float*>(TF_TensorData(f_t));
TF_DeleteTensor(f_t);
}
ASSERT_EQ(results[0], 5.0);
ASSERT_EQ(results[1], 6.0);
for (int idx = 0; idx < 2; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TF_DeleteAbstractTensor(result);
}
TF_DeleteOutputList(func_outputs);
TF_DeleteExecutionContext(eager_execution_ctx);
TF_DeleteAbstractFunction(func);
}
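// Traces a three-output function: two Adds whose results also feed a MatMul.
// With inputs [[0,1],[1,0]] and [[1,0],[0,1]], the outputs are the two sums
// and their matrix product.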
TEST_P(UnifiedCAPI, TestMultiOutputGraphMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Status* s = status.get();
string fn_name = "two_adds_and_matmul";
TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* add_output1;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add1", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg0, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(add_outputs, 1, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output1 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* add_output2;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add2", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg1, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(add_outputs, 1, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output2 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* mm_output;
{
auto* mm_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(mm_op, "MatMul", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(mm_op, "mm", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {add_output1, add_output2};
TF_OutputList* mm_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(mm_outputs, 1, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(mm_op, 2, inputs, mm_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(mm_op);
mm_output = TF_OutputListGet(mm_outputs, 0);
TF_DeleteOutputList(mm_outputs);
}
TF_AbstractFunction* func;
{
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListPushBack(func_outputs, add_output1, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, add_output2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, mm_output, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_DeleteAbstractTensor(add_output1);
    TF_DeleteAbstractTensor(add_output2);
    TF_DeleteAbstractTensor(mm_output);
    TF_DeleteOutputList(func_outputs);
}
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
std::vector<TF_AbstractTensor*> func_args;
{
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, s);
float vals1[] = {0.0f, 1.0f, 1.0f, 0.0f};
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
TFE_TensorHandle* input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
float vals2[] = {1.0f, 0.0f, 0.0f, 1.0f};
input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
}
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(func_outputs, 3, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(fn_op);
for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);
ASSERT_EQ(3, TF_OutputListNumOutputs(func_outputs));
float expected_outputs[3][4] = {{1.0f, 1.0f, 1.0f, 1.0f},
{2.0f, 0.0f, 0.0f, 2.0f},
{2.0f, 2.0f, 2.0f, 2.0f}};
float result_data[4];
for (int idx = 0; idx < 3; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
for (int j = 0; j < 4; j++) {
ASSERT_EQ(result_data[j], expected_outputs[idx][j]);
}
TF_DeleteTensor(f_t);
}
for (int idx = 0; idx < 3; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TF_DeleteAbstractTensor(result);
}
TF_DeleteOutputList(func_outputs);
TF_DeleteExecutionContext(eager_execution_ctx);
TF_DeleteAbstractFunction(func);
}
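// TF_FinalizeFunction requires a tracing context; calling it on an eager
// context must fail with INVALID_ARGUMENT and return no function.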
TEST_P(UnifiedCAPI, TF_ExecutionContextToFunctionWithEagerContextRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_AbstractFunction* f = TF_FinalizeFunction(ctx, nullptr, status.get());
ASSERT_EQ(nullptr, f);
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
TF_DeleteExecutionContext(ctx);
}
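// Once both op type and name are set, the op is considered built, so
// re-setting the type must fail with FAILED_PRECONDITION.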
TEST_P(UnifiedCAPI, TF_AbstractOpSetOpTypeAfterFinishingOpBuildingRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get());
ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(status.get()));
TF_DeleteAbstractOp(placeholder_op);
TF_DeleteExecutionContext(graph_ctx);
}
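// Mirror of the previous test: re-setting the op name after the op is built
// must also fail with FAILED_PRECONDITION.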
TEST_P(UnifiedCAPI, TF_AbstractOpSetOpNameAfterFinishingOpBuildingRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get());
ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(status.get()));
TF_DeleteAbstractOp(placeholder_op);
TF_DeleteExecutionContext(graph_ctx);
}
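// A graph (tracing) tensor has no eager representation, so asking for one
// must fail with INVALID_ARGUMENT.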
TEST_P(UnifiedCAPI, TF_AbstractTensorGetEagerTensorOnGraphTensorRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto placeholder_t =
    TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensorGetEagerTensor(placeholder_t, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteExecutionContext(graph_ctx);
}
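// A tracing context wraps no TFE_Context, so the accessor must fail with
// INVALID_ARGUMENT.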
TEST_P(UnifiedCAPI, TF_ExecutionContextGetTFEContextFromFunctionContextRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecutionContextGetTFEContext(graph_ctx, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
TF_DeleteExecutionContext(graph_ctx);
}
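// Runs the suite over both tracing backends ("graphdef" and "mlir"), with
// TFRT disabled.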
INSTANTIATE_TEST_SUITE_P(Tracing, UnifiedCAPI,
::testing::Combine(::testing::Values("graphdef",
"mlir"),
::testing::Values(false)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_unified_experimental.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_unified_experimental_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0fdc034f-4003-4eb4-82c7-e682af795f59 | cpp | tensorflow/tensorflow | c_api_debug | tensorflow/c/eager/c_api_debug.cc | tensorflow/c/eager/c_api_debug_test.cc | #include <vector>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/tfe_tensor_debug_info_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/platform/status.h"
using tensorflow::string;
namespace {
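// Returns the handle's shape as a vector of dimension sizes; on failure,
// sets *status and returns whatever was gathered so far.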
std::vector<int64_t> TensorShapeAsVector(const tensorflow::TensorHandle& handle,
tensorflow::Status* status) {
std::vector<int64_t> shape;
int rank = -1;
*status = handle.NumDims(&rank);
if (!status->ok()) {
return shape;
}
shape.reserve(rank);
for (int i = 0; i < rank; ++i) {
int64_t dim;
*status = handle.Dim(i, &dim);
if (!status->ok()) {
return shape;
}
shape.push_back(dim);
}
return shape;
}
}
extern "C" {
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
TFE_TensorHandle* h, TF_Status* status) {
tensorflow::TensorHandle* handle =
TensorHandleFromInterface(tensorflow::unwrap(h));
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return nullptr;
}
std::vector<int64_t> dev_dims = TensorShapeAsVector(*handle, &status->status);
if (!status->status.ok()) {
return nullptr;
}
return new TFE_TensorDebugInfo(dev_dims);
}
TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
TFE_TensorDebugInfo* debug_info) {
delete debug_info;
}
TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
TFE_TensorDebugInfo* debug_info) {
return debug_info->dev_dims.size();
}
TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
TFE_TensorDebugInfo* debug_info, int dim_index) {
return debug_info->dev_dims[dim_index];
}
} | #include "tensorflow/c/eager/c_api.h"
#include <string.h>
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
TEST(CApiDebug, ScalarCPU) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestScalarTensorHandle(ctx, 1.0f);
TFE_TensorDebugInfo* debug_info = TFE_TensorHandleTensorDebugInfo(h, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(0, TFE_TensorDebugInfoOnDeviceNumDims(debug_info));
TFE_DeleteTensorDebugInfo(debug_info);
TFE_DeleteTensorHandle(h);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
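// A 3x2 matrix should report rank 2 with dimensions {3, 2}.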
TEST(CApiDebug, 2DCPU) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestMatrixTensorHandle3X2(ctx);
TFE_TensorDebugInfo* debug_info = TFE_TensorHandleTensorDebugInfo(h, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(2, TFE_TensorDebugInfoOnDeviceNumDims(debug_info));
EXPECT_EQ(3, TFE_TensorDebugInfoOnDeviceDim(debug_info, 0));
EXPECT_EQ(2, TFE_TensorDebugInfoOnDeviceDim(debug_info, 1));
TFE_DeleteTensorDebugInfo(debug_info);
TFE_DeleteTensorHandle(h);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_debug.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_debug_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
440601c2-2c57-4815-83a7-8087f34eb744 | cpp | tensorflow/tensorflow | c_api_experimental_reader | tensorflow/c/eager/c_api_experimental_reader.cc | tensorflow/c/eager/c_api_experimental_reader_test.cc | #include "tensorflow/c/eager/c_api_experimental_reader.h"
#include "tensorflow/c/eager/tfe_monitoring_reader_internal.h"
template <typename... LabelType>
int64_t TFE_MonitoringCounterReader::Read(const LabelType&... labels) {
return counter->Read(labels...);
}
TFE_MonitoringCounterReader* TFE_MonitoringNewCounterReader(const char* name) {
auto* result = new TFE_MonitoringCounterReader(name);
return result;
}
int64_t TFE_MonitoringReadCounter0(TFE_MonitoringCounterReader* cell_reader) {
int64_t result = cell_reader->Read();
return result;
}
int64_t TFE_MonitoringReadCounter1(TFE_MonitoringCounterReader* cell_reader,
const char* label) {
int64_t result = cell_reader->Read(label);
return result;
} | #include "tensorflow/c/eager/c_api_experimental_reader.h"
#include <cstdint>
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name);
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label);
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta = 1);
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta = 1);
TEST(CAPI, MonitoringCellReader0) {
auto counter_name = "test/counter0";
auto* counter = CreateCounter0(counter_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter0(counter);
int64_t actual = TFE_MonitoringReadCounter0(reader);
CHECK_EQ(actual, 1);
}
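// Same as above, but for a counter with one label: the read must resolve the
// labeled cell.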
TEST(CAPI, MonitoringCellReader1) {
auto counter_name = "test/counter1";
auto label_name = "test/label";
auto* counter = CreateCounter1(counter_name, label_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter1(counter, label_name);
int64_t actual = TFE_MonitoringReadCounter1(reader, label_name);
CHECK_EQ(actual, 1);
}
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0(counter_name, status, "description");
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter1(counter_name, status, "description", label);
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter1(counter, label);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_experimental_reader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_experimental_reader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94fa025d-5568-47c6-8195-73a16c710571 | cpp | tensorflow/tensorflow | parallel_device | tensorflow/c/eager/parallel_device/parallel_device.cc | tensorflow/c/eager/parallel_device/parallel_device_test.cc | #include "tensorflow/c/eager/parallel_device/parallel_device.h"
#include <cstring>
#include <memory>
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace parallel_device {
namespace {
class OpDeleter {
public:
void operator()(TFE_Op* to_delete) const { TFE_DeleteOp(to_delete); }
};
using OpPtr = std::unique_ptr<TFE_Op, OpDeleter>;
using MaybeParallelTensorOwned =
absl::variant<std::unique_ptr<ParallelTensor>, TensorHandlePtr>;
using MaybeParallelTensorUnowned =
absl::variant<ParallelTensor*, TFE_TensorHandle*>;
class NamedParallelDevice {
public:
NamedParallelDevice(const std::string& name,
std::unique_ptr<ParallelDevice> parallel_device)
: device_name_(name), parallel_device_(std::move(parallel_device)) {}
const std::string& name() const { return device_name_; }
const ParallelDevice& device() const { return *parallel_device_; }
private:
std::string device_name_;
std::unique_ptr<ParallelDevice> parallel_device_;
};
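// Executes an operation on the parallel device. TPUReplicatedInput packs
// per-device handles into one parallel tensor, TPUReplicatedOutput unpacks
// one, and _EagerConst inputs are implicitly broadcast; all other ops run
// component-wise across the underlying devices.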
absl::optional<std::vector<MaybeParallelTensorOwned>> ExecuteWithSpecialOps(
const ParallelDevice& parallel_device,
const std::string& parallel_device_name, TFE_Context* context,
std::vector<MaybeParallelTensorUnowned> inputs, const char* operation_name,
const TFE_OpAttrs* attributes, int expected_max_outputs,
TF_Status* status) {
absl::optional<std::vector<MaybeParallelTensorOwned>> result;
if (operation_name == std::string("TPUReplicatedInput")) {
if (inputs.size() != parallel_device.num_underlying_devices()) {
std::string message(absl::StrCat(
"The parallel device ", parallel_device_name, " expected ",
parallel_device.num_underlying_devices(),
" inputs to TPUReplicatedInput, but got ", inputs.size()));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
std::vector<TensorHandlePtr> components;
components.reserve(inputs.size());
for (int i = 0; i < inputs.size(); ++i) {
if (absl::holds_alternative<ParallelTensor*>(inputs[i])) {
std::string message(absl::StrCat(
"Expected all inputs to TPUReplicatedInput to be non-parallel "
"TensorHandles. The input ",
i,
" was a parallel tensor (already "
"placed on the parallel device)."));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
components.emplace_back(TFE_TensorHandleCopySharingTensor(
absl::get<TFE_TensorHandle*>(inputs[i]), status));
}
std::vector<MaybeParallelTensorOwned> result_content;
result_content.reserve(1);
result_content.push_back(ParallelTensor::FromTensorHandles(
parallel_device, std::move(components), status));
if (TF_GetCode(status) != TF_OK) return result;
result.emplace(std::move(result_content));
return result;
} else if (operation_name == std::string("TPUReplicatedOutput")) {
OpPtr op(TFE_NewOp(context, operation_name, status));
TFE_OpAddAttrs(op.get(), attributes);
int expected_outputs = TFE_OpGetOutputLength(op.get(), "outputs", status);
if (TF_GetCode(status) != TF_OK) return result;
if (expected_outputs != parallel_device.num_underlying_devices()) {
std::string message(absl::StrCat(
"The parallel device ", parallel_device_name, " expected ",
parallel_device.num_underlying_devices(),
" outputs for TPUReplicatedOutput, but got ", expected_outputs));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
if (absl::holds_alternative<TFE_TensorHandle*>(inputs[0])) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"Expected the input to "
"TPUReplicatedOutput to be a parallel tensor (placed on the "
"parallel device).");
return result;
}
ParallelTensor* t = absl::get<ParallelTensor*>(inputs[0]);
std::vector<MaybeParallelTensorOwned> outputs;
outputs.reserve(t->num_tensors());
for (int i = 0; i < t->num_tensors(); ++i) {
TensorHandlePtr this_output(
TFE_TensorHandleCopySharingTensor(t->tensor(i), status));
outputs.emplace_back(std::move(this_output));
if (TF_GetCode(status) != TF_OK) return result;
}
result.emplace(std::move(outputs));
return result;
}
std::vector<ParallelTensor*> parallel_inputs;
std::vector<std::unique_ptr<ParallelTensor>> implicitly_broadcast_tensors;
parallel_inputs.reserve(inputs.size());
implicitly_broadcast_tensors.reserve(inputs.size());
for (const auto& input : inputs) {
if (absl::holds_alternative<TFE_TensorHandle*>(input)) {
if (operation_name == std::string("_EagerConst")) {
std::unique_ptr<ParallelTensor> parallel_tensor(
parallel_device.CopyToParallelDevice(
context, absl::get<TFE_TensorHandle*>(input), status));
if (TF_GetCode(status) != TF_OK) return absl::nullopt;
parallel_inputs.push_back(parallel_tensor.get());
implicitly_broadcast_tensors.emplace_back(std::move(parallel_tensor));
} else {
TF_SetStatus(
status, TF_INVALID_ARGUMENT,
absl::StrCat(
"Got a non-parallel tensor ",
tensorflow::unwrap(absl::get<TFE_TensorHandle*>(input))
->DebugString(),
" as input to a parallel operation. First pack non-parallel "
"tensors for each device into a parallel tensor explicitly.")
.c_str());
return absl::nullopt;
}
} else {
parallel_inputs.push_back(absl::get<ParallelTensor*>(input));
}
}
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
maybe_parallel_results(
parallel_device.Execute(context, parallel_inputs, operation_name,
attributes, expected_max_outputs, status));
if (!maybe_parallel_results.has_value()) return result;
std::vector<std::unique_ptr<ParallelTensor>> parallel_results(
std::move(maybe_parallel_results.value()));
std::vector<MaybeParallelTensorOwned> result_content;
result_content.reserve(parallel_results.size());
for (std::unique_ptr<ParallelTensor>& parallel_result : parallel_results) {
result_content.push_back(
MaybeParallelTensorOwned(std::move(parallel_result)));
}
result.emplace(std::move(result_content));
return result;
}
void ParallelTensorDeallocator(void* data) {
delete reinterpret_cast<ParallelTensor*>(data);
}
int ParallelTensorNumDims(void* data, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return shape->size();
}
int64_t ParallelTensorDim(void* data, int dim_index, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return (*shape)[dim_index];
}
TF_Buffer* ParallelTensorSummarize(void* data, TF_Status* status) {
ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(data);
std::string summary;
Status cpp_status = parallel_tensor->SummarizeValue(summary);
if (!cpp_status.ok()) {
tsl::Set_TF_Status_from_Status(status, cpp_status);
return nullptr;
}
return TF_NewBufferFromString(summary.data(), summary.size());
}
TensorHandlePtr ParallelTensorToTensorHandle(
const std::string& parallel_device_name, TFE_Context* context,
std::unique_ptr<ParallelTensor> t, TF_Status* status) {
ParallelTensor* t_released = t.release();
TFE_CustomDeviceTensorHandleMethods handle_methods;
handle_methods.num_dims = &ParallelTensorNumDims;
handle_methods.dim = &ParallelTensorDim;
handle_methods.deallocator = &ParallelTensorDeallocator;
handle_methods.summarize = &ParallelTensorSummarize;
return TensorHandlePtr(TFE_NewCustomDeviceTensorHandle(
context, parallel_device_name.c_str(), t_released->dtype(), t_released,
handle_methods, status));
}
TFE_TensorHandle* CopyToParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
TF_Status* status, void* device_info) {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
absl::StrCat("Trying to copy a tensor ",
tensorflow::unwrap(tensor)->DebugString(),
" on to a parallel device. Pack non-parallel "
"tensors for each device into a parallel tensor explicitly.")
.c_str());
return nullptr;
}
TFE_TensorHandle* CopyTensorFromParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
const char* target_device_name,
TF_Status* status,
void* device_info) {
ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(
TFE_TensorHandleDevicePointer(tensor, status));
if (TF_GetCode(status) != TF_OK) return nullptr;
if (parallel_tensor->num_tensors() == 1) {
return TFE_TensorHandleCopySharingTensor(parallel_tensor->tensor(0),
status);
} else {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
absl::StrCat(
"Trying to copy a tensor out of a parallel device. Since there "
"are multiple components to parallel tensors, they must be "
"unpacked explicitly.\n",
tensorflow::unwrap(tensor)->DebugString())
.c_str());
return nullptr;
}
}
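// Custom-device execute callback: unwraps inputs already placed on this
// parallel device, runs the op via ExecuteWithSpecialOps, and re-wraps
// parallel results as custom-device tensor handles.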
void ParallelDeviceExecute(const TFE_Op* original_op, int* num_outputs,
TFE_TensorHandle** outputs, TF_Status* status,
void* device_info) {
const char* requested_placement = TFE_OpGetDevice(original_op, status);
if (*requested_placement == '\0') {
TF_SetStatus(
status, TF_INTERNAL,
"Ops must be placed on the parallel device explicitly, or their inputs "
"first un-packed. Got an un-placed op with an input placed on the "
"parallel device.");
return;
}
TFE_Context* context = TFE_OpGetContext(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
const char* operation_name = TFE_OpGetName(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
const TFE_OpAttrs* attributes = TFE_OpGetAttrs(original_op);
NamedParallelDevice* named_device =
reinterpret_cast<NamedParallelDevice*>(device_info);
std::vector<MaybeParallelTensorUnowned> typed_inputs;
int num_inputs = TFE_OpGetFlatInputCount(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
typed_inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
TFE_TensorHandle* input = TFE_OpGetFlatInput(original_op, i, status);
if (TF_GetCode(status) != TF_OK) return;
const char* tensor_handle_device =
TFE_TensorHandleDeviceName(input, status);
if (TF_GetCode(status) != TF_OK) return;
if (named_device->name() == tensor_handle_device) {
typed_inputs.emplace_back(reinterpret_cast<ParallelTensor*>(
TFE_TensorHandleDevicePointer(input, status)));
if (TF_GetCode(status) != TF_OK) return;
} else {
typed_inputs.emplace_back(input);
}
}
absl::optional<std::vector<MaybeParallelTensorOwned>> maybe_typed_outputs(
ExecuteWithSpecialOps(named_device->device(), named_device->name(),
context, std::move(typed_inputs), operation_name,
attributes, *num_outputs, status));
if (TF_GetCode(status) != TF_OK) return;
if (!maybe_typed_outputs.has_value()) {
TF_SetStatus(status, TF_INTERNAL, "OK status but no value was returned.");
return;
}
std::vector<MaybeParallelTensorOwned> typed_outputs(
std::move(maybe_typed_outputs.value()));
if (typed_outputs.size() > *num_outputs) {
TF_SetStatus(status, TF_INTERNAL,
"The allocated output buffer was too small.");
return;
}
for (int i = 0; i < typed_outputs.size(); ++i) {
MaybeParallelTensorOwned typed_output(std::move(typed_outputs[i]));
if (absl::holds_alternative<TensorHandlePtr>(typed_output)) {
outputs[i] = absl::get<TensorHandlePtr>(typed_output).release();
} else {
outputs[i] = ParallelTensorToTensorHandle(
named_device->name(), context,
std::move(absl::get<std::unique_ptr<ParallelTensor>>(
typed_output)),
status)
.release();
if (TF_GetCode(status) != TF_OK) return;
}
}
*num_outputs = typed_outputs.size();
}
void DeleteParallelDevice(void* device_info) {
delete reinterpret_cast<NamedParallelDevice*>(device_info);
}
}
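// Fills in the TFE_CustomDevice callbacks and allocates the device_info they
// consume (released through delete_device).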
void AllocateParallelDevice(const char* device_name,
const char* const* underlying_devices,
int num_underlying_devices,
TFE_CustomDevice* device, void** device_info) {
device->copy_tensor_to_device = &CopyToParallelDevice;
device->copy_tensor_from_device = &CopyTensorFromParallelDevice;
device->delete_device = &DeleteParallelDevice;
device->execute = &ParallelDeviceExecute;
std::vector<std::string> underlying_devices_vector;
underlying_devices_vector.reserve(num_underlying_devices);
for (int device_index = 0; device_index < num_underlying_devices;
++device_index) {
underlying_devices_vector.push_back(underlying_devices[device_index]);
}
std::unique_ptr<ParallelDevice> parallel_device(
new ParallelDevice(underlying_devices_vector));
*device_info =
new NamedParallelDevice{device_name, std::move(parallel_device)};
}
}
} | #include <array>
#include <gmock/gmock.h>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_testlib.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace parallel_device {
using ::testing::HasSubstr;
TEST(PARALLEL_DEVICE, TestBasicCPU) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1");
}
TEST(PARALLEL_DEVICE, TestBasicCPUAliased) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(PARALLEL_DEVICE, TestBasicTPUAliased) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> devices(
TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
bool has_tpu = false;
for (int device_index = 0; device_index < TF_DeviceListCount(devices.get());
++device_index) {
std::string device_type =
TF_DeviceListType(devices.get(), device_index, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
if (device_type == "TPU") {
has_tpu = true;
break;
}
}
if (has_tpu) {
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:0");
}
}
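// Implicit copies on or off the parallel device are unsupported; both
// directions must return UNIMPLEMENTED.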
TEST(PARALLEL_DEVICE, TestExplicitCopies) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
const char* first_device_name =
"/job:localhost/replica:0/task:0/device:CPU:0";
const char* second_device_name =
"/job:localhost/replica:0/task:0/device:CPU:1";
std::array<const char*, 2> underlying_devices{first_device_name,
second_device_name};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr failed_copy_on_result(TFE_TensorHandleCopyToDevice(
cpu_value.get(), context.get(), device_name, status.get()));
EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED);
std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()};
TensorHandlePtr device_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr copy_off(TFE_TensorHandleCopyToDevice(
device_value.get(), context.get(), first_device_name, status.get()));
EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED);
}
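// Components may have different sizes; the packed handle still reports
// rank 1.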
TEST(PARALLEL_DEVICE, TestDifferentShapes) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::vector<float> size_two_value{1., 2.};
std::vector<float> size_three_value{1., 2., 3.};
TensorHandlePtr size_two(
VectorFloatTensorHandle(size_two_value, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr size_three(
VectorFloatTensorHandle(size_three_value, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{size_two.get(), size_three.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
int num_axes = TFE_TensorHandleNumDims(combined_value.get(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
EXPECT_EQ(num_axes, 1);
}
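// Nests one parallel device inside another and checks that multiplication
// flows through both levels of packing and unpacking.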
TEST(PARALLEL_DEVICE, TestNestedParallelDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 3),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* first_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> first_underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), first_device_name,
first_underlying_devices, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* second_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:1";
std::array<const char*, 2> second_underlying_devices{
"/job:localhost/replica:0/task:0/device:CUSTOM:0",
"/job:localhost/replica:0/task:0/device:CPU:2"};
RegisterParallelDevice(context.get(), second_device_name,
second_underlying_devices, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr first_combined_value = CreatePerDeviceValues(
context.get(), components, first_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_three(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = first_combined_value.get();
components[1] = value_three.get();
TensorHandlePtr second_combined_value = CreatePerDeviceValues(
context.get(), components, second_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr negative_one_cpu(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = negative_one_cpu.get();
components[1] = negative_one_cpu.get();
TensorHandlePtr first_negative_one = CreatePerDeviceValues(
context.get(), components, first_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = first_negative_one.get();
components[1] = negative_one_cpu.get();
TensorHandlePtr second_negative_one = CreatePerDeviceValues(
context.get(), components, second_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr multiply_result(
Multiply(context.get(), second_combined_value.get(),
second_negative_one.get(), status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> second_components;
ExtractPerDeviceValues(context.get(), multiply_result.get(),
&second_components, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(second_components[1].get(), 9.);
std::string first_device = TFE_TensorHandleBackingDeviceName(
second_components[0].get(), status.get());
ASSERT_EQ(second_underlying_devices[0], first_device);
std::string second_device = TFE_TensorHandleBackingDeviceName(
second_components[1].get(), status.get());
ASSERT_EQ(second_underlying_devices[1], second_device);
std::array<TensorHandlePtr, 2> first_components;
ExtractPerDeviceValues(context.get(), second_components[0].get(),
&first_components, status.get());
ExpectScalarEq<float>(first_components[0].get(), 3.);
ExpectScalarEq<float>(first_components[1].get(), 6.);
first_device = TFE_TensorHandleBackingDeviceName(first_components[0].get(),
status.get());
ASSERT_EQ(first_underlying_devices[0], first_device);
second_device = TFE_TensorHandleBackingDeviceName(first_components[1].get(),
status.get());
ASSERT_EQ(first_underlying_devices[1], second_device);
}
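// Packing or unpacking with the wrong number of components, or re-packing an
// already-parallel tensor, must fail with INVALID_ARGUMENT.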
TEST(PARALLEL_DEVICE, TestInvalidPacking) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 1> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
{
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(),
value_two.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
{
std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), correct_components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> incorrect_components;
ExtractPerDeviceValues(context.get(), combined_value.get(),
&incorrect_components, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
{
std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), correct_components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 1> incorrect_components{combined_value.get()};
TensorHandlePtr recombined_value = CreatePerDeviceValues(
context.get(), incorrect_components, device_name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
{
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), "TPUReplicatedOutput", status.get()),
TFE_DeleteOp);
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_OpSetAttrInt(op.get(), "num_replicas", 1);
TFE_OpAddInput(op.get(), value_one.get(), status.get());
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_OpSetDevice(op.get(), device_name, status.get());
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_TensorHandle* result_handles;
int num_retvals = 1;
TFE_Execute(op.get(), &result_handles, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
}
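// Issues a CollectiveReduce (merge_op="Add") over group_size participants
// and returns the reduced handle.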
TensorHandlePtr CollectiveSum(TFE_Context* context, TFE_TensorHandle* input,
int group_size, TF_Status* status) {
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context, "CollectiveReduce", status), TFE_DeleteOp);
if (TF_GetCode(status) != TF_OK) return nullptr;
const char* device = TFE_TensorHandleDeviceName(input, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetDevice(op.get(), device, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetAttrType(op.get(), "T", TFE_TensorHandleDataType(input));
TFE_OpSetAttrInt(op.get(), "group_size", group_size);
TFE_OpSetAttrInt(op.get(), "group_key", 0);
TFE_OpSetAttrInt(op.get(), "instance_key", 0);
const std::string merge_op("Add");
TFE_OpSetAttrString(op.get(), "merge_op", merge_op.c_str(),
merge_op.length());
const std::string final_op("Id");
TFE_OpSetAttrString(op.get(), "final_op", final_op.c_str(),
final_op.length());
TFE_OpSetAttrIntList(op.get(), "subdiv_offsets", nullptr, 0);
TFE_OpAddInput(op.get(), input, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_TensorHandle* result_handle;
int num_retvals = 1;
TFE_Execute(op.get(), &result_handle, &num_retvals, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
return TensorHandlePtr(result_handle);
}
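// Runs an all-reduce across the two underlying CPUs through the parallel
// device; each component of the result should be 1 + 2 = 3.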
void TestCollective(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
TFE_ContextOptionsSetAsync(opts.get(), async);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr parallel_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr reduced(
CollectiveSum(context.get(), parallel_value.get(), 2, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> result_components;
ExtractPerDeviceValues(context.get(), reduced.get(), &result_components,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(result_components[0].get(), 3.);
ExpectScalarEq<float>(result_components[1].get(), 3.);
}
TEST(PARALLEL_DEVICE, TestCollectiveSync) { TestCollective(false); }
TEST(PARALLEL_DEVICE, TestCollectiveAsync) { TestCollective(true); }
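// Registers a one-input function whose body runs a CollectiveReduce with
// merge_op=Mul, so calling it through the parallel device multiplies the
// per-device components together.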
void RegisterCollectiveMulFunction(TFE_Context* context,
const char* function_name, int group_size,
TF_Status* status) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> body(TF_NewGraph(),
TF_DeleteGraph);
TF_OperationDescription* placeholder_desc =
TF_NewOperation(body.get(), "Placeholder", "Placeholder");
TF_SetAttrType(placeholder_desc, "dtype", TF_FLOAT);
TF_Operation* placeholder_op = TF_FinishOperation(placeholder_desc, status);
if (TF_GetCode(status) != TF_OK) return;
TF_Output x{placeholder_op, 0};
TF_OperationDescription* reduce_desc =
TF_NewOperation(body.get(), "CollectiveReduce", "CollectiveReduce");
TF_SetAttrType(reduce_desc, "T", TF_FLOAT);
TF_SetAttrInt(reduce_desc, "group_size", group_size);
TF_SetAttrInt(reduce_desc, "group_key", 0);
TF_SetAttrInt(reduce_desc, "instance_key", 0);
const std::string merge_op("Mul");
TF_SetAttrString(reduce_desc, "merge_op", merge_op.c_str(),
merge_op.length());
const std::string final_op("Id");
TF_SetAttrString(reduce_desc, "final_op", final_op.c_str(),
final_op.length());
TF_SetAttrIntList(reduce_desc, "subdiv_offsets", nullptr, 0);
TF_AddInput(reduce_desc, x);
TF_Operation* reduce_op = TF_FinishOperation(reduce_desc, status);
if (TF_GetCode(status) != TF_OK) return;
TF_Operation* operations[]{placeholder_op, reduce_op};
TF_Output y{reduce_op, 0};
const char* output_name = "y";
std::unique_ptr<TF_Function, decltype(&TF_DeleteFunction)> function(
TF_GraphToFunction(
body.get(), function_name,
0, 2,
operations, 1, &x,
1, &y, &output_name,
nullptr, "", status),
TF_DeleteFunction);
if (TF_GetCode(status) != TF_OK) return;
TFE_ContextAddFunction(context, function.get(), status);
}
TEST(PARALLEL_DEVICE, TestFunction) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* function_name = "test_reduce_mul";
RegisterCollectiveMulFunction(context.get(), function_name, 2, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(7., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(9., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr parallel_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), function_name, status.get()), TFE_DeleteOp);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_OpSetDevice(op.get(), device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_OpAddInput(op.get(), parallel_value.get(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* raw_result_handle;
int num_retvals = 1;
TFE_Execute(op.get(), &raw_result_handle, &num_retvals, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr reduced(raw_result_handle);
std::array<TensorHandlePtr, 2> result_components;
ExtractPerDeviceValues(context.get(), reduced.get(), &result_components,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(result_components[0].get(), 7. * 9.);
ExpectScalarEq<float>(result_components[1].get(), 7. * 9.);
std::string first_device = TFE_TensorHandleBackingDeviceName(
result_components[0].get(), status.get());
ASSERT_EQ(underlying_devices[0], first_device);
std::string second_device = TFE_TensorHandleBackingDeviceName(
result_components[1].get(), status.get());
ASSERT_EQ(underlying_devices[1], second_device);
}
TEST(PARALLEL_DEVICE, TestSummaryString) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()};
TensorHandlePtr device_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ImmediateExecutionTensorHandle* unwrapped_handle =
tensorflow::unwrap(device_value.get());
std::string summarized;
TF_ASSERT_OK(unwrapped_handle->SummarizeValue(summarized));
EXPECT_THAT(summarized, HasSubstr("\"CPU:0\": 3"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/parallel_device/parallel_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/parallel_device/parallel_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5ee644c-4890-444d-a6c5-e13e6ac379f3 | cpp | tensorflow/tensorflow | parallel_device_lib | tensorflow/c/eager/parallel_device/parallel_device_lib.cc | tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc | #include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/tfe_cancellation_manager_internal.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace parallel_device {
namespace {
class OpDeleter {
public:
void operator()(TFE_Op* to_delete) const { TFE_DeleteOp(to_delete); }
};
using OpPtr = std::unique_ptr<TFE_Op, OpDeleter>;
class StatusDeleter {
public:
void operator()(TF_Status* to_delete) const { TF_DeleteStatus(to_delete); }
};
using StatusPtr = std::unique_ptr<TF_Status, StatusDeleter>;
class ExecutorDeleter {
public:
void operator()(TFE_Executor* to_delete) const {
TFE_DeleteExecutor(to_delete);
}
};
using ExecutorPtr = std::unique_ptr<TFE_Executor, ExecutorDeleter>;
}
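// Runs one op at a time on a single underlying device, on a dedicated thread.
// StartExecute() hands work to the thread and returns immediately; Join()
// blocks until the result (or error) is available. This lets the parallel
// device launch the same op on all component devices concurrently.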
class DeviceThread {
public:
explicit DeviceThread(const std::string& device, const bool is_async,
const int in_flight_nodes_limit)
: status_(TF_NewStatus()),
device_(device),
executor_(
TFE_NewExecutor(is_async, true,
in_flight_nodes_limit)),
op_(nullptr),
thread_(tensorflow::Env::Default()->StartThread(
tensorflow::ThreadOptions(), "parallel_device_execute",
std::bind(&DeviceThread::Run, this))) {}
~DeviceThread();
void StartExecute(TFE_Context* context, const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes, int expected_max_outputs,
CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id = absl::nullopt);
std::vector<TensorHandlePtr> Join(TF_Status* status);
void AsyncWait(TF_Status* status);
private:
void Run();
void Execute(TFE_Context* context, const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes, int expected_max_outputs,
std::vector<TensorHandlePtr>* outputs, TF_Status* status) const
TF_EXCLUSIVE_LOCKS_REQUIRED(execution_mutex_);
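  // The worker thread cycles through these states: callers move kIdle ->
  // kReadyToExecute via StartExecute(), Run() moves kReadyToExecute ->
  // kHasResult, and Join() moves kHasResult -> kIdle. kShuttingDown is set
  // once, by the destructor.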
enum class ExecutionState {
kReadyToExecute,
kHasResult,
kIdle,
kShuttingDown,
};
tensorflow::mutex execution_mutex_;
ExecutionState execution_state_ TF_GUARDED_BY(execution_mutex_) =
ExecutionState::kIdle;
tensorflow::condition_variable start_execute_;
tensorflow::condition_variable finished_execute_;
tensorflow::condition_variable finished_join_;
TFE_Context* context_ TF_GUARDED_BY(execution_mutex_);
const char* operation_name_ TF_GUARDED_BY(execution_mutex_);
absl::optional<int64_t> step_id_ TF_GUARDED_BY(execution_mutex_) =
absl::nullopt;
std::vector<TFE_TensorHandle*> op_inputs_ TF_GUARDED_BY(execution_mutex_);
const TFE_OpAttrs* attributes_ TF_GUARDED_BY(execution_mutex_);
int expected_max_outputs_ TF_GUARDED_BY(execution_mutex_);
CancellationManager* cancellation_manager_ TF_GUARDED_BY(execution_mutex_);
std::vector<TensorHandlePtr> op_outputs_ TF_GUARDED_BY(execution_mutex_);
StatusPtr status_ TF_GUARDED_BY(execution_mutex_);
const std::string device_;
ExecutorPtr executor_ TF_GUARDED_BY(execution_mutex_);
mutable OpPtr op_ TF_GUARDED_BY(execution_mutex_);
std::unique_ptr<Thread> thread_;
};
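// Illustrative use of a DeviceThread (a sketch, not code from this file;
// `context`, `input_handle`, `attributes`, and `status` are assumed to exist):
//
//   DeviceThread thread("/job:localhost/replica:0/task:0/device:CPU:0",
//                       /*is_async=*/false, /*in_flight_nodes_limit=*/0);
//   CancellationManager cancellation_manager;
//   thread.StartExecute(context, "Identity", {input_handle}, attributes,
//                       /*expected_max_outputs=*/1, cancellation_manager);
//   std::vector<TensorHandlePtr> outputs = thread.Join(status);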
DeviceThread::~DeviceThread() {
{
tensorflow::mutex_lock l(execution_mutex_);
execution_state_ = ExecutionState::kShuttingDown;
}
start_execute_.notify_one();
}
void DeviceThread::AsyncWait(TF_Status* status) {
tensorflow::mutex_lock l(execution_mutex_);
TFE_ExecutorWaitForAllPendingNodes(executor_.get(), status);
TFE_ExecutorClearError(executor_.get());
}
void DeviceThread::Run() {
while (true) {
{
tensorflow::mutex_lock l(execution_mutex_);
while (execution_state_ == ExecutionState::kIdle ||
execution_state_ == ExecutionState::kHasResult) {
start_execute_.wait(l);
}
if (execution_state_ == ExecutionState::kShuttingDown) {
return;
} else if (execution_state_ == ExecutionState::kReadyToExecute) {
op_outputs_ = std::vector<TensorHandlePtr>();
Execute(context_, operation_name_, std::move(op_inputs_), attributes_,
expected_max_outputs_, &op_outputs_, status_.get());
execution_state_ = ExecutionState::kHasResult;
}
}
finished_execute_.notify_one();
}
}
void DeviceThread::StartExecute(TFE_Context* context,
const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) {
{
tensorflow::mutex_lock l(execution_mutex_);
while (execution_state_ != ExecutionState::kIdle) {
finished_join_.wait(l);
}
context_ = context;
operation_name_ = operation_name;
step_id_ = step_id;
op_inputs_ = inputs;
attributes_ = attributes;
expected_max_outputs_ = expected_max_outputs;
cancellation_manager_ = &cancellation_manager;
execution_state_ = ExecutionState::kReadyToExecute;
}
start_execute_.notify_one();
}
std::vector<TensorHandlePtr> DeviceThread::Join(TF_Status* status) {
std::vector<TensorHandlePtr> result;
{
tensorflow::mutex_lock l(execution_mutex_);
while (execution_state_ != ExecutionState::kHasResult) {
finished_execute_.wait(l);
}
if (TF_GetCode(status_.get()) != TF_OK) {
TF_SetStatus(status, TF_GetCode(status_.get()),
TF_Message(status_.get()));
TF_SetStatus(status_.get(), TF_OK, "");
}
cancellation_manager_ = nullptr;
execution_state_ = ExecutionState::kIdle;
result = std::move(op_outputs_);
}
finished_join_.notify_one();
return result;
}
void DeviceThread::Execute(TFE_Context* context, const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes,
int expected_max_outputs,
std::vector<TensorHandlePtr>* outputs,
TF_Status* status) const {
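  // The TFE_Op is created once and reset for subsequent executions, avoiding
  // repeated op construction on the hot path.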
if (op_ == nullptr) {
TFE_ContextSetExecutorForThread(context, executor_.get());
op_.reset(TFE_NewOp(context, operation_name, status));
if (TF_GetCode(status) != TF_OK) return;
TFE_OpSetDevice(op_.get(), device_.c_str(), status);
if (TF_GetCode(status) != TF_OK) return;
} else {
TFE_OpReset(op_.get(), operation_name, device_.c_str(), status);
if (TF_GetCode(status) != TF_OK) return;
}
TFE_OpAddAttrs(op_.get(), attributes);
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
TFE_OpAddInput(op_.get(), inputs[input_index], status);
if (TF_GetCode(status) != TF_OK) return;
}
std::vector<TFE_TensorHandle*> unwrapped_results(expected_max_outputs);
int real_num_outputs = expected_max_outputs;
TFE_OpSetCancellationManager(op_.get(), wrap(cancellation_manager_), status);
if (TF_GetCode(status) != TF_OK) return;
if (step_id_.has_value()) {
tensorflow::unwrap(op_.get())->SetStepId(step_id_.value());
}
TFE_Execute(op_.get(), unwrapped_results.data(), &real_num_outputs, status);
if (TF_GetCode(status) != TF_OK) {
cancellation_manager_->StartCancel();
return;
}
unwrapped_results.resize(real_num_outputs);
outputs->reserve(real_num_outputs);
for (TFE_TensorHandle* unwrapped_result : unwrapped_results) {
outputs->emplace_back(unwrapped_result);
}
}
ParallelDevice::ParallelDevice(const std::vector<std::string>& devices,
bool is_async, int in_flight_nodes_limit)
: underlying_devices_(devices),
default_cancellation_manager_(absl::make_unique<CancellationManager>()) {
device_threads_.reserve(devices.size());
for (int device_index = 0; device_index < devices.size(); ++device_index) {
device_threads_.emplace_back(new DeviceThread(
devices[device_index].c_str(), is_async, in_flight_nodes_limit));
}
}
ParallelDevice::~ParallelDevice() = default;
std::unique_ptr<ParallelTensor> ParallelDevice::CopyToParallelDevice(
TFE_Context* context, TFE_TensorHandle* tensor, TF_Status* status) const {
std::vector<TensorHandlePtr> components;
components.reserve(underlying_devices_.size());
for (const std::string& underlying_device_name : underlying_devices_) {
TFE_TensorHandle* t = TFE_TensorHandleCopyToDevice(
tensor, context, underlying_device_name.c_str(), status);
if (TF_GetCode(status) != TF_OK) return nullptr;
components.emplace_back(t);
}
return ParallelTensor::FromTensorHandles(*this, std::move(components),
status);
}
std::unique_ptr<ParallelTensor> ParallelDevice::DeviceIDs(
TFE_Context* context, TF_Status* status) const {
std::vector<int32_t> ids;
ids.reserve(num_underlying_devices());
for (int i = 0; i < num_underlying_devices(); ++i) {
ids.push_back(i);
}
return ScalarsFromSequence<int32_t>(ids, context, status);
}
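// On failure, Execute() drains pending async work and replaces the shared
// cancellation manager, since a CancellationManager cannot be re-armed once
// StartCancel() has been called.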
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
ParallelDevice::Execute(TFE_Context* context,
const std::vector<ParallelTensor*>& inputs,
const char* operation_name,
const TFE_OpAttrs* attributes, int expected_max_outputs,
TF_Status* status) const {
std::vector<PartialTensorShape> expected_output_shapes(expected_max_outputs);
StartExecute(context, inputs, operation_name, attributes,
expected_max_outputs, *default_cancellation_manager_);
auto result = Join(expected_output_shapes, status);
if (TF_GetCode(status) != TF_OK) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> await_status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextAsyncWait(context, await_status.get());
default_cancellation_manager_ = absl::make_unique<CancellationManager>();
}
return result;
}
void ParallelDevice::StartExecute(TFE_Context* context,
const std::vector<ParallelTensor*>& inputs,
const char* operation_name,
const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) const {
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
std::vector<TFE_TensorHandle*> device_inputs;
device_inputs.reserve(inputs.size());
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
device_inputs.push_back(inputs[input_index]->tensor(device_index));
}
device_thread->StartExecute(
context, operation_name, std::move(device_inputs), attributes,
expected_max_outputs, cancellation_manager, step_id);
}
}
void ParallelDevice::StartExecute(
TFE_Context* context,
const std::vector<std::vector<TFE_TensorHandle*>>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs, CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) const {
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
std::vector<TFE_TensorHandle*> device_inputs;
device_inputs.reserve(inputs.size());
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
device_inputs.push_back(inputs[input_index][device_index]);
}
device_thread->StartExecute(
context, operation_name, std::move(device_inputs), attributes,
expected_max_outputs, cancellation_manager, step_id);
}
}
void ParallelDevice::AsyncWait(TFE_Context* context, TF_Status* status) const {
StatusPtr first_bad_status(nullptr);
for (const auto& dt : device_threads_) {
StatusPtr async_wait_status(TF_NewStatus());
dt->AsyncWait(async_wait_status.get());
if (TF_GetCode(async_wait_status.get()) != TF_OK &&
(first_bad_status == nullptr ||
TF_GetCode(first_bad_status.get()) == TF_CANCELLED)) {
first_bad_status.reset(TF_NewStatus());
TF_SetStatus(first_bad_status.get(), TF_GetCode(async_wait_status.get()),
TF_Message(async_wait_status.get()));
}
}
if (first_bad_status != nullptr) {
TF_SetStatus(status, TF_GetCode(first_bad_status.get()),
TF_Message(first_bad_status.get()));
}
}
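// Collects one result per device thread, keeping the first error seen unless
// it was a cancellation (cancellations are usually secondary effects of the
// original failure), and verifying that every device produced the same number
// of outputs.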
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
ParallelDevice::Join(
const std::vector<PartialTensorShape>& expected_output_shapes,
TF_Status* status) const {
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>> result;
std::vector<std::vector<TensorHandlePtr>> per_device_output_tensors;
per_device_output_tensors.reserve(underlying_devices_.size());
int first_op_output_count = 0;
StatusPtr first_bad_status(nullptr);
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
per_device_output_tensors.push_back(device_thread->Join(status));
if (TF_GetCode(status) != TF_OK &&
(first_bad_status == nullptr
|| TF_GetCode(first_bad_status.get()) == TF_CANCELLED)) {
first_bad_status.reset(TF_NewStatus());
TF_SetStatus(first_bad_status.get(), TF_GetCode(status),
TF_Message(status));
}
if (device_index == 0) {
first_op_output_count = per_device_output_tensors.rbegin()->size();
} else {
if (first_bad_status == nullptr &&
per_device_output_tensors.rbegin()->size() != first_op_output_count) {
first_bad_status.reset(TF_NewStatus());
TF_SetStatus(first_bad_status.get(), TF_INTERNAL,
"Parallel ops produced different numbers of tensors.");
}
}
}
if (first_bad_status != nullptr) {
TF_SetStatus(status, TF_GetCode(first_bad_status.get()),
TF_Message(first_bad_status.get()));
return result;
}
std::vector<std::unique_ptr<ParallelTensor>> per_device_outputs;
per_device_outputs.reserve(first_op_output_count);
for (int i = 0; i < first_op_output_count; ++i) {
std::vector<TensorHandlePtr> components;
components.reserve(underlying_devices_.size());
for (int j = 0; j < underlying_devices_.size(); ++j) {
components.push_back(std::move(per_device_output_tensors[j][i]));
}
if (expected_output_shapes[i].IsFullyDefined()) {
per_device_outputs.push_back(ParallelTensor::FromTensorHandles(
*this, std::move(components),
absl::Span<const int64_t>(expected_output_shapes[i].dim_sizes()),
status));
} else {
per_device_outputs.push_back(ParallelTensor::FromTensorHandles(
*this, std::move(components), status));
}
if (TF_GetCode(status) != TF_OK) return result;
}
result.emplace(std::move(per_device_outputs));
return result;
}
std::vector<std::string> ParallelDevice::SummarizeDeviceNames() const {
std::vector<DeviceNameUtils::ParsedName> parsed_components(
underlying_devices_.size());
for (int component_index = 0; component_index < underlying_devices_.size();
++component_index) {
if (!DeviceNameUtils::ParseFullName(underlying_devices_[component_index],
&parsed_components[component_index]) ||
!DeviceNameUtils::IsSameAddressSpace(
underlying_devices_[component_index], underlying_devices_[0])) {
return underlying_devices_;
}
}
std::vector<std::string> local_names;
local_names.reserve(underlying_devices_.size());
for (const DeviceNameUtils::ParsedName& parsed_component :
parsed_components) {
local_names.push_back(
absl::StrCat(parsed_component.type, ":", parsed_component.id));
}
return local_names;
}
std::unique_ptr<ParallelTensor> ParallelTensor::FromTensorHandles(
const ParallelDevice& parallel_device,
std::vector<TensorHandlePtr> components, absl::Span<const int64_t> shape,
TF_Status* status) {
if (components.empty()) {
TF_SetStatus(status, TF_INTERNAL,
"No components are provide for creating a ParallelTensor");
return nullptr;
}
TFE_TensorHandleGetStatus(components[0].get(), status);
if (!status->status.ok()) {
return nullptr;
}
TF_DataType dtype = TFE_TensorHandleDataType(components[0].get());
for (TensorHandlePtr& component : components) {
TFE_TensorHandleGetStatus(component.get(), status);
if (!status->status.ok()) {
return nullptr;
}
if (TFE_TensorHandleDataType(component.get()) != dtype) {
TF_SetStatus(status, TF_INTERNAL,
"Components of a ParallelTensor must all have "
"the same dtype");
return nullptr;
}
}
return std::unique_ptr<ParallelTensor>(
new ParallelTensor(parallel_device, std::move(components), shape, dtype));
}
std::unique_ptr<ParallelTensor> ParallelTensor::FromTensorHandles(
const ParallelDevice& parallel_device,
std::vector<TensorHandlePtr> components, TF_Status* status) {
if (components.empty()) {
TF_SetStatus(status, TF_INTERNAL,
"No components are provided for creating a ParallelTensor");
return nullptr;
}
TFE_TensorHandleGetStatus(components[0].get(), status);
if (!status->status.ok()) {
return nullptr;
}
TF_DataType dtype = TFE_TensorHandleDataType(components[0].get());
for (TensorHandlePtr& component : components) {
TFE_TensorHandleGetStatus(component.get(), status);
if (!status->status.ok()) {
return nullptr;
}
if (TFE_TensorHandleDataType(component.get()) != dtype) {
TF_SetStatus(status, TF_INTERNAL,
"Components of a ParallelTensor must all have "
"the same dtype");
return nullptr;
}
}
return std::unique_ptr<ParallelTensor>(
new ParallelTensor(parallel_device, std::move(components), dtype));
}
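// Lazily computes a combined shape for the components: equal axis lengths are
// kept, mismatched axes become -1 (unknown), and differing ranks are
// unsupported. For example, components of shape [2, 3] and [2, 5] combine to
// [2, -1].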
Status ParallelTensor::Shape(const std::vector<int64_t>** shape) const {
if (!shape_.has_value()) {
TF_Status status;
PartialTensorShape combined_shape;
TF_RETURN_IF_ERROR(unwrap(tensors_[0].get())->Shape(&combined_shape));
for (const TensorHandlePtr& component : tensors_) {
PartialTensorShape component_shape;
TF_RETURN_IF_ERROR(unwrap(component.get())->Shape(&component_shape));
if (combined_shape.dims() < 0 ||
combined_shape.dims() != component_shape.dims()) {
PartialTensorShape first_shape;
TF_RETURN_IF_ERROR(unwrap(tensors_[0].get())->Shape(&first_shape));
return errors::Unimplemented(absl::StrCat(
"Computing the shape of a ParallelTensor when the components do "
"not all have the same rank is not supported. One tensor had "
"shape ",
first_shape.DebugString(), " and another had shape ",
component_shape.DebugString()));
} else {
for (int axis_index = 0; axis_index < combined_shape.dims();
++axis_index) {
int64_t axis_length = combined_shape.dim_size(axis_index);
if (axis_length != component_shape.dim_size(axis_index)) {
axis_length = -1;
}
TF_RETURN_IF_ERROR(
combined_shape.SetDimWithStatus(axis_index, axis_length));
}
}
}
auto dim_sizes = combined_shape.dim_sizes();
shape_ = std::vector<int64_t>(dim_sizes.begin(), dim_sizes.end());
}
*shape = &*shape_;
return absl::OkStatus();
}
Status ParallelTensor::SummarizeValue(std::string& summary) {
summary = "{";
std::vector<std::string> summarized_devices = device_.SummarizeDeviceNames();
for (int component_index = 0; component_index < tensors_.size();
++component_index) {
ImmediateExecutionTensorHandle* component =
tensorflow::unwrap(tensors_[component_index].get());
std::string component_summary;
TF_RETURN_IF_ERROR(component->SummarizeValue(component_summary));
absl::StrAppend(&summary, component_index == 0 ? "" : ", ", "\"",
summarized_devices[component_index],
"\": ", component_summary);
}
summary += "}";
return absl::OkStatus();
}
}
} | #include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include <gmock/gmock.h>
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_testlib.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace parallel_device {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
TEST(PARALLEL_DEVICE_LIB, TestOpWithError) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true,
2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> handle_op(
TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(handle_op.get(), "dtype", TF_FLOAT);
TFE_OpSetAttrShape(handle_op.get(), "shape", nullptr, 0,
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
auto outputs =
parallel_device.Execute(context.get(), std::vector<ParallelTensor*>(),
"VarHandleOp", TFE_OpGetAttrs(handle_op.get()),
1, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
std::vector<ParallelTensor*> handle_inputs;
handle_inputs.reserve(handles.size());
for (auto& handle : handles) {
handle_inputs.push_back(handle.get());
}
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> read_op(
TFE_NewOp(context.get(), "ReadVariableOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(read_op.get(), "dtype", TF_FLOAT);
parallel_device.Execute(context.get(), handle_inputs, "ReadVariableOp",
TFE_OpGetAttrs(read_op.get()),
1, status.get());
ASSERT_FALSE(TF_GetCode(status.get()) == TF_OK);
TF_SetStatus(status.get(), TF_OK, "");
parallel_device.Execute(context.get(), std::vector<ParallelTensor*>(),
"VarHandleOp", TFE_OpGetAttrs(handle_op.get()),
1, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
}
TEST(PARALLEL_DEVICE_LIB, TestExplicitOutputShape) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true,
2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> handle_op(
TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(handle_op.get(), "dtype", TF_FLOAT);
TFE_OpSetAttrShape(handle_op.get(), "shape", nullptr, 0,
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
CancellationManager cancellation_manager;
parallel_device.StartExecute(context.get(), std::vector<ParallelTensor*>(),
"VarHandleOp", TFE_OpGetAttrs(handle_op.get()),
1,
cancellation_manager);
auto outputs = parallel_device.Join(
{PartialTensorShape({})}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
const std::vector<int64_t>* shape;
Status s = handles[0]->Shape(&shape);
ASSERT_TRUE(s.ok());
EXPECT_EQ(0, shape->size());
}
TEST(PARALLEL_DEVICE_LIB, TestCancelOnError) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(devices);
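  // A function that asserts `condition` and, only after the assert has run,
  // reduces `x` across the group; a failed assert on one device should not
  // deadlock the collective on the other.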
const FunctionDef assert_and_collective = FunctionDefHelper::Define(
"AssertAndCollective",
{"x: float", "condition: bool"},
{"y: float"},
{},
{
{{"assert"},
"Assert",
{"condition", "x"},
{{"T", std::vector<DataType>{DT_FLOAT}}}},
{{"y"},
"CollectiveReduce",
{"x"},
{{"T", DT_FLOAT},
{"group_size", static_cast<int>(devices.size())},
{"group_key", 0},
{"instance_key", 0},
{"merge_op", "Add"},
{"final_op", "Id"},
{"subdiv_offsets", std::vector<int>()}},
{"assert"}},
});
TF_ASSERT_OK(ContextFromInterface(unwrap(context.get()))
->AddFunctionDef(assert_and_collective));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> call_op(
TFE_NewOp(context.get(), "AssertAndCollective", status.get()),
TFE_DeleteOp);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<ParallelTensor> reduced_values =
parallel_device.ScalarsFromSequence<float>({1.0, 2.0}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<ParallelTensor> run_collective =
parallel_device.ScalarsFromSequence<bool>({true, true}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
auto outputs = parallel_device.Execute(
context.get(), {reduced_values.get(), run_collective.get()},
"AssertAndCollective", TFE_OpGetAttrs(call_op.get()),
1, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ASSERT_EQ(outputs->size(), 1);
ParallelTensor* parallel_result = (*outputs)[0].get();
ExpectScalarEq<float>(parallel_result->tensor(0), 3.);
ExpectScalarEq<float>(parallel_result->tensor(1), 3.);
run_collective = parallel_device.ScalarsFromSequence<bool>(
{true, false}, context.get(), status.get());
parallel_device.Execute(context.get(),
{reduced_values.get(), run_collective.get()},
"AssertAndCollective", TFE_OpGetAttrs(call_op.get()),
1, status.get());
EXPECT_NE(TF_GetCode(status.get()), TF_CANCELLED);
EXPECT_EQ(TF_GetCode(status.get()), TF_INVALID_ARGUMENT);
EXPECT_THAT(TF_Message(status.get()), HasSubstr("assertion failed"));
}
TEST(PARALLEL_DEVICE_LIB, TestDifferentShapes) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true,
2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
TensorHandlePtr two_vector = VectorFloatTensorHandle({3., 4.}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TensorHandlePtr three_vector =
VectorFloatTensorHandle({5., 6., 7.}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<TensorHandlePtr> vector_handles;
vector_handles.reserve(2);
vector_handles.push_back(std::move(two_vector));
vector_handles.push_back(std::move(three_vector));
std::unique_ptr<ParallelTensor> unknown_length_vector =
ParallelTensor::FromTensorHandles(
parallel_device, std::move(vector_handles), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const std::vector<int64_t>* shape;
TF_ASSERT_OK(unknown_length_vector->Shape(&shape));
EXPECT_THAT(*shape, ElementsAre(-1));
TensorHandlePtr scalar = FloatTensorHandle(2., status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
two_vector = VectorFloatTensorHandle({3., 4.}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<TensorHandlePtr> mixed_handles;
mixed_handles.reserve(2);
mixed_handles.push_back(std::move(scalar));
mixed_handles.push_back(std::move(two_vector));
std::unique_ptr<ParallelTensor> unknown_dims_vector =
ParallelTensor::FromTensorHandles(parallel_device,
std::move(mixed_handles), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
  // Components with different ranks (a scalar and a vector) can't produce a
  // combined shape, so re-check the earlier same-rank tensor here; executing
  // ops on the mixed-rank tensor below still works.
  TF_ASSERT_OK(unknown_length_vector->Shape(&shape));
EXPECT_THAT(*shape, ElementsAre(-1));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> size_op(
TFE_NewOp(context.get(), "Size", status.get()), TFE_DeleteOp);
auto result = parallel_device.Execute(
context.get(), {unknown_dims_vector.get()}, "Size",
TFE_OpGetAttrs(size_op.get()), 1, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TF_ASSERT_OK((*result)[0]->Shape(&shape));
EXPECT_EQ(0, shape->size());
}
TEST(PARALLEL_DEVICE_LIB, TestScalarsFromSequence) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
{
std::unique_ptr<ParallelTensor> float_tensors =
parallel_device.ScalarsFromSequence<float>({10.0, 11.0}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(float_tensors->tensor(0), 10.0);
ExpectScalarEq<float>(float_tensors->tensor(1), 11.0);
}
{
std::unique_ptr<ParallelTensor> int_tensors =
parallel_device.ScalarsFromSequence<int>({5, 6}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<int>(int_tensors->tensor(0), 5);
ExpectScalarEq<int>(int_tensors->tensor(1), 6);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/parallel_device/parallel_device_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f10e95fd-52ce-4a9d-8f4d-b91e24d2c4f9 | cpp | tensorflow/tensorflow | trt_convert_api | tensorflow/compiler/tf2tensorrt/trt_convert_api.cc | tensorflow/compiler/tf2tensorrt/trt_convert_api_test.cc | #include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include <iostream>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/cc/tools/freeze_saved_model.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace {
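// Provisions a local single-machine Grappler cluster with soft placement
// enabled and detailed stats disabled; the caller owns the returned cluster.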
Status NewCluster(grappler::Cluster** cluster) {
int num_cpu_cores = grappler::GetNumAvailableLogicalCPUCores();
int num_gpus = grappler::GetNumAvailableGPUs();
int timeout_s = 60 * 10;
*cluster = new grappler::SingleMachine(timeout_s, num_cpu_cores, num_gpus);
(*cluster)->DisableDetailedStats(true);
(*cluster)->AllowSoftPlacement(true);
(*cluster)->SetNumWarmupSteps(10);
TF_RETURN_IF_ERROR((*cluster)->Provision());
return OkStatus();
}
Status RunGrappler(const MetaGraphDef& meta_graph_def,
const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const ConfigProto& config_proto, grappler::Cluster* cluster,
GraphDef* out_graph_def) {
grappler::ItemConfig item_config;
for (const string& name : input_names) {
item_config.feed_nodes.insert(name);
}
for (const string& name : output_names) {
item_config.fetch_nodes.insert(name);
}
std::unique_ptr<grappler::GrapplerItem> item =
grappler::GrapplerItemFromMetaGraphDef("tf_graph", meta_graph_def,
item_config);
if (!item) {
return tensorflow::errors::Internal(
"Failed to create grappler item from MetaGraphDef.");
}
tensorflow::DeviceBase* cpu_device = nullptr;
TF_RETURN_IF_ERROR(grappler::RunMetaOptimizer(
std::move(*item), config_proto, cpu_device, cluster, out_graph_def));
VLOG(2) << "Grappler finished\n";
return OkStatus();
}
Status ImportGraphDefToSession(Session* session, const GraphDef& graph_def,
const string& prefix) {
ImportGraphDefOptions opts;
opts.prefix = prefix;
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef(opts, graph_def, &graph, nullptr));
GraphDef new_graph_def;
graph.ToGraphDef(&new_graph_def);
TF_RETURN_IF_ERROR(session->Extend(new_graph_def));
return OkStatus();
}
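// Assembles the Grappler rewriter pipeline for TF-TRT conversion: function
// inlining, constant folding, layout optimization, a second constant-folding
// pass, and finally the custom TensorRTOptimizer configured from `params`.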
Status GetTrtRewriterConfig(const TfTrtConversionParams& params,
const GraphDef& frozen_graph_def,
RewriterConfig* opt_config) {
opt_config->set_meta_optimizer_iterations(tensorflow::RewriterConfig::ONE);
opt_config->set_min_graph_nodes(-1);
opt_config->set_remapping(RewriterConfig_Toggle::RewriterConfig_Toggle_OFF);
opt_config->set_experimental_disable_folding_quantization_emulation(
IS_TRT_VERSION_GE(8, 0, 0, 0));
opt_config->add_optimizers("function");
opt_config->add_optimizers("constfold");
opt_config->add_optimizers("layout");
opt_config->add_optimizers("constfold");
auto trt_optimizer = opt_config->add_custom_optimizers();
trt_optimizer->set_name("TensorRTOptimizer");
auto trt_parameter_map = trt_optimizer->mutable_parameter_map();
(*trt_parameter_map)["is_dynamic_op"].set_b(true);
(*trt_parameter_map)["minimum_segment_size"].set_i(
params.minimum_segment_size);
string prec_string;
TF_RETURN_IF_ERROR(
TrtPrecisionModeToName(params.precision_mode, &prec_string));
(*trt_parameter_map)["precision_mode"].set_s(prec_string);
(*trt_parameter_map)["max_batch_size"].set_i(1);
(*trt_parameter_map)["max_workspace_size_bytes"].set_i(
params.max_workspace_size_bytes);
(*trt_parameter_map)["max_cached_engines"].set_i(params.max_cached_engines);
(*trt_parameter_map)["use_calibration"].set_b(params.use_calibration);
(*trt_parameter_map)["profile_strategy"].set_s(
ProfileStrategyToName(params.profile_strategy));
(*trt_parameter_map)["use_implicit_batch"].set_b(!params.use_dynamic_shape);
(*trt_parameter_map)["_allow_build_at_runtime"].set_b(
params.allow_build_at_runtime);
return OkStatus();
}
Status RunTfTrt(const MetaGraphDef& meta_graph_def,
const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const RewriterConfig& rewriter_config,
GraphDef* segmented_graph_def) {
ConfigProto config_proto;
*config_proto.mutable_graph_options()->mutable_rewrite_options() =
rewriter_config;
VLOG(4) << "Setting up Grappler parameters\n" << config_proto.DebugString();
std::unique_ptr<grappler::Cluster> cluster;
grappler::Cluster* p_cluster;
mutex mu_cluster;
mutex_lock lock(mu_cluster);
TF_RETURN_IF_ERROR(NewCluster(&p_cluster));
cluster.reset(p_cluster);
TF_RETURN_IF_ERROR(RunGrappler(meta_graph_def, input_names, output_names,
config_proto, cluster.get(),
segmented_graph_def));
TF_RETURN_IF_ERROR(cluster->Shutdown());
return OkStatus();
}
Status SetProfileGenerationMode(GraphDef* graph_def, bool mode) {
VLOG(3) << "Setting _profile_generation_mode=" << mode;
std::string op{"TRTEngineOp"};
for (auto& node : *(graph_def->mutable_node())) {
    if (node.op() == op) {
auto* attr = node.mutable_attr();
AttrValue profile_generation_mode;
profile_generation_mode.set_b(mode);
(*attr)["_profile_generation_mode"] = profile_generation_mode;
}
}
return OkStatus();
}
Status RunSession(Session* session, const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const std::vector<Tensor>& input_tensors,
string prefix = "") {
TRT_ENSURE(!input_names.empty());
TRT_ENSURE(!output_names.empty());
TRT_ENSURE(!input_tensors.empty());
std::vector<std::pair<std::string, tensorflow::Tensor>> input_pairs;
std::vector<std::string> prefixed_output_names;
auto prefixed_name = [](std::string prefix, std::string name) {
return !prefix.empty() ? absl::StrJoin({prefix, name}, "/") : name;
};
for (int i = 0; i < input_names.size(); i++) {
input_pairs.push_back(
{prefixed_name(prefix, input_names.at(i)), input_tensors.at(i)});
}
for (int i = 0; i < output_names.size(); i++) {
prefixed_output_names.push_back(prefixed_name(prefix, output_names.at(i)));
}
std::vector<tensorflow::Tensor> output_tensors;
for (int i = 0; i < output_names.size(); i++) {
output_tensors.push_back({});
}
VLOG(3) << "TF-TRT Build mode: running inference\n";
TF_RETURN_IF_ERROR(
session->Run(input_pairs, prefixed_output_names, {}, &output_tensors));
return OkStatus();
}
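// Builds the TRT engines by running inference on the provided inputs. With
// dynamic shapes and multiple input sets, a first pass collects optimization
// profiles (_profile_generation_mode=true); the graph is then re-imported
// under a prefix with profile generation disabled and run once to build.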
Status Build(GraphDef& segmented_graph_def,
const std::vector<std::string>& input_names,
const std::vector<std::string>& output_names,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
             Session* session, const TfTrtConversionParams& params) {
VLOG(2) << "Building the model";
bool need_collect_profiles = params.use_dynamic_shape && inputs.size() > 1;
if (need_collect_profiles) {
TF_RETURN_IF_ERROR(SetProfileGenerationMode(&segmented_graph_def, true));
}
TF_RETURN_IF_ERROR(session->Create(segmented_graph_def));
string prefix = "";
if (need_collect_profiles) {
for (auto const& input : inputs) {
TF_RETURN_IF_ERROR(RunSession(session, input_names, output_names, input));
}
prefix = "TrtBuildStep";
TF_RETURN_IF_ERROR(SetProfileGenerationMode(&segmented_graph_def, false));
VLOG(3) << "Importing graph with _profile_generation_mode disabled";
TF_RETURN_IF_ERROR(
ImportGraphDefToSession(session, segmented_graph_def, prefix));
}
TF_RETURN_IF_ERROR(
RunSession(session, input_names, output_names, *inputs.begin(), prefix));
return OkStatus();
}
Status GetResourceManager(const NodeDef& node, Session* session,
ResourceMgr** rm) {
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session->LocalDeviceManager(&device_mgr));
Device* device;
string device_name = node.device().empty()
? "/job:localhost/replica:0/task:0/device:GPU:0"
: node.device();
TF_RETURN_IF_ERROR(device_mgr->LookupDevice(device_name, &device));
*rm = device->resource_manager();
return OkStatus();
}
Status GetEngineCacheResource(const NodeDef& node, Session* session,
TRTEngineCacheResource** resource) {
ResourceMgr* rm;
TF_RETURN_IF_ERROR(GetResourceManager(node, session, &rm));
absl::string_view resource_name = node.name();
size_t last_slash = resource_name.find_last_of('/');
if (last_slash != absl::string_view::npos) {
resource_name.remove_prefix(last_slash + 1);
}
const std::string container(kTfTrtContainerName);
*resource = nullptr;
TF_RETURN_IF_ERROR(
rm->Lookup(container, std::string(resource_name), resource));
  if (*resource == nullptr || (*resource)->cache_.size() == 0) {
    return errors::Internal("Engine cache not found for ", resource_name);
}
return OkStatus();
}
Status ReadSerializedEngine(
const NodeDef& node, Session* session,
TrtUniquePtrType<nvinfer1::IHostMemory>* engine_data) {
TRTEngineCacheResource* resource;
TF_RETURN_IF_ERROR(GetEngineCacheResource(node, session, &resource));
core::ScopedUnref unref_cache_res(resource);
if (resource->cache_.size() > 1) {
return errors::Internal(
"Multiple engines found, but we can only serialize one");
}
const std::unique_ptr<EngineContext>& engine =
resource->cache_.begin()->second;
if (!engine) {
return errors::Internal("Engine not found for", node.name());
}
if (engine->GetCudaEngine()) {
engine_data->reset(engine->GetCudaEngine()->serialize());
} else {
LOG(WARNING) << "Engine cache contains nullptr";
}
return OkStatus();
}
Status ConvertToStaticEngine(const GraphDef& graph_def,
GraphDef* static_graph_def, Session* session) {
*static_graph_def = graph_def;
VLOG(1) << "Saving TRT engines as static engine";
std::string op{"TRTEngineOp"};
for (auto& node : *(static_graph_def->mutable_node())) {
    if (node.op() == op) {
VLOG(2) << "Saving TRT engine for " << node.name()
<< ", device: " << node.device();
TrtUniquePtrType<nvinfer1::IHostMemory> engine_data;
TF_RETURN_IF_ERROR(ReadSerializedEngine(node, session, &engine_data));
auto* attr = node.mutable_attr();
AttrValue static_engine;
static_engine.set_b(true);
AttrValue engine_string;
if (engine_data) {
engine_string.set_s(engine_data->data(), engine_data->size());
}
(*attr)["static_engine"] = static_engine;
(*attr)["serialized_segment"] = engine_string;
}
}
return OkStatus();
}
Status ValidateConversionParams(const TfTrtConversionParams& p, int n_inputs) {
if (p.precision_mode == TrtPrecisionMode::INT8 && p.use_calibration) {
return errors::InvalidArgument(
"Calibration not yet implemented through the C++ interface. Please use "
"our Python API for calibration.");
}
if (p.convert_to_static_engine && n_inputs == 0) {
return errors::InvalidArgument(
"TRT Engine needs to be built before we can convert it to static "
"engine. Please provide input data to build the model.");
}
  if (!p.convert_to_static_engine && n_inputs > 0) {
    LOG(WARNING)
        << "Skipping build mode because we cannot save the "
           "engines. Use the convert_to_static_engine=true conversion "
           "parameter to enable build mode and save the engines in the graph.";
}
if (!p.allow_build_at_runtime && n_inputs == 0) {
LOG(WARNING)
<< "TRT will not be used since allow_build_at_runtime is disabled and "
"no inputs are provided to build during conversion.";
}
return OkStatus();
}
tensorflow::SessionOptions GetSessionConfig() {
tensorflow::SessionOptions opts;
auto* rewriter_opts =
opts.config.mutable_graph_options()->mutable_rewrite_options();
rewriter_opts->set_experimental_disable_folding_quantization_emulation(true);
rewriter_opts->set_disable_meta_optimizer(true);
opts.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
return opts;
}
}
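// Illustrative call of the frozen-graph overload below (a sketch;
// `frozen_graph_def` and `input_tensor` are assumed to exist, and the tensor
// names are hypothetical):
//
//   TfTrtConversionParams params;
//   params.precision_mode = TrtPrecisionMode::FP16;
//   params.convert_to_static_engine = true;
//   std::vector<std::vector<tensorflow::Tensor>> inputs = {{input_tensor}};
//   StatusOr<GraphDef> converted = ConvertAndBuild(
//       frozen_graph_def, {"input"}, {"output"}, inputs, params);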
StatusOr<GraphDef> ConvertAndBuild(
const GraphDef& frozen_graph_def, const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
const TfTrtConversionParams& conv_params) {
TF_RETURN_IF_ERROR(ValidateConversionParams(conv_params, inputs.size()));
MetaGraphDef meta_graph;
*meta_graph.mutable_graph_def() = frozen_graph_def;
RewriterConfig rewriter_config;
TF_RETURN_IF_ERROR(
GetTrtRewriterConfig(conv_params, frozen_graph_def, &rewriter_config));
GraphDef segmented_graph_def;
TF_RETURN_IF_ERROR(RunTfTrt(meta_graph, input_names, output_names,
rewriter_config, &segmented_graph_def));
GraphDef output;
if (!inputs.empty() && conv_params.convert_to_static_engine) {
std::unique_ptr<tensorflow::Session> session(
        tensorflow::NewSession(GetSessionConfig()));
if (!session) {
return errors::Internal("Failed to create build session");
}
TF_RETURN_IF_ERROR(Build(segmented_graph_def, input_names, output_names,
inputs, session.get(), conv_params));
TF_RETURN_IF_ERROR(
ConvertToStaticEngine(segmented_graph_def, &output, session.get()));
} else {
output = segmented_graph_def;
}
VLOG(1) << "TF-TRT conversion finished";
return output;
}
Status InlineFunctions(const MetaGraphDef& meta_graph_def,
GraphDef* out_graph_def) {
ConfigProto config_proto;
auto opt_config =
config_proto.mutable_graph_options()->mutable_rewrite_options();
opt_config->set_meta_optimizer_iterations(tensorflow::RewriterConfig::ONE);
opt_config->set_min_graph_nodes(-1);
opt_config->add_optimizers("function");
TF_RETURN_IF_ERROR(RunGrappler(meta_graph_def, {}, {}, config_proto, nullptr,
out_graph_def));
VLOG(2) << "Graph is inlined";
return OkStatus();
}
Status FreezeGraph(SavedModelBundle& bundle, MetaGraphDef* frozen_meta_graph) {
std::unordered_set<std::string> inputs;
std::unordered_set<std::string> outputs;
GraphDef frozen_graph_def;
TF_RETURN_IF_ERROR(
FreezeSavedModel(bundle, &frozen_graph_def, &inputs, &outputs));
*frozen_meta_graph = bundle.meta_graph_def;
GraphDef* gdef = frozen_meta_graph->mutable_graph_def();
*gdef = frozen_graph_def;
VLOG(2) << "Graph frozen";
return OkStatus();
}
std::vector<std::string> GetNodeNames(
const google::protobuf::Map<std::string, tensorflow::TensorInfo>& signature) {
std::vector<std::string> names;
for (auto const& item : signature) {
absl::string_view name = item.second.name();
size_t last_colon = name.find_last_of(':');
if (last_colon != absl::string_view::npos) {
name.remove_suffix(name.size() - last_colon);
}
names.push_back(std::string(name));
}
return names;
}
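// SavedModel entry point: inlines functions, freezes variables into
// constants, reads the input/output node names from the requested signature,
// and then delegates to the frozen-graph overload above.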
StatusOr<GraphDef> ConvertAndBuild(
SavedModelBundle* bundle, const std::string& signature_key,
const std::vector<std::vector<tensorflow::Tensor>>& inputs,
const TfTrtConversionParams& conversion_params) {
GraphDef inlined_graph_def;
TF_RETURN_IF_ERROR(
InlineFunctions(bundle->meta_graph_def, &inlined_graph_def));
*bundle->meta_graph_def.mutable_graph_def() = inlined_graph_def;
MetaGraphDef frozen_meta_graph;
TF_RETURN_IF_ERROR(FreezeGraph(*bundle, &frozen_meta_graph));
auto signature_map = bundle->GetSignatures();
const tensorflow::SignatureDef& signature = signature_map[signature_key];
std::vector<std::string> input_names = GetNodeNames(signature.inputs());
std::vector<std::string> output_names = GetNodeNames(signature.outputs());
return ConvertAndBuild(frozen_meta_graph.graph_def(), input_names,
output_names, inputs, conversion_params);
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/state_ops.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace tensorrt {
struct TestParam {
TfTrtConversionParams conv_params;
std::vector<std::vector<int64>> input_shapes;
};
class TrtConverterTest
: public ::testing::TestWithParam<std::tuple<TestParam, bool, bool>> {
protected:
TrtConverterTest() {
param_ = std::get<0>(GetParam());
use_variable_ = std::get<1>(GetParam());
use_function_ = std::get<2>(GetParam());
input_tensors_ = GetInputTensors();
}
GraphDef GetGraphDef(PartialTensorShape input_shape) {
Scope root = Scope::NewRootScope();
Output c;
c = ops::Const(root.WithOpName("my_const"), {{42.0f, 137.0f}});
Output v;
if (use_variable_) {
Output v_handle = ops::VarHandleOp(root.WithOpName("my_var"),
DataType::DT_FLOAT, {1, 2});
v = ops::ReadVariableOp(root.WithOpName("my_var/Read/ReadVariableOp"),
v_handle, DataType::DT_FLOAT);
auto v_init =
ops::AssignVariableOp(root.WithOpName("my_var/init"), v_handle, c);
} else {
v = c;
}
const auto attrs = ops::Placeholder::Shape(input_shape);
auto x = ops::Placeholder(root.WithOpName("input"), DT_FLOAT, attrs);
auto y = ops::Mul(root.WithOpName("my_mul"), x, v);
auto z = ops::Add(root.WithOpName("my_add"), x, y);
auto q = ops::Identity(root.WithOpName("output"), z);
GraphDef out;
TF_CHECK_OK(root.ToGraphDef(&out));
return out;
}
GraphDef GetGraphWithFunction(PartialTensorShape input_shape) {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
const Tensor kOne = test::AsScalar<float>(1.0f);
TensorShapeProto value_shape_proto;
kOne.shape().AsProto(&value_shape_proto);
TensorShapeProto input_shape_proto;
input_shape.AsProto(&input_shape_proto);
NodeDef value_node;
if (use_variable_) {
value_node =
NDef("my_value", "Identity", {"my_var:0"}, {{"T", DT_RESOURCE}});
} else {
value_node =
NDef("my_value", "Identity", {"my_const:0"}, {{"T", DT_FLOAT}});
}
GraphDef gdef = GDef(
{
NDef("input", "Placeholder", {},
{{"dtype", DT_FLOAT}, {"shape", input_shape_proto}}),
NDef("my_const", "Const", {},
{{"dtype", DT_FLOAT}, {"value", kOne}}),
value_node,
NDef("call", "StatefulPartitionedCall", {"input", "my_value"},
{{"Tin", DataTypeSlice{DT_FLOAT, use_variable_ ? DT_RESOURCE
: DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("f", {})}}),
NDef("output", "Identity", {"call:0"}, {{"T", DT_FLOAT}}),
},
{});
FunctionDef fdef;
if (use_variable_) {
*gdef.add_node() =
NDef("my_var", "VarHandleOp", {},
{{"dtype", DT_FLOAT}, {"shape", value_shape_proto}});
*gdef.add_node() = NDef("my_var/init", "AssignVariableOp",
{"my_var", "my_const"}, {{"dtype", DT_FLOAT}});
*gdef.add_node() = NDef("my_var/Read/ReadVariableOp", "ReadVariableOp",
{"my_var"}, {{"dtype", DT_FLOAT}});
fdef = FunctionDefHelper::Define(
"f",
{"x: float", "v: resource"},
{"q: float"},
{},
{{{"my_var/Read/ReadVariableOp"},
"ReadVariableOp",
{"v"},
{{"dtype", DT_FLOAT}}},
{{"my_mul"},
"Mul",
{"x", "my_var/Read/ReadVariableOp"},
{{"T", DT_FLOAT}}},
{{"my_add"}, "AddV2", {"x", "my_mul"}, {{"T", DT_FLOAT}}},
{{"q"}, "Identity", {"my_add"}, {{"T", DT_FLOAT}}}});
} else {
fdef = FunctionDefHelper::Define(
"f",
{"x: float", "v: float"},
{"q: float"},
{},
{{{"my_mul"}, "Mul", {"x", "v"}, {{"T", DT_FLOAT}}},
{{"my_add"}, "AddV2", {"x", "my_mul"}, {{"T", DT_FLOAT}}},
{{"q"}, "Identity", {"my_add"}, {{"T", DT_FLOAT}}}});
}
*gdef.mutable_library()->add_function() = fdef;
return gdef;
}
MetaGraphDef GetModel() {
PartialTensorShape shape({-1, 2});
MetaGraphDef out;
if (use_function_) {
*(out.mutable_graph_def()) = GetGraphWithFunction(shape);
} else {
*(out.mutable_graph_def()) = GetGraphDef(shape);
}
VLOG(2) << out.graph_def().DebugString();
TensorShapeProto shape_proto;
shape.AsProto(&shape_proto);
SignatureDef signature_def;
(*signature_def.mutable_inputs())["input"].set_name("input:0");
(*signature_def.mutable_inputs())["input"].set_dtype(DT_FLOAT);
*(*signature_def.mutable_inputs())["input"].mutable_tensor_shape() =
shape_proto;
(*signature_def.mutable_outputs())["output"].set_name("output:0");
(*signature_def.mutable_outputs())["output"].set_dtype(DT_FLOAT);
*(*signature_def.mutable_outputs())["output"].mutable_tensor_shape() =
shape_proto;
(*out.mutable_signature_def())["serving_default"] = signature_def;
VLOG(2) << signature_def.DebugString();
return out;
}
Status GetSavedModelBundle(SavedModelBundle* bundle) {
bundle->meta_graph_def = GetModel();
Session* session = nullptr;
TF_RETURN_IF_ERROR(NewSession(tensorflow::SessionOptions(), &session));
TF_RETURN_IF_ERROR(session->Create(bundle->meta_graph_def.graph_def()));
bundle->session.reset(session);
    TF_RETURN_IF_ERROR(session->Run(/*inputs=*/{}, /*output_tensor_names=*/{},
                                    /*target_node_names=*/{"my_var/init"},
                                    nullptr));
return OkStatus();
}
void CheckTrtNode(const GraphDef& converted_graph_def) {
int n_trt_ops = 0;
    const string op_name{"TRTEngineOp"};
    for (const auto& node : converted_graph_def.node()) {
      if (node.op() == op_name) {
n_trt_ops++;
const auto& attr = node.attr();
EXPECT_EQ(attr.at("static_engine").b(),
param_.conv_params.convert_to_static_engine);
if (param_.conv_params.convert_to_static_engine) {
VLOG(2) << "Found serialized segment with size "
<< attr.at("serialized_segment").s().size();
EXPECT_GT(attr.at("serialized_segment").s().size(), 0);
}
}
}
EXPECT_EQ(n_trt_ops, 1);
}
std::vector<std::vector<Tensor>> GetInputTensors() {
std::vector<std::vector<Tensor>> input_tensors;
for (const std::vector<int64>& shape : param_.input_shapes) {
Tensor tensor(DT_FLOAT, TensorShape(shape));
test::FillIota(&tensor, 1.0f);
input_tensors.push_back({tensor});
}
return input_tensors;
}
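  // Runs every input batch through both the original and the converted graph
  // (each in its own session) and expects element-wise identical outputs.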
void RunAndCompareResults(Session* session,
const GraphDef& converted_graph_def) {
Session* p_session = nullptr;
TF_EXPECT_OK(NewSession(SessionOptions(), &p_session));
std::unique_ptr<tensorflow::Session> trt_session(p_session);
TF_EXPECT_OK(trt_session->Create(converted_graph_def));
for (const std::vector<Tensor>& input : input_tensors_) {
std::vector<Tensor> outputs;
TF_EXPECT_OK(
session->Run({{"input", input.at(0)}}, {"output"}, {}, &outputs));
std::cout << outputs.at(0).DebugString() << std::endl;
std::vector<Tensor> trt_outputs;
TF_EXPECT_OK(trt_session->Run({{"input", input.at(0)}}, {"output"}, {},
&trt_outputs));
std::cout << trt_outputs.at(0).DebugString() << std::endl;
ASSERT_EQ(outputs.size(), 1);
ASSERT_EQ(trt_outputs.size(), 1);
tensorflow::test::ExpectEqual(outputs[0], trt_outputs[0]);
}
}
void ConvertAndRunFrozenGraph() {
MetaGraphDef meta_graph_def = GetModel();
StatusOr<GraphDef> result = tensorrt::ConvertAndBuild(
meta_graph_def.graph_def(), {"input"}, {"output"}, input_tensors_,
param_.conv_params);
TF_ASSERT_OK(result.status());
const GraphDef& converted_graph_def = result.value();
CheckTrtNode(converted_graph_def);
Session* p_session = nullptr;
TF_EXPECT_OK(NewSession(SessionOptions(), &p_session));
std::unique_ptr<tensorflow::Session> session(p_session);
TF_EXPECT_OK(session->Create(meta_graph_def.graph_def()));
RunAndCompareResults(session.get(), converted_graph_def);
}
void ConvertAndRunSavedModel() {
SavedModelBundle bundle;
TF_CHECK_OK(GetSavedModelBundle(&bundle));
StatusOr<GraphDef> result = tensorrt::ConvertAndBuild(
&bundle, "serving_default", input_tensors_, param_.conv_params);
TF_ASSERT_OK(result.status());
const GraphDef& converted_graph_def = result.value();
CheckTrtNode(converted_graph_def);
RunAndCompareResults(bundle.GetSession(), converted_graph_def);
}
TestParam param_;
bool use_variable_;
bool use_function_;
std::vector<std::vector<Tensor>> input_tensors_;
};
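// The TfTrtConversionParams initializers below are positional. Assuming the
// field order declared in trt_convert_api.h, they are:
// max_workspace_size_bytes, precision_mode, minimum_segment_size,
// max_cached_engines, use_calibration, use_dynamic_shape, profile_strategy,
// allow_build_at_runtime, convert_to_static_engine.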
INSTANTIATE_TEST_CASE_P(
TrtConverterTestInstantiation, TrtConverterTest,
::testing::Combine(
::testing::Values(
TestParam{TfTrtConversionParams{
1 << 20,
TrtPrecisionMode::FP32,
3,
1,
false,
true,
ProfileStrategy::kOptimal,
true,
true
},
{{1, 2}, {4, 2}}},
TestParam{TfTrtConversionParams{
1 << 20,
TrtPrecisionMode::FP16,
3,
1,
false,
false,
ProfileStrategy::kRange,
true,
true
},
{{1, 2}}},
TestParam{TfTrtConversionParams{
1 << 20,
TrtPrecisionMode::FP32,
3,
1,
false,
true,
ProfileStrategy::kOptimal,
true,
false
},
{{1, 2}, {4, 2}}},
TestParam{TfTrtConversionParams{
1 << 20,
TrtPrecisionMode::FP16,
3,
2,
false,
false,
ProfileStrategy::kRange,
true,
false
},
{{1, 2}, {4, 2}}}),
::testing::Values(false, true),
::testing::Values(false, true)));
TEST_P(TrtConverterTest, Basic) {
if (use_variable_) {
ConvertAndRunSavedModel();
} else {
ConvertAndRunFrozenGraph();
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/trt_convert_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/trt_convert_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8706a30-8422-4dae-a5da-72cc6621a425 | cpp | tensorflow/tensorflow | trt_engine_resource_ops | tensorflow/compiler/tf2tensorrt/ops/trt_engine_resource_ops.cc | tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
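// Resource-management ops for the TRT engine cache: CreateTRTResourceHandle
// creates a handle, InitializeTRTResource populates the cache from a
// serialized engine file, and SerializeTRTResource writes the cache back to
// disk, optionally deleting the resource.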
REGISTER_OP("CreateTRTResourceHandle")
.Attr("resource_name: string")
.Output("resource_handle: resource")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("InitializeTRTResource")
.Attr("max_cached_engines_count: int = 1")
.Input("resource_handle: resource")
.Input("filename: string")
.SetIsStateful()
.SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("SerializeTRTResource")
.Attr("delete_resource: bool = false")
.Attr("save_gpu_specific_engines: bool = True")
.Input("resource_name: string")
.Input("filename: string")
.SetIsStateful()
.SetShapeFn(shape_inference::NoOutputs);
}
#endif | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2tensorrt/common/datavec.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_instance.pb.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
struct TestParam {
nvinfer1::Dims dims;
bool dynamic_shape;
int n_inputs;
};
class TRTEngineResourceOpsTest
: public OpsTestBase,
public ::testing::WithParamInterface<TestParam> {
public:
TRTEngineResourceOpsTest() : param_(GetParam()) {}
protected:
void Reset() {
for (auto& temp : tensors_) {
delete temp;
}
for (auto& temp : managed_outputs_) {
delete temp;
}
tensors_.clear();
managed_outputs_.clear();
inputs_.clear();
}
ITensorProxyPtr NetworkWith1Input(nvinfer1::INetworkDefinition* network,
ITensorProxyPtr input) {
nvinfer1::IUnaryLayer* layer =
network->addUnary(*input->trt_tensor(), nvinfer1::UnaryOperation::kEXP);
EXPECT_NE(nullptr, layer);
return layer->getOutput(0);
}
ITensorProxyPtr NetworkWith2Inputs(nvinfer1::INetworkDefinition* network,
ITensorProxyPtr input) {
nvinfer1::Dims dims2{1, {2}};
ITensorProxyPtr input2 =
network->addInput(absl::StrCat(IONamePrefixes::kInputPHName, 1).c_str(),
nvinfer1::DataType::kINT32, dims2);
EXPECT_NE(nullptr, input2->trt_tensor());
nvinfer1::Dims start{2, {0, 0}};
nvinfer1::Dims stride{2, {1, 1}};
auto slice_layer =
network->addSlice(*input->trt_tensor(), start, stride, stride);
EXPECT_NE(nullptr, slice_layer);
slice_layer->setInput(2, *input2->trt_tensor());
ITensorProxyPtr sliced_input = slice_layer->getOutput(0);
EXPECT_NE(nullptr, sliced_input->trt_tensor());
auto layer = network->addElementWise(*sliced_input->trt_tensor(),
*sliced_input->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
EXPECT_NE(nullptr, layer);
return layer->getOutput(0);
}
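  // Builds a small TensorRT engine around the one- or two-input network
  // above. For the dynamic-shape cases, an optimization profile is collected
  // from three shape sets (every dimension set to 3, 6, and 9) before the
  // engine is built with the kOptimal strategy.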
TrtUniquePtrType<nvinfer1::ICudaEngine> CreateTRTEngine() {
TrtUniquePtrType<nvinfer1::IBuilder> builder(
nvinfer1::createInferBuilder(logger_));
TrtUniquePtrType<nvinfer1::INetworkDefinition> network;
    network =
        TrtUniquePtrType<nvinfer1::INetworkDefinition>(builder->createNetworkV2(
            1U << static_cast<int>(
                nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
nvinfer1::Dims dims = this->param_.dims;
if (this->param_.dynamic_shape) {
std::fill(dims.d, dims.d + dims.nbDims, -1);
}
const std::string in_name = StrCat(IONamePrefixes::kInputPHName, 0);
ITensorProxyPtr input =
network->addInput(in_name.c_str(), nvinfer1::DataType::kFLOAT, dims);
EXPECT_NE(nullptr, input->trt_tensor());
ITensorProxyPtr output =
this->param_.n_inputs == 1
? this->NetworkWith1Input(network.get(), input)
: this->NetworkWith2Inputs(network.get(), input);
output->setName("output");
network->markOutput(*output->trt_tensor());
TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config(
builder->createBuilderConfig());
builder_config->setMaxWorkspaceSize(1 << 10);
builder->setMaxBatchSize(1);
if (this->param_.dynamic_shape) {
TrtShapeOptimizationProfile profile;
profile.SetShapeTensorMask(network.get());
const int n_input = param_.n_inputs;
std::vector<bool> input_mask(n_input, true);
profile.SetInputMask(input_mask);
for (int i = 1; i <= 3; i++) {
std::vector<TensorShape> shape_vec(n_input);
std::vector<int> dimvec(this->param_.dims.nbDims, 3 * i);
TensorShape shape;
TF_CHECK_OK(
TensorShapeUtils::MakeShape(dimvec.data(), dimvec.size(), &shape));
const ITensorProxyPtr input = network->getInput(0);
const char* name = input->getName();
VLOG(2) << "Defining profile for input " << name;
shape_vec[0] = shape;
if (this->param_.n_inputs == 2) {
TF_CHECK_OK(TensorShapeUtils::MakeShape(
std::vector<int32>{param_.dims.nbDims}, &shape));
shape_vec[1] = shape;
Tensor shape_tensor(DT_INT32, shape);
std::vector<int32> vals{1, i};
std::copy_n(vals.data(), vals.size(),
shape_tensor.flat<int32_t>().data());
DataVec shape_values{{"one", {}}, {"two", shape_tensor}};
TF_CHECK_OK(profile.CollectShapeValues(shape_values));
} else {
TF_CHECK_OK(profile.CollectShapeValues({{"one", {}}}));
}
profile.AddShape(shape_vec);
}
std::vector<PartialTensorShape> input_partial_shapes;
TF_CHECK_OK(GetNetworkInputShapes(network.get(), &input_partial_shapes));
profile.InitProfiles(input_partial_shapes, ProfileStrategy::kOptimal);
TF_CHECK_OK(profile.ConfigureBuilder(builder.get(), builder_config.get(),
network.get()));
}
VLOG(2) << "ConfigureBuilder Finished";
TrtUniquePtrType<nvinfer1::ICudaEngine> engine(
builder->buildEngineWithConfig(*network, *builder_config));
VLOG(2) << "Engine constructed";
EXPECT_NE(nullptr, engine);
return engine;
}
Logger& logger_ = *Logger::GetLogger();
TestParam param_;
};
#if IS_TRT_VERSION_GE(7, 1, 3, 0)
constexpr std::array<TestParam, 3> TestParameters = {
TestParam{nvinfer1::Dims{1, {1}}, false, 1},
TestParam{nvinfer1::Dims{1, {1}}, true, 1},
TestParam{nvinfer1::Dims{2, {3, 3}}, true, 2}};
#else
constexpr std::array<TestParam, 2> TestParameters = {
TestParam{nvinfer1::Dims{1, {1}}, false, 1},
TestParam{nvinfer1::Dims{1, {1}}, true, 1}};
#endif
INSTANTIATE_TEST_CASE_P(EngineResourceOpsTestInstantiation,
TRTEngineResourceOpsTest,
::testing::ValuesIn(TestParameters));
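// Lifecycle test: create a resource handle, initialize the cache from an
// empty file, insert an engine, serialize the cache while deleting the
// resource, verify the handle is gone, then re-initialize from the
// serialized file and check that the engine and its optimization profiles
// are restored.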
TEST_P(TRTEngineResourceOpsTest, Basic) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("GPU", {}, "/job:worker/replica:0/task:0"));
ResourceMgr* rm = device->resource_manager();
SetDevice(DEVICE_GPU, std::move(device));
const string container(kTfTrtContainerName);
const string resource_name = "myresource";
Reset();
TF_ASSERT_OK(NodeDefBuilder("op", "CreateTRTResourceHandle")
.Attr("resource_name", resource_name)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
TF_ASSERT_OK(RunOpKernel());
ResourceHandle handle =
context_->mutable_output(0)->scalar<ResourceHandle>()();
TRTEngineCacheResource* resource = nullptr;
EXPECT_TRUE(
errors::IsNotFound(rm->Lookup(container, resource_name, &resource)));
Reset();
Env* env = Env::Default();
const string filename = io::JoinPath(testing::TmpDir(), "trt_engine_file");
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(env->NewWritableFile(filename, &file));
}
TF_ASSERT_OK(NodeDefBuilder("op", "InitializeTRTResource")
.Input(FakeInput(DT_RESOURCE))
.Input(FakeInput(DT_STRING))
.Attr("max_cached_engines_count", 1)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
AddInputFromArray<tstring>(TensorShape({}), {filename});
TF_ASSERT_OK(RunOpKernel());
EXPECT_TRUE(rm->Lookup(container, resource_name, &resource).ok());
EXPECT_EQ(0, resource->cache_.size());
TrtUniquePtrType<nvinfer1::ICudaEngine> engine = CreateTRTEngine();
ExecutionContext context = ExecutionContext::Create(engine.get());
std::vector<TensorShape> engine_input_shape(1);
TF_ASSERT_OK(DimsAdapter(param_.dims).TensorShape(&(engine_input_shape[0])));
if (param_.n_inputs > 1) {
engine_input_shape.push_back(TensorShape({1, 1}));
}
resource->cache_.emplace(
engine_input_shape,
std::make_unique<EngineContext>(std::move(engine), std::move(context)));
EXPECT_FALSE(resource->RefCountIsOne());
Reset();
TF_ASSERT_OK(NodeDefBuilder("op", "SerializeTRTResource")
.Attr("delete_resource", true)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {resource_name});
AddInputFromArray<tstring>(TensorShape({}), {filename});
TF_ASSERT_OK(RunOpKernel());
EXPECT_TRUE(resource->RefCountIsOne());
resource->Unref();
Reset();
TF_ASSERT_OK(NodeDefBuilder("op", "DestroyResourceOp")
.Attr("ignore_lookup_error", false)
.Input(FakeInput(DT_RESOURCE))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
EXPECT_TRUE(errors::IsNotFound(RunOpKernel()));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(filename, &file));
auto reader = std::make_unique<io::RecordReader>(file.get());
uint64 offset = 0;
tstring record;
TF_ASSERT_OK(reader->ReadRecord(&offset, &record));
TRTEngineInstance engine_instance;
engine_instance.ParseFromString(record);
EXPECT_EQ(param_.n_inputs, engine_instance.input_shapes_size());
EXPECT_EQ(param_.dims.nbDims, engine_instance.input_shapes(0).dim_size());
for (int i = 0; i < param_.dims.nbDims; i++) {
EXPECT_EQ(param_.dims.d[i], engine_instance.input_shapes(0).dim(i).size());
}
EXPECT_TRUE(errors::IsOutOfRange(reader->ReadRecord(&offset, &record)));
Reset();
TF_ASSERT_OK(NodeDefBuilder("op", "InitializeTRTResource")
.Input(FakeInput(DT_RESOURCE))
.Input(FakeInput(DT_STRING))
.Attr("max_cached_engines_count", 1)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
AddInputFromArray<tstring>(TensorShape({}), {filename});
TF_ASSERT_OK(RunOpKernel());
EXPECT_TRUE(rm->Lookup(container, resource_name, &resource).ok());
EXPECT_EQ(1, resource->cache_.size());
if (this->param_.dynamic_shape) {
EXPECT_EQ(3, resource->profiles_.GetNumProfiles());
EXPECT_EQ(3, resource->cache_.begin()->second->GetNumContexts());
if (this->param_.n_inputs == 1) {
std::vector<TensorShape> shapes(1);
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{6}, &shapes[0]));
EXPECT_EQ(1, resource->profiles_.GetProfileNumber(shapes));
} else {
std::vector<TensorShape> shapes(2);
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{9, 9}, &shapes[0]));
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{2}, &shapes[1]));
Tensor shape_tensor(DT_INT32, shapes[1]);
std::vector<int32> vals{1, 3};
std::copy_n(vals.data(), vals.size(),
shape_tensor.flat<int32_t>().data());
DataVec shape_values{{"one", {}}, {"two", shape_tensor}};
TF_CHECK_OK(resource->profiles_.CollectShapeValues(shape_values));
EXPECT_EQ(2, resource->profiles_.GetProfileNumber(shapes));
}
}
EXPECT_FALSE(resource->RefCountIsOne());
Reset();
TF_ASSERT_OK(NodeDefBuilder("op", "DestroyResourceOp")
.Attr("ignore_lookup_error", false)
.Input(FakeInput(DT_RESOURCE))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
TF_ASSERT_OK(RunOpKernel());
EXPECT_TRUE(errors::IsNotFound(RunOpKernel()));
EXPECT_TRUE(resource->RefCountIsOne());
resource->Unref();
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/ops/trt_engine_resource_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
36f85540-8919-4e0b-b082-182c3285944e | cpp | tensorflow/tensorflow | trt_engine_op | tensorflow/compiler/tf2tensorrt/ops/trt_engine_op.cc | tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
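// TRTEngineOp wraps a converted TensorRT segment. Its shape function simply
// forwards the statically known `output_shapes` attribute. The trailing
// attributes after the shape function appear to be retained only for
// compatibility with graphs produced by older converter versions.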
REGISTER_OP("TRTEngineOp")
.Attr("serialized_segment: string")
.Attr("segment_func: func = {}")
.Attr("InT: list({bool,int8,float16,float32,int32,resource})")
.Attr("OutT: list({bool,int8,float16,float32,int32})")
.Attr("input_shapes: list(shape) = []")
.Attr("output_shapes: list(shape) = []")
.Attr("max_cached_engines_count: int = 1")
.Attr("max_batch_size: int = 1")
.Attr("workspace_size_bytes: int")
.Attr("precision_mode: {'FP32', 'FP16', 'INT8'}")
.Attr("calibration_data: string = ''")
.Attr("use_calibration: bool = true")
.Input("in_tensor: InT")
.Output("out_tensor: OutT")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
std::vector<tensorflow::PartialTensorShape> output_shapes;
TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
for (int i = 0; i < output_shapes.size(); i++) {
::tensorflow::shape_inference::ShapeHandle shape;
shape_inference::ShapeHandle output_shape_handle;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(
output_shapes[i], &output_shape_handle));
c->set_output(i, output_shape_handle);
}
return OkStatus();
})
.Attr("segment_funcdef_name: string = ''")
.Attr("cached_engine_batches: list(int) >= 0 = []")
.Attr("fixed_input_size: bool = true")
.Attr("static_engine: bool = true")
.Attr("profile_strategy: string = ''")
.Attr("use_explicit_precision: bool = false");
}
#endif | #include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_graph.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "xla/tsl/framework/fixedpoint/FixedPoint.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/public/version.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
using ::absl::StrCat;
using ::testing::ElementsAre;
struct TestParam {
bool static_engine;
};
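// Test harness: AddSimpleTrtOp builds a trivial _Arg -> Add -> _Retval
// segment, registers it as the op's native-segment function, optionally
// pre-builds a static engine for it, and finalizes a TRTEngineOp node with
// the requested shape and batch-mode attributes.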
class TRTEngineOpTestBase : public OpsTestBase {
public:
void AddSimpleTrtOp(DataType dtype, int max_cached_engines_count = 1,
PartialTensorShape shape = PartialTensorShape({-1, -1}),
bool use_implicit_batch = true,
bool allow_build_at_runtime = true,
bool static_engine = false) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("GPU", {}, "/job:worker/replica:0/task:0"));
Scope s = Scope::NewRootScope();
auto feed = ops::_Arg(s.WithOpName("TensorRTInputPH_0"), dtype, 0);
auto add = ops::Add(s.WithOpName("add"), feed, feed);
ops::_Retval give_me_a_name(s.WithOpName("TensorRTOutputPH_0"), add, 0);
GraphDef graph_def;
TF_ASSERT_OK(s.ToGraphDef(&graph_def));
Graph* graph = s.graph();
TF_ASSERT_OK(convert::RegisterGraphToFunctionLibrary(graph_def, graph,
std::string(kOpName)));
TF_ASSERT_OK(flib_def_->AddLibrary(graph->flib_def()));
string segment_string;
if (static_engine) {
convert::TRTOptimizationPass::ConversionParams params;
convert::EngineInfo info;
info.segment_graph_def.CopyFrom(graph_def);
info.precision_mode = TrtPrecisionMode::FP32;
info.max_workspace_size_bytes = 1 << 20;
info.engine_name = "TRTEngineOP_000_000";
params.use_implicit_batch = use_implicit_batch;
params.trt_logger_name = "DefaultLogger";
TrtShapeOptimizationProfile profile;
std::vector<bool> input_mask = {true};
profile.SetInputMask(input_mask);
TensorShape my_shape;
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{4, 2}, &my_shape));
profile.AddShape({my_shape, {}});
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{1, 2}, &my_shape));
profile.AddShape({my_shape, {}});
profile.InitProfiles({shape}, ProfileStrategy::kOptimal);
std::vector<PartialTensorShape> shape_vec{shape, {}};
TF_CHECK_OK(convert::CreateStaticEngine(
params, info, 1, shape_vec, &profile, &segment_string, nullptr));
}
OpsTestBase::SetDevice(DEVICE_GPU, std::move(device));
NameAttrList function;
function.set_name(StrCat(std::string(kOpName), "_native_segment"));
TF_ASSERT_OK(NodeDefBuilder(std::string(kOpName), "TRTEngineOp")
.Input(FakeInput(1, dtype))
.Attr("input_shapes", {shape})
.Attr("output_shapes", {shape})
.Attr("static_engine", static_engine)
.Attr("segment_func", function)
.Attr("serialized_segment", segment_string)
.Attr("calibration_data", "")
.Attr("max_cached_engines_count", max_cached_engines_count)
.Attr("workspace_size_bytes", 1 << 20)
.Attr("precision_mode", "FP32")
.Attr("use_calibration", false)
.Attr("profile_strategy", "optimal")
.Attr("_use_implicit_batch", use_implicit_batch)
.Attr("_allow_build_at_runtime", allow_build_at_runtime)
.Attr("_allow_soft_placement", false)
.Attr("OutT", {dtype})
.Finalize(OpsTestBase::node_def()));
TF_ASSERT_OK(InitOpWithFunctionLibrary());
}
static const absl::string_view kOpName;
template <typename T>
void AddSimpleInput(const TensorShape& shape) {
std::vector<T> input(shape.num_elements());
std::iota(input.begin(), input.end(), T(0));
OpsTestBase::AddInputFromArray<T>(shape, input);
}
void ResetInputs() {
inputs_.clear();
for (auto& temp : tensors_) {
delete temp;
}
tensors_.clear();
}
private:
Status InitOpWithFunctionLibrary() {
OpKernel* kernel = nullptr;
auto flr = pflr_->GetFLR(device_->name());
std::shared_ptr<const NodeProperties> props;
Status status = NodeProperties::CreateFromNodeDef(
node_def_, flr->GetFunctionLibraryDefinition(), &props);
if (status.ok()) {
status.Update(CreateOpKernel(device_type_, device_, allocator(), flr,
props, TF_GRAPH_DEF_VERSION, &kernel));
}
kernel_ = std::unique_ptr<OpKernel>(kernel);
if (kernel_ != nullptr) input_types_ = kernel_->input_types();
return status;
}
};
class TRTEngineOpTestWithParam
: public TRTEngineOpTestBase,
public ::testing::WithParamInterface<TestParam> {
public:
TRTEngineOpTestWithParam() : param_(GetParam()) {}
protected:
TestParam param_;
};
const absl::string_view TRTEngineOpTestBase::kOpName = "myop";
constexpr std::array<TestParam, 2> TestParameters{TestParam{false},
TestParam{true}};
INSTANTIATE_TEST_CASE_P(TRTEngineOpTestInstantiation, TRTEngineOpTestWithParam,
::testing::ValuesIn(TestParameters));
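// Exercises engine-cache growth in implicit batch mode: an input that fits
// an existing engine's dimensions reuses it, while a new shape triggers a
// new engine, up to max_cached_engines_count.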
TEST_F(TRTEngineOpTestBase, DynamicEngines) {
TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, 4);
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({2, 2}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({1, 2}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(1, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({3, 2}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(2, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
EXPECT_EQ(1, cache->count({TensorShape({3, 2})}));
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({10, 10}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({1, 10}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(3, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
EXPECT_EQ(1, cache->count({TensorShape({3, 2})}));
EXPECT_EQ(1, cache->count({TensorShape({10, 10})}));
}
TEST_F(TRTEngineOpTestBase, AllowBuildAtRuntime) {
  TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, /*max_cached_engines_count=*/1,
                                      PartialTensorShape({-1, -1}),
                                      /*use_implicit_batch=*/true,
                                      /*allow_build_at_runtime=*/false);
TensorShape input_shape({2, 2});
TRTEngineOpTestBase::AddSimpleInput<float>(input_shape);
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
ASSERT_EQ(1, cache->count({input_shape}));
EngineContext* ectx = cache->at({input_shape}).get();
EXPECT_EQ(ectx->GetCudaEngine(), nullptr);
}
TEST_P(TRTEngineOpTestWithParam, ExplicitBatch) {
  TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, /*max_cached_engines_count=*/1,
                                      PartialTensorShape({1, 2}),
                                      /*use_implicit_batch=*/false,
                                      /*allow_build_at_runtime=*/true,
                                      param_.static_engine);
TensorShape input_shape({1, 2});
TRTEngineOpTestBase::AddSimpleInput<float>(input_shape);
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
ASSERT_EQ(1, cache->count({input_shape}));
EngineContext* ectx = cache->at({input_shape}).get();
EXPECT_NE(ectx->GetCudaEngine(), nullptr);
}
TEST_P(TRTEngineOpTestWithParam, DynamicShapes) {
  TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, /*max_cached_engines_count=*/1,
                                      PartialTensorShape({-1, -1}),
                                      /*use_implicit_batch=*/false,
                                      /*allow_build_at_runtime=*/true,
                                      param_.static_engine);
TensorShape input_shape({1, 2});
TRTEngineOpTestBase::AddSimpleInput<float>(input_shape);
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
ASSERT_EQ(1, cache->count({input_shape}));
EngineContext* ectx = cache->at({input_shape}).get();
EXPECT_NE(ectx->GetCudaEngine(), nullptr);
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({1, 37}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(1, cache->size());
EXPECT_EQ(0, cache->count({TensorShape({1, 37})}));
}
template <typename T>
class TRTEngineOpTest : public TRTEngineOpTestBase {};
using TypeList = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(TRTEngineOpTest, TypeList);
TYPED_TEST(TRTEngineOpTest, Basic) {
TRTEngineOpTestBase::AddSimpleTrtOp(DataTypeToEnum<TypeParam>::v());
OpsTestBase::AddInputFromArray<TypeParam>(TensorShape({1, 2}),
{TypeParam(0.0f), TypeParam(1.0f)});
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
Tensor* output = OpsTestBase::GetOutput(0);
EXPECT_THAT(
absl::Span<const TypeParam>(output->template flat<TypeParam>().data(),
output->NumElements()),
ElementsAre(TypeParam(0.0f), TypeParam(2.0f)));
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/ops/trt_engine_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b04e360-d1b6-4011-8aeb-4c51b14e12ca | cpp | tensorflow/tensorflow | segment | tensorflow/compiler/tf2tensorrt/segment/segment.cc | tensorflow/compiler/tf2tensorrt/segment/segment_test.cc | #include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include <algorithm>
#include <fstream>
#include <map>
#include <numeric>
#include <queue>
#include <tuple>
#include <unordered_map>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/util/env_var.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace {
using absl::StrAppend;
using absl::StrAppendFormat;
using absl::StrCat;
using absl::StrJoin;
class SimpleNode;
class SimpleGraph;
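// SimpleNode/SimpleEdge/SimpleGraph form a lightweight mutable mirror of the
// TF Graph. Segmentation contracts edges in this copy, leaving the original
// graph untouched.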
class SimpleEdge {
public:
SimpleEdge(int id, SimpleNode* src, int src_port, SimpleNode* dst,
int dst_port, bool is_control = false)
: id_(id),
src_(src),
src_port_(src_port),
dst_(dst),
dst_port_(dst_port),
control_(is_control) {}
~SimpleEdge() {}
SimpleNode* src() const { return src_; }
SimpleNode* dst() const { return dst_; }
int src_output() const { return src_port_; }
int dst_input() const { return dst_port_; }
int id() const { return id_; }
bool IsControlEdge() const { return control_; }
private:
int id_;
SimpleNode* src_;
int src_port_;
SimpleNode* dst_;
int dst_port_;
bool control_;
};
class SimpleNode {
public:
SimpleNode(const Node* node, const int id);
const std::vector<SimpleEdge*>& in_edges() const { return in_edges_; }
const std::vector<SimpleEdge*>& out_edges() const { return out_edges_; }
std::vector<SimpleNode*> in_nodes() const {
std::vector<SimpleNode*> res;
res.reserve(in_edges_.size());
for (const auto e : in_edges_) {
if (e) res.push_back(e->src());
}
return res;
}
std::vector<SimpleNode*> out_nodes() const {
std::vector<SimpleNode*> res;
res.reserve(out_edges_.size());
for (const auto e : out_edges_) {
if (e) res.push_back(e->dst());
}
return res;
}
const string& name() const { return node_->name(); }
const Node* tf_node() const { return node_; }
int id() const { return id_; }
private:
const Node* node_;
std::vector<SimpleEdge*> in_edges_;
std::vector<SimpleEdge*> out_edges_;
int id_;
friend class SimpleGraph;
};
class SimpleGraph {
public:
explicit SimpleGraph(const Graph* g);
~SimpleGraph();
void AddControlEdge(SimpleNode* src, SimpleNode* dst);
void AddEdge(SimpleNode* src, int out_port, SimpleNode* dst, int in_port);
void RemoveEdge(const SimpleEdge*);
SimpleNode* FindNodeId(int node_id) {
    if (node_id < 0 || node_id >= static_cast<int>(nodes_.size())) {
return nullptr;
}
return nodes_[node_id];
}
int num_node_ids() const { return nodes_.size(); }
const SimpleNode* source_node() const { return nodes_[Graph::kSourceId]; }
const SimpleNode* sink_node() const { return nodes_[Graph::kSinkId]; }
private:
const Graph* g_;
std::vector<SimpleNode*> nodes_;
std::vector<SimpleEdge*> edges_;
std::set<int> free_edge_ids_;
std::set<int> free_node_ids_;
};
SimpleNode::SimpleNode(const Node* node, const int id) : node_(node), id_(id) {
if (node_) {
in_edges_.reserve(node_->in_edges().size());
out_edges_.reserve(node_->out_edges().size());
}
}
SimpleGraph::SimpleGraph(const Graph* g) : g_(g) {
int n_nodes = g_->num_node_ids();
nodes_.resize(n_nodes, nullptr);
nodes_[g->kSourceId] = new SimpleNode(g->source_node(), g->kSourceId);
nodes_[g->kSinkId] = new SimpleNode(g->sink_node(), g->kSinkId);
int n_edges = g->num_edge_ids();
edges_.resize(n_edges, nullptr);
for (int i = 2; i < n_nodes; i++) {
const auto n = g->FindNodeId(i);
if (n) {
nodes_[i] = new SimpleNode(n, i);
} else {
free_node_ids_.insert(i);
}
}
for (int i = 0; i < n_edges; i++) {
const auto e = g->FindEdgeId(i);
if (e) {
const auto tfsrc = e->src();
const auto tfdst = e->dst();
bool is_control = e->IsControlEdge();
auto src = nodes_[tfsrc->id()];
auto dst = nodes_[tfdst->id()];
auto edge = new SimpleEdge(i, src, e->src_output(), dst, e->dst_input(),
is_control);
edges_[i] = edge;
src->out_edges_.push_back(edge);
dst->in_edges_.push_back(edge);
} else {
free_edge_ids_.insert(i);
}
}
}
void SimpleGraph::AddEdge(SimpleNode* src, int out_port, SimpleNode* dst,
int in_port) {
int i = edges_.size();
if (!free_edge_ids_.empty()) {
auto it = free_edge_ids_.begin();
i = *it;
free_edge_ids_.erase(it);
} else {
edges_.push_back(nullptr);
}
bool is_control = (out_port == Graph::kControlSlot);
is_control |= (in_port == Graph::kControlSlot);
auto edge = new SimpleEdge(i, src, out_port, dst, in_port, is_control);
edges_[i] = edge;
src->out_edges_.push_back(edge);
dst->in_edges_.push_back(edge);
}
void SimpleGraph::AddControlEdge(SimpleNode* src, SimpleNode* dst) {
AddEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
void SimpleGraph::RemoveEdge(const SimpleEdge* edge) {
auto src = edge->src();
auto dst = edge->dst();
for (auto it = src->out_edges_.begin(); it != src->out_edges_.end(); ++it) {
if (*it == edge) {
src->out_edges_.erase(it);
break;
}
}
for (auto it = dst->in_edges_.begin(); it != dst->in_edges_.end(); ++it) {
if (*it == edge) {
dst->in_edges_.erase(it);
break;
}
}
}
SimpleGraph::~SimpleGraph() {
for (auto x : nodes_) delete x;
for (auto x : edges_) delete x;
}
struct SimpleEdgePtrCompare {
bool operator()(const SimpleEdge* lhs, const SimpleEdge* rhs) const {
return lhs->id() < rhs->id();
}
};
void StableDFS(const SimpleGraph& g, bool reverse,
const std::vector<const SimpleNode*>& start,
const std::function<bool(const SimpleNode*)>& enter,
const std::function<bool(const SimpleNode*)>& leave) {
struct Work {
const SimpleNode* node;
bool leave;
};
std::vector<Work> stack(start.size());
for (int i = 0; i < start.size(); ++i) {
stack[i] = Work{start[i], false};
}
auto get_nodes = [reverse](const SimpleNode* n) {
return reverse ? n->in_nodes() : n->out_nodes();
};
std::vector<bool> visited(g.num_node_ids(), false);
while (!stack.empty()) {
Work w = stack.back();
stack.pop_back();
auto n = w.node;
if (w.leave) {
if (leave && !leave(n)) return;
continue;
}
if (visited[n->id()]) continue;
visited[n->id()] = true;
if (enter && !enter(n)) return;
if (leave) stack.push_back(Work{n, true});
auto nodes = get_nodes(n);
std::vector<const SimpleNode*> nodes_sorted(nodes.begin(), nodes.end());
std::sort(nodes_sorted.begin(), nodes_sorted.end(),
[](const SimpleNode* lhs, const SimpleNode* rhs) {
return lhs->name() < rhs->name();
});
for (const SimpleNode* node : nodes_sorted) {
if (!visited[node->id()]) {
stack.push_back(Work{node, false});
}
}
}
}
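// An edge src->dst may be contracted only if the contraction cannot create a
// cycle, i.e. there is no other path from src to dst. For example, with
// edges a->b, a->c, and c->b, contracting a->b would merge a node that both
// feeds and depends on c. The check runs a reverse DFS from dst's other
// predecessors and reports a cycle if it reaches src.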
bool CanContractEdge(const SimpleEdge* edge,
const std::unique_ptr<SimpleGraph>& graph) {
const auto src = edge->src();
const auto dst = edge->dst();
std::vector<const SimpleNode*> dfs_start_nodes;
for (const SimpleNode* node : dst->in_nodes()) {
if (node != src) {
dfs_start_nodes.push_back(node);
}
}
bool has_cycle = false;
StableDFS(*graph, true, dfs_start_nodes, nullptr,
[&has_cycle, src](const SimpleNode* n) {
if (n == src) {
has_cycle = true;
return false;
}
return true;
});
return !has_cycle;
}
string TensorPropertiesToString(const OpInfo::TensorProperties& prop) {
string s = StrCat(DataTypeString(prop.dtype()), ": ");
StrAppend(&s, "[");
if (prop.shape().unknown_rank()) {
StrAppend(&s, "?");
} else {
StrAppend(&s, StrJoin(prop.shape().dim(), ",",
[](string* out, const TensorShapeProto_Dim& d) {
StrAppendFormat(out, "%d", d.size());
}));
}
StrAppend(&s, "]");
return s;
}
string TensorPropertiesToString(
const std::vector<OpInfo::TensorProperties>& properties) {
return StrJoin(properties, "; ",
[](string* out, const OpInfo::TensorProperties& prop) {
StrAppend(out, TensorPropertiesToString(prop));
});
}
std::optional<const TensorShapeProto*> FindLeadingShape(
absl::Span<const OpInfo::TensorProperties> properties) {
DCHECK(!properties.empty());
const TensorShapeProto* result;
int max_batch_dim_value;
auto choose_shape_with_higher_rank = [&](const TensorShapeProto* s) {
result = s;
max_batch_dim_value = s->dim_size() < 1 ? 1 : s->dim(0).size();
};
DCHECK(!properties[0].shape().unknown_rank());
choose_shape_with_higher_rank(&properties[0].shape());
for (const OpInfo::TensorProperties& p : properties.subspan(1)) {
DCHECK(!p.shape().unknown_rank());
if (p.shape().dim_size() < result->dim_size()) continue;
if (p.shape().dim_size() > result->dim_size()) {
choose_shape_with_higher_rank(&p.shape());
continue;
}
if (result->dim_size() < 1) continue;
if (p.shape().dim(0).size() < 0 || result->dim(0).size() < 0) {
if (p.shape().dim(0).size() < 0 && result->dim(0).size() >= 0) {
result = &p.shape();
} else {
max_batch_dim_value =
std::max<int>(max_batch_dim_value, p.shape().dim(0).size());
}
continue;
}
if (p.shape().dim(0).size() > result->dim(0).size()) {
result = &p.shape();
max_batch_dim_value = result->dim(0).size();
}
}
if (result->dim_size() > 0 && result->dim(0).size() < 0) {
if (max_batch_dim_value <= 1) {
return result;
} else {
return std::nullopt;
}
}
return result;
}
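// Selects which inputs take part in batch-size inference: only the third
// input (the data tensor) for Conv2D/Conv3D backprop-input ops, all inputs
// for broadcasting binary ops, and just the first input otherwise.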
absl::Span<const OpInfo::TensorProperties> GetInputsToDeterminateBatchSize(
const Node* node, const std::vector<OpInfo::TensorProperties>& all_inputs) {
static std::set<string> broadcast_supporting_ops = {
"Add",
"AddV2",
"Mul",
"Sub",
"Div",
"FloorDiv",
"RealDiv",
"Minimum",
"Maximum",
"Pow",
"BiasAdd",
"SquaredDifference",
"BatchMatMul",
"BatchMatMulV2",
};
const string& op = node->def().op();
if (op == "Conv2DBackpropInput" || op == "Conv3DBackpropInputV2") {
DCHECK_EQ(all_inputs.size(), 3);
return absl::MakeSpan(all_inputs).subspan(2, 1);
}
if (broadcast_supporting_ops.count(op)) {
return absl::MakeSpan(all_inputs);
}
return absl::MakeSpan(all_inputs).subspan(0, 1);
}
bool OperationCanBeTranslatedToImplicitBatch(
const grappler::GraphProperties* graph_properties, const Node* node) {
VLOG(3) << "process node " << node->name();
if (node->num_inputs() == 0) return true;
if (!graph_properties || !graph_properties->HasInputProperties(node->name()))
return false;
VLOG(3) << "input shapes "
<< TensorPropertiesToString(
graph_properties->GetInputProperties(node->name()));
const std::vector<OpInfo::TensorProperties>& all_input_properties =
graph_properties->GetInputProperties(node->name());
absl::Span<const OpInfo::TensorProperties> input_properties =
GetInputsToDeterminateBatchSize(node, all_input_properties);
if (absl::c_any_of(input_properties, [](const OpInfo::TensorProperties& p) {
return p.shape().unknown_rank();
})) {
return false;
}
std::optional<const TensorShapeProto*> leading_shape =
FindLeadingShape(input_properties);
return leading_shape.has_value() && leading_shape.value()->dim_size() >= 2;
}
bool HasDynamicNonBatchDimension(const OpInfo::TensorProperties& prop) {
const TensorShapeProto& shape = prop.shape();
if (shape.unknown_rank()) return true;
if (shape.dim_size() == 0) return false;
for (int i = 1; i < shape.dim_size(); ++i) {
if (shape.dim(i).size() <= -1) {
return true;
}
}
return false;
}
bool OperationHasDynamicNonBatchDimension(
const grappler::GraphProperties* graph_properties, const Node* node) {
VLOG(3) << "process node " << node->name();
if (node->num_inputs() == 0 || node->num_outputs() == 0) return false;
if (!graph_properties->HasOutputProperties(node->name())) return true;
VLOG(3) << "output shapes "
<< TensorPropertiesToString(
graph_properties->GetOutputProperties(node->name()));
return HasDynamicNonBatchDimension(
graph_properties->GetOutputProperties(node->name()).at(0));
}
void ContractEdge(SimpleEdge* edge, SimpleGraph* graph,
std::vector<const SimpleEdge*>* remove_edges) {
auto src = edge->src();
auto dst = edge->dst();
std::vector<const SimpleEdge*> in_edges(dst->in_edges().begin(),
dst->in_edges().end());
for (const SimpleEdge* in_edge : in_edges) {
if (in_edge->IsControlEdge()) {
if (in_edge->src() != src) {
SimpleEdge* e = const_cast<SimpleEdge*>(in_edge);
graph->AddControlEdge(e->src(), src);
}
} else {
if (in_edge->src() != src) {
SimpleEdge* e = const_cast<SimpleEdge*>(in_edge);
if (e->src() == graph->source_node()) {
graph->AddEdge(e->src(), e->src_output(), src, Graph::kControlSlot);
} else {
          graph->AddEdge(e->src(), e->src_output(), src, 0 /* input port */);
}
}
}
}
std::vector<const SimpleEdge*> out_edges(dst->out_edges().begin(),
dst->out_edges().end());
for (const SimpleEdge* out_edge : out_edges) {
if (out_edge->IsControlEdge()) {
SimpleEdge* e = const_cast<SimpleEdge*>(out_edge);
graph->AddControlEdge(src, e->dst());
} else {
SimpleEdge* e = const_cast<SimpleEdge*>(out_edge);
if (e->dst() == graph->sink_node()) {
VLOG(1) << " edge to sink node " << src->name() << " -> "
<< e->dst()->name();
graph->AddEdge(src, Graph::kControlSlot, e->dst(), e->dst_input());
} else {
        graph->AddEdge(src, 0 /* output port */, e->dst(), e->dst_input());
}
}
}
for (const auto& in_edge : dst->in_edges()) {
remove_edges->push_back(in_edge);
}
for (const auto& out_edge : dst->out_edges()) {
remove_edges->push_back(out_edge);
}
}
ClusterBatchSize GetClusterBatchSizeForNode(
const grappler::GraphProperties* graph_properties, const Node* node,
bool use_implicit_batch) {
ClusterBatchSize cluster_batch_size;
if (!use_implicit_batch || !node || node->num_inputs() == 0) {
return cluster_batch_size;
}
const NodeDef& node_def = node->def();
if (node_def.attr().count(kTftrtOpMaxBatchSizeAttr)) {
cluster_batch_size.SetMaxBatchSize(
node_def.attr().at(kTftrtOpMaxBatchSizeAttr).i());
}
if (!graph_properties ||
!graph_properties->HasInputProperties(node->name())) {
VLOG(3) << "doesn't have input property";
return cluster_batch_size;
}
const std::vector<OpInfo::TensorProperties>& input_properties =
graph_properties->GetInputProperties(node->name());
std::optional<const TensorShapeProto*> optional_leading_shape =
FindLeadingShape(GetInputsToDeterminateBatchSize(node, input_properties));
DCHECK(optional_leading_shape.has_value());
const TensorShapeProto* leading_shape = optional_leading_shape.value();
DCHECK(!leading_shape->unknown_rank() && leading_shape->dim_size() >= 2);
VLOG(3) << "set batch size as " << leading_shape->dim(0).size();
return cluster_batch_size.SetBatchSize(leading_shape->dim(0).size());
}
void AddSegmentForNode(const grappler::GraphProperties* graph_properties,
std::vector<UnionFind<SimpleNode*>>* segments,
SimpleNode* node,
const DeviceNameUtils::ParsedName& device_name,
bool use_implicit_batch) {
tensorflow::profiler::TraceMe activity(
"AddSegmentForNode", tensorflow::profiler::TraceMeLevel::kInfo);
ClusterProperty property(
GetClusterBatchSizeForNode(graph_properties,
node == nullptr ? nullptr : node->tf_node(),
use_implicit_batch),
device_name);
segments->emplace_back(node, std::move(property));
}
}
Status ExportNonConversionReportToCSV(
string filename,
std::map<string, std::map<string, int>>& nonconverted_ops_map,
string sep = "|") {
tensorflow::profiler::TraceMe activity(
"ExportNonConversionReportToCSV",
tensorflow::profiler::TraceMeLevel::kInfo);
std::unique_ptr<WritableFile> csv_file;
auto open_status = Env::Default()->NewWritableFile(filename, &csv_file);
if (!open_status.ok()) {
return errors::Internal("Failed to open output file: `", filename, "`");
}
LOG(WARNING) << "TF-TRT Non-Conversion Report saved at: `" << filename << "`";
std::ostringstream sstream;
sstream << "OP Name" << sep << "Reason" << sep << "Count" << std::endl;
for (auto& op_details : nonconverted_ops_map) {
auto op_name = op_details.first;
auto op_data = op_details.second;
for (auto& reject_data : op_data) {
auto reason = reject_data.first;
auto count = reject_data.second;
sstream << op_name << sep << reason << sep << count << std::endl;
}
}
auto append_status = csv_file->Append(sstream.str());
if (!append_status.ok()) {
return errors::Internal("Error writing to output file `", filename, "`.");
}
auto close_status = csv_file->Close();
if (!close_status.ok()) {
return errors::Internal("Error closing the file `", filename,
"`. The file might be corrupted.");
}
return OkStatus();
}
string GenerateNonConversionReport(
std::map<string, std::map<string, int>>& nonconverted_ops_map) {
tensorflow::profiler::TraceMe activity(
"GenerateNonConversionReport", tensorflow::profiler::TraceMeLevel::kInfo);
string detailed_report_var;
TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_SHOW_DETAILED_REPORT",
"", &detailed_report_var));
bool show_detailed_conversion_report = false;
if (detailed_report_var != "") {
if (detailed_report_var.find_first_not_of("-0123456789") != string::npos) {
const Status status = ExportNonConversionReportToCSV(
detailed_report_var, nonconverted_ops_map);
if (!status.ok()) {
LOG(ERROR) << "Problem encountered while generating the TF-TRT "
<< "Non-Conversion Report in CSV Format:\n"
<< status.message();
}
show_detailed_conversion_report = true;
} else if (std::stoi(detailed_report_var) >= 1) {
show_detailed_conversion_report = true;
}
}
string unsupported_op_report =
StrCat("\n\n", string(80, '#'), "\n",
"TensorRT unsupported/non-converted OP Report:");
int total_nonconverted_ops{0};
using ReasonCounterVector = std::vector<std::pair<string, int>>;
using NotConvertedOPTuple = std::tuple<string, int, ReasonCounterVector>;
std::vector<NotConvertedOPTuple> nonconverted_ops_vec;
for (auto& nonconverted_op_data : nonconverted_ops_map) {
int total_nonconverted_op{0};
    ReasonCounterVector reason_occurrences_vect;
auto op_name = nonconverted_op_data.first;
auto op_data = nonconverted_op_data.second;
for (auto& notconversion_reason_data : op_data) {
auto reason_count = notconversion_reason_data.second;
total_nonconverted_op += reason_count;
      reason_occurrences_vect.push_back(notconversion_reason_data);
}
    std::sort(reason_occurrences_vect.begin(), reason_occurrences_vect.end(),
[](const std::pair<string, int>& a,
const std::pair<string, int>& b) -> bool {
return a.second > b.second;
});
nonconverted_ops_vec.push_back(std::make_tuple(
        op_name, total_nonconverted_op, reason_occurrences_vect));
}
std::sort(nonconverted_ops_vec.begin(), nonconverted_ops_vec.end(),
[](const NotConvertedOPTuple& a, const NotConvertedOPTuple& b) {
return std::get<1>(a) > std::get<1>(b);
});
for (auto& notconverted_op_detail : nonconverted_ops_vec) {
auto& op_name = std::get<0>(notconverted_op_detail);
auto& op_total_nonconverted = std::get<1>(notconverted_op_detail);
total_nonconverted_ops += op_total_nonconverted;
unsupported_op_report = StrCat(unsupported_op_report, "\n\t- ", op_name,
" -> ", op_total_nonconverted, "x");
if (show_detailed_conversion_report) {
auto& nonconverted_ops_details = std::get<2>(notconverted_op_detail);
for (auto& nonconversion_details : nonconverted_ops_details) {
auto& reason = nonconversion_details.first;
auto& reason_count = nonconversion_details.second;
if (reason_count == 0) {
continue;
}
unsupported_op_report = StrCat(unsupported_op_report, "\n\t\t- ",
"[Count: ", reason_count, "x] ", reason);
}
unsupported_op_report = StrCat(unsupported_op_report, "\n");
}
}
unsupported_op_report =
StrCat(unsupported_op_report, "\n", string(80, '-'),
"\n\t- Total nonconverted OPs: ", total_nonconverted_ops,
"\n\t- Total nonconverted OP Types: ", nonconverted_ops_map.size(),
"\nFor more information see https:
"/frameworks/tf-trt-user-guide/index.html#supported-ops.", "\n",
string(80, '#'), "\n");
return unsupported_op_report;
}
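// Segmentation algorithm: every node starts in its own union-find cluster,
// with non-candidates marked null. Nodes are visited in a stable DFS order,
// and each candidate repeatedly contracts outgoing edges whose destination
// is also a candidate, has a compatible batch size and device, and whose
// contraction would not introduce a cycle. The surviving clusters become the
// candidate TRT segments.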
Status SegmentGraph(const Graph* tf_graph,
const grappler::GraphProperties* graph_properties,
const std::function<Status(const Node*)>& candidate_fn,
const std::function<bool(const Edge*)>& input_candidate_fn,
const std::function<bool(const Edge*)>& output_candidate_fn,
const SegmentOptions& options, SegmentVector* segments) {
tensorflow::profiler::TraceMe activity(
"SegmentGraph", tensorflow::profiler::TraceMeLevel::kInfo);
if (!options.use_implicit_batch && !options.allow_dynamic_non_batch_dim) {
return errors::Internal(
"Explicit batch mode should allow dynamic non-batch dimensions");
}
if (options.use_implicit_batch && !options.maximum_batch_size.has_value()) {
return errors::Internal("Implicit batch mode requires maximum_batch_size");
}
if (!options.allow_dynamic_non_batch_dim && !graph_properties) {
return errors::Internal(
"Need graph propertities to disallow dynamic non-batch dimensions");
}
auto graph = std::unique_ptr<SimpleGraph>(new SimpleGraph(tf_graph));
const absl::flat_hash_set<string> tftrt_op_denylist = [] {
string tftrt_op_denylist_str;
TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_OP_DENYLIST", "",
&tftrt_op_denylist_str));
absl::flat_hash_set<string> tftrt_op_denylist{};
for (const auto& x : str_util::Split(tftrt_op_denylist_str, ",")) {
tftrt_op_denylist.insert(x);
}
tftrt_op_denylist.rehash(0);
return tftrt_op_denylist;
}();
std::map<string, std::map<string, int>> nonconverted_ops_map = {};
std::vector<UnionFind<SimpleNode*>> node_segments;
for (int i = 0; i < graph->num_node_ids(); ++i) {
SimpleNode* node = graph->FindNodeId(i);
if (!node) {
VLOG(3) << "Node " << i << " doesn't exist in the graph";
continue;
}
const string node_op_type{node->tf_node()->type_string()};
auto exclude_node = [&](absl::string_view reason) {
VLOG(1) << "Not a TF-TRT candidate, " << "(Op type: " << node_op_type
<< "), " << "(Op name: " << node->name() << "), "
<< "(Reason: " << reason << ")";
nonconverted_ops_map[node_op_type][string(reason)]++;
node = nullptr;
};
std::optional<DeviceNameUtils::ParsedName> device_name =
GetDeviceParsedName(node->tf_node());
if (!device_name.has_value() ||
(device_name->has_type && device_name->type != "GPU")) {
exclude_node("node can't be placed on GPU");
} else if (options.exclude_node_list.count(node->name()) != 0) {
exclude_node(
"excluded by segmenter option. Most likely an input or "
"output node.");
} else if (options.use_implicit_batch &&
!OperationCanBeTranslatedToImplicitBatch(graph_properties,
node->tf_node())) {
exclude_node(
"implicit batch mode requires input shape with at least two "
"dimensions");
} else if (!options.allow_dynamic_non_batch_dim &&
OperationHasDynamicNonBatchDimension(graph_properties,
node->tf_node())) {
exclude_node("dynamic non-batch dimensions not allowed");
} else {
const Status status = candidate_fn(node->tf_node());
if (!status.ok()) {
exclude_node(status.message());
} else if (tftrt_op_denylist.contains(node->tf_node()->type_string())) {
LOG_WARNING_WITH_PREFIX
<< "Denylisted as TF-TRT candidate, "
<< "(Op type: " << node->tf_node()->type_string() << "), "
<< "(Op name: " << node->name() << ")";
exclude_node("Denylisted with the env var TF_TRT_OP_DENYLIST");
} else {
VLOG(2) << "Accepted as a TF-TRT candidate, "
<< "(Op type: " << node->tf_node()->type_string() << "), "
<< "(Op name: " << node->name();
}
}
AddSegmentForNode(graph_properties, &node_segments, node, *device_name,
options.use_implicit_batch);
}
LOG(WARNING) << GenerateNonConversionReport(nonconverted_ops_map);
std::vector<const SimpleNode*> order;
order.reserve(graph->num_node_ids());
StableDFS(*graph, false, {graph->source_node()},
nullptr, [&order](const SimpleNode* n) {
order.push_back(n);
return true;
});
for (const SimpleNode* node : order) {
VLOG(3) << "Trying node " << node->name() << " id=" << node->id();
if (node_segments[node->id()].Value() == nullptr) {
VLOG(3) << "... not a TRT candidate";
continue;
}
ClusterBatchSize expected_batch_size =
node_segments[node->id()].Property().BatchSize();
DeviceNameUtils::ParsedName expected_device_name =
node_segments[node->id()].Property().DeviceName();
VLOG(3) << "batch size " << expected_batch_size;
while (true) {
std::set<const SimpleEdge*, SimpleEdgePtrCompare> contract_edges;
for (const SimpleEdge* out_edge : node->out_edges()) {
VLOG(3) << "... out node " << out_edge->dst()->name() << " ( "
<< out_edge->dst()->id() << " <- " << node->id() << " )";
if (out_edge->IsControlEdge()) {
VLOG(3) << "... ... Control Edge, Skipping";
continue;
}
UnionFind<SimpleNode*>* out_cluster =
&node_segments[out_edge->dst()->id()];
if (out_cluster->Value() == nullptr) {
VLOG(3) << "... ... not a TRT candidate";
continue;
}
ClusterBatchSize out_batch_size = out_cluster->Property().BatchSize();
ClusterBatchSize merged_batch_size = expected_batch_size;
if (!merged_batch_size.MergeIfCompatible(out_batch_size)) {
VLOG(3) << "... ... incompatible batch sizes "
<< expected_batch_size.ToString() << " "
<< out_batch_size.ToString();
continue;
}
const DeviceNameUtils::ParsedName& out_device_name =
out_cluster->Property().DeviceName();
std::optional<DeviceNameUtils::ParsedName> merged_device_name =
MergeIfCompatible(expected_device_name, out_device_name);
if (!merged_device_name.has_value()) {
VLOG(3) << "... ... incompatible device names "
<< expected_device_name << " " << out_device_name;
continue;
}
if (CanContractEdge(out_edge, graph)) {
VLOG(3) << "... ... can contract. new batch size "
<< merged_batch_size.ToString();
contract_edges.insert(out_edge);
expected_batch_size = merged_batch_size;
expected_device_name = *merged_device_name;
} else {
VLOG(3) << "... ... cannot contract, would form cycle";
}
}
if (contract_edges.empty()) {
break;
}
while (!contract_edges.empty()) {
const SimpleEdge* contract_edge = *contract_edges.begin();
const SimpleNode* src = contract_edge->src();
const SimpleNode* dst = contract_edge->dst();
VLOG(3) << "Merge " << src->name() << " <- " << dst->name() << " ("
<< src->id() << " <- " << dst->id();
TF_RETURN_IF_ERROR(
node_segments[src->id()].Merge(&node_segments[dst->id()]));
SimpleEdge* e = const_cast<SimpleEdge*>(contract_edge);
std::vector<const SimpleEdge*> remove_edges;
ContractEdge(e, graph.get(), &remove_edges);
for (const SimpleEdge* r : remove_edges) {
contract_edges.erase(r);
graph->RemoveEdge(r);
}
}
if (expected_batch_size !=
node_segments[node->id()].Property().BatchSize()) {
return errors::Internal(
"expected batch size is not the same as the actual batch size");
}
if (expected_device_name !=
node_segments[node->id()].Property().DeviceName()) {
return errors::Internal(
"expected device name is not the same as the actual device name");
}
}
}
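  // Gather the final union-find clusters into segments keyed by the name of
  // each cluster's representative (parent) node.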
std::map<string, Segment> sg_map;
for (auto& u : node_segments) {
if ((u.Value() != nullptr) && (u.ParentValue() != nullptr)) {
sg_map[u.ParentValue()->name()].nodes.insert(u.Value()->tf_node());
}
if ((u.Value() != nullptr) && (u.ParentValue() == u.Value())) {
sg_map[u.Value()->name()].property = u.Property();
}
}
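  // Iteratively peel off boundary nodes whose cross-segment edges are not
  // acceptable engine inputs/outputs, until the segment stabilizes.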
for (auto& itr : sg_map) {
std::set<const Node*, NodePtrCompare>& segment_nodes = itr.second.nodes;
VLOG(1) << "Segment original size: " << segment_nodes.size();
while (true) {
std::deque<const Node*> in_nodes_que, out_nodes_que;
for (auto node : segment_nodes) {
bool added = false;
for (const Edge* edge : node->in_edges()) {
if (!edge->IsControlEdge() && !edge->src()->IsSource() &&
!segment_nodes.count(edge->src())) {
if (!input_candidate_fn(edge)) {
in_nodes_que.push_back(node);
added = true;
break;
}
}
}
if (added) continue;
for (const Edge* edge : node->out_edges()) {
if (!edge->dst()->IsSink() && !edge->IsControlEdge() &&
!segment_nodes.count(edge->dst())) {
if (!output_candidate_fn(edge)) {
out_nodes_que.push_back(node);
break;
}
}
}
}
if (in_nodes_que.empty() && out_nodes_que.empty()) {
break;
}
auto remove_nodes = [&segment_nodes](bool is_input_nodes,
std::deque<const Node*>* que) {
std::set<const Node*, NodePtrCompare> visited;
std::set<const Node*, NodePtrCompare> logged(que->begin(), que->end());
while (!que->empty()) {
auto node = que->front();
que->pop_front();
if (!visited.insert(node).second) continue;
segment_nodes.erase(node);
for (auto in : (is_input_nodes || node->type_string() == "Const")
? node->in_nodes()
: node->out_nodes()) {
if (segment_nodes.count(in)) {
que->push_back(in);
if (VLOG_IS_ON(2)) {
if (!logged.count(in)) {
VLOG(2) << "----> Need to remove node " << in->name()
<< " because one of its "
<< (is_input_nodes ? "output" : "input")
<< " nodes in the graph was removed: "
<< node->name();
logged.insert(in);
}
}
}
}
}
};
remove_nodes(true, &in_nodes_que);
remove_nodes(false, &out_nodes_que);
}
VLOG(1) << "Segment new size: " << segment_nodes.size();
}
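  // Keep only segments whose count of effective (non-noop) nodes reaches
  // minimum_segment_size.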
std::vector<int> effective_nodes_counts;
for (const auto& itr : sg_map) {
const string& segment_root = itr.first;
std::set<const Node*, NodePtrCompare> segment_nodes(
itr.second.nodes.begin(), itr.second.nodes.end());
if (VLOG_IS_ON(1) && !segment_nodes.empty()) {
string s;
for (auto node : segment_nodes) {
StrAppend(&s, "\n[Op type: ", node->type_string(), "] ", node->name());
}
VLOG(1) << "Nodes in segment " << segments->size()
<< " with parent=" << segment_root << ":" << s;
}
const int num_effective_nodes = std::count_if(
segment_nodes.begin(), segment_nodes.end(), [](const Node* node) {
static auto noops =
new std::set<string>{"Identity", "Snapshot", "StopGradient"};
return noops->count(node->type_string()) == 0;
});
if (num_effective_nodes == 0 ||
num_effective_nodes < options.minimum_segment_size) {
VLOG(1) << "Segment " << segments->size() << " has only "
<< num_effective_nodes << " effective nodes, dropping";
continue;
}
segments->emplace_back(itr.second.property, segment_nodes);
effective_nodes_counts.push_back(num_effective_nodes);
}
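  // Optionally cap the number of engines via TF_TRT_MAX_ALLOWED_ENGINES,
  // retaining the segments with the most effective nodes.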
int64_t max_trt_engine_ops;
TF_CHECK_OK(ReadInt64FromEnvVar("TF_TRT_MAX_ALLOWED_ENGINES",
20, &max_trt_engine_ops));
if (max_trt_engine_ops <= 0) {
LOG(WARNING) << "The environment variable TF_TRT_MAX_ALLOWED_ENGINES is "
<< "<= 0. TF-TRT did not limit the number of TensorRT engines "
<< "created.";
} else {
if (segments->size() > max_trt_engine_ops) {
LOG(WARNING) << "A total of " << segments->size() << " segments with at "
<< "least minimum_segment_size="
<< options.minimum_segment_size << " nodes have been found. "
<< "TF-TRT will only convert the " << max_trt_engine_ops
<< " largest segments. You can change this behavior by "
<< "modifying the environment variable "
<< "TF_TRT_MAX_ALLOWED_ENGINES=" << max_trt_engine_ops;
std::vector<int> indices(segments->size());
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&effective_nodes_counts](int i1, int i2) {
return effective_nodes_counts[i1] >
effective_nodes_counts[i2];
});
std::vector<bool> mask = std::vector<bool>(segments->size(), false);
for (int i = 0; i < max_trt_engine_ops; i++) {
mask[indices[i]] = true;
}
int j = 0;
VLOG(1) << "The following segments have been accepted by TF-TRT:";
for (int i = 0; i < segments->size(); i++) {
if (mask[i]) {
VLOG(1) << "[*] Segment " << i
<< " [node count: " << effective_nodes_counts[i]
<< "] accepted. Re-assigned " << "segment id=" << j;
segments->at(j) = segments->at(i);
j++;
}
}
VLOG(1) << "The following segments have been rejected by TF-TRT:";
for (int i = 0; i < segments->size(); i++) {
if (!mask[i]) {
VLOG(1) << "[*] Segment " << i
<< " [node count: " << effective_nodes_counts[i]
<< "] rejected.";
}
}
segments->resize(max_trt_engine_ops);
} else {
LOG(WARNING) << "The environment variable TF_TRT_MAX_ALLOWED_ENGINES="
<< max_trt_engine_ops << " has no effect since there are "
<< "only " << segments->size() << " TRT Engines with at "
<< "least minimum_segment_size="
<< options.minimum_segment_size << " nodes.";
}
}
return OkStatus();
}
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace test {
class SegmentTest : public ::testing::Test {
protected:
std::function<Status(const Node*)> MakeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Node* node) -> Status {
if (node_names.find(node->name()) != node_names.end()) {
return OkStatus();
}
return errors::NotFound("Not a user specified candidate");
};
}
std::function<bool(const Edge*)> MakeInputEdgeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Edge* in_edge) -> bool {
return node_names.find(in_edge->dst()->name()) != node_names.end();
};
}
std::function<bool(const Edge*)> MakeOutputEdgeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Edge* out_edge) -> bool {
return node_names.find(out_edge->src()->name()) != node_names.end();
};
}
void RunTest(const Graph* graph,
const grappler::GraphProperties* graph_properties,
const std::set<string>& candidates,
const std::set<string>& input_candidates,
const std::set<string>& output_candidates,
const std::vector<std::set<string>>& expected_segments) {
SegmentVector segments;
TF_EXPECT_OK(SegmentGraph(graph, graph_properties,
MakeCandidateFn(candidates),
MakeInputEdgeCandidateFn(input_candidates),
MakeOutputEdgeCandidateFn(output_candidates),
segment_options_, &segments));
ValidateSegment(segments, expected_segments);
}
void RunTest(const Graph* graph, const std::set<string>& candidates,
const std::set<string>& input_candidates,
const std::set<string>& output_candidates,
const std::vector<std::set<string>>& expected_segments) {
RunTest(graph, nullptr, candidates, input_candidates, output_candidates,
expected_segments);
}
void ValidateSegment(const SegmentVector& segments,
const std::vector<std::set<string>>& expected_segments) {
EXPECT_EQ(expected_segments.size(), segments.size());
for (int i = 0; i < segments.size(); ++i) {
std::set<string> segment_node_names;
for (const Node* node : segments[i].nodes) {
segment_node_names.insert(node->name());
}
const auto& expected = expected_segments[i];
for (const auto& name : expected) {
EXPECT_TRUE(segment_node_names.count(name))
<< "Segment " << i << " is missing expected node: " << name;
}
if (segment_node_names.size() == expected.size()) continue;
for (const auto& name : segment_node_names) {
EXPECT_TRUE(expected.count(name))
<< "Unexpected node found in segment " << i << ": " << name;
}
}
}
void DisableImplicitBatchMode() {
segment_options_.use_implicit_batch = false;
segment_options_.allow_dynamic_non_batch_dim = true;
}
void EnableImplicitBatchModeForStaticEngine(int maximum_batch_size = 1000) {
segment_options_.use_implicit_batch = true;
segment_options_.maximum_batch_size = maximum_batch_size;
segment_options_.allow_dynamic_non_batch_dim = false;
}
SegmentOptions segment_options_;
};
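// Returns a copy of `lhs` with `rhs` removed; CHECK-fails if `rhs` is absent.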
std::set<string> operator-(const std::set<string>& lhs, const string& rhs) {
std::set<string> result = lhs;
CHECK(result.erase(rhs));
return result;
}
TEST_F(SegmentTest, Empty) {
Scope s = Scope::NewRootScope();
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
DisableImplicitBatchMode();
RunTest(&g, {}, {}, {}, {});
}
TEST_F(SegmentTest, Simple) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
DisableImplicitBatchMode();
RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
auto without_add1 = all_adds - "add1";
RunTest(&g, without_add1, without_add1, without_add1, {without_add1});
auto without_add2 = all_adds - "add2";
RunTest(&g, without_add1, without_add2, without_add1, {{"add3", "add4"}});
RunTest(&g, all_adds, without_add2, all_adds, {all_adds});
RunTest(&g, all_adds, without_add1, all_adds, {without_add1});
auto without_add3 = all_adds - "add3";
RunTest(&g, all_adds, all_adds, without_add3, {all_adds});
}
TEST_F(SegmentTest, WithDeviceAssignments) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
DisableImplicitBatchMode();
{
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
}
{
add1.node()->set_assigned_device_name("/device:CPU:0");
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {all_adds - "add1"});
add1.node()->set_assigned_device_name("");
}
{
constexpr char kGpu0[] = "/device:GPU:0";
add0.node()->set_assigned_device_name(kGpu0);
add1.node()->set_assigned_device_name(kGpu0);
add2.node()->set_assigned_device_name(kGpu0);
constexpr char kGpu1[] = "/device:GPU:1";
add3.node()->set_assigned_device_name(kGpu1);
add4.node()->set_assigned_device_name(kGpu1);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {{"add0", "add1", "add2"}});
}
{
constexpr char kGpuAny[] = "/device:GPU:*";
add3.node()->set_assigned_device_name(kGpuAny);
add4.node()->set_assigned_device_name(kGpuAny);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
}
}
TEST_F(SegmentTest, AvoidCycle) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> without_add2 = {"add0", "add1", "add3", "add4"};
DisableImplicitBatchMode();
RunTest(&g, without_add2, without_add2, without_add2, {});
}
TEST_F(SegmentTest, Multiple) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
auto add7 = ops::Add(s.WithOpName("add7"), feed, feed);
auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
auto add5 = ops::Add(s.WithOpName("add5"), add2, add7);
auto add8 = ops::Add(s.WithOpName("add8"), add7, add7);
auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add2, add5);
auto add6 = ops::Add(s.WithOpName("add6"), add5, add8);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4",
"add5", "add6", "add7", "add8"};
auto without_add5 = all_adds - "add5";
DisableImplicitBatchMode();
RunTest(&g, without_add5, without_add5, without_add5,
{{"add0", "add1", "add2", "add3"}, {"add6", "add8"}});
auto without_add8 = all_adds - "add8";
auto without_add6 = all_adds - "add6";
RunTest(&g, without_add8, without_add6, all_adds, {{"add3", "add4"}});
auto without_add3 = all_adds - "add3";
auto without_add0 = all_adds - "add0";
RunTest(&g, without_add3, all_adds, without_add0, {{"add1", "add7", "add8"}});
}
TEST_F(SegmentTest, BigIfElse) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
auto add1 = ops::Add(s.WithOpName("add1"), add0, add0);
auto add2 = ops::Add(s.WithOpName("add2"), add1, add1);
auto add3 = ops::Add(s.WithOpName("add3"), add2, add2);
auto add4 = ops::Add(s.WithOpName("add4"), add0, add0);
auto add5 = ops::Add(s.WithOpName("add5"), add4, add4);
auto add6 = ops::Add(s.WithOpName("add6"), add5, add5);
auto add7 = ops::Add(s.WithOpName("add7"), add3, add6);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_adds = {"add0", "add1", "add2", "add3",
"add4", "add5", "add6", "add7"};
DisableImplicitBatchMode();
RunTest(&g, all_adds - "add2", all_adds, all_adds,
{{"add0", "add1"}, {"add3", "add4", "add5", "add6", "add7"}});
}
TEST_F(SegmentTest, IdentityOps) {
Scope s = Scope::NewRootScope();
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
auto identity0 = ops::Identity(s.WithOpName("identity0"), feed);
auto identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
auto identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
auto identity3 = ops::Identity(s.WithOpName("identity3"), identity2);
Graph g(OpRegistry::Global());
TF_EXPECT_OK(s.ToGraph(&g));
const std::set<string> all_identities = {"identity0", "identity1",
"identity2", "identity3"};
DisableImplicitBatchMode();
RunTest(&g, all_identities, all_identities, all_identities, {});
}
TEST_F(SegmentTest, ExcludeAddWithDynamicNonBatchDimension) {
Scope s = Scope::NewRootScope();
auto feed_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2, 3}));
auto feed_1_shape = ops::Placeholder::Shape(PartialTensorShape({-1, -1, 3}));
auto const_val = ops::Const<float>(s, {1.0}, {});
auto feed_0 =
ops::Placeholder(s.WithOpName("feed-1"), DT_FLOAT, feed_0_shape);
auto feed_1 =
ops::Placeholder(s.WithOpName("feed-2"), DT_FLOAT, feed_1_shape);
auto add_0 = ops::Add(s.WithOpName("add-0"), feed_0, const_val);
auto add_1 = ops::Add(s.WithOpName("add-1"), add_0, feed_0);
auto add_2 = ops::Add(s.WithOpName("add-2"), const_val, feed_1);
grappler::GrapplerItem item;
item.fetch.push_back("add-2");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"add-0", "add-1", "add-2"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{all_nodes - "add-2"});
}
TEST_F(SegmentTest, ExcludeReshapeWithDynamicNonBatchDimensionInOutput) {
Scope s = Scope::NewRootScope();
auto feed_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2, 3}));
auto const_val = ops::Const<float>(s, {1.0}, {});
auto feed_0 =
ops::Placeholder(s.WithOpName("feed-1"), DT_FLOAT, feed_0_shape);
auto add_0 = ops::Add(s.WithOpName("add-0"), feed_0, const_val);
auto reshape = ops::Reshape(s.WithOpName("reshape"), add_0, Input({6, -1}));
auto add_1 = ops::Add(s.WithOpName("add-1"), reshape, const_val);
grappler::GrapplerItem item;
item.fetch.push_back("add-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"add-0", "reshape", "add-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
TEST_F(SegmentTest, RankOneCannotUseImplicitBatch) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(TensorShape({3}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({3}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-scalar"), 1.0f, {});
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, const_val);
auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, const_val);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
item.fetch.push_back("output-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-scalar", "output-0", "output-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
TEST_F(SegmentTest, TwoChainsDiffBatchSizes) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(TensorShape({2, 3}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({5, 3}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-scalar"), 1.0f, {});
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, const_val);
auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, const_val);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
item.fetch.push_back("output-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-scalar", "output-0", "output-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{{"output-0", "const-scalar"}});
EnableImplicitBatchModeForStaticEngine(1);
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{{"output-0", "const-scalar"}});
}
TEST_F(SegmentTest, SameRankImplicitBroadcastingStaticBatchSize) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(TensorShape({2, 3, 1}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({1, 3, 4}));
auto input_2_shape = ops::Placeholder::Shape(TensorShape({2, 3, 4}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto input_2 =
ops::Placeholder(s.WithOpName("input-2"), DT_FLOAT, input_2_shape);
auto multiple = ops::Mul(s.WithOpName("multiple"), input_2, input_2);
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, multiple);
auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, multiple);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
item.fetch.push_back("output-1");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"multiple", "output-0", "output-1"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{all_nodes});
}
TEST_F(SegmentTest, SameRankImplicitBroadcastingDynamicBatchSize) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({1, 2}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-val"), 1.0f, {1, 1});
auto add_0 = ops::Add(s.WithOpName("add-0"), input_0, const_val);
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, add_0);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-val", "add-0", "output-0"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
{{"const-val", "add-0", "output-0"}});
}
TEST_F(SegmentTest, IncompatibleBatchSizes) {
Scope s = Scope::NewRootScope();
auto input_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2}));
auto input_1_shape = ops::Placeholder::Shape(TensorShape({2, 2}));
auto input_0 =
ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
auto input_1 =
ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
auto const_val = ops::Const(s.WithOpName("const-val"), 1.0f, {2, 2});
auto add_0 = ops::Add(s.WithOpName("add-0"), input_0, const_val);
auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, add_0);
grappler::GrapplerItem item;
item.fetch.push_back("output-0");
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties static_graph_properties(item);
TF_EXPECT_OK(static_graph_properties.InferStatically(true));
Graph g(OpRegistry::Global());
TF_CHECK_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
const std::set<string> all_nodes = {"const-val", "add-0", "output-0"};
EnableImplicitBatchModeForStaticEngine();
RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/segment/segment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/segment/segment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bea336ee-0035-41f2-b969-693116bc4a27 | cpp | tensorflow/tensorflow | trt_lru_cache | tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.cc | tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache_test.cc | #include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include <sstream>
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/mutex.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
string CalibrationContext::TerminateCalibration() {
mutex_lock l(mu_);
if (terminated_) return calibration_table_;
TRTInt8Calibrator* raw_calibrator = calibrator_.get();
raw_calibrator->waitAndSetDone();
terminated_ = true;
thr_->join();
calibration_table_ = raw_calibrator->getCalibrationTableAsString();
return calibration_table_;
}
const absl::string_view kTfTrtContainerName = "TF-TRT";
Logger& TRTEngineCacheResource::GetLogger() {
static Logger* logger = new Logger();
return *logger;
}
TRTEngineCacheResource::TRTEngineCacheResource(OpKernelContext* ctx,
size_t capacity)
: cache_(capacity) {
auto device = ctx->device();
auto alloc = device->GetAllocator(AllocatorAttributes());
if (!alloc) {
LOG(ERROR) << "Can't find device allocator for gpu device "
<< device->name();
allocator_ = nullptr;
} else {
allocator_.reset(new TRTDeviceAllocator(alloc));
}
}
TRTEngineCacheResource::~TRTEngineCacheResource() {
VLOG(1) << "Destroying TRTEngineCacheResource...";
}
string TRTEngineCacheResource::DebugString() const {
std::stringstream oss;
using std::dec;
using std::endl;
using std::hex;
oss << "TRTEngineCacheResource: ";
oss << "TRTBaseAllocator = " << hex << allocator_.get() << dec << ", ";
oss << "LRUCache = " << hex << &cache_ << dec << endl;
oss << "Containing " << cache_.size() << " entries: " << endl;
for (const auto& item : cache_) {
mutex_lock lock(item.second->mu);
oss << TensorShapeUtils::ShapeListString(item.first) << ": " << hex
<< "ICudaEngine: " << item.second->GetCudaEngine() << ", "
<< "IExecutionContext: ";
absl::c_for_each(
item.second->execution_contexts,
[&](const ExecutionContext& ctx) { oss << ctx.get() << ","; });
oss << dec << endl;
}
return oss.str();
}
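// Returns the cached engine context compatible with `input_shapes` that has
// the smallest batch size, or nullptr if no compatible entry exists.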
EngineContext* TRTEngineCacheResource::GetEngineContext(
const std::vector<TensorShape>& input_shapes) {
EngineContext* engine_context = nullptr;
int64 min_matched_batch_size = kint64max;
for (const auto& pair : cache_) {
const std::vector<TensorShape>& cached_input_shapes = pair.first;
if (input_shapes.size() != cached_input_shapes.size()) {
LOG(ERROR) << "Input shape list size mismatch"
<< ", cached size: " << cached_input_shapes.size()
<< " vs. input size: " << input_shapes.size();
}
if (AreShapesCompatible(input_shapes, cached_input_shapes)) {
const int cached_batch_size = cached_input_shapes[0].dim_size(0);
if (min_matched_batch_size > cached_batch_size) {
min_matched_batch_size = cached_batch_size;
engine_context = pair.second.get();
}
}
}
return engine_context;
}
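// In explicit batch mode the cache holds at most one engine; `profile_id`
// selects one of its optimization profiles.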
EngineContext* TRTEngineCacheResource::GetEngineContext(const int profile_id) {
if (profiles_.NeedProfiles() && profile_id >= profiles_.GetNumProfiles()) {
LOG(ERROR) << "Out of range: profile_id " << profile_id
<< " is larger than number of profiles "
<< profiles_.GetNumProfiles();
return nullptr;
}
if (cache_.size() > 1) {
LOG(ERROR) << "Cache is expected to have at most "
<< "1 engine in explicit batch mode where profiles are used.";
return nullptr;
}
if (cache_.size() == 0) {
return nullptr;
}
return cache_.begin()->second.get();
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tensorrt {
TEST(LRUCacheTest, Basic) {
LRUCache<int, int, std::hash<int>> cache;
cache.reserve(2);
cache.emplace(10, 100);
EXPECT_EQ(cache.size(), 1);
EXPECT_EQ(cache.count(10), 1);
EXPECT_EQ(cache.at(10), 100);
EXPECT_EQ(cache.count(100), 0);
cache.emplace(20, 200);
EXPECT_EQ(cache.size(), 2);
EXPECT_EQ(cache.count(10), 1);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.at(10), 100);
EXPECT_EQ(cache.at(20), 200);
EXPECT_EQ(cache.count(100), 0);
EXPECT_EQ(cache.count(200), 0);
cache.emplace(30, 300);
EXPECT_EQ(cache.count(10), 0);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.count(30), 1);
cache.at(20);
cache.emplace(40, 400);
EXPECT_EQ(cache.count(10), 0);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.count(30), 0);
EXPECT_EQ(cache.count(40), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
078c6641-034b-4980-8a9d-61b5f28ff7fc | cpp | tensorflow/tensorflow | trt_testutils | tensorflow/compiler/tf2tensorrt/utils/trt_testutils.cc | tensorflow/compiler/tf2tensorrt/utils/trt_testutils_test.cc | #include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <map>
#include <string>
#include <vector>
#include <gmock/gmock.h>
namespace tensorflow {
namespace tensorrt {
namespace convert {
::testing::Matcher<std::vector<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error, bool nan_sensitive) {
std::vector<::testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
if (nan_sensitive) {
matchers.emplace_back(::testing::NanSensitiveFloatNear(v, max_abs_error));
} else if (max_abs_error == 0) {
matchers.emplace_back(::testing::FloatEq(v));
} else {
EXPECT_GE(max_abs_error, 0);
matchers.emplace_back(::testing::FloatNear(v, max_abs_error));
}
}
return ::testing::ElementsAreArray(matchers);
}
nvinfer1::Dims CreateDims(const std::vector<int>& d) {
nvinfer1::Dims dims;
dims.nbDims = d.size();
for (int i = 0; i < d.size(); ++i) {
dims.d[i] = d[i];
}
return dims;
}
NodeDef MakeNodeDef(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs,
const std::map<std::string, AttrValue> attrs) {
NodeDef node_def;
node_def.set_name(name);
node_def.set_op(op);
for (const auto& input : inputs) {
node_def.add_input(input);
}
for (const auto& attr : attrs) {
(*node_def.mutable_attr())[attr.first] = attr.second;
}
return node_def;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Not;
TEST(TrtDimsMatcher, ParameterizedMatchers) {
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), DimsAreArray({1, 2, 3, 4}));
EXPECT_THAT(nvinfer1::Dims{}, Not(DimsAreArray({1, 2})));
std::vector<int> empty_dims;
EXPECT_THAT(nvinfer1::Dims{}, DimsAreArray(empty_dims));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(DimsAreArray({1, 2, 3, 5})));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(DimsAreArray({1, 2, 5})));
}
TEST(TrtDimsMatcher, EqualityMatcher) {
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Eq(nvinfer1::Dims4(1, 2, 3, 4)));
EXPECT_THAT(nvinfer1::Dims{}, Eq(nvinfer1::Dims()));
EXPECT_THAT(nvinfer1::Dims{}, Not(Eq(nvinfer1::DimsHW())));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4),
Not(Eq(nvinfer1::Dims4(1, 2, 3, 3))));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(Eq(nvinfer1::Dims2(1, 2))));
}
TEST(INetworkDefinitionMatchers, CorrectlyMatch) {
Logger& logger = *Logger::GetLogger();
TrtUniquePtrType<nvinfer1::IBuilder> builder(
nvinfer1::createInferBuilder(logger));
TrtUniquePtrType<nvinfer1::INetworkDefinition> network(
builder->createNetworkV2(0L));
EXPECT_THAT(network.get(), AllOf(Not(LayerNamesAreArray({"some layer"})),
LayerNamesNonEmpty()));
nvinfer1::Weights weights;
weights.type = nvinfer1::DataType::kFLOAT;
std::array<float, 1> vals;
weights.values = vals.data();
weights.count = 1;
auto input = network->addInput("input-tensor", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims3{1, 1, 1});
ASSERT_NE(input, nullptr);
const char* fc_layer_name = "my-fc-layer";
auto layer = network->addFullyConnected(*input, 1, weights, weights);
ASSERT_NE(layer, nullptr);
layer->setName(fc_layer_name);
EXPECT_THAT(network.get(),
AllOf(LayerNamesNonEmpty(), LayerNamesAreArray({fc_layer_name})));
layer = network->addFullyConnected(*input, 1, weights, weights);
EXPECT_THAT(network.get(), AllOf(LayerNamesNonEmpty(),
Not(LayerNamesAreArray({fc_layer_name}))));
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_testutils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_testutils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5057728d-228f-4dab-b62d-fbf3fd4efa64 | cpp | tensorflow/tensorflow | trt_shape_optimization_profiles | tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc | tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles_test.cc | #include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include <algorithm>
#include <functional>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
namespace tensorflow {
namespace tensorrt {
template <typename TensorShapeType>
std::vector<nvinfer1::Dims> GetDimVec(std::vector<TensorShapeType> shape_vec) {
std::vector<nvinfer1::Dims> dimvec(shape_vec.size());
absl::c_transform(shape_vec, dimvec.begin(), [](TensorShapeType shape) {
auto adap = DimsAdapter::Create(shape);
TF_CHECK_OK(adap.status());
return adap->AsTrtDims();
});
return dimvec;
}
void EnforceCompatibility(nvinfer1::Dims* prof_dims,
const PartialTensorShape& input_shape) {
for (int i = 0; i < input_shape.dims(); i++) {
if (input_shape.dim_size(i) != -1) {
prof_dims->d[i] = input_shape.dim_size(i);
}
}
}
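// Derives a profile from one collected shape vector: min clamps the batch
// dimension to 1, while opt and max keep the observed shapes.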
void SetImplicitBatchModeCompatibleProfile(
const std::vector<nvinfer1::Dims>& dimvec, std::vector<nvinfer1::Dims>* min,
std::vector<nvinfer1::Dims>* opt, std::vector<nvinfer1::Dims>* max) {
*min = dimvec;
for (auto& dim : *min) {
if (dim.d[0] != -1) dim.d[0] = 1;
}
*opt = dimvec;
*max = dimvec;
}
void TrtShapeOptimizationProfile::ImplicitBatchModeCompatibleStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
for (auto& shape_vec : collected_shapes) {
std::vector<nvinfer1::Dims> min, opt, max;
SetImplicitBatchModeCompatibleProfile(shape_vec, &min, &opt, &max);
VLOG(2) << "Initializing optimization profile config with min="
<< DebugString(min) << ", opt=max=" << DebugString(max);
OptimizationProfileConfig profConfig{min, opt, max};
profiles_.push_back(std::move(profConfig));
}
}
template <typename BinaryOperation>
Status ShapeProfileBinaryOp(std::vector<nvinfer1::Dims>* x,
const std::vector<nvinfer1::Dims>& y,
BinaryOperation op) {
if (x->size() != y.size())
return errors::InvalidArgument(
"Number of input tensors differ during profile creation");
for (int i = 0; i < x->size(); i++) {
if (x->at(i).nbDims != y[i].nbDims)
      return errors::InvalidArgument(
          "Number of input dimensions differ during profile creation at dim ",
          i, ", values ", x->at(i).nbDims, " and ", y[i].nbDims);
for (int j = 0; j < x->at(i).nbDims; j++) {
x->at(i).d[j] = op(x->at(i).d[j], y[i].d[j]);
}
}
return OkStatus();
}
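// Builds a single profile whose min/max are the elementwise minimum/maximum
// over all collected shape vectors (opt is set equal to max).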
Status TrtShapeOptimizationProfile::RangeStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
if (collected_shapes.empty()) return OkStatus();
std::vector<nvinfer1::Dims> min = collected_shapes[0];
std::vector<nvinfer1::Dims> max = min;
for (int i = 1; i < collected_shapes.size(); i++) {
TF_RETURN_IF_ERROR(
ShapeProfileBinaryOp(&min, collected_shapes[i],
[](int a, int b) { return std::min(a, b); }));
TF_RETURN_IF_ERROR(
ShapeProfileBinaryOp(&max, collected_shapes[i],
[](int a, int b) { return std::max(a, b); }));
}
VLOG(2) << "Initializing optimization profile config with min="
<< DebugString(min) << ", opt=max=" << DebugString(max);
OptimizationProfileConfig profConfig{min, max, max};
profiles_.push_back(std::move(profConfig));
return OkStatus();
}
void TrtShapeOptimizationProfile::OptimalStrategy(
const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
for (auto& shape_vec : collected_shapes) {
std::vector<nvinfer1::Dims> min = shape_vec;
std::vector<nvinfer1::Dims> opt = min;
std::vector<nvinfer1::Dims> max = min;
VLOG(2) << "Initializing optimization profile config with min=opt=max="
<< DebugString(min);
OptimizationProfileConfig profConfig{min, opt, max};
profiles_.push_back(std::move(profConfig));
}
}
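// Copies the values of (likely) shape tensors from device to host so they can
// participate in profile selection and creation.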
Status TrtShapeOptimizationProfile::CollectShapeValues(OpKernelContext* ctx) {
tensorflow::profiler::TraceMe activity(
"TrtShapeOptimizationProfile::CollectShapeValues",
tensorflow::profiler::TraceMeLevel::kInfo);
cudaStream_t stream = reinterpret_cast<cudaStream_t>(CHECK_NOTNULL(
ctx->op_device_context()->stream()->platform_specific_handle().stream));
actual_shape_values_.resize(ctx->num_inputs());
if (is_shape_tensor_.empty()) {
is_shape_tensor_.resize(ctx->num_inputs());
for (int i = 0; i < ctx->num_inputs(); i++) {
is_shape_tensor_[i] = IsTrtShapeTensorCompatible(ctx->input(i));
}
}
int n_shape_val = 0;
for (int i = 0; i < ctx->num_inputs(); i++) {
if (is_shape_tensor_[i]) {
if (ctx->input_dtype(i) != DT_INT32) {
is_shape_tensor_[i] = false;
continue;
}
if (input_shape_values_.size() > 0 &&
input_shape_values_[0][i].nbDims != ctx->input(i).NumElements()) {
is_shape_tensor_[i] = false;
continue;
}
n_shape_val++;
const Tensor& input = ctx->input(i);
actual_shape_values_[i].nbDims = input.NumElements();
auto ret = cudaMemcpyAsync(
actual_shape_values_[i].d, input.flat<int32>().data(),
input.NumElements() * sizeof(int32), cudaMemcpyDeviceToHost, stream);
if (ret != 0) {
return errors::Internal("Could not copy shape tensor values");
}
VLOG(2) << "Input " << i << " is (probably) a shape tensor, n_values="
<< input.NumElements();
} else {
actual_shape_values_[i] = {0, {}};
}
}
if (n_shape_val > 0) {
cudaStreamSynchronize(stream);
}
return OkStatus();
}
Status TrtShapeOptimizationProfile::CollectShapeValues(const DataVec& input) {
actual_shape_values_.resize(input.size());
for (int i = 0; i < input.size(); i++) {
if (is_shape_tensor_[i]) {
if (!IsTrtShapeTensorCompatible(input[i].tensor)) {
return errors::Internal("Inconsistent shape tensor ", input[i].name,
", ", i);
}
int n_elements = input[i].tensor.NumElements();
actual_shape_values_[i].nbDims = n_elements;
std::copy(input[i].tensor.flat<int32>().data(),
input[i].tensor.flat<int32>().data() + n_elements,
actual_shape_values_[i].d);
VLOG(2) << "Collected tensor shape values "
<< DebugString(actual_shape_values_[i]);
} else {
actual_shape_values_[i] = {0, {}};
}
}
return OkStatus();
}
void FixShapeValueProfile(OptimizationProfileConfig* prof,
const std::vector<bool>& is_shape_tensor) {
int shape_value_offset = is_shape_tensor.size();
for (int i = 0; i < is_shape_tensor.size(); i++) {
if (is_shape_tensor[i] &&
std::equal(prof->min[shape_value_offset + i].d,
prof->min[shape_value_offset + i].d +
prof->min[shape_value_offset + i].nbDims,
prof->max[shape_value_offset + i].d)) {
prof->max[shape_value_offset + i].d[0]++;
VLOG(2) << "Adjusted profile for shape value tensor " << i << " "
<< DebugString(prof->max[shape_value_offset + i]);
} else {
VLOG(2) << i << " is not a shape tensor." << is_shape_tensor[i];
}
}
}
bool AlreadyCollected(const std::vector<std::vector<nvinfer1::Dims>>& values,
const std::vector<nvinfer1::Dims>& rhs) {
for (auto& lhs : values) {
bool ret = lhs.size() == rhs.size();
for (int i = 0; ret && i < lhs.size(); i++) {
ret &= lhs[i].nbDims == rhs[i].nbDims;
for (int j = 0; ret && j < lhs[i].nbDims; j++) {
ret &= (lhs[i].d[j] == rhs[i].d[j]);
}
}
if (ret) return true;
}
return false;
}
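// Turns the shapes collected during the profile-generation (build) phase into
// optimization profiles according to the selected strategy.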
void TrtShapeOptimizationProfile::InitProfiles(
const std::vector<PartialTensorShape>& input_partial_shapes,
ProfileStrategy strategy) {
strategy_ = strategy;
if (input_shapes_.size() == 0) {
VLOG(1) << "Not creating profiles without input_shapes. "
"You have to enable profile generation mode first (build).";
return;
}
std::vector<std::vector<nvinfer1::Dims>> collected_shapes;
for (int i = 0; i < input_shapes_.size(); i++) {
auto shape_vec = input_shapes_[i];
VLOG(2) << "Initprofiles, processing shape " << i;
if (!shape_vec.empty()) {
for (int k = 0; k < input_shape_values_[i].size(); k++) {
if (!is_shape_tensor_[k])
input_shape_values_[i][k] = nvinfer1::Dims{0, {}};
}
std::vector<nvinfer1::Dims> dimvec = GetDimVec(shape_vec);
dimvec.insert(dimvec.end(), input_shape_values_[i].begin(),
input_shape_values_[i].end());
if (!AlreadyCollected(collected_shapes, dimvec)) {
collected_shapes.push_back(dimvec);
}
}
}
switch (strategy_) {
case ProfileStrategy::kImplicitBatchModeCompatible:
VLOG(1) << "Creating profiles with ImplicitBatchModeCompatible strategy";
ImplicitBatchModeCompatibleStrategy(collected_shapes);
break;
case ProfileStrategy::kRange:
VLOG(1) << "Creating profiles with Range strategy";
TF_CHECK_OK(RangeStrategy(collected_shapes));
break;
case ProfileStrategy::kRangeOptimal:
VLOG(1) << "Creating profiles with RangeOptimal strategy";
OptimalStrategy(collected_shapes);
TF_CHECK_OK(RangeStrategy(collected_shapes));
break;
case ProfileStrategy::kOptimal:
VLOG(1) << "Creating profiles with Optimal strategy";
OptimalStrategy(collected_shapes);
break;
}
SetShapeTensorMask(input_partial_shapes);
if (input_partial_shapes.size() > 0) {
for (OptimizationProfileConfig& prof : profiles_) {
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
FixShapeValueProfile(&prof, is_shape_tensor_);
#endif
for (int i = 0; i < input_partial_shapes.size(); i++) {
auto network_input = input_partial_shapes[i];
EnforceCompatibility(&prof.min[i], network_input);
EnforceCompatibility(&prof.opt[i], network_input);
EnforceCompatibility(&prof.max[i], network_input);
}
}
}
}
void TrtShapeOptimizationProfile::InitCalibProfile(
const std::vector<TensorShape>& shapes) {
VLOG(1) << "Collected shape(s) " << DebugString(shapes) << " for "
<< " calibration profile.";
auto shape_vec = shapes;
if (!shape_vec.empty()) {
std::vector<nvinfer1::Dims> dimvec = GetDimVec(shape_vec);
dimvec.insert(dimvec.end(), actual_shape_values_.begin(),
actual_shape_values_.end());
VLOG(2) << "Initializing calibration optimization profile config with "
<< "min=opt=max " << DebugString(dimvec);
OptimizationProfileConfig profConfig{dimvec, dimvec, dimvec};
calib_profiles_ = std::move(profConfig);
} else {
VLOG(2) << "Failed to initialize calibration optimization profile.";
}
}
Status TrtShapeOptimizationProfile::AddProfiles(
nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config,
const nvinfer1::INetworkDefinition* network) {
if (!calib_profiles_.min.empty()) {
VLOG(2) << "Setting up calibration profiles";
auto* calibProfile = builder->createOptimizationProfile();
Status status =
calib_profiles_.SetDimensions(network, calibProfile, input_mask_);
if (!status.ok()) {
return status;
}
bool result = false;
if (calibProfile->isValid()) {
result = config->setCalibrationProfile(calibProfile);
} else {
VLOG(2) << "Calibration profile is not valid";
}
if (result) {
VLOG(2) << "Added calibration optimization profile "
<< calib_profiles_.DebugString() << " to builder config.";
} else {
VLOG(2) << "FAILED TO ADD PROFILE";
LOG(ERROR) << "Failed to add calibration optimization profile "
<< calib_profiles_.DebugString()
<< ". This usually happens when profile is invalid.";
}
}
for (int i = 0; i < profiles_.size(); i++) {
auto* optProfile = builder->createOptimizationProfile();
Status status =
profiles_[i].SetDimensions(network, optProfile, input_mask_);
if (!status.ok()) {
return status;
}
int idx = -1;
if (optProfile->isValid()) {
idx = config->addOptimizationProfile(optProfile);
}
if (idx >= 0) {
if (i != idx) {
return errors::Internal(
"Profile index of engine config is different from source profile "
"index: ",
i, " != ", idx);
}
VLOG(1) << "Added optimization profile " << profiles_[i].DebugString()
<< " with idx " << idx << " to builder config.";
} else {
LOG(ERROR) << "Failed to add optimization profile "
<< profiles_[i].DebugString()
<< ". This usually happens when profile is invalid.";
}
}
if (!profiles_.empty() && config->getNbOptimizationProfiles() == 0) {
return errors::Internal("Failure in adding an optimization profile.");
}
need_profiles_ = config->getNbOptimizationProfiles() > 0;
SetShapeTensorMask(network);
is_pruned_input_.resize(network->getNbInputs());
absl::c_fill(is_pruned_input_, false);
return OkStatus();
}
Status TrtShapeOptimizationProfile::ConfigureBuilder(
nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config,
const nvinfer1::INetworkDefinition* network) {
TF_RETURN_IF_ERROR(AddProfiles(builder, config, network));
return OkStatus();
}
void TrtShapeOptimizationProfile::SetShapeTensorMask(
const nvinfer1::ICudaEngine* engine, int n_inputs) {
is_shape_tensor_.resize(n_inputs, false);
for (int i = 0; i < n_inputs; i++) {
int binding_index;
Status status = GetTrtBindingIndex(i, 0, engine, &binding_index);
if (!status.ok()) {
continue;
}
is_shape_tensor_[i] = engine->isShapeBinding(binding_index);
if (is_shape_tensor_[i]) {
VLOG(2) << "Found shape tensor at " << i;
}
}
has_shape_tensor_ =
absl::c_any_of(is_shape_tensor_, [](bool b) { return b; });
}
void TrtShapeOptimizationProfile::SetShapeTensorMask(
const nvinfer1::INetworkDefinition* network) {
int n_inputs = network->getNbInputs();
is_shape_tensor_.resize(n_inputs, false);
for (int i = 0; i < n_inputs; i++) {
const ITensorProxyPtr input = network->getInput(i);
is_shape_tensor_[i] = input->isShapeTensor();
if (is_shape_tensor_[i]) {
VLOG(2) << "Found shape tensor " << input->getName() << " at " << i;
}
}
has_shape_tensor_ =
absl::c_any_of(is_shape_tensor_, [](bool b) { return b; });
}
void TrtShapeOptimizationProfile::SetShapeTensorMask(
const std::vector<PartialTensorShape>& input_partial_shapes) {
if (is_shape_tensor_.size() == input_partial_shapes.size()) {
return;
}
is_shape_tensor_.resize(input_partial_shapes.size(), false);
for (int i = 0; i < input_partial_shapes.size(); i++) {
is_shape_tensor_[i] = IsTrtShapeTensorCompatible(input_partial_shapes[i]);
if (is_shape_tensor_[i]) {
VLOG(2) << "Found shape compatible tensor at " << i;
}
}
has_shape_tensor_ =
absl::c_any_of(is_shape_tensor_, [](bool b) { return b; });
}
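// Returns the index of the first profile covering the given input shapes and
// shape-tensor values, or -1 if none matches.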
int TrtShapeOptimizationProfile::GetProfileNumber(
const std::vector<TensorShape>& shapes) {
tensorflow::profiler::TraceMe activity(
"TrtShapeOptimizationProfile::GetProfileNumber",
tensorflow::profiler::TraceMeLevel::kInfo);
if (!need_profiles_) return 0;
for (int i = 0; i < profiles_.size(); i++) {
if (profiles_[i].IncludesShapes(shapes, HasShapeTensor(),
actual_shape_values_, is_pruned_input_,
is_shape_tensor_)) {
return i;
}
}
VLOG(1) << "Profile not found for input shapes " << DebugString(shapes);
VLOG(2) << " and shape values " << DebugString(actual_shape_values_);
return -1;
}
Status TrtShapeOptimizationProfile::CreateExecutionContexts(
nvinfer1::ICudaEngine* engine,
std::vector<ExecutionContext>* exec_contexts) {
int i = 0;
do {
VLOG(1) << "Creating execution context " << i;
ExecutionContext context = ExecutionContext::Create(engine);
if (i > 0) {
if (!context->setOptimizationProfile(i)) {
return errors::Internal("Could not set TRT optimization profile.");
}
}
exec_contexts->push_back(std::move(context));
i++;
} while (i < profiles_.size());
return OkStatus();
}
Status TrtShapeOptimizationProfile::SetInputShapeBinding(
int input_index, int binding_index, nvinfer1::ICudaEngine* cuda_engine,
nvinfer1::IExecutionContext* exec_context) const {
tensorflow::profiler::TraceMe activity(
"TrtShapeOptimizationProfile::SetInputShapeBinding",
tensorflow::profiler::TraceMeLevel::kInfo);
if (cuda_engine->isShapeBinding(binding_index)) {
VLOG(2) << "Setting input shape binding for idx " << binding_index
<< ", with values "
<< DebugString(actual_shape_values_.at(input_index));
bool ret = exec_context->setInputShapeBinding(
binding_index, actual_shape_values_.at(input_index).d);
if (!ret) {
return errors::Internal("Could not set input shape binding for idx ",
binding_index);
}
}
return OkStatus();
}
nvinfer1::Dims GetDimsFromShapeVal(int prof_idx, int binding_idx,
nvinfer1::OptProfileSelector selector,
const nvinfer1::ICudaEngine* engine) {
if (engine->isShapeBinding(binding_idx)) {
const int32* shape_val_ptr =
engine->getProfileShapeValues(binding_idx, prof_idx, selector);
if (shape_val_ptr) {
VLOG(2) << "Found shape value in prof " << prof_idx << ", binding "
<< binding_idx;
nvinfer1::Dims dims = engine->getBindingDimensions(binding_idx);
int n_values = (dims.nbDims == 0) ? 1 : dims.d[0];
if (n_values > 0) {
dims.nbDims = n_values;
std::copy(shape_val_ptr, shape_val_ptr + n_values, dims.d);
}
return dims;
}
}
return {0, {0}};
}
Status TrtShapeOptimizationProfile::SetPrunedMask(
const nvinfer1::ICudaEngine* engine, int n_network_inputs) {
is_pruned_input_.resize(n_network_inputs);
absl::c_fill(is_pruned_input_, false);
for (int j = 0; j < n_network_inputs; j++) {
int binding_idx;
Status status = GetTrtBindingIndex(j, 0, engine, &binding_idx);
if (!status.ok()) {
is_pruned_input_[j] = true;
VLOG(2) << "Skipping pruned input " << j;
continue;
}
}
return OkStatus();
}
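// Reconstructs profile configurations from a deserialized engine so that
// profile selection keeps working for engines loaded from the cache.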
Status TrtShapeOptimizationProfile::RestoreProfiles(
const nvinfer1::ICudaEngine* engine, int n_network_inputs) {
need_profiles_ = false;
if (!engine) {
return OkStatus();
}
if (engine->hasImplicitBatchDimension()) {
return OkStatus();
}
int n_profiles = engine->getNbOptimizationProfiles();
need_profiles_ = n_profiles > 0;
int n_inputs = GetNumberOfEngineInputs(engine);
if (n_inputs > n_network_inputs) {
return errors::Internal("Incorrect number of engine inputs");
}
VLOG(2) << "Attempting to restore " << n_profiles << " profiles, each with "
<< n_inputs << " inputs";
SetShapeTensorMask(engine, n_network_inputs);
TF_RETURN_IF_ERROR(SetPrunedMask(engine, n_network_inputs));
for (int prof_idx = 0; prof_idx < n_profiles; prof_idx++) {
OptimizationProfileConfig cfg;
cfg.min.resize(n_network_inputs * 2);
cfg.max.resize(n_network_inputs * 2);
cfg.opt.resize(n_network_inputs * 2);
for (int j = 0; j < n_network_inputs; j++) {
if (is_pruned_input_[j]) continue;
int binding_idx;
TF_RETURN_IF_ERROR(GetTrtBindingIndex(j, 0, engine, &binding_idx));
nvinfer1::Dims min = engine->getProfileDimensions(
binding_idx, prof_idx, nvinfer1::OptProfileSelector::kMIN);
nvinfer1::Dims max = engine->getProfileDimensions(
binding_idx, prof_idx, nvinfer1::OptProfileSelector::kMAX);
nvinfer1::Dims opt = engine->getProfileDimensions(
binding_idx, prof_idx, nvinfer1::OptProfileSelector::kOPT);
cfg.min[j] = min;
cfg.max[j] = max;
cfg.opt[j] = opt;
cfg.min[j + n_inputs] = GetDimsFromShapeVal(
prof_idx, binding_idx, nvinfer1::OptProfileSelector::kMIN, engine);
cfg.max[j + n_inputs] = GetDimsFromShapeVal(
prof_idx, binding_idx, nvinfer1::OptProfileSelector::kMAX, engine);
cfg.opt[j + n_inputs] = GetDimsFromShapeVal(
prof_idx, binding_idx, nvinfer1::OptProfileSelector::kOPT, engine);
}
VLOG(2) << "Restored profile " << cfg.DebugString();
profiles_.push_back(std::move(cfg));
}
return OkStatus();
}
int TrtShapeOptimizationProfile::GetNumProfiles() const {
return profiles_.size();
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <string.h>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
std::vector<TensorShape> DimVecToShapeVec(
std::vector<nvinfer1::Dims3> dimvec,
bool expand_with_empty_shape_values = false) {
std::vector<TensorShape> shapevec(dimvec.size());
for (int i = 0; i < dimvec.size(); i++) {
TensorShape shape;
TF_CHECK_OK(
TensorShapeUtils::MakeShape(dimvec[i].d, dimvec[i].nbDims, &shape));
shapevec[i] = shape;
}
if (expand_with_empty_shape_values) {
shapevec.resize(2 * dimvec.size());
}
return shapevec;
}
bool DimsContained(const nvinfer1::Dims& dim, const nvinfer1::Dims& min,
const nvinfer1::Dims& max) {
if (dim.nbDims != min.nbDims || dim.nbDims != max.nbDims) {
return false;
}
for (int i = 0; i < dim.nbDims; i++) {
if (dim.d[i] < min.d[i] || dim.d[i] > max.d[i]) {
return false;
}
}
return true;
}
bool DimsEqual(const nvinfer1::Dims& a, const nvinfer1::Dims& b) {
if (a.nbDims != b.nbDims) {
return false;
}
for (int i = 0; i < a.nbDims; i++) {
if (a.d[i] != b.d[i]) {
return false;
}
}
return true;
}
class TrtShapeOptimizationProfileTest
: public ::testing::TestWithParam<ProfileStrategy> {
protected:
TrtShapeOptimizationProfileTest() {
strategy_ = GetParam();
builder_ = TrtUniquePtrType<nvinfer1::IBuilder>(
nvinfer1::createInferBuilder(logger_));
network_ = TrtUniquePtrType<nvinfer1::INetworkDefinition>(
builder_->createNetworkV2(flags_));
builder_config_ = TrtUniquePtrType<nvinfer1::IBuilderConfig>(
builder_->createBuilderConfig());
builder_config_->setMaxWorkspaceSize(1 << 10);
}
void DefineNetwork(nvinfer1::INetworkDefinition* network,
nvinfer1::Dims3& dims) {
ITensorProxyPtr input1 =
network->addInput("input1", nvinfer1::DataType::kFLOAT, dims);
EXPECT_NE(nullptr, input1->trt_tensor());
ITensorProxyPtr input2 =
network->addInput("input2", nvinfer1::DataType::kFLOAT, dims);
EXPECT_NE(nullptr, input2->trt_tensor());
auto layer =
network->addElementWise(*input1->trt_tensor(), *input2->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
EXPECT_NE(nullptr, layer);
ITensorProxyPtr output = layer->getOutput(0);
output->setName("output");
network->markOutput(*output->trt_tensor());
}
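// Checks whether a profile is (or is not) found for `dimvec`, and when one
// is, verifies that its kMIN/kMAX bounds contain dimvec; if test_optimality
// is set, also verifies that kOPT matches dimvec exactly.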
void CheckProfile(const std::vector<nvinfer1::Dims3>& dimvec,
TrtShapeOptimizationProfile* profile, bool has_prof,
bool test_optimality) {
std::vector<TensorShape> shape_vec = DimVecToShapeVec(dimvec);
int idx = profile->GetProfileNumber(shape_vec);
ASSERT_EQ(idx >= 0, has_prof);
if (idx < 0) return;
int prof_idx = exec_contexts_[idx]->getOptimizationProfile();
ASSERT_GE(prof_idx, 0);
for (int j = 0; j < dimvec.size(); j++) {
nvinfer1::Dims min = engine->getProfileDimensions(
j, prof_idx, nvinfer1::OptProfileSelector::kMIN);
nvinfer1::Dims max = engine->getProfileDimensions(
j, prof_idx, nvinfer1::OptProfileSelector::kMAX);
nvinfer1::Dims opt = engine->getProfileDimensions(
j, prof_idx, nvinfer1::OptProfileSelector::kOPT);
EXPECT_TRUE(DimsContained(dimvec[j], min, max));
if (test_optimality) {
EXPECT_TRUE(DimsEqual(dimvec[j], opt));
}
}
}
Logger& logger_ = *Logger::GetLogger();
TrtUniquePtrType<nvinfer1::IBuilder> builder_;
TrtUniquePtrType<nvinfer1::INetworkDefinition> network_;
TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine;
std::vector<ExecutionContext> exec_contexts_;
const uint32_t flags_ =
1U << static_cast<int>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
ProfileStrategy strategy_;
};
INSTANTIATE_TEST_CASE_P(
OptProfilesTestInstantiation, TrtShapeOptimizationProfileTest,
::testing::Values(ProfileStrategy::kRange, ProfileStrategy::kOptimal,
ProfileStrategy::kRangeOptimal,
ProfileStrategy::kImplicitBatchModeCompatible));
TEST_P(TrtShapeOptimizationProfileTest, Static) {
if (strategy_ != ProfileStrategy::kRange) return;
nvinfer1::Dims3 dims(8, 8, 10);
DefineNetwork(network_.get(), dims);
TrtShapeOptimizationProfile profile;
TF_CHECK_OK(profile.ConfigureBuilder(builder_.get(), builder_config_.get(),
network_.get()));
engine = TrtUniquePtrType<nvinfer1::ICudaEngine>(
builder_->buildEngineWithConfig(*network_, *builder_config_));
EXPECT_NE(nullptr, engine);
TF_CHECK_OK(profile.CreateExecutionContexts(engine.get(), &exec_contexts_));
ASSERT_EQ(exec_contexts_.size(), 1);
EXPECT_NE(nullptr, exec_contexts_[0]);
std::vector<nvinfer1::Dims3> dim_vec(2, dims);
std::vector<TensorShape> shape_vec = DimVecToShapeVec(dim_vec);
EXPECT_EQ(0, profile.GetProfileNumber(shape_vec));
}
TEST_P(TrtShapeOptimizationProfileTest, Dynamic) {
nvinfer1::Dims3 dims(-1, -1, 10);
DefineNetwork(network_.get(), dims);
TrtShapeOptimizationProfile profile;
std::vector<bool> input_mask(2, true);
profile.SetInputMask(input_mask);
std::vector<std::vector<nvinfer1::Dims3>> input_profiles{
{nvinfer1::Dims3(2, 2, 10), nvinfer1::Dims3(2, 2, 10)},
{nvinfer1::Dims3(3, 3, 10), nvinfer1::Dims3(3, 3, 10)},
{nvinfer1::Dims3(16, 16, 10), nvinfer1::Dims3(16, 16, 10)},
};
std::vector<nvinfer1::Dims3> unseen_shapes{nvinfer1::Dims3(5, 5, 10),
nvinfer1::Dims3(9, 9, 10)};
for (auto dim_vec : input_profiles) {
std::vector<TensorShape> shape_vec = DimVecToShapeVec(dim_vec, true);
profile.AddShape(shape_vec);
}
std::vector<PartialTensorShape> input_partial_shapes;
TF_CHECK_OK(GetNetworkInputShapes(network_.get(), &input_partial_shapes));
profile.InitProfiles(input_partial_shapes, strategy_);
TF_CHECK_OK(profile.ConfigureBuilder(builder_.get(), builder_config_.get(),
network_.get()));
engine = TrtUniquePtrType<nvinfer1::ICudaEngine>(
builder_->buildEngineWithConfig(*network_.get(), *builder_config_.get()));
ASSERT_NE(nullptr, engine);
TF_CHECK_OK(profile.CreateExecutionContexts(engine.get(), &exec_contexts_));
int n_profiles_exp;
switch (strategy_) {
case (ProfileStrategy::kImplicitBatchModeCompatible):
case (ProfileStrategy::kOptimal):
n_profiles_exp = input_profiles.size();
break;
case (ProfileStrategy::kRange):
n_profiles_exp = 1;
break;
case (ProfileStrategy::kRangeOptimal):
n_profiles_exp = 1 + input_profiles.size();
break;
}
EXPECT_EQ(exec_contexts_.size(), n_profiles_exp);
profile.SetShapeTensorMask(network_.get());
EXPECT_EQ(profile.HasShapeTensor(), false);
for (auto dimvec : input_profiles) {
bool test_optimal_prof = strategy_ == ProfileStrategy::kOptimal ||
strategy_ == ProfileStrategy::kRangeOptimal;
CheckProfile(dimvec, &profile, true, test_optimal_prof);
}
bool has_prof = (strategy_ == ProfileStrategy::kRange ||
strategy_ == ProfileStrategy::kRangeOptimal);
CheckProfile(unseen_shapes, &profile, has_prof, false);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
514097a7-1bf6-4ff9-845c-43cd5e6b876d | cpp | tensorflow/tensorflow | trt_allocator | tensorflow/compiler/tf2tensorrt/utils/trt_allocator.cc | tensorflow/compiler/tf2tensorrt/utils/trt_allocator_test.cc | #include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/platform/logging.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#endif
namespace tensorflow {
namespace tensorrt {
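// std::align-style helper: rounds `ptr` up to `alignment`, shrinks `space`
// by the adjustment, and returns the aligned pointer, or nullptr if `size`
// bytes no longer fit. Worked example (hypothetical values): alignment=8,
// size=4, ptr=0x1003, space=16 yields ptr=0x1008 and space=11.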
void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space) {
QCHECK_GT(alignment, 0ul) << "alignment must be greater than 0.";
QCHECK_EQ(0, alignment & (alignment - 1)) << "Alignment must be power of 2.";
QCHECK_GT(size, 0ul) << "size must be greater than 0.";
QCHECK(ptr) << "ptr must not be nullptr.";
QCHECK_GT(space, 0ul) << "space must be greater than 0.";
const uintptr_t ptr_val = reinterpret_cast<uintptr_t>(ptr);
QCHECK_GE(ptr_val + space, ptr_val) << "Provided space overflows.";
if (size > space) return nullptr;
const uintptr_t aligned_ptr_val = ((ptr_val + alignment - 1) & -alignment);
if (aligned_ptr_val > ptr_val + space - size) return nullptr;
ptr = reinterpret_cast<void*>(aligned_ptr_val);
const uintptr_t diff = aligned_ptr_val - ptr_val;
space -= diff;
return ptr;
}
}
}
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
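// TensorRT wants strongly aligned device memory (the code below forces
// 512-byte alignment), so we over-allocate by `alignment` bytes, align the
// pointer manually via Align(), and record the aligned->original mapping so
// free() can hand back the address the TF allocator actually returned.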
void* TRTDeviceAllocator::allocate(uint64_t size, uint64_t alignment,
uint32_t flags) noexcept {
if (size == 0) return nullptr;
alignment = 512;
assert((alignment & (alignment - 1)) == 0);
uint64_t total_size = size + alignment;
AllocationAttributes attributes;
attributes.retry_on_failure = false;
void* mem = allocator_->AllocateRaw(alignment, total_size, attributes);
if (!mem) return nullptr;
void* alloc_mem = mem;
QCHECK(Align(alignment, size, mem, total_size));
mutex_lock lock(mu_);
if (mem != alloc_mem) {
QCHECK(mem_map_.insert({mem, alloc_mem}).second);
}
VLOG(2) << "Allocated " << total_size << " bytes memory @" << alloc_mem
<< "; aligned to " << size << " bytes @" << mem << " with alignment "
<< alignment;
return mem;
}
TRTDeviceAllocator::TRTDeviceAllocator(Allocator* allocator)
: allocator_(allocator) {
VLOG(1) << "Using " << allocator->Name() << " allocator from TensorFlow";
}
void TRTDeviceAllocator::free(void* memory) noexcept {
mutex_lock lock(mu_);
VLOG(2) << "Deallocating @ " << memory;
if (memory) {
auto alloc_mem = mem_map_.find(memory);
if (alloc_mem != mem_map_.end()) {
memory = alloc_mem->second;
mem_map_.erase(alloc_mem->first);
}
allocator_->DeallocateRaw(memory);
}
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tensorrt {
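// Calls Align() once and checks its contract: on failure ptr/space must be
// untouched; on success the result is aligned, stays within the original
// buffer, and ptr + space still marks the buffer end.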
bool RunTest(const uint64_t alignment, const uint64_t size,
const intptr_t orig_ptr_val, const uint64_t orig_space) {
void* const orig_ptr = reinterpret_cast<void*>(orig_ptr_val);
void* ptr = orig_ptr;
uint64_t space = orig_space;
void* result = Align(alignment, size, ptr, space);
if (result == nullptr) {
EXPECT_EQ(orig_ptr, ptr);
EXPECT_EQ(orig_space, space);
return false;
} else {
EXPECT_EQ(result, ptr);
const intptr_t ptr_val = reinterpret_cast<intptr_t>(ptr);
EXPECT_EQ(0, ptr_val % alignment);
EXPECT_GE(ptr_val, orig_ptr_val);
EXPECT_GE(space, size);
EXPECT_LE(space, orig_space);
EXPECT_EQ(ptr_val + space, orig_ptr_val + orig_space);
return true;
}
}
TEST(TRTAllocatorTest, Align) {
for (const uint64_t space :
{1ul, 2ul, 3ul, 4ul, 7ul, 8ul, 9ul, 10ul, 16ul, 32ul, 511ul, 512ul,
513ul, 700ul, 12345ul, 1ul << 32}) {
for (uint64_t alignment = 1; alignment <= space * 4; alignment *= 2) {
for (const uintptr_t ptr_val :
{static_cast<uint64_t>(1),
alignment == 1 ? static_cast<uint64_t>(1) : alignment - 1,
alignment, alignment + 1, alignment + (alignment / 2)}) {
if (ptr_val % alignment == 0) {
for (const uint64_t size :
{static_cast<uint64_t>(1),
space == 1 ? static_cast<uint64_t>(1) : space - 1, space,
space + 1}) {
EXPECT_EQ(space >= size, RunTest(alignment, size, ptr_val, space));
}
} else {
EXPECT_FALSE(RunTest(alignment, space, ptr_val, space));
const uint64_t diff = alignment - ptr_val % alignment;
if (space > diff) {
EXPECT_TRUE(
RunTest(alignment, space - diff, ptr_val + diff, space - diff));
for (const uint64_t size :
{static_cast<uint64_t>(1),
space - diff > 1 ? space - diff - 1
: static_cast<uint64_t>(1),
space - diff, space - diff + 1, space - 1}) {
EXPECT_EQ(space - diff >= size,
RunTest(alignment, size, ptr_val, space));
}
} else {
EXPECT_FALSE(RunTest(alignment, 1, ptr_val, space));
}
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
00d67176-bf8e-4103-a78d-4dc32037dd36 | cpp | tensorflow/tensorflow | algorithm_selector | tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.cc | tensorflow/compiler/tf2tensorrt/convert/algorithm_selector_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include <utility>
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/core/util/env_var.h"
#include "third_party/tensorrt/NvInfer.h"
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
#define ALGORITHM_IO_INFO_BY_IDX(alg, idx) *(alg).getAlgorithmIOInfoByIndex(idx)
#else
#define ALGORITHM_IO_INFO_BY_IDX(alg, idx) (alg).getAlgorithmIOInfo(idx)
#endif
namespace nvinfer1 {
std::ostream& operator<<(std::ostream& os,
const nvinfer1::IAlgorithmContext& ctx) {
os << "AlgorithmContext(name=" << ctx.getName()
<< ",nbInputs=" << ctx.getNbInputs() << ",nbOutputs=" << ctx.getNbOutputs()
<< ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const nvinfer1::IAlgorithm& alg) {
const nvinfer1::IAlgorithmVariant& variant = alg.getAlgorithmVariant();
os << "Algorithm(" << "variant.implementation=" << variant.getImplementation()
<< ",variant.tactic=" << variant.getTactic()
<< ",timingMSec=" << alg.getTimingMSec()
<< ",workspaceSize=" << alg.getWorkspaceSize() << ")";
return os;
}
std::ostream& operator<<(std::ostream& os,
const nvinfer1::IAlgorithmIOInfo& info) {
os << "IOTensor(format=" << info.getTensorFormat()
<< ",dtype=" << info.getDataType() << ",strides=" << info.getStrides()
<< ")";
return os;
}
}
namespace tensorflow {
namespace tensorrt {
namespace convert {
bool operator>=(const AlgorithmSelectorImpl::TRTVersion& lhs,
const AlgorithmSelectorImpl::TRTVersion& rhs) {
if (lhs[0] > rhs[0]) return true;
if (lhs[0] == rhs[0] && lhs[1] > rhs[1]) return true;
if (lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] > rhs[2]) return true;
if (lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] == rhs[2] &&
lhs[3] >= rhs[3]) {
return true;
}
return false;
}
bool AlgorithmSelectorImpl::IsTrtVersionGE(const TRTVersion& version) const {
return version_ >= version;
}
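// The constants below appear to be TensorRT's internal implementation IDs
// for the shuffle layer; they shifted between releases, hence the version
// checks.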
bool AlgorithmSelectorImpl::IsShuffleLayer(ImplementationID id) const {
if (IsTrtVersionGE({8, 2, 0, 0})) {
return id == 0x80000000 + 13;
}
if (IsTrtVersionGE({8, 0, 0, 0})) {
return id == 0x80000000 + 14;
}
if (IsTrtVersionGE({7, 2, 0, 0})) {
return id == 0x80000000 + 16;
}
return id == 18;
}
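// Tactic IDs disallowed when running TRT 7.2 on Turing GPUs (consumed by
// IsBannedTactic below).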
std::set<AlgorithmSelectorImpl::TacticID>
AlgorithmSelectorImpl::GetBannedTRT72TuringTactics() {
static const std::set<TacticID> banned_turing_72{
-5927686925093575778,
-3848538574386518527,
-959009792490796596};
return banned_turing_72;
}
bool AlgorithmSelectorImpl::IsBannedTactic(TacticID id) const {
if (IsTrtVersionGE({7, 2, 0, 0}) && !IsTrtVersionGE({8, 0, 0, 0})) {
auto banned_turing_72 = GetBannedTRT72TuringTactics();
return banned_turing_72.find(id) != banned_turing_72.end();
}
return false;
}
bool AlgorithmSelectorImpl::AllowShuffleAlgorithm(
TacticID tactic, nvinfer1::DataType input_dtype,
nvinfer1::TensorFormat input_format) const {
if (IsTrtVersionGE({8, 0, 0, 0}) && !IsTrtVersionGE({8, 0, 3, 0})) {
return !(input_format == nvinfer1::TensorFormat::kLINEAR &&
input_dtype == nvinfer1::DataType::kINT8);
}
if (IsTrtVersionGE({7, 2, 0, 0}) && !IsTrtVersionGE({8, 0, 0, 0})) {
return !(input_format == nvinfer1::TensorFormat::kCHW32 &&
input_dtype == nvinfer1::DataType::kFLOAT);
}
return true;
}
bool AlgorithmSelectorImpl::IsAlgorithmSelectorRequired() const {
if (IsTrtVersionGE({7, 2, 0, 0}) && !IsTrtVersionGE({8, 0, 0, 0})) {
return true;
}
if (IsTrtVersionGE({8, 0, 0, 0}) && !IsTrtVersionGE({8, 0, 3, 0})) {
return true;
}
return false;
}
namespace {
string FormatAlgorithmList(const nvinfer1::IAlgorithmContext& ctx,
absl::Span<const nvinfer1::IAlgorithm* const> algs) {
return absl::StrFormat(
"%s:\n\t%s", absl::FormatStreamed(ctx),
absl::StrJoin(
algs, "\n\t",
[&ctx](std::string* out, const nvinfer1::IAlgorithm* const alg) {
absl::StrAppendFormat(out, "%s", absl::FormatStreamed(*alg));
for (int i = 0; i < ctx.getNbInputs() + ctx.getNbOutputs(); i++) {
absl::StrAppendFormat(
out, "\n\t\t%s",
absl::FormatStreamed(ALGORITHM_IO_INFO_BY_IDX(*alg, i)));
}
}));
}
}
TftrtAlgorithmSelector::TftrtAlgorithmSelector()
: fixed_algorithm_idx_(GetFixedAlgorithmID()),
selector_(AlgorithmSelectorImpl::CompileTimeTRTVersion()) {}
std::optional<int64_t> TftrtAlgorithmSelector::GetFixedAlgorithmID() {
int64_t trt_algorithm_idx = 0;
constexpr auto null_idx =
std::numeric_limits<decltype(trt_algorithm_idx)>::min();
Status status = tensorflow::ReadInt64FromEnvVar("TF_TRT_FIXED_ALGORITHM_ID",
null_idx,
&trt_algorithm_idx);
if (!status.ok()) {
LOG(ERROR) << status;
return std::nullopt;
}
if (trt_algorithm_idx != null_idx) {
return std::max(static_cast<int32_t>(trt_algorithm_idx), 0);
}
return std::nullopt;
}
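// Policy applied per candidate algorithm: reject banned tactics outright,
// and for shuffle layers defer to AllowShuffleAlgorithm based on the first
// IO tensor's data type and format.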
bool TftrtAlgorithmSelector::AlgorithmPolicy(
const nvinfer1::IAlgorithmContext& context,
const nvinfer1::IAlgorithm& alg) const {
const nvinfer1::IAlgorithmVariant& variant = alg.getAlgorithmVariant();
TacticID tactic_id = variant.getTactic();
if (selector_.IsBannedTactic(tactic_id)) {
return false;
}
if (selector_.IsShuffleLayer(variant.getImplementation())) {
return selector_.AllowShuffleAlgorithm(
tactic_id, alg.getAlgorithmIOInfo(0).getDataType(),
alg.getAlgorithmIOInfo(0).getTensorFormat());
}
return true;
}
int32_t TftrtAlgorithmSelector::selectAlgorithms(
const nvinfer1::IAlgorithmContext& algoContext,
const nvinfer1::IAlgorithm* const* algoChoices, int32_t nbChoices,
int32_t* selection) noexcept {
if (fixed_algorithm_idx_) {
LOG(WARNING) << "Forcing TRT algorithm selection to: ID = "
<< *fixed_algorithm_idx_;
selection[0] = std::min(*fixed_algorithm_idx_, nbChoices - 1);
return 1;
}
int num_selections = 0;
VLOG(1) << "Algorithm selection choices: "
<< FormatAlgorithmList(algoContext,
absl::MakeSpan(algoChoices, nbChoices));
for (int i = 0; i < nbChoices; i++) {
const nvinfer1::IAlgorithm& alg = *algoChoices[i];
if (!AlgorithmPolicy(algoContext, alg)) {
LOG(WARNING) << absl::StrFormat("Rejecting Algorithm: %s ",
absl::FormatStreamed(alg));
continue;
}
selection[num_selections++] = i;
}
return num_selections;
}
void TftrtAlgorithmSelector::reportAlgorithms(
const nvinfer1::IAlgorithmContext* const* algoContexts,
const nvinfer1::IAlgorithm* const* algoChoices,
int32_t nbAlgorithms) noexcept {
if (VLOG_IS_ON(1)) {
string selection_msg = "Algorithms selected:\n";
for (int i = 0; i < nbAlgorithms; i++) {
absl::StrAppend(&selection_msg,
FormatAlgorithmList(*algoContexts[i],
absl::MakeSpan(algoChoices + i, 1)));
}
VLOG(1) << selection_msg;
}
}
std::unique_ptr<TftrtAlgorithmSelector> MaybeCreateAlgorithmSelector() {
auto selector = std::make_unique<TftrtAlgorithmSelector>();
if (selector->IsRequired()) {
return selector;
}
return nullptr;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include <memory>
#include <gtest/gtest.h>
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
TEST(TestAlgorithmSelector, TensorRT7_1) {
AlgorithmSelectorImpl sel71({7, 1, 3, 4});
ASSERT_FALSE(sel71.IsAlgorithmSelectorRequired());
}
TEST(TestAlgorithmSelector, TensorRT7_2) {
AlgorithmSelectorImpl sel72({7, 2, 0, 0});
ASSERT_TRUE(sel72.IsAlgorithmSelectorRequired());
auto turing_tactics = AlgorithmSelectorImpl::GetBannedTRT72TuringTactics();
for (auto id : turing_tactics) {
EXPECT_TRUE(sel72.IsBannedTactic(id));
}
EXPECT_FALSE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kFLOAT,
nvinfer1::TensorFormat::kCHW32));
EXPECT_TRUE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kHALF,
nvinfer1::TensorFormat::kCHW32));
EXPECT_TRUE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT32,
nvinfer1::TensorFormat::kCHW32));
EXPECT_TRUE(sel72.AllowShuffleAlgorithm(0, nvinfer1::DataType::kFLOAT,
nvinfer1::TensorFormat::kCHW16));
}
TEST(TestAlgorithmSelector, TensorRT8_0) {
AlgorithmSelectorImpl sel80({8, 0, 1, 6});
ASSERT_TRUE(sel80.IsAlgorithmSelectorRequired());
auto turing_tactics = AlgorithmSelectorImpl::GetBannedTRT72TuringTactics();
for (auto id : turing_tactics) {
EXPECT_FALSE(sel80.IsBannedTactic(id));
}
EXPECT_FALSE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT8,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kHALF,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT32,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kFLOAT,
nvinfer1::TensorFormat::kLINEAR));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT8,
nvinfer1::TensorFormat::kCHW16));
EXPECT_TRUE(sel80.AllowShuffleAlgorithm(0, nvinfer1::DataType::kINT8,
nvinfer1::TensorFormat::kCHW32));
}
TEST(TestAlgorithmSelector, TensorRT8_2) {
AlgorithmSelectorImpl sel({8, 2, 0, 0});
ASSERT_FALSE(sel.IsAlgorithmSelectorRequired());
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/algorithm_selector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f72466f2-69f4-45c4-9c91-d85a452eeac0 | cpp | tensorflow/tensorflow | logger_registry | tensorflow/compiler/tf2tensorrt/convert/logger_registry.cc | tensorflow/compiler/tf2tensorrt/convert/logger_registry_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include <unordered_map>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace tensorrt {
class LoggerRegistryImpl : public LoggerRegistry {
Status Register(const string& name, nvinfer1::ILogger* logger) override {
mutex_lock lock(mu_);
if (!registry_.emplace(name, std::unique_ptr<nvinfer1::ILogger>(logger))
.second) {
return errors::AlreadyExists("Logger ", name, " already registered");
}
return OkStatus();
}
nvinfer1::ILogger* LookUp(const string& name) override {
mutex_lock lock(mu_);
const auto found = registry_.find(name);
if (found == registry_.end()) {
return nullptr;
}
return found->second.get();
}
private:
mutable mutex mu_;
mutable std::unordered_map<string, std::unique_ptr<nvinfer1::ILogger>>
registry_ TF_GUARDED_BY(mu_);
};
LoggerRegistry* GetLoggerRegistry() {
static LoggerRegistryImpl* registry = new LoggerRegistryImpl;
return registry;
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
using ::testing::Eq;
namespace tensorflow {
namespace tensorrt {
namespace {
class TestLogger : public nvinfer1::ILogger {
void log(nvinfer1::ILogger::Severity severity, const char* msg) override {}
};
TestLogger test_logger;
REGISTER_TENSORRT_LOGGER("test_logger", &test_logger);
TEST(LoggerRegistryTest, RegistersCorrectly) {
auto registered_logger = GetLoggerRegistry()->LookUp("test_logger");
EXPECT_THAT(registered_logger, Eq(&test_logger));
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/logger_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/logger_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
50716b0d-9733-485e-b753-395edeb19808 | cpp | tensorflow/tensorflow | convert_nodes | tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc | tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc | #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <bitset>
#include <cmath>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/slice_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/timing_cache.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_experimental_features.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/strided_slice_op.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
#include "third_party/tensorrt/NvInferPlugin.h"
#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
#define TFTRT_CHECK_INPUT_SIZE(size, exp_size, node_def) \
if ((size) != (exp_size)) { \
TFTRT_ERROR(errors::InvalidArgument, node_def.op(), " got ", (size), \
" inputs but expected ", (exp_size)); \
}
#define MAX_KERNEL_DIMS_PRODUCT(x) (int64_t(std::pow(100000.0F, (x) * 0.5F)))
namespace tensorflow {
namespace tensorrt {
namespace convert {
using absl::StrAppend;
using absl::StrCat;
namespace {
#define ADD_LAYER(layer_name) \
case nvinfer1::LayerType::k##layer_name: \
return #layer_name;
const char* LayerTypeToString(nvinfer1::LayerType layer_type) {
switch (layer_type) {
ADD_LAYER(CONVOLUTION)
ADD_LAYER(FULLY_CONNECTED)
ADD_LAYER(ACTIVATION)
ADD_LAYER(POOLING)
ADD_LAYER(LRN)
ADD_LAYER(SCALE)
ADD_LAYER(SOFTMAX)
ADD_LAYER(DECONVOLUTION)
ADD_LAYER(CONCATENATION)
ADD_LAYER(ELEMENTWISE)
ADD_LAYER(PLUGIN)
ADD_LAYER(UNARY)
ADD_LAYER(PADDING)
ADD_LAYER(SHUFFLE)
ADD_LAYER(REDUCE)
ADD_LAYER(TOPK)
ADD_LAYER(GATHER)
#if IS_TRT_VERSION_GE(8, 5, 0, 0)
ADD_LAYER(GRID_SAMPLE)
#endif
ADD_LAYER(MATRIX_MULTIPLY)
ADD_LAYER(RAGGED_SOFTMAX)
ADD_LAYER(CONSTANT)
ADD_LAYER(RNN_V2)
ADD_LAYER(IDENTITY)
ADD_LAYER(PLUGIN_V2)
ADD_LAYER(SLICE)
ADD_LAYER(SHAPE)
ADD_LAYER(PARAMETRIC_RELU)
ADD_LAYER(RESIZE)
ADD_LAYER(TRIP_LIMIT)
ADD_LAYER(RECURRENCE)
ADD_LAYER(ITERATOR)
ADD_LAYER(LOOP_OUTPUT)
ADD_LAYER(SELECT)
ADD_LAYER(FILL)
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
ADD_LAYER(QUANTIZE)
ADD_LAYER(DEQUANTIZE)
#endif
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_LAYER(CONDITION)
ADD_LAYER(CONDITIONAL_INPUT)
ADD_LAYER(CONDITIONAL_OUTPUT)
ADD_LAYER(SCATTER)
ADD_LAYER(EINSUM)
ADD_LAYER(ASSERTION)
#endif
#if IS_TRT_VERSION_GE(8, 5, 0, 0)
ADD_LAYER(ONE_HOT)
ADD_LAYER(NON_ZERO)
ADD_LAYER(NMS)
#endif
#if IS_TRT_VERSION_GE(8, 6, 0, 0)
ADD_LAYER(REVERSE_SEQUENCE)
#endif
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
ADD_LAYER(RNN)
#endif
default:
return "UNKNOWN_LAYER";
}
}
#undef ADD_LAYER
void SetLayerNameHelper(nvinfer1::ILayer* layer, absl::string_view engine_name,
absl::string_view tf_name) {
const char* trt_name = LayerTypeToString(layer->getType());
layer->setName(
absl::StrCat(engine_name, "/", tf_name, ":", trt_name).c_str());
}
std::string GetLayerNameSuffix(absl::string_view sub_op_name,
std::optional<int> sub_op_instance) {
std::string op_suffix(sub_op_name);
if (sub_op_instance.has_value()) {
op_suffix =
absl::StrCat(op_suffix, "_", std::to_string(sub_op_instance.value()));
}
return op_suffix;
}
}
bool IsEngineInput(absl::string_view name) {
return absl::StartsWith(name, IONamePrefixes::kInputPHName);
}
bool IsEngineOutput(absl::string_view name) {
return absl::StartsWith(name, IONamePrefixes::kOutputPHName);
}
void GetOutputProperties(const grappler::GraphProperties& graph_properties,
const Node* node, const int out_port,
PartialTensorShape* shape, DataType* dtype) {
if (graph_properties.HasOutputProperties(node->name())) {
auto output_params = graph_properties.GetOutputProperties(node->name());
auto out_shape = output_params.at(out_port);
*dtype = out_shape.dtype();
*shape = out_shape.shape();
} else {
LOG(INFO) << "Unknown output shape at node: " << node->name();
*dtype = node->output_type(out_port);
}
}
void GetInputProperties(const grappler::GraphProperties& graph_properties,
const Node* node, const int in_port,
PartialTensorShape* shape, DataType* dtype) {
if (graph_properties.HasInputProperties(node->name())) {
auto input_params = graph_properties.GetInputProperties(node->name());
auto in_shape = input_params.at(in_port);
*dtype = in_shape.dtype();
*shape = in_shape.shape();
} else {
*dtype = node->input_type(in_port);
}
}
Status ValidateTensorProperties(const string& producer_node_type,
const DataType dtype,
const PartialTensorShape& shape,
const bool use_implicit_batch,
bool validation_only,
nvinfer1::DataType* trt_dtype,
nvinfer1::Dims* trt_dims, int* batch_size) {
TF_RETURN_IF_ERROR(TfTypeToTrtType(dtype, trt_dtype));
if (shape.dims() < 0) {
return errors::InvalidArgument("Input tensor rank is unknown.");
}
const int max_rank = nvinfer1::Dims::MAX_DIMS + (use_implicit_batch ? 1 : 0);
if (shape.dims() > max_rank) {
return errors::OutOfRange("Input tensor rank is greater than ", max_rank);
}
if (use_implicit_batch && (producer_node_type != "Const") &&
(shape.dims() < 1)) {
return errors::InvalidArgument(
"Scalar input tensor is not supported since the first dimension "
"is treated as batch dimension by TRT");
}
StatusOr<DimsAdapter> dims = DimsAdapter::Create(shape, use_implicit_batch);
TRT_ENSURE_OK(dims);
*trt_dims = dims->AsTrtDims();
if (use_implicit_batch) {
*batch_size = shape.dim_size(0);
}
const int first_trt_dim = use_implicit_batch ? 1 : 0;
for (int d = first_trt_dim; d < shape.dims(); ++d) {
if (shape.dim_size(d) == 0) {
return errors::Unimplemented(
"Input tensor with shape ", shape.DebugString(),
" is an empty tensor, which is not supported by TRT");
}
}
if (validation_only) return OkStatus();
if (use_implicit_batch) {
for (int d = first_trt_dim; d < shape.dims(); ++d) {
if (shape.dim_size(d) < 0) {
return errors::InvalidArgument(
"Input tensor with shape ", shape.DebugString(),
" has an unknown non-batch dimension at dim ", d);
}
}
}
return OkStatus();
}
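// Computes numpy-style broadcast shapes for the two operands: dims are
// right-aligned and padded with 1s. In implicit batch mode the batch
// dimension takes part in the rank computation but is excluded from the
// returned dims.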
Status GetTrtBroadcastShape(const TRT_TensorOrWeights& operand_l,
const TRT_TensorOrWeights& operand_r,
const bool check_feasibility,
const bool use_implicit_batch,
nvinfer1::Dims* operand_l_new_dims,
nvinfer1::Dims* operand_r_new_dims) {
if (!operand_l.is_tensor() && !operand_r.is_tensor()) {
return errors::InvalidArgument(
"Broadcasting requires at least one of the operands be tensors");
}
constexpr int max_nb_dims = nvinfer1::Dims::MAX_DIMS + 1;
auto compute_output_dims =
[use_implicit_batch](const TRT_TensorOrWeights& input,
int broadcast_num_dims,
std::array<int32_t, max_nb_dims>* output_dims_array,
nvinfer1::Dims* output_dims) -> Status {
const nvinfer1::Dims input_dims = input.GetTrtDims();
absl::c_fill(*output_dims_array, 1);
absl::c_copy(
DimsAdapter(input_dims),
output_dims_array->begin() + broadcast_num_dims - input_dims.nbDims);
if (use_implicit_batch && input.is_tensor()) {
const int true_input_dims = input_dims.nbDims + 1;
if (true_input_dims < broadcast_num_dims) {
return errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported ",
"(tensor #dims ", true_input_dims, " vs broadcast #dims ",
broadcast_num_dims, ")");
}
(*output_dims_array)[0] = -1;
}
auto offt = use_implicit_batch ? 1 : 0;
output_dims->nbDims = broadcast_num_dims - offt;
absl::c_copy(absl::MakeSpan(*output_dims_array)
                 .subspan(offt, broadcast_num_dims - offt),
             output_dims->d);
return OkStatus();
};
const int broadcast_num_dims =
std::max(operand_l.GetTrtDims().nbDims +
(use_implicit_batch && operand_l.is_tensor()),
operand_r.GetTrtDims().nbDims +
(use_implicit_batch && operand_r.is_tensor()));
std::array<int32_t, max_nb_dims> output_l, output_r;
TF_RETURN_IF_ERROR(compute_output_dims(operand_l, broadcast_num_dims,
&output_l, operand_l_new_dims));
TF_RETURN_IF_ERROR(compute_output_dims(operand_r, broadcast_num_dims,
&output_r, operand_r_new_dims));
if (check_feasibility) {
for (int i = 0; i < broadcast_num_dims; ++i) {
if (!use_implicit_batch && (output_l[i] == -1 || output_r[i] == -1)) {
continue;
}
if ((output_l[i] != output_r[i]) && (output_l[i] != 1) &&
(output_r[i] != 1)) {
return errors::InvalidArgument("Infeasible broadcast scheme (",
"batch_dim: ", output_l[0], ", ",
DebugString(*operand_l_new_dims), " vs ",
"batch_dim: ", output_r[0], ", ",
DebugString(*operand_r_new_dims), ")");
}
}
}
return OkStatus();
}
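// Broadcasts a dynamically shaped tensor by prepending leading 1s through
// DynamicReshape when its rank is below broadcasted_nbDims.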
Status DynamicBroadcast(ITensorProxyPtr operand,
const OpConverterParams* params,
ITensorProxyPtr* output, int broadcasted_nbDims,
std::optional<int> op_instance) {
int operand_nbDims = operand->getDimensions().nbDims;
if (broadcasted_nbDims > operand_nbDims) {
if (params->validation_only) return OkStatus();
int n_extra_dims = broadcasted_nbDims - operand_nbDims;
VLOG(2) << "Dynamic broadcast adding " << n_extra_dims << " leading 1s";
TF_RETURN_IF_ERROR(params->converter->DynamicReshape(
    /*input=*/operand,
    /*slices=*/{std::make_pair(0, operand_nbDims)},
    /*params=*/params,
    /*output=*/output,
    /*size_for_added_dims=*/{n_extra_dims},
    /*op_instance=*/op_instance));
} else {
*output = operand;
}
return OkStatus();
}
Status BroadcastWeights(std::unique_ptr<TRT_TensorOrWeights>& p,
const DimsAdapter& broadcasted_dims) {
if (!p->is_weights()) return errors::Internal("Weight input expected");
if (p->GetTrtDims().nbDims != broadcasted_dims.NumDims()) {
TRT_ShapedWeights weights(p->weights());
TF_RETURN_IF_ERROR(weights.SetShape(broadcasted_dims));
p = std::make_unique<TRT_TensorOrWeights>(weights);
}
return OkStatus();
}
Status ApplyBroadcast(std::unique_ptr<TRT_TensorOrWeights>& operand,
const DimsAdapter& broadcasted_dims,
const OpConverterParams* params,
std::optional<int> op_instance) {
if (operand->is_weights()) {
TF_RETURN_IF_ERROR(BroadcastWeights(operand, broadcasted_dims));
} else {
ITensorProxyPtr tensor = nullptr;
auto is_static_shuffle_compatible = [](const auto& dims) {
return absl::c_count(dims, -1) <= 1;
};
if (is_static_shuffle_compatible(broadcasted_dims)) {
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, *operand, broadcasted_dims,
params->validation_only, &tensor, params->node_def));
} else {
TF_RETURN_IF_ERROR(DynamicBroadcast(
operand->tensor(),
params,
&tensor,
broadcasted_dims.NumDims(),
op_instance));
}
operand = std::make_unique<TRT_TensorOrWeights>(tensor);
}
return OkStatus();
}
Status BroadcastTensors(std::unique_ptr<TRT_TensorOrWeights>& operand_l,
std::unique_ptr<TRT_TensorOrWeights>& operand_r,
bool check_feasibility,
const OpConverterParams* params) {
nvinfer1::Dims broadcasted_dims_l, broadcasted_dims_r;
TF_RETURN_IF_ERROR(GetTrtBroadcastShape(
*operand_l, *operand_r, check_feasibility, params->use_implicit_batch,
&broadcasted_dims_l, &broadcasted_dims_r));
if (params->validation_only) return OkStatus();
TF_RETURN_IF_ERROR(ApplyBroadcast(
    /*operand=*/operand_l,
    /*broadcasted_dims=*/broadcasted_dims_l,
    /*params=*/params,
    /*op_instance=*/0));
TF_RETURN_IF_ERROR(ApplyBroadcast(
    /*operand=*/operand_r,
    /*broadcasted_dims=*/broadcasted_dims_r,
    /*params=*/params,
    /*op_instance=*/1));
return OkStatus();
}
ITensorProxyPtr Converter::CreateConstantLayer(const TRT_ShapedWeights& weights,
const nvinfer1::Dims& dims) {
nvinfer1::Weights trt_weights = weights.GetTrtWeights();
nvinfer1::IConstantLayer* layer = network()->addConstant(dims, trt_weights);
if (!layer) return nullptr;
SetLayerName(layer, "_tftrt_constant_",
std::to_string(next_constant_layer_id_));
next_constant_layer_id_++;
ITensorProxyPtr trt_tensor = layer->getOutput(0);
return trt_tensor;
}
template <typename T>
Status CreateScalarConstant(
const OpConverterParams* params, T value, ITensorProxyPtr* tensor,
nvinfer1::DataType trt_type = nvinfer1::DataType::kINT32,
const nvinfer1::Dims& dims = {1, {1}}) {
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(trt_type, dims);
TRT_ENSURE_OK(weights);
TF_RETURN_IF_ERROR(weights->SetValues(value));
*tensor = params->converter->CreateConstantLayer(*weights, dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(*tensor, params->node_def.name());
return OkStatus();
}
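// Creates a scalar constant whose dims are all 1 at the rank of `dims`, so
// TRT's elementwise broadcasting can expand it against `dims`.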
Status CreateBroadcastableScalarConstant(const OpConverterParams* params,
float value,
const nvinfer1::Dims& dims,
ITensorProxyPtr* tensor,
const char* dtype_attr_name = "T") {
nvinfer1::DataType trt_type = nvinfer1::DataType::kFLOAT;
AttrSlice attrs(params->node_def);
if (attrs.FindByString(dtype_attr_name) != nullptr) {
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, dtype_attr_name, &dtype));
TF_RETURN_IF_ERROR(TfTypeToTrtType(dtype, &trt_type));
}
nvinfer1::Dims broadcastable_dims(dims);
for (int i = 0; i < broadcastable_dims.nbDims; i++) {
broadcastable_dims.d[i] = 1;
}
return CreateScalarConstant(params, value, tensor, trt_type,
broadcastable_dims);
}
StatusOr<ITensorProxyPtr> ConcatenateTensors(
const OpConverterParams* params,
const std::vector<ITensorProxyPtr> input_tensors,
std::optional<int> op_instance = std::nullopt) {
std::vector<nvinfer1::ITensor*> trt_input_tensors;
for (const auto& t : input_tensors) {
trt_input_tensors.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_input_tensors.data()),
input_tensors.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, params->node_def.op());
params->converter->SetLayerName(layer, params->node_def.name(),
"concat_shapes", op_instance);
layer->setAxis(0);
return ITensorProxyPtr(layer->getOutput(0));
}
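// Converts a TF axis (possibly negative, and counting the batch dim in
// implicit batch mode) into the corresponding TRT axis.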
Status ConvertAxis(int tf_axis, int trt_nb_dims, absl::string_view node_name,
bool use_implicit_batch, int* trt_axis) {
const int tf_nb_dims = trt_nb_dims + (use_implicit_batch ? 1 : 0);
if (tf_axis < -tf_nb_dims || tf_axis >= tf_nb_dims) {
return errors::InvalidArgument(
"Axis value of ", tf_axis, " is out of bounds, must be in range [",
-tf_nb_dims, ", ", tf_nb_dims, "), at ", node_name);
}
if (tf_axis < 0) tf_axis += tf_nb_dims;
if (use_implicit_batch && tf_axis == 0) {
return errors::Unimplemented(
"TensorRT does not allow manipulation of the batch dimension");
}
*trt_axis = use_implicit_batch ? tf_axis - 1 : tf_axis;
return OkStatus();
}
bool AllLengthsEqual(const std::vector<std::vector<int>>& inputs) {
if (inputs.size() == 0) return true;
int length = inputs.at(0).size();
for (int i = 1; i < inputs.size(); i++) {
if (inputs.at(i).size() != length) return false;
}
return true;
}
bool DimsHaveSameSize(const DimsAdapter& lhs, const DimsAdapter& rhs) {
return lhs.Volume() == rhs.Volume();
}
bool AreDimsStaticWithSameSize(const DimsAdapter& lhs, const DimsAdapter& rhs) {
if (!lhs.IsStatic() || !rhs.IsStatic()) return false;
return DimsHaveSameSize(lhs, rhs);
}
bool AreDimsStaticWithDifferentSize(const DimsAdapter& lhs,
const DimsAdapter& rhs) {
if (!lhs.IsStatic() || !rhs.IsStatic()) return false;
return !DimsHaveSameSize(lhs, rhs);
}
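// Computes TF "SAME" padding: for each spatial dimension, the (before,
// after) padding needed for the kernel to cover the whole input.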
static std::vector<std::pair<int, int>> CreateSamePadding(
const nvinfer1::Dims& stride, const nvinfer1::Dims& kernel,
const std::vector<int64_t>& input_dims) {
std::vector<std::pair<int, int>> padding(input_dims.size());
CHECK_EQ(stride.nbDims, input_dims.size());
for (size_t i = 0; i < input_dims.size(); ++i) {
int p = ((input_dims[i] - 1) / stride.d[i]) * stride.d[i] + kernel.d[i] -
input_dims[i];
p = (p > 0) ? p : 0;
int left = p / 2;
int right = p - left;
VLOG(2) << "PADDING_" << i << " pre: " << left << ", post: " << right
<< "paras: " << input_dims[i] << ", " << stride.d[i] << ", "
<< "kernel: " << kernel.d[i];
padding[i] = {left, right};
}
return padding;
}
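// Returns the common name-scope prefix of the two op names, up to and
// including the last shared '/'.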
string GetCommonNameScope(const string& op_name_a, const string& op_name_b) {
size_t last_scope_separator = 0;
const size_t min_size = std::min(op_name_a.size(), op_name_b.size());
for (size_t i = 0; i < min_size; ++i) {
if (op_name_a[i] != op_name_b[i]) break;
if (op_name_a[i] == '/') last_scope_separator = i + 1;
}
return op_name_a.substr(0, last_scope_separator);
}
Status VerifyShapesMatch(absl::Span<const TRT_TensorOrWeights> inputs,
int masked_dim, absl::string_view node_name) {
size_t num_inputs = inputs.size();
if (num_inputs <= 1) return OkStatus();
const nvinfer1::Dims dims_0 = inputs.at(0).GetTrtDims();
for (size_t i = 1; i < num_inputs; ++i) {
const nvinfer1::Dims dim_i = inputs.at(i).GetTrtDims();
if (dim_i.nbDims != dims_0.nbDims) {
return errors::InvalidArgument(
"Received inputs with inconsistent rank, at ", node_name);
}
for (size_t j = 0; j < dims_0.nbDims; ++j) {
if (dim_i.d[j] == -1 || dims_0.d[j] == -1) continue;
if (dim_i.d[j] != dims_0.d[j] && j != masked_dim) {
return errors::InvalidArgument(
"Received inputs with inconsistent shape, at ", node_name);
}
}
}
return OkStatus();
}
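// Strided-copy helpers for the weight layout conversions below: copy idata
// (read with istrides) into odata (written with ostrides) over `shape`.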
template <typename T>
void Reorder5(const nvinfer1::Dims& shape, const T* idata,
const nvinfer1::Dims& istrides, T* odata,
const nvinfer1::Dims& ostrides) {
for (int k = 0; k < shape.d[0]; ++k) {
for (int c = 0; c < shape.d[1]; ++c) {
for (int d = 0; d < shape.d[2]; ++d) {
for (int r = 0; r < shape.d[3]; ++r) {
for (int s = 0; s < shape.d[4]; ++s) {
odata[k * ostrides.d[0] + c * ostrides.d[1] + d * ostrides.d[2] +
r * ostrides.d[3] + s * ostrides.d[4]] =
idata[k * istrides.d[0] + c * istrides.d[1] +
d * istrides.d[2] + r * istrides.d[3] +
s * istrides.d[4]];
}
}
}
}
}
}
template <typename T>
void Reorder4(const nvinfer1::Dims4& shape, const T* idata,
const nvinfer1::Dims4& istrides, T* odata,
const nvinfer1::Dims4& ostrides) {
for (int n = 0; n < shape.d[0]; ++n) {
for (int c = 0; c < shape.d[1]; ++c) {
for (int h = 0; h < shape.d[2]; ++h) {
for (int w = 0; w < shape.d[3]; ++w) {
odata[n * ostrides.d[0] + c * ostrides.d[1] + h * ostrides.d[2] +
w * ostrides.d[3]] =
idata[n * istrides.d[0] + c * istrides.d[1] + h * istrides.d[2] +
w * istrides.d[3]];
}
}
}
}
}
template <typename T>
void Reorder2(const nvinfer1::DimsHW& shape, const T* idata,
const nvinfer1::DimsHW& istrides, T* odata,
const nvinfer1::DimsHW& ostrides) {
for (int h = 0; h < shape.h(); ++h) {
for (int w = 0; w < shape.w(); ++w) {
odata[h * ostrides.h() + w * ostrides.w()] =
idata[h * istrides.h() + w * istrides.w()];
}
}
}
void ReorderCKtoKC(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights) {
const int c = iweights.Shape().dim(0);
const int k = iweights.Shape().dim(1);
oweights->Shape().dim(0) = k;
oweights->Shape().dim(1) = c;
const nvinfer1::DimsHW istrides = {1, k};
const nvinfer1::DimsHW ostrides = {c, 1};
switch (iweights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
Reorder2({k, c}, iweights.GetPointer<float>(), istrides,
oweights->GetPointer<float>(), ostrides);
break;
}
case nvinfer1::DataType::kHALF: {
Reorder2({k, c}, iweights.GetPointer<Eigen::half>(), istrides,
oweights->GetPointer<Eigen::half>(), ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type in reorder expected fp32 or fp16 but got "
<< DebugString(iweights.TrtDType());
}
}
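// Reorders 2D convolution weights from TF's RSCK (filter height, filter
// width, input channels, output channels) layout to TRT's KCRS layout,
// accounting for grouped convolutions.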
void ReorderRSCKToKCRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, const int num_groups) {
CHECK(iweights.TrtDType() == oweights->TrtDType());
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
const int r = iweights.Shape().dim(0);
const int s = iweights.Shape().dim(1);
const int c = iweights.Shape().dim(2) / num_groups;
const int k = iweights.Shape().dim(3) * num_groups;
VLOG(2) << "num_groups: " << num_groups << "c" << iweights.Shape().dim(2)
<< " then " << c << "k" << iweights.Shape().dim(3) << " then " << k
<< "r" << iweights.Shape().dim(0) << " then " << r << "s"
<< iweights.Shape().dim(1) << " then " << s;
oweights->Shape().dim(0) = k / num_groups;
oweights->Shape().dim(1) = c * num_groups;
oweights->Shape().dim(2) = r;
oweights->Shape().dim(3) = s;
const nvinfer1::Dims4 istrides = {1, k, s * k * c, c * k};
const nvinfer1::Dims4 ostrides = {c * r * s, r * s, s, 1};
switch (iweights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
Reorder4({k, c, r, s}, iweights.GetPointer<float>(), istrides,
oweights->GetPointer<float>(), ostrides);
break;
}
case nvinfer1::DataType::kHALF: {
Reorder4({k, c, r, s}, iweights.GetPointer<Eigen::half>(), istrides,
oweights->GetPointer<Eigen::half>(), ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type, expected fp32 or fp16 but got "
<< DebugString(iweights.TrtDType());
}
}
nvinfer1::Dims InitDimsN(std::initializer_list<int> list) {
nvinfer1::Dims dim;
dim.nbDims = list.size();
std::copy(list.begin(), list.end(), dim.d);
return dim;
}
void ReorderDRSCKToKCDRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, const int num_groups) {
DCHECK(iweights.TrtDType() == oweights->TrtDType());
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
const int d = iweights.Shape().dim(0);
const int r = iweights.Shape().dim(1);
const int s = iweights.Shape().dim(2);
const int c = iweights.Shape().dim(3) / num_groups;
const int k = iweights.Shape().dim(4) * num_groups;
VLOG(2) << "num_groups: " << num_groups << ", c: " << iweights.Shape().dim(3)
<< " becomes " << c << ", k: " << iweights.Shape().dim(4)
<< " becomes " << k << ", d: " << d << ", r: " << r << ", s: " << s;
oweights->Shape().dim(0) = iweights.Shape().dim(4);
oweights->Shape().dim(1) = iweights.Shape().dim(3);
oweights->Shape().dim(2) = d;
oweights->Shape().dim(3) = r;
oweights->Shape().dim(4) = s;
nvinfer1::Dims shape =
InitDimsN({k, c, d, r, s});
nvinfer1::Dims ostrides =
InitDimsN({c * d * r * s, d * r * s, r * s, s,
1});
nvinfer1::Dims istrides =
InitDimsN({1, k, r * s * c * k, s * c * k,
c * k});
switch (iweights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
Reorder5(shape, iweights.GetPointer<float>(), istrides,
oweights->GetPointer<float>(), ostrides);
break;
}
case nvinfer1::DataType::kHALF: {
Reorder5(shape, iweights.GetPointer<Eigen::half>(), istrides,
oweights->GetPointer<Eigen::half>(), ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type, expected fp32 or fp16 but got "
<< DebugString(iweights.TrtDType());
}
}
OpConverterParams::OpConverterParams(
const NodeDef& node_def, const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs, TrtWeightStore* weight_store,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision)
: node_def(node_def),
inputs(inputs),
outputs(outputs),
validation_only(true),
weight_store(weight_store),
precision_mode(precision_mode),
use_calibration(use_calibration),
use_implicit_batch(use_implicit_batch),
use_explicit_precision(use_explicit_precision) {}
OpConverterParams::OpConverterParams(
Converter* converter, const NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs, TrtWeightStore* weight_store)
: converter(converter),
node_def(node_def),
inputs(inputs),
outputs(outputs),
validation_only(false),
weight_store(weight_store),
precision_mode(converter->precision_mode()),
use_calibration(converter->use_calibration()),
use_implicit_batch(converter->use_implicit_batch()),
use_explicit_precision(converter->UseExplicitPrecision()) {}
TrtNodeValidator::TrtNodeValidator(
const grappler::GraphProperties& graph_properties,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision)
: graph_properties_(graph_properties),
precision_mode_(precision_mode),
use_calibration_(use_calibration),
use_implicit_batch_(use_implicit_batch),
use_explicit_precision_(use_explicit_precision) {}
StatusOr<OpConverter> TrtNodeValidator::GetValidator(const std::string& op) {
return GetOpConverterRegistry()->LookUp(op);
}
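// Converts output `output_port` of node_def into a TRT_TensorOrWeights:
// constants become weights, resource-producing ops get a placeholder
// ResourceHandle, and everything else becomes a tensor validated against
// graph_properties_.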
Status TrtNodeValidator::ConvertToTensorOrWeights(
const NodeDef& node_def, int output_port,
TRT_TensorOrWeights* tensor_or_weights) {
if (node_def.op() == "VarHandleOp" || node_def.op() == "Placeholder") {
AttrSlice attrs(node_def);
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dtype", &dtype));
if (dtype == DataType::DT_RESOURCE) {
ResourceHandle fake_resource;
*tensor_or_weights = TRT_TensorOrWeights(fake_resource);
return OkStatus();
}
}
if (node_def.op() == "Const" || node_def.op() == "VariableV2") {
if (output_port != 0) {
return errors::InvalidArgument(node_def.op(),
" node should only have one output.");
}
std::vector<TRT_TensorOrWeights> inputs;
return ConvertConstToWeights(node_def, inputs, tensor_or_weights);
}
if (node_def.op() == "ReadVariableOp") {
const std::vector<TRT_TensorOrWeights> inputs{
TRT_TensorOrWeights(ResourceHandle())};
return ConvertConstToWeights(node_def, inputs, tensor_or_weights);
}
if (!graph_properties_.HasOutputProperties(node_def.name())) {
return errors::InvalidArgument("Shape and data type are unknown");
}
const auto& output_params =
graph_properties_.GetOutputProperties(node_def.name());
const auto& tensor_properties = output_params.at(output_port);
const DataType dtype = tensor_properties.dtype();
const PartialTensorShape shape = tensor_properties.shape();
nvinfer1::DataType trt_dtype;
nvinfer1::Dims trt_dims;
int batch_size = -1;
TF_RETURN_IF_ERROR(ValidateTensorProperties(
    node_def.op(), dtype, shape, use_implicit_batch_,
    /*validation_only=*/true, &trt_dtype, &trt_dims, &batch_size));
*tensor_or_weights = TRT_TensorOrWeights(trt_dtype, trt_dims, batch_size);
return OkStatus();
}
Status TrtNodeValidator::IsTensorRTCandidate(const Node* node) {
const string& op = node->def().op();
bool is_supported_op = false;
if (absl::c_find(kQuantizationOpNames, op) != kQuantizationOpNames.end()) {
is_supported_op = (precision_mode_ == TrtPrecisionMode::INT8);
} else {
is_supported_op = GetValidator(op).ok();
}
if (!is_supported_op) {
return errors::Unimplemented("Op type ", op, " is not supported.");
}
std::vector<TRT_TensorOrWeights> inputs;
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(node->input_edges(&input_edges));
for (const Edge* edge : input_edges) {
Node* src_node = edge->src();
while (src_node->def().op() == "Identity") {
std::vector<const Edge*> input_edges_temp;
TF_RETURN_IF_ERROR(src_node->input_edges(&input_edges_temp));
src_node = input_edges_temp[0]->src();
}
const NodeDef& src_def = src_node->def();
TRT_TensorOrWeights tensor_or_weights;
Status status = ConvertToTensorOrWeights(src_def, edge->src_output(),
&tensor_or_weights);
if (!status.ok()) {
VLOG(2) << "Failed to convert input `" << src_def.name() << "` to a "
<< "TRT_TensorOrWeights: " << status.message();
return errors::Internal(
"Failed to convert at least one input to a TRT_TensorOrWeights: ",
status.message());
}
inputs.push_back(tensor_or_weights);
}
auto validator = GetValidator(op);
TF_RETURN_IF_ERROR(validator.status());
OpConverterParams params(node->def(), inputs, nullptr,
&weight_store_, precision_mode_, use_calibration_,
use_implicit_batch_, use_explicit_precision_);
return (*validator)(¶ms);
}
Status TrtNodeValidator::ConvertConstToWeights(
const NodeDef& const_node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
TRT_TensorOrWeights* output) {
std::vector<TRT_TensorOrWeights> outputs;
OpConverterParams params(const_node_def, inputs, &outputs, &weight_store_,
precision_mode_, use_calibration_,
use_implicit_batch_, use_explicit_precision_);
auto const_val = GetValidator(const_node_def.op());
TF_RETURN_IF_ERROR(const_val.status());
Status status = (*const_val)(¶ms);
if (status.ok() && (output != nullptr)) {
*output = outputs[0];
}
return status;
}
StatusOr<std::unique_ptr<Converter>> Converter::Create(
TrtPrecisionMode precision_mode, bool use_calibration,
nvinfer1::ILogger* trt_logger, const bool use_implicit_batch,
absl::string_view engine_name, bool use_explicit_precision,
OpKernelContext* ctx) {
std::unique_ptr<Converter> converter = absl::WrapUnique(new Converter(
precision_mode, use_calibration, trt_logger, use_implicit_batch,
engine_name, use_explicit_precision, ctx));
TF_RETURN_IF_ERROR(converter->Init(trt_logger));
return converter;
}
Converter::Converter(TrtPrecisionMode precision_mode, bool use_calibration,
nvinfer1::ILogger* trt_logger,
const bool use_implicit_batch,
absl::string_view engine_name, bool use_explicit_precision,
OpKernelContext* ctx)
: ctx_(ctx),
precision_mode_(precision_mode),
use_calibration_(use_calibration),
use_implicit_batch_(use_implicit_batch),
engine_name_(engine_name),
use_explicit_precision_(use_explicit_precision) {
MaybeInitializeTrtPlugins(trt_logger);
}
Status Converter::Init(nvinfer1::ILogger* trt_logger) {
VLOG(1) << "Creating TensorRT builder";
trt_builder_.reset(nvinfer1::createInferBuilder(*trt_logger));
VLOG(1) << "Creating TensorRT network";
uint32_t flags =
use_implicit_batch_
? 0U
: (1U << static_cast<int>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
if (use_explicit_precision_) {
flags |=
(1U << static_cast<int>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_PRECISION));
}
trt_network_.reset(trt_builder_->createNetworkV2(flags));
if (!trt_network_) {
return errors::Internal("Failed to create TensorRT network object");
}
return OkStatus();
}
Status Converter::ConvertNode(const NodeDef& node_def) {
std::vector<TRT_TensorOrWeights> inputs;
std::vector<TRT_TensorOrWeights> outputs;
TF_RETURN_IF_ERROR(this->GetInputs(node_def, &inputs));
OpConverterParams params(this, node_def, inputs, &outputs, &weight_store_);
const string& op = node_def.op();
auto op_converter = GetOpConverterRegistry()->LookUp(op);
TF_RETURN_IF_ERROR(op_converter.status());
TF_RETURN_IF_ERROR((*op_converter)(¶ms));
for (size_t i = 0; i < outputs.size(); ++i) {
TRT_TensorOrWeights& output = outputs[i];
string output_name = node_def.name();
if (i != 0) {
StrAppend(&output_name, ":", i);
}
if (output.is_tensor()) {
const char* tensor_name = output.tensor()->getName();
if (!IsEngineInput(tensor_name)) {
output.tensor()->setName(output_name.c_str());
}
}
VLOG(2) << "Adding out tensor " << output_name << ": "
<< output.DebugString();
Status status = AddTensorOrWeights(output_name, output);
if (!status.ok()) {
return errors::Create(static_cast<absl::StatusCode>(status.code()),
StrCat("Failed to add output for node: ",
node_def.name(), ": ", status.message()),
errors::GetPayloads(status));
}
}
return OkStatus();
}
Status Converter::AddInputTensor(const string& name, nvinfer1::DataType dtype,
const nvinfer1::Dims& dims, int batch_size) {
Status status;
if (use_implicit_batch_) {
status = MaybeUpdateBatchSize(batch_size);
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status, batch_size_error(name, status.message()));
}
}
ITensorProxyPtr tensor = network()->addInput(name.c_str(), dtype, dims);
if (*tensor == nullptr) {
return errors::InvalidArgument("Failed to create Input layer tensor ", name,
" rank=", dims.nbDims);
}
status = AddTensorOrWeights(name, TRT_TensorOrWeights(tensor));
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status,
StrCat("Failed to add input tensor ", name, ": ", status.message()));
}
return OkStatus();
}
Status Converter::AddInputResource(const string& name,
const ResourceHandle& resource) {
Status status = AddTensorOrWeights(name, TRT_TensorOrWeights(resource));
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status,
StrCat("Failed to add input resource ", name, ": ", status.message()));
}
return OkStatus();
}
Status Converter::RenameAndMarkOutputTensors(
const std::vector<Converter::EngineOutputInfo>& output_tensors) {
int output_index = 0;
for (const auto& output : output_tensors) {
TRT_TensorOrWeights tensor_or_weights;
TF_RETURN_IF_ERROR(
GetTensorOrWeights(output.source_tensor_name, &tensor_or_weights));
if (!tensor_or_weights.is_tensor()) {
return errors::InvalidArgument("Output ", output.source_tensor_name,
" is weights not tensor");
}
ITensorProxyPtr tensor = tensor_or_weights.tensor();
if (*tensor == nullptr) {
return errors::NotFound("Output tensor not found: ",
output.source_tensor_name);
}
if (IsEngineInput(tensor->getName()) || IsEngineOutput(tensor->getName())) {
nvinfer1::IShuffleLayer* layer =
network()->addShuffle(*tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(
layer, StrCat("Output Copy for ", tensor->getName()));
SetLayerName(layer, tensor->getName(), "shuffle", output_index);
tensor = layer->getOutput(0);
}
tensor->setName(output.dest_node_name.c_str());
network()->markOutput(*tensor->trt_tensor());
tensor->setType(output.trt_dtype);
output_index++;
VLOG(1) << "Marking output TRT tensor " << output.source_tensor_name
<< " with data type " << DebugString(output.trt_dtype)
<< ", which feeds TF node " << output.dest_node_name;
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Created TensorRT network with the following layers:";
for (int i = 0; i < network()->getNbLayers(); i++) {
auto layer = network()->getLayer(i);
VLOG(2) << " " << layer->getName() << " ("
<< "type: " << static_cast<int>(layer->getType())
<< ", precision: " << static_cast<int>(layer->getPrecision())
<< ")";
}
}
return OkStatus();
}
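// Debug hook: setting TF_TRT_ABORT_CUDA_ENGINE_BUILD aborts engine
// construction before any expensive build work starts.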
bool AbortCudaEngineBuild() {
bool value;
Status status = ReadBoolFromEnvVar("TF_TRT_ABORT_CUDA_ENGINE_BUILD",
false, &value);
if (!status.ok()) {
LOG(ERROR) << status;
}
return value;
}
Status Converter::BuildCudaEngine(
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine, int max_batch_size,
size_t max_workspace_size_bytes, nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator, TrtShapeOptimizationProfile* profiles) {
tensorflow::profiler::AnnotatedTraceMe activity(
[&]() {
return tensorflow::profiler::TraceMeOpOverride("TRTEngineOp",
"BuildEngine");
},
tensorflow::profiler::TraceMeLevel::kInfo);
if (AbortCudaEngineBuild()) {
return errors::Aborted(
"Engine creation aborted by TF_TRT_ABORT_CUDA_ENGINE_BUILD variable");
}
VLOG(1) << "Configuring TensorRT builder";
trt_builder_->setMaxBatchSize(max_batch_size);
trt_builder_->setGpuAllocator(allocator);
TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config(
trt_builder_->createBuilderConfig());
builder_config->setMaxWorkspaceSize(max_workspace_size_bytes);
std::unique_ptr<nvinfer1::IAlgorithmSelector> trt_algorithm_selector{nullptr};
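  // Prior to TRT 8, the algorithm selector is skipped when INT8 calibration
  // is in use; from TRT 8 onwards it can always be attached.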
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) {
if (!use_calibration_ || precision_mode_ != TrtPrecisionMode::INT8) {
trt_algorithm_selector = MaybeCreateAlgorithmSelector();
}
} else {
trt_algorithm_selector = MaybeCreateAlgorithmSelector();
}
if (trt_algorithm_selector != nullptr) {
builder_config->setAlgorithmSelector(trt_algorithm_selector.get());
}
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
enum class SparseComputeMode { DISABLED, ENABLED, SIMULATED };
static SparseComputeMode sparse_compute_mode = []() {
SparseComputeMode _sparse_compute_mode;
int64 _sparse_mode;
TF_CHECK_OK(tensorflow::ReadInt64FromEnvVar("TF_TRT_SPARSE_MODE",
                                                /*default_val=*/1,
                                                &_sparse_mode));
string sparse_log_msg = "[TF-TRT] Sparse compute capability: ";
if (_sparse_mode == 1) {
sparse_log_msg = StrCat(sparse_log_msg, "enabled.");
_sparse_compute_mode = SparseComputeMode::ENABLED;
} else if (_sparse_mode < 1) {
sparse_log_msg = StrCat(sparse_log_msg, "disabled.");
_sparse_compute_mode = SparseComputeMode::DISABLED;
} else {
sparse_log_msg = StrCat(
sparse_log_msg, "simulated.",
"It shall only be used for sparse computing benchmark and debug.");
_sparse_compute_mode = SparseComputeMode::SIMULATED;
}
LOG(INFO) << sparse_log_msg;
return _sparse_compute_mode;
}();
if (sparse_compute_mode == SparseComputeMode::ENABLED ||
sparse_compute_mode == SparseComputeMode::SIMULATED) {
builder_config->setFlag(nvinfer1::BuilderFlag::kSPARSE_WEIGHTS);
}
#endif
if (tensorflow::tensor_float_32_execution_enabled()) {
builder_config->setFlag(nvinfer1::BuilderFlag::kTF32);
} else {
builder_config->clearFlag(nvinfer1::BuilderFlag::kTF32);
}
if (precision_mode_ == TrtPrecisionMode::FP16) {
builder_config->setFlag(nvinfer1::BuilderFlag::kFP16);
} else if (precision_mode_ == TrtPrecisionMode::INT8) {
if (IS_TRT_VERSION_GE(8, 0, 0, 0) || !use_explicit_precision_) {
builder_config->setFlag(nvinfer1::BuilderFlag::kFP16);
} else {
LOG_WARNING_WITH_PREFIX << "With explicit precision mode, FP16 is not "
"allowed before TensorRT 8. TRT will consider "
"INT8 and FP32 tactics.";
}
builder_config->setFlag(nvinfer1::BuilderFlag::kINT8);
}
if (!use_implicit_batch_ && profiles) {
TF_RETURN_IF_ERROR(profiles->ConfigureBuilder(
trt_builder_.get(), builder_config.get(), network()));
}
if (precision_mode_ == TrtPrecisionMode::INT8) {
builder_config->setInt8Calibrator(use_calibration_ ? calibrator : nullptr);
}
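  // Reuse a process-wide timing cache across engine builds when no algorithm
  // selector is in use; otherwise disable TRT's timing cache so the selector
  // alone determines tactic choices.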
std::unique_ptr<TimingCacheRegistry::TimingCache> timing_cache = nullptr;
if (trt_algorithm_selector == nullptr) {
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
TimingCacheRegistry* registry = GetTimingCacheRegistry();
auto cache = registry->LookUp("default_cache", builder_config.get());
if (!cache.ok()) {
LOG(WARNING) << "failed to create a timing cache: "
<< cache.status().message();
} else {
timing_cache = std::move(*cache);
builder_config->setTimingCache(*timing_cache, false);
}
#endif
} else {
builder_config->setFlag(nvinfer1::BuilderFlag::kDISABLE_TIMING_CACHE);
}
string precision_mode_str;
TF_RETURN_IF_ERROR(
TrtPrecisionModeToName(precision_mode_, &precision_mode_str));
string trt_network_name = StrCat(
"TF:", TF_VERSION_STRING, ", ",
"TRT:", absl::StrJoin(GetLoadedTensorRTVersion(), "."), "-",
"Precision:", precision_mode_str, ", ", "Calibration:", use_calibration_,
", ", "Max-Batch-Size:", max_batch_size, ", ",
"Max-Workspace-Size:", max_workspace_size_bytes);
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
trt_network_name = StrCat(trt_network_name, ", Sparse Compute: ");
switch (sparse_compute_mode) {
case SparseComputeMode::SIMULATED:
trt_network_name = StrCat(trt_network_name, "Simulated");
break;
case SparseComputeMode::ENABLED:
trt_network_name = StrCat(trt_network_name, "Enabled");
break;
case SparseComputeMode::DISABLED:
trt_network_name = StrCat(trt_network_name, "Disabled");
break;
}
#endif
VLOG(1) << "Setting TensorRT network name to " << trt_network_name;
network()->setName(trt_network_name.c_str());
VLOG(1) << "Building TensorRT engine";
if (VLOG_IS_ON(2)) {
VLOG(2) << "Network inputs";
int n_inputs = network()->getNbInputs();
for (int i = 0; i < n_inputs; i++) {
const ITensorProxyPtr input = network()->getInput(i);
if (*input) {
VLOG(2) << " " << i << " " << input->getName();
} else {
VLOG(2) << "Could not find input " << i;
}
}
}
engine->reset(
trt_builder_->buildEngineWithConfig(*network(), *builder_config));
if (engine->get() == nullptr) {
return errors::Internal("Failed to build TensorRT engine");
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "TRT engine created";
int nbBindings = (*engine)->getNbBindings();
VLOG(2) << "Number of engine bindings: " << nbBindings;
for (int i = 0; i < nbBindings; i++) {
auto get_location_string = [&engine](int i) {
if ((*engine)->getLocation(i) == nvinfer1::TensorLocation::kDEVICE)
return " on device";
else
return " on host";
};
VLOG(2) << "Binding " << i << " name: " << (*engine)->getBindingName(i)
<< get_location_string(i);
}
}
if (timing_cache) {
GetTimingCacheRegistry()->Upsert("default_cache", timing_cache.get());
}
return OkStatus();
}
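// Records the (implicit) batch size on first use and rejects any subsequent
// input whose batch size disagrees with the recorded one.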
Status Converter::MaybeUpdateBatchSize(int batch_size) {
if (this->batch_size_ < 0 || batch_size < 0 ||
this->batch_size_ == batch_size) {
if (this->batch_size_ < 0 && batch_size >= 0) {
this->batch_size_ = batch_size;
}
return OkStatus();
}
return errors::InvalidArgument(
"Provided batch size does not match converter batch size: ", batch_size,
" vs ", batch_size_);
}
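// Registers `input` under `name`, failing if the name is already taken. In
// implicit batch mode, the converter's batch size is stamped onto tensors.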
Status Converter::AddTensorOrWeights(const string& name,
TRT_TensorOrWeights input) {
if (use_implicit_batch_ && input.is_tensor()) {
input.set_batch_size(batch_size_);
}
if (trt_tensors_.insert({name, std::move(input)}).second) return OkStatus();
return errors::AlreadyExists("tensor/weights ", name, " already exist.");
}
Status Converter::GetTensorOrWeights(const string& name,
TRT_TensorOrWeights* output) {
if (!trt_tensors_.count(name)) {
return errors::NotFound("Tensor or weights with name ", name,
" could not be found.");
}
*output = trt_tensors_.at(name);
return OkStatus();
}
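// Transposes `input_tensor` with an IShuffleLayer. `order_with_batch_dim`
// includes the batch dimension, which must stay in place in implicit batch
// mode. Each reshape dimension of 0 tells TRT to copy the corresponding
// dimension from the (permuted) input.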
Status Converter::TransposeTensor(ITensorProxyPtr input_tensor,
const std::vector<int>& order_with_batch_dim,
ITensorProxyPtr* output_tensor,
const NodeDef& node_def,
absl::string_view sub_op_name) {
const auto dims = input_tensor->getDimensions();
const int order_size = use_implicit_batch_ ? order_with_batch_dim.size() - 1
: order_with_batch_dim.size();
if (order_size != size_t(dims.nbDims)) {
return errors::InvalidArgument(
"Rank of perm for transpose does not match with that of the input.");
}
if (use_implicit_batch_ && order_with_batch_dim[0] != 0) {
return errors::Unimplemented(
"Transpose at batch dimension is not supported.");
}
nvinfer1::IShuffleLayer* layer =
this->network()->addShuffle(*input_tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, "TF-TRT Internal Transpose");
SetLayerName(layer, node_def, sub_op_name);
nvinfer1::Permutation permutation;
if (use_implicit_batch_) {
for (int32_t i = 0; i < dims.nbDims; ++i) {
permutation.order[i] = order_with_batch_dim[i + 1] - 1;
}
} else {
std::copy(order_with_batch_dim.begin(), order_with_batch_dim.end(),
permutation.order);
}
VLOG(1) << "TransposeTensor permutation: "
<< DebugString(permutation, dims.nbDims);
layer->setFirstTranspose(permutation);
nvinfer1::Dims reshape_dims;
reshape_dims.nbDims = dims.nbDims;
for (int32_t i = 0; i < reshape_dims.nbDims; ++i) {
reshape_dims.d[i] = 0;
}
layer->setReshapeDimensions(reshape_dims);
*output_tensor = layer->getOutput(0);
return OkStatus();
}
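// Computes the min/max over the weight values (float, half, or int32) as
// floats, e.g. to derive a quantization range.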
Status Converter::GetWeightRange(const TRT_ShapedWeights& weights,
float* out_min, float* out_max) const {
switch (weights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
auto inp = weights.GetPointer<float>();
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = *result.first;
*out_max = *result.second;
break;
}
case nvinfer1::DataType::kHALF: {
auto inp = weights.GetPointer<Eigen::half>();
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = static_cast<float>(*result.first);
*out_max = static_cast<float>(*result.second);
break;
}
case nvinfer1::DataType::kINT32: {
auto inp = weights.GetPointer<int>();
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = static_cast<float>(*result.first);
*out_max = static_cast<float>(*result.second);
break;
}
default:
return errors::Unimplemented(
"Data type not supported for GetWeightRange: ",
DebugString(weights.TrtDType()));
}
return OkStatus();
}
void Converter::SetLayerName(nvinfer1::ILayer* layer, const NodeDef& node_def,
absl::string_view sub_op_name,
std::optional<int> sub_op_instance,
std::optional<std::string> origin_node_name) {
std::string sub_op_suffix = GetLayerNameSuffix(sub_op_name, sub_op_instance);
if (sub_op_suffix.empty()) {
SetLayerNameHelper(layer, engine_name_, node_def.name());
} else if (origin_node_name.has_value()) {
auto layer_name = absl::StrCat(node_def.name(), "-",
absl::string_view(origin_node_name.value()),
"-", sub_op_suffix);
SetLayerNameHelper(layer, engine_name_, layer_name);
} else {
SetLayerNameHelper(layer, engine_name_,
absl::StrCat(node_def.name(), "-", sub_op_suffix));
}
}
void Converter::SetLayerName(nvinfer1::ILayer* layer,
absl::string_view main_op_name,
absl::string_view sub_op_name,
std::optional<int> sub_op_instance) {
std::string layer_name_suffix =
GetLayerNameSuffix(sub_op_name, sub_op_instance);
SetLayerNameHelper(layer, engine_name_,
absl::StrCat(main_op_name, "-", layer_name_suffix));
}
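// Reshapes a tensor (via IShuffleLayer) or materializes weights as a constant
// layer with the requested dims. In validation-only mode no layers are added
// and *tensor is left null. A -1 in `dims` is allowed for tensors but not for
// weights.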
Status PrepareTensorForShape(Converter* converter,
const TRT_TensorOrWeights& input,
const DimsAdapter& dims,
const bool validation_only,
ITensorProxyPtr* tensor, const NodeDef& node_def,
std::optional<int> op_instance,
std::optional<std::string> origin_node_name) {
DimsAdapter input_dims(input.GetTrtDims());
if (dims.Volume() > 0 && AreDimsStaticWithDifferentSize(input_dims, dims)) {
return errors::InvalidArgument(
"Incompatible shapes: ", input_dims.DebugString(), " vs. ",
dims.DebugString());
}
if (input.is_weights() && !dims.IsStatic()) {
return errors::InvalidArgument("Shape is not fully defined: ",
dims.DebugString());
}
if (validation_only) {
*tensor = nullptr;
return OkStatus();
}
TFTRT_RETURN_ERROR_IF_NULLPTR(converter, "converter is nullptr");
if (input.is_tensor()) {
if (input_dims == dims) {
*tensor = input.tensor();
} else {
nvinfer1::IShuffleLayer* layer =
converter->network()->addShuffle(*input.tensor()->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, "TF-TRT Internal Reshape");
converter->SetLayerName(layer, node_def, "shuffle", op_instance,
origin_node_name);
layer->setReshapeDimensions(dims.AsTrtDims());
*tensor = layer->getOutput(0);
}
} else {
*tensor = converter->CreateConstantLayer(input.weights(), dims.AsTrtDims());
TFTRT_RETURN_ERROR_IF_NULLPTR(*tensor, "TF-TRT Internal Reshape");
}
return OkStatus();
}
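// Remembers a symmetric quantization range max(|min|, |max|) for `tensor`,
// to be applied later by MaybeApplyQuantizationRanges().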
void Converter::ProvideQuantizationRange(ITensorProxyPtr* tensor,
float min_range, float max_range) {
float symmetric_range = std::max(std::abs(min_range), std::abs(max_range));
if ((*tensor)->is_trt_tensor()) {
quantization_ranges_[(*tensor)->trt_tensor()] = symmetric_range;
} else if ((*tensor)->is_simple_tensor()) {
quantization_ranges_proxy_[tensor] = symmetric_range;
}
}
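// In INT8 mode, applies the collected ranges via setDynamicRange().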
void Converter::MaybeApplyQuantizationRanges() {
if (precision_mode() != TrtPrecisionMode::INT8) return;
for (auto pair : quantization_ranges_) {
nvinfer1::ITensor* tensor = pair.first;
const float range = pair.second;
VLOG(1) << "Setting range for: " << tensor->getName() << ": " << range;
tensor->setDynamicRange(-range, range);
}
for (auto pair : quantization_ranges_proxy_) {
ITensorProxyPtr tensor = *pair.first;
const float range = pair.second;
VLOG(1) << "Setting range for: " << tensor->getName() << ": " << range;
tensor->setDynamicRange(-range, range);
}
}
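// Collects the TRT_TensorOrWeights for each data input of `node_def`,
// skipping control inputs ("^name") and normalizing a trailing ":0" port.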
Status Converter::GetInputs(const NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const {
for (auto const& input_name : node_def.input()) {
if (input_name[0] == '^') continue;
string name = input_name;
auto last = name.find_last_of(':');
if (last != string::npos && last + 2 == name.size() &&
name[last + 1] == '0') {
name.erase(last);
}
if (trt_tensors_.count(name)) {
TRT_TensorOrWeights input = trt_tensors_.at(name);
inputs->push_back(input);
VLOG(2) << "Retrieved input " << name << ": " << input.DebugString();
} else {
string msg("Node ");
StrAppend(&msg, node_def.name(), " should have an input named '", name,
"' but it is not available");
LOG(ERROR) << msg;
return errors::InvalidArgument(msg);
}
}
return OkStatus();
}
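// Validates that each input of the node has the expected kind (tensor,
// weight, or resource), returning Unimplemented otherwise.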
Status CheckInputsWeights(
const OpConverterParams& params,
const std::vector<std::pair<string, TrtInputArg>>& expected_inputs) {
const auto& inputs = params.inputs;
const auto& node_def = params.node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), expected_inputs.size(), node_def);
for (int i = 0; i < inputs.size(); i++) {
if (expected_inputs[i].second == TrtInputArg::kWeight &&
!inputs.at(i).is_weights()) {
return errors::Unimplemented("The input \"", expected_inputs[i].first,
"\" for ", node_def.op(),
" must be a constant");
}
if (expected_inputs[i].second == TrtInputArg::kTensor &&
!inputs.at(i).is_tensor()) {
return errors::Unimplemented("The input \"", expected_inputs[i].first,
"\" for ", node_def.op(),
" must be a tensor");
}
if (expected_inputs[i].second == TrtInputArg::kResource &&
!inputs.at(i).is_resource()) {
return errors::Unimplemented("The input \"", expected_inputs[i].first,
"\" for ", node_def.op(),
" must be a resource handle");
}
}
return OkStatus();
}
Status CheckInputsWeights(
const OpConverterParams& params,
const std::vector<std::pair<string, bool>>& inputs_is_weight) {
std::vector<std::pair<string, TrtInputArg>> expected_inputs;
expected_inputs.reserve(inputs_is_weight.size());
std::transform(
inputs_is_weight.begin(), inputs_is_weight.end(),
std::back_inserter(expected_inputs), [](std::pair<string, bool> x) {
return std::make_pair(
x.first, x.second ? TrtInputArg::kWeight : TrtInputArg::kTensor);
});
return CheckInputsWeights(params, expected_inputs);
}
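// Reads the node's data type from the given attribute, defaulting to "dtype"
// for ReadVariableOp/ResourceGather and "T" for everything else.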
Status GetNodeDefTfType(const NodeDef& node_def, DataType* tf_type,
const string type_attr_name_in = "") {
string type_attr_name;
if (type_attr_name_in.empty()) {
if (node_def.op() == "ReadVariableOp" ||
node_def.op() == "ResourceGather") {
type_attr_name = "dtype";
} else {
type_attr_name = "T";
}
} else {
type_attr_name = type_attr_name_in;
}
AttrSlice attrs(node_def);
if (attrs.FindByString(type_attr_name) == nullptr) {
return errors::InvalidArgument("Attribute with name ", type_attr_name,
" not found.");
}
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, type_attr_name, tf_type));
return OkStatus();
}
Status GetInputTfType(const OpConverterParams& params, DataType* tf_type,
int pos) {
const std::vector<TRT_TensorOrWeights>& inputs = params.inputs;
  if (pos < 0 || inputs.size() <= static_cast<size_t>(pos)) {
return errors::Internal("Invalid input position");
}
return inputs[pos].GetTfType(tf_type);
}
Status GetOutputTfType(const OpConverterParams& params, DataType* tf_type) {
return GetNodeDefTfType(params.node_def, tf_type);
}
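// Returns Unimplemented if the node's data type is not in `allowed_types`.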
Status AllowDataTypes(const OpConverterParams& params,
const std::set<DataType>& allowed_types,
const char* type_attr_name = "") {
const auto& node_def = params.node_def;
DataType tf_type;
TF_RETURN_IF_ERROR(GetNodeDefTfType(node_def, &tf_type, type_attr_name));
if (!allowed_types.count(tf_type)) {
const auto error =
convert_not_supported_dtype_msg(allowed_types, tf_type, node_def);
return errors::Unimplemented(error);
}
return OkStatus();
}
namespace {
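// Extracts the spatial (height, width) output sizes from the Conv2DBackprop
// "input_sizes" argument, which may hold either 4 values (a full NHWC/NCHW
// shape) or just the 2 spatial dimensions.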
std::vector<int64_t> GetSpatialDimsFromOutputSizes(
const TRT_TensorOrWeights& output_sizes, const int h_index,
const int w_index) {
const TRT_ShapedWeights& weights = output_sizes.weights();
const int output_sizes_length = weights.count();
auto output_sizes_values = weights.GetPointer<int>();
return {output_sizes_values[output_sizes_length == 4 ? h_index : 0],
output_sizes_values[output_sizes_length == 4 ? w_index : 1]};
}
}  // namespace
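// Shared implementation for Conv2D, DepthwiseConv2dNative (group == 0), and
// Conv2DBackpropInput (conv2d_transpose). Handles NHWC->NCHW transposition,
// RSCK->KCRS weight reordering, and SAME/VALID padding.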
Status ConvertConv2DHelper(const OpConverterParams* params, int group,
bool is_conv2d_backprop_input) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TRT_TensorOrWeights backprop_output_size;
ITensorProxyPtr tensor = nullptr;
if (is_conv2d_backprop_input) {
if (!params->use_explicit_precision) {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input_sizes", true}, {"filter", true}, {"out_backprop", false}}));
}
backprop_output_size = inputs.at(0);
tensor = inputs.at(2).tensor();
bool has_dynamic_hw_shape{false};
int start_idx{0};
auto dims = tensor->getDimensions();
if (params->use_implicit_batch) {
if (dims.nbDims != 3) {
return errors::Internal(
"In implicit batch mode, input nbDims should be 3");
}
start_idx = 1;
} else {
if (dims.nbDims != 4) {
return errors::Internal(
"In explicit batch mode, input nbDims should be 4");
}
start_idx = 2;
}
for (int i = start_idx; i < dims.nbDims; ++i) {
if (dims.d[i] < 0) {
has_dynamic_hw_shape = true;
}
}
if (has_dynamic_hw_shape) {
return errors::Unimplemented(
"Conv2dBackpropInput does not support input with unknown spatial "
"shape");
}
} else {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input", false}, {"filter", !params->use_explicit_precision}}));
tensor = inputs.at(0).tensor();
}
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
if (inputs.at(1).GetTrtDims().nbDims != 4) {
return errors::InvalidArgument("Conv2D expects kernel of dimension 4");
}
string data_format, padding_type;
std::vector<int64_t> tf_dilations, tf_stride;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dilations", &tf_dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
int c_index = (data_format == "NHWC") ? 3 : 1;
int h_index = (data_format == "NHWC") ? 1 : 2;
int w_index = (data_format == "NHWC") ? 2 : 3;
if (tf_dilations.size() != 4) {
return errors::InvalidArgument(
"Convolution dilations field must specify 4 dimensions");
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions");
}
const nvinfer1::DimsHW dilation(tf_dilations[h_index], tf_dilations[w_index]);
if (is_conv2d_backprop_input && (dilation.d[0] != 1 || dilation.d[1] != 1)) {
return errors::Unimplemented(
"Dilation with Conv2DBackpropInput (conv2d_transpose) is not"
" supported");
}
if (tf_stride.size() != 4) {
return errors::InvalidArgument(
"Convolution strides field must specify 4 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions");
}
if (!params->use_implicit_batch && tensor->getDimensions().d[c_index] == -1) {
return errors::InvalidArgument("Channel dimension must be static");
}
if (padding_type != "SAME" && padding_type != "VALID") {
return errors::Unimplemented(padding_type +
" padding type not implemented, "
"only VALID and SAME are supported");
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
if (params->validation_only) return OkStatus();
const bool need_transpose = (data_format == "NHWC");
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 3, 1, 2}, &tensor, node_def, "to_NCHW"));
}
const auto tensor_dim = tensor->getDimensions();
const int c_dim_size = tensor_dim.d[params->use_implicit_batch ? 0 : 1];
const int num_groups = (group == 0) ? c_dim_size : group;
const int output_axis = is_conv2d_backprop_input ? 2 : 3;
auto weights_shape = inputs.at(1).GetTrtDims();
const int noutput = weights_shape.d[output_axis] * num_groups;
nvinfer1::DimsHW kernel_size;
kernel_size.h() = weights_shape.d[0];
kernel_size.w() = weights_shape.d[1];
TRT_ShapedWeights weights_rsck;
if (inputs.at(1).is_weights()) {
weights_rsck = inputs.at(1).weights();
} else {
StatusOr<TRT_ShapedWeights> tmp = params->weight_store->GetTempWeights(
nvinfer1::DataType::kFLOAT, weights_shape);
TRT_ENSURE_OK(tmp);
weights_rsck = std::move(tmp).value();
}
if (!inputs.at(1).is_weights()) {
TRT_ENSURE(params->use_explicit_precision);
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
TRT_ENSURE_OK(builder);
auto dequant_layer =
builder->FindProducerOf(inputs.at(1).tensor()->trt_tensor());
TRT_ENSURE_PTR_OK(dequant_layer);
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) {
TRT_ENSURE((*dequant_layer)->getType() == nvinfer1::LayerType::kSCALE);
}
auto quant_layer = builder->UniqueParentOf(*dequant_layer, 0);
TRT_ENSURE_PTR_OK(quant_layer);
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) {
TRT_ENSURE((*quant_layer)->getType() == nvinfer1::LayerType::kSCALE);
}
auto weights_layer = builder->UniqueParentOf(*quant_layer, 0);
TRT_ENSURE_PTR_OK(weights_layer);
TRT_ENSURE((*weights_layer)->getType() == nvinfer1::LayerType::kCONSTANT);
auto const_weights_rsck =
reinterpret_cast<nvinfer1::IConstantLayer*>(*weights_layer)
->getWeights();
    TRT_ENSURE(weights_rsck.count() == const_weights_rsck.count);
const auto* weights_ptr =
static_cast<const float*>(const_weights_rsck.values);
std::copy_n(weights_ptr, const_weights_rsck.count,
weights_rsck.GetPointer<float>());
}
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(weights_rsck);
TRT_ENSURE_OK(weights);
StatusOr<TRT_ShapedWeights> biases = params->weight_store->GetTempWeights(
nvinfer1::DataType::kFLOAT, nvinfer1::Dims{1, {noutput}});
TRT_ENSURE_OK(biases);
std::fill_n(biases->GetPointer<float>(), noutput, 0.0f);
ReorderRSCKToKCRS(weights_rsck, &*weights, num_groups);
nvinfer1::ILayer* conv_layer = nullptr;
if (is_conv2d_backprop_input) {
nvinfer1::IDeconvolutionLayer* layer =
params->converter->network()->addDeconvolution(
*tensor->trt_tensor(), noutput, kernel_size,
weights->GetTrtWeights(), biases->GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
conv_layer = layer;
} else {
const nvinfer1::Weights empty_weights{nvinfer1::DataType::kFLOAT, nullptr,
0};
nvinfer1::IConvolutionLayer* layer =
params->converter->network()->addConvolution(
*tensor->trt_tensor(), noutput, kernel_size,
params->use_explicit_precision ? empty_weights
: weights->GetTrtWeights(),
empty_weights);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
layer->setDilation(dilation);
conv_layer = layer;
}
if (params->use_explicit_precision) {
TRT_ENSURE(inputs.at(1).is_tensor());
nvinfer1::IShuffleLayer* layer = params->converter->network()->addShuffle(
*inputs.at(1).tensor()->trt_tensor());
layer->setFirstTranspose({3, 2, 0, 1});
layer->setReshapeDimensions({4, {0, 0, 0, 0}});
conv_layer->setInput(1, *layer->getOutput(0));
}
params->converter->SetLayerName(conv_layer, node_def, "conv");
ITensorProxyPtr output_tensor = conv_layer->getOutput(0);
if (is_conv2d_backprop_input) {
std::vector<int64_t> output_spatial_dims =
GetSpatialDimsFromOutputSizes(backprop_output_size, h_index, w_index);
const int output_height = output_spatial_dims[0];
const int output_width = output_spatial_dims[1];
nvinfer1::Dims trt_output_shape = output_tensor->getDimensions();
int out_h_idx = params->use_implicit_batch ? 1 : 2;
int out_w_idx = params->use_implicit_batch ? 2 : 3;
const int height_diff = output_height - trt_output_shape.d[out_h_idx];
const int width_diff = output_width - trt_output_shape.d[out_w_idx];
if ((height_diff < 0) || (width_diff < 0)) {
return errors::InvalidArgument(
"input_sizes argument of Conv2DBackprop (i.e. output_shape argument "
"of conv2d_transpose) ",
"is too small for the given out_backprop argument of Conv2DBackprop "
"(i.e. input argument of conv2d_transpose). Expect: ",
"(", output_height, ", ", output_width, ") >= ", "(",
trt_output_shape.d[out_h_idx], ", ", trt_output_shape.d[out_w_idx],
")");
}
if ((height_diff > 0) || (width_diff > 0)) {
nvinfer1::DimsHW pre_padding(0, 0);
nvinfer1::DimsHW post_padding(height_diff, width_diff);
nvinfer1::IPaddingLayer* padding_layer =
params->converter->network()->addPadding(*output_tensor->trt_tensor(),
pre_padding, post_padding);
output_tensor = padding_layer->getOutput(0);
params->converter->SetLayerName(padding_layer, node_def, "pad");
}
}
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 1}, &output_tensor, node_def, "to_NHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
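// Reads TF_DEBUG_TRT_ALLOW_INEFFICIENT_TRANSPOSE once; when set, large
// transposes are permitted on TRT versions older than 7.1.3.4 instead of
// being rejected.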
bool AllowInefficientTranspose() {
static bool result = [] {
bool value;
Status status =
ReadBoolFromEnvVar("TF_DEBUG_TRT_ALLOW_INEFFICIENT_TRANSPOSE",
                           /*default_val=*/false, &value);
if (!status.ok()) {
LOG(ERROR) << status;
}
return value;
}();
return result;
}
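// Converts tf.transpose. The permutation must be a constant, and in implicit
// batch mode it must leave the batch dimension in place.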
Status ConvertTranspose(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"x", false}, {"perm", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
TRT_ShapedWeights weights = inputs.at(1).weights();
const int* weights_ptr = weights.GetPointer<int>();
std::vector<int> perm(weights_ptr, weights_ptr + weights.count());
ITensorProxyPtr input_tensor = inputs.at(0).tensor();
const int perm_size =
params->use_implicit_batch ? perm.size() - 1 : perm.size();
if (perm_size != size_t(input_tensor->getDimensions().nbDims)) {
return errors::InvalidArgument(
"Rank of perm for transpose does not match with that of the input.");
}
if (params->use_implicit_batch && perm[0] != 0) {
return errors::Unimplemented(
"Transpose at batch dimension is not supported.");
}
if (!IS_TRT_VERSION_GE(7, 1, 3, 4)) {
constexpr int64_t kMaxEfficientTranspose = 2500000;
int64_t tensor_size = DimsAdapter(input_tensor->getDimensions()).Volume();
if (!AllowInefficientTranspose() && tensor_size > kMaxEfficientTranspose) {
return errors::Unimplemented(StrCat("Transpose too large:", tensor_size));
}
}
if (params->validation_only) return OkStatus();
ITensorProxyPtr output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
input_tensor, perm, &output_tensor, params->node_def));
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
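// Converts tf.shape. Static shapes become a constant; dynamic shapes use an
// IShapeLayer. Only supported in explicit batch mode.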
Status ConvertShape(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", TrtInputArg::kBoth}}));
if (params->use_implicit_batch) {
return errors::Unimplemented(
"Shape is only supported for explicit batch mode.");
}
DimsAdapter input_dims(inputs.at(0).GetTrtDims());
if (params->validation_only) return OkStatus();
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
TRT_ENSURE_OK(builder);
if (input_dims.IsStatic()) {
StatusOr<nvinfer1::IConstantLayer*> const_layer =
builder->ConstantShape(input_dims);
TRT_ENSURE_PTR_OK(const_layer);
params->outputs->push_back(
TRT_TensorOrWeights((*const_layer)->getOutput(0)));
return OkStatus();
}
StatusOr<nvinfer1::IShapeLayer*> shape_layer =
builder->Shape(inputs.at(0).tensor()->trt_tensor());
TRT_ENSURE_PTR_OK(shape_layer);
params->converter->SetLayerName(*shape_layer, params->node_def, "shape");
params->outputs->push_back(TRT_TensorOrWeights((*shape_layer)->getOutput(0)));
return OkStatus();
}
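// A shape tensor must be INT32 and at most 1-D.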
Status ExpectShapeTensor(const TRT_TensorOrWeights& tensor) {
if (tensor.tensor()->getType() != nvinfer1::DataType::kINT32) {
return errors::InvalidArgument("Expected a shape tensor with INT32 type");
}
if (tensor.GetTrtDims().nbDims > 1) {
return errors::InvalidArgument("Expected a 0D or 1D shape tensor");
}
return OkStatus();
}
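// Reshape with a non-constant shape tensor as the second input: wires the
// shape tensor into an IShuffleLayer. Requires explicit batch mode and
// TRT >= 7.1.3.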
Status ConvertDynamicReshape(const OpConverterParams* params) {
if (params->use_implicit_batch) {
return errors::InvalidArgument(
"The input \"shape\" for Reshape must be a constant in implicit batch"
" mode.");
}
if (!IS_TRT_VERSION_GE(7, 1, 3, 0)) {
return errors::InvalidArgument(
"Non constant shape input tensor for Reshape requires minimum TRT "
"7.1.3");
}
const auto& inputs = params->inputs;
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
TF_RETURN_IF_ERROR(ExpectShapeTensor(inputs.at(1)));
if (inputs.at(1).tensor()->getDimensions().nbDims == 0) {
return errors::Unimplemented(
"Reshape with dynamic input requires 1D input tensor");
}
if (params->validation_only) return OkStatus();
nvinfer1::IShuffleLayer* layer = params->converter->network()->addShuffle(
*input_tensor.tensor()->trt_tensor());
VLOG(2) << "ConvertReshape setInput (1) "
<< DebugString(inputs.at(1).tensor()->getDimensions());
layer->setInput(1, *inputs.at(1).tensor()->trt_tensor());
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
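// Static reshape in explicit batch mode: the constant target shape is applied
// directly (the batch dimension is just another dimension here).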
Status ConvertStaticReshapeForExplicitBatchMode(
const OpConverterParams* params, DimsAdapter output_dims,
ITensorProxyPtr* output_tensor) {
return PrepareTensorForShape(params->converter, params->inputs.at(0),
output_dims, params->validation_only,
output_tensor, params->node_def);
}
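// Static reshape in implicit batch mode: strips the batch dimension from the
// target shape and rejects reshapes that could change the batch size.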
Status ConvertStaticReshapeForImplicitBatchMode(
const OpConverterParams* params, DimsAdapter output_dims,
ITensorProxyPtr* output_tensor) {
const auto& inputs = params->inputs;
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
const int input_batch_dim = input_tensor.batch_size();
const int64_t output_batch_dim = output_dims.dim(0);
DimsAdapter input_nonbatch_dims(input_tensor.GetTrtDims());
DimsAdapter output_nonbatch_dims(output_dims);
TF_RETURN_IF_ERROR(output_nonbatch_dims.RemoveBatchDimension());
VLOG(1) << "input_batch_dim=" << input_batch_dim
<< ", input_nonbatch_dims=" << input_nonbatch_dims.DebugString()
<< "\nresult_batch_dim=" << output_batch_dim
<< ", result_nonbatch_dims=" << output_nonbatch_dims.DebugString();
bool reshape_may_change_batch_dim = false;
if (input_batch_dim != -1 && output_batch_dim != -1) {
reshape_may_change_batch_dim = (input_batch_dim != output_batch_dim);
} else {
reshape_may_change_batch_dim =
!AreDimsStaticWithSameSize(input_nonbatch_dims, output_nonbatch_dims);
}
if (reshape_may_change_batch_dim) {
return errors::Unimplemented("Reshape on batch dimension is not supported");
}
return PrepareTensorForShape(params->converter, input_tensor,
output_nonbatch_dims, params->validation_only,
output_tensor, params->node_def);
}
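// Converts tf.reshape, dispatching on whether the target shape is a tensor
// (dynamic) or weights (static) and on the batch mode.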
Status ConvertReshape(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"tensor", TrtInputArg::kTensor}, {"shape", TrtInputArg::kBoth}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
if (inputs.at(1).is_tensor()) {
return ConvertDynamicReshape(params);
}
TRT_ShapedWeights weights = inputs.at(1).weights();
if (weights.count() == 0 && params->use_implicit_batch) {
return errors::Unimplemented("Reshape to shape=[] is not supported");
}
DimsAdapter output_shape_dims(
absl::MakeSpan(weights.GetPointer<int>(), weights.count()));
ITensorProxyPtr output_tensor = nullptr;
if (!params->use_implicit_batch) {
TF_RETURN_IF_ERROR(ConvertStaticReshapeForExplicitBatchMode(
params, output_shape_dims, &output_tensor));
} else {
TF_RETURN_IF_ERROR(ConvertStaticReshapeForImplicitBatchMode(
params, output_shape_dims, &output_tensor));
}
if (params->validation_only) return OkStatus();
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
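// Converts tf.expand_dims. With dynamic input shapes in explicit batch mode
// the new axis is spliced in at runtime via DynamicExpandDims; otherwise a
// static reshape suffices.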
Status ConvertExpandDims(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"axis", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
const nvinfer1::Dims dims = input_tensor.GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
auto axis = inputs.at(1).weights().GetSpan<int>();
if (axis.size() != 1) {
return errors::InvalidArgument("ExpandDims axis must be a scalar");
}
int trt_axis;
TF_RETURN_IF_ERROR(ConvertAxis(axis[0], dims.nbDims + 1, node_def.name(),
params->use_implicit_batch, &trt_axis));
if (params->validation_only) return OkStatus();
ITensorProxyPtr output_tensor = nullptr;
if (!params->use_implicit_batch && !HasStaticShape(input_dims)) {
    TF_RETURN_IF_ERROR(params->converter->DynamicExpandDims(
        /*input=*/input_tensor.tensor(),
        /*dims=*/dims,
        /*axis=*/trt_axis,
        /*params=*/params,
        /*output=*/&output_tensor));
} else {
input_dims.insert(input_dims.begin() + trt_axis, 1);
DimsAdapter dims(input_dims);
    TF_RETURN_IF_ERROR(PrepareTensorForShape(
        params->converter, input_tensor, dims,
        /*validation_only=*/false, &output_tensor, params->node_def));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
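// Builds a runtime shape by concatenating constant extents
// (size_for_added_dims) with slices of the input's shape tensor, then applies
// it through an IShuffleLayer. Only valid outside validation mode.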
Status Converter::DynamicReshape(ITensorProxyPtr input,
std::vector<std::pair<int, int>> slices,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::vector<int> size_for_added_dims,
std::optional<int> op_instance) {
*output = nullptr;
if (params->validation_only) {
return errors::Internal(
"DynamicReshape should not be used during validation");
}
ITensorProxyPtr shape =
network()->addShape(*input->trt_tensor())->getOutput(0);
std::vector<ITensorProxyPtr> concat_inputs;
  const int max_num_slices = static_cast<int>(
      std::max(slices.size(), size_for_added_dims.size()));
int op_instance_value = op_instance.has_value() ? op_instance.value() : 0;
for (int i = 0; i < max_num_slices; i++) {
ITensorProxyPtr tensor;
if (i < size_for_added_dims.size() && size_for_added_dims[i] >= 0) {
nvinfer1::Dims dims{1, {1}};
if (size_for_added_dims[i] > 0) {
dims.d[0] = size_for_added_dims[i];
}
TF_RETURN_IF_ERROR(
CreateScalarConstant(params, std::min(size_for_added_dims[i], 1),
&tensor, nvinfer1::DataType::kINT32, dims));
concat_inputs.push_back(tensor);
}
if (i < slices.size()) {
nvinfer1::ISliceLayer* slice_layer = network()->addSlice(
*shape->trt_tensor(), {1, {slices[i].first}},
{1, {slices[i].second - slices[i].first}}, {1, {1}});
concat_inputs.push_back(slice_layer->getOutput(0));
string slice_name = StrCat("slice_", op_instance_value);
SetLayerName(slice_layer, params->node_def, slice_name,
                   /*sub_op_instance=*/i);
}
}
std::vector<nvinfer1::ITensor*> trt_concat_inputs;
for (const auto& t : concat_inputs) {
trt_concat_inputs.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* concat_layer = network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_concat_inputs.data()),
concat_inputs.size());
SetLayerName(concat_layer, params->node_def, "concat", op_instance);
concat_layer->setAxis(0);
ITensorProxyPtr new_shape = concat_layer->getOutput(0);
nvinfer1::IShuffleLayer* shuffle =
network()->addShuffle(*input->trt_tensor());
SetLayerName(shuffle, params->node_def, "shuffle", op_instance);
shuffle->setInput(1, *new_shape->trt_tensor());
*output = shuffle->getOutput(0);
return OkStatus();
}
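// Inserts a size-1 dimension at `axis` of a dynamically shaped tensor by
// splitting the shape at the axis and splicing in a constant 1.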
Status Converter::DynamicExpandDims(ITensorProxyPtr input,
const nvinfer1::Dims& dims, int axis,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::optional<int> op_instance) {
if (params->validation_only) {
*output = nullptr;
return errors::Internal(
"DynamicExpandDims should not be used during validation");
}
std::vector<std::pair<int, int>> slices;
std::vector<int> extra_dims;
if (axis != 0) {
slices.push_back(std::pair<int, int>{0, axis});
extra_dims.push_back(-1);
}
extra_dims.push_back(1);
if (axis != dims.nbDims) {
slices.push_back(std::pair<int, int>{axis, dims.nbDims});
}
  return DynamicReshape(
      /*input=*/input,
      /*slices=*/slices,
      /*params=*/params,
      /*output=*/output,
      /*size_for_added_dims=*/extra_dims,
      /*op_instance=*/op_instance);
}
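// Removes the dimensions marked with 0 in `input_dims`. Dynamic shapes in
// explicit batch mode go through DynamicReshape; static shapes use a plain
// reshape.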
Status Converter::SqueezeTensor(ITensorProxyPtr input,
std::vector<int>* input_dims,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::optional<int> op_instance) {
if (!params->use_implicit_batch && !HasStaticShape(*input_dims)) {
std::vector<std::pair<int, int>> slices;
for (int i = 0; i < input_dims->size(); i++) {
if (input_dims->at(i) != 0) {
slices.push_back(std::pair<int, int>(i, i + 1));
}
}
    return DynamicReshape(
        /*input=*/input,
        /*slices=*/slices,
        /*params=*/params,
        /*output=*/output,
        /*size_for_added_dims=*/{},
        /*op_instance=*/op_instance);
}
input_dims->erase(std::remove(input_dims->begin(), input_dims->end(), 0),
input_dims->end());
  TF_RETURN_IF_ERROR(PrepareTensorForShape(
      params->converter, TRT_TensorOrWeights(input), DimsAdapter(*input_dims),
      /*validation_only=*/false, output, params->node_def, op_instance));
return OkStatus();
}
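// Converts tf.squeeze. Squeezed axes must have size 1 (or be unknown); with
// an empty squeeze_dims list, all static size-1 dimensions are removed.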
Status ConvertSqueeze(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
const nvinfer1::Dims dims = input_tensor.GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
std::vector<int64_t> squeeze_dims;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "squeeze_dims", &squeeze_dims));
if (squeeze_dims.empty()) {
if (params->use_implicit_batch || !HasStaticShape(dims)) {
return errors::Unimplemented(
"Squeeze is not implemented for empty squeeze_dims");
} else {
for (int& dim : input_dims) {
if (dim == 1) {
dim = 0;
}
}
}
} else {
std::vector<int> trt_axes;
trt_axes.reserve(squeeze_dims.size());
for (int tf_axis : squeeze_dims) {
int trt_axis;
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
if (input_dims[trt_axis] != -1 && input_dims[trt_axis] != 1) {
return errors::InvalidArgument(
"Dimension ", tf_axis, " with size ", input_dims[trt_axis],
" cannot be squeezed because it must be size 1");
}
trt_axes.push_back(trt_axis);
}
for (int axis : trt_axes) {
input_dims[axis] = 0;
}
}
if (params->validation_only) return OkStatus();
ITensorProxyPtr output_tensor = nullptr;
  TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
      /*input=*/input_tensor.tensor(),
      /*input_dims=*/&input_dims,
      /*params=*/params,
      /*output=*/&output_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
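// Converts tf.slice by translating begin/size into the strided-slice
// machinery (stride 1 everywhere, size -1 mapped to an end mask), then
// delegating to ConvertStridedSliceHelper.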
Status ConvertSlice(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params, {{"input", false}, {"begin", true}, {"size", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
const TRT_ShapedWeights& begin_weights = inputs.at(1).weights();
const TRT_ShapedWeights& size_weights = inputs.at(2).weights();
if (absl::c_any_of(begin_weights.GetSpan<int32>(),
[](const int32 val) { return val < 0; })) {
return errors::InvalidArgument("\"begin\" in Slice is out of range");
}
if (absl::c_any_of(size_weights.GetSpan<int32>(),
[](const int32 val) { return val < -1; })) {
return errors::InvalidArgument("\"size\" in Slice is out of range");
}
PartialTensorShape input_shape;
TF_RETURN_IF_ERROR(
DimsAdapter(inputs.at(0).GetTrtDims())
.PartialTensorShape(
&input_shape, params->use_implicit_batch
? std::optional<int>(inputs.at(0).batch_size())
: std::nullopt));
if (static_cast<int64>(input_shape.dims()) !=
begin_weights.GetTensor().NumElements() ||
static_cast<int64>(input_shape.dims()) !=
size_weights.GetTensor().NumElements()) {
return errors::InvalidArgument(
"Length of begin and size arguments must equal rank of input for "
"Slice");
}
if (params->use_implicit_batch) {
auto begin_v = begin_weights.GetSpan<int32>();
auto size_v = size_weights.GetSpan<int32>();
if (begin_v[0] != 0 ||
(size_v[0] != -1 && size_v[0] != input_shape.dim_size(0))) {
return errors::Unimplemented(
"TensorRT does not allow modifications to the batch dimension in "
"implicit batch mode");
}
}
PartialTensorShape processing_shape;
PartialTensorShape final_shape;
bool is_identity;
bool is_simple_slice;
bool slice_dim0;
absl::InlinedVector<int64, 4> begin;
absl::InlinedVector<int64, 4> end;
absl::InlinedVector<int64, 4> strides;
StridedSliceShapeSpec strided_slice_spec;
std::bitset<32> begin_mask(0);
std::bitset<32> end_mask(0);
std::bitset<32> ellipsis_mask(0);
std::bitset<32> new_axis_mask(0);
std::bitset<32> shrink_axis_mask(0);
Tensor strides_tensor = tensor::DeepCopy(begin_weights.GetTensor());
Tensor end_tensor = tensor::DeepCopy(size_weights.GetTensor());
Tensor size_tensor = tensor::DeepCopy(size_weights.GetTensor());
auto strides_vec = strides_tensor.flat<int32>();
auto end_vec = end_tensor.flat<int32>();
auto size_vec = size_tensor.flat<int32>();
auto begin_vec = begin_weights.GetTensor().flat<int32>();
for (int i = 0; i < input_shape.dims(); i++) {
strides_vec(i) = 1;
begin_mask[i] = false;
if (size_vec(i) == -1) {
end_mask[i] = true;
end_vec(i) = 0;
size_vec(i) = 0;
} else {
end_mask[i] = false;
end_vec(i) = begin_vec(i) + size_vec(i);
if (end_vec(i) > input_shape.dim_size(i) && input_shape.dim_size(i) > 0) {
return errors::InvalidArgument("\"begin\" + \"size\" for dimension ", i,
" in Slice is out of range");
}
}
}
auto bitset_to_int32 = [](const std::bitset<32>& bs) {
return static_cast<int32_t>(bs.to_ulong());
};
TF_RETURN_IF_ERROR(ValidateStridedSliceOp(
&begin_weights.GetTensor(), &end_tensor, strides_tensor, input_shape,
bitset_to_int32(begin_mask), bitset_to_int32(end_mask),
bitset_to_int32(ellipsis_mask), bitset_to_int32(new_axis_mask),
bitset_to_int32(shrink_axis_mask), &processing_shape, &final_shape,
&is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides,
&strided_slice_spec));
VLOG(2) << "ConvertSlice: " << "\n input_shape: " << input_shape
<< "\n processing_shape: " << processing_shape
<< "\n final_shape: " << final_shape
<< "\n begin: " << DebugString(begin)
<< "\n stride: " << DebugString(strides)
<< "\n end: " << DebugString(end)
<< "\n is identity: " << is_identity
<< "\n is simple_slice: " << is_simple_slice
<< "\n slice dim0: " << slice_dim0
<< " StridedSliceShapeSpec:" << "\n begin_dense_mask: "
<< std::bitset<32>(strided_slice_spec.begin_dense_mask)
<< "\n end_dense_mask: "
<< std::bitset<32>(strided_slice_spec.end_dense_mask)
<< "\n shrink_dense_mask: "
<< std::bitset<32>(strided_slice_spec.shrink_axis_dense_mask);
return ConvertStridedSliceHelper(params, inputs.at(0), input_shape, begin,
                                   strides, end, /*final_shape=*/std::nullopt,
                                   /*op_instance=*/std::nullopt,
strided_slice_spec);
}
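// Converts tf.strided_slice. new_axis_mask is unsupported, and in implicit
// batch mode the slice must leave the batch dimension untouched.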
Status ConvertStridedSlice(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input", false}, {"begin", true}, {"end", true}, {"strides", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
int32 begin_mask, end_mask, ellipsis_mask, shrink_axis_mask, new_axis_mask;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "begin_mask", &begin_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "end_mask", &end_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ellipsis_mask", &ellipsis_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "shrink_axis_mask", &shrink_axis_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "new_axis_mask", &new_axis_mask));
if (new_axis_mask != 0) {
return errors::Unimplemented(
"new_axis_mask is not supported for StridedSlice");
}
if (params->use_implicit_batch && shrink_axis_mask & 1) {
return errors::Unimplemented(
"TensorRT does not allow modifications to the batch dimension");
}
PartialTensorShape input_shape;
TF_RETURN_IF_ERROR(
DimsAdapter(inputs.at(0).GetTrtDims())
.PartialTensorShape(
&input_shape, params->use_implicit_batch
? std::optional<int>(inputs.at(0).batch_size())
: std::nullopt));
const TRT_ShapedWeights& begin_weights = inputs.at(1).weights();
const TRT_ShapedWeights& end_weights = inputs.at(2).weights();
const TRT_ShapedWeights& stride_weights = inputs.at(3).weights();
if (!AllLengthsEqual({begin_weights.ToVector<int>(),
end_weights.ToVector<int>(),
stride_weights.ToVector<int>()})) {
return errors::InvalidArgument(
"Length of begin, end, and stride must be equal");
}
PartialTensorShape processing_shape;
PartialTensorShape final_shape;
bool is_identity;
bool is_simple_slice;
bool slice_dim0;
absl::InlinedVector<int64, 4> begin;
absl::InlinedVector<int64, 4> end;
absl::InlinedVector<int64, 4> strides;
StridedSliceShapeSpec strided_slice_spec;
TF_RETURN_IF_ERROR(ValidateStridedSliceOp(
&begin_weights.GetTensor(), &end_weights.GetTensor(),
stride_weights.GetTensor(), input_shape, begin_mask, end_mask,
ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape,
&final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end,
&strides, &strided_slice_spec));
if (!params->validation_only) {
VLOG(2) << "After ValidateStridedSliceOp:" << "\n input_shape: "
<< input_shape << "\n processing_shape: " << processing_shape
<< "\n final_shape: " << final_shape
<< "\n begin: " << DebugString(begin)
<< "\n stride: " << DebugString(strides)
<< "\n end: " << DebugString(end)
<< " is identity: " << is_identity
<< "\n is simple_slice: " << is_simple_slice
<< "\n slice dim0: " << slice_dim0
<< " StridedSliceShapeSpec:" << "\n begin_dense_mask: "
<< std::bitset<32>(strided_slice_spec.begin_dense_mask)
<< "\n end_dense_mask: "
<< std::bitset<32>(strided_slice_spec.end_dense_mask)
<< "\n shrink_dense_mask: "
<< std::bitset<32>(strided_slice_spec.shrink_axis_dense_mask);
}
if (params->use_implicit_batch &&
!((ellipsis_mask & 1) &&
begin_weights.Shape().NumDims() < input_shape.dims())) {
const bool begin_is_modified = !(begin_mask & 1) && (begin[0] != 0);
const bool stride_is_modified = (strides[0] != 1);
const bool batch_size_is_defined = (input_shape.dim_size(0) > 0);
const bool end_is_modified =
!(end_mask & 1) &&
(!batch_size_is_defined || (end[0] != input_shape.dim_size(0)));
if (begin_is_modified || stride_is_modified || end_is_modified) {
return errors::Unimplemented(
"TensorRT does not allow modifications to the batch dimension");
}
}
std::optional<nvinfer1::Dims> final_shape_dims = std::nullopt;
if (shrink_axis_mask) {
final_shape_dims.emplace();
auto dims_adap =
DimsAdapter::Create(final_shape, params->use_implicit_batch);
TRT_ENSURE_OK(dims_adap);
*final_shape_dims = dims_adap->AsTrtDims();
}
return ConvertStridedSliceHelper(params, inputs.at(0), input_shape, begin,
                                   strides, end, final_shape_dims,
                                   /*op_instance=*/0,
strided_slice_spec);
}
Status ConvertConv2D(const OpConverterParams* params) {
return ConvertConv2DHelper(params, 1, false);
}
Status ConvertConv2DDepthwise(const OpConverterParams* params) {
return ConvertConv2DHelper(params, 0, false);
}
Status ConvertConv2DBackpropInput(const OpConverterParams* params) {
return ConvertConv2DHelper(params, 1, true);
}
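// Shared implementation for Conv3D and Conv3DBackpropInputV2
// (conv3d_transpose); mirrors ConvertConv2DHelper for the NDHWC/NCDHW
// layouts.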
Status ConvertConv3DHelper(const OpConverterParams* params, int group,
bool is_conv3d_backprop_input = false) {
const int kNumDims = 5;
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TRT_TensorOrWeights backprop_output_size;
ITensorProxyPtr tensor = nullptr;
if (is_conv3d_backprop_input) {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input_sizes", true}, {"filter", true}, {"out_backprop", false}}));
backprop_output_size = inputs.at(0);
tensor = inputs.at(2).tensor();
} else {
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"filter", true}}));
tensor = inputs.at(0).tensor();
}
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
const TRT_ShapedWeights weights_drsck = inputs.at(1).weights();
if (weights_drsck.Shape().NumDims() != kNumDims) {
return errors::InvalidArgument("Conv3D expects kernel of dimension 5");
}
string data_format, padding_type;
std::vector<int64_t> tf_dilations, tf_stride;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dilations", &tf_dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
const bool is_ndhwc = (data_format == "NDHWC");
const int d_index = is_ndhwc ? 1 : 2;
const int h_index = is_ndhwc ? 2 : 3;
const int w_index = is_ndhwc ? 3 : 4;
const int c_index = is_ndhwc ? 4 : 1;
if (tf_dilations.size() != kNumDims) {
return errors::InvalidArgument(
"Convolution dilations field must specify 5 dimensions");
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions");
}
const nvinfer1::Dims3 dilation_dhw(
tf_dilations[d_index], tf_dilations[h_index], tf_dilations[w_index]);
if (is_conv3d_backprop_input &&
(dilation_dhw.d[0] != 1 || dilation_dhw.d[1] != 1 ||
dilation_dhw.d[2] != 1)) {
return errors::Unimplemented(
"Dilation with Conv3DBackpropInputV2 (conv3d_transpose) is not "
"supported");
}
if (tf_stride.size() != kNumDims) {
return errors::InvalidArgument(
"Convolution strides field must specify 5 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions");
}
const nvinfer1::Dims3 stride_dhw(tf_stride[d_index], tf_stride[h_index],
tf_stride[w_index]);
const auto tensor_dim = tensor->getDimensions();
if (is_conv3d_backprop_input && padding_type == "SAME") {
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(weights_drsck);
TRT_ENSURE_OK(weights);
nvinfer1::Dims3 effective_kernel_size(
weights->Shape().dim(0) +
(weights->Shape().dim(0) - 1) * (dilation_dhw.d[0] - 1),
weights->Shape().dim(1) +
(weights->Shape().dim(1) - 1) * (dilation_dhw.d[1] - 1),
weights->Shape().dim(2) +
            (weights->Shape().dim(2) - 1) * (dilation_dhw.d[2] - 1));
const auto output_size_weights =
backprop_output_size.weights().GetPointer<int>();
const std::vector<int64_t> input_dims = {output_size_weights[d_index],
output_size_weights[h_index],
output_size_weights[w_index]};
const std::vector<std::pair<int, int>> padding =
CreateSamePadding(stride_dhw, effective_kernel_size, input_dims);
if (padding[0].first != padding[0].second ||
padding[1].first != padding[1].second ||
padding[2].first != padding[2].second) {
return errors::Unimplemented(
"Asymmetric padding with Conv3DBackpropInputV2 (conv3d_transpose) is "
"not supported");
}
}
int implicit_batch_offset = params->use_implicit_batch ? -1 : 0;
if (tensor->getDimensions().d[c_index + implicit_batch_offset] == -1) {
return errors::InvalidArgument("Channel dimension must be static");
}
if (params->validation_only) return OkStatus();
const bool need_transpose = is_ndhwc;
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 4, 1, 2, 3}, &tensor, node_def, "to_NCDHW"));
}
const int num_groups = (group == 0) ? tensor_dim.d[0] : group;
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(weights_drsck);
TRT_ENSURE_OK(weights);
ReorderDRSCKToKCDRS(weights_drsck, &*weights, num_groups);
TRT_ShapedWeights biases(weights->TrtDType());
const int output_axis = is_conv3d_backprop_input ? 1 : 0;
const int noutput = weights->Shape().dim(output_axis) * num_groups;
nvinfer1::Dims3 kernel_size_drs(weights->Shape().dim(2),
weights->Shape().dim(3),
                                  weights->Shape().dim(4));
nvinfer1::ILayer* conv_layer = nullptr;
if (is_conv3d_backprop_input) {
nvinfer1::IDeconvolutionLayer* layer =
params->converter->network()->addDeconvolutionNd(
*tensor->trt_tensor(), noutput, kernel_size_drs,
weights->GetTrtWeights(), biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStrideNd(stride_dhw);
if (padding_type == "SAME") {
VLOG(2) << "Using SAME padding";
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
conv_layer = layer;
} else {
nvinfer1::IConvolutionLayer* layer =
params->converter->network()->addConvolutionNd(
*tensor->trt_tensor(), noutput, kernel_size_drs,
weights->GetTrtWeights(), biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStrideNd(stride_dhw);
if (padding_type == "SAME") {
VLOG(2) << "Using SAME padding";
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
layer->setDilationNd(dilation_dhw);
conv_layer = layer;
}
params->converter->SetLayerName(conv_layer, node_def, "conv");
ITensorProxyPtr output_tensor = conv_layer->getOutput(0);
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 4, 1}, &output_tensor, node_def, "to_NDHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
Status ConvertConv3D(const OpConverterParams* params) {
return ConvertConv3DHelper(params, 1, false);
}
Status ConvertConv3DBackpropInputV2(const OpConverterParams* params) {
return ConvertConv3DHelper(params, 1, true);
}
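// Converts MaxPool3D/AvgPool3D, transposing NDHWC inputs to NCDHW around an
// IPoolingLayer.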
Status ConvertPool3D(const OpConverterParams* params) {
const int kNumDims = 5;
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
nvinfer1::PoolingType type;
if (node_def.op() == "MaxPool3D") {
type = nvinfer1::PoolingType::kMAX;
} else if (node_def.op() == "AvgPool3D") {
type = nvinfer1::PoolingType::kAVERAGE;
} else {
return errors::Unimplemented("Unsupported pooling type: ", node_def.op());
}
string data_format, padding_type;
std::vector<int64_t> tf_stride, tf_kernel;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &tf_kernel));
if ((padding_type != "SAME") && (padding_type != "VALID")) {
return errors::Unimplemented("Unsupported padding type: ", padding_type);
}
const bool is_ndhwc = (data_format == "NDHWC");
const int c_index = is_ndhwc ? 4 : 1;
const int d_index = is_ndhwc ? 1 : 2;
const int h_index = is_ndhwc ? 2 : 3;
const int w_index = is_ndhwc ? 3 : 4;
if (tf_stride.size() != kNumDims) {
return errors::InvalidArgument(
"Pooling strides field must specify 5 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"stride must be 1 for batch and channel dimensions");
}
if (tf_kernel.size() != kNumDims) {
return errors::InvalidArgument(
"Pooling ksize field must specify 5 dimensions");
}
if (tf_kernel[0] != 1 || tf_kernel[c_index] != 1) {
return errors::Unimplemented(
"ksize must be 1 for batch and channel dimensions");
}
const nvinfer1::Dims3 stride(tf_stride[d_index], tf_stride[h_index],
tf_stride[w_index]);
const nvinfer1::Dims3 ksize(tf_kernel[d_index], tf_kernel[h_index],
tf_kernel[w_index]);
if (!(ksize.nbDims >= 3 &&
(ksize.d[0] >= 1 && ksize.d[1] >= 1 && ksize.d[2] >= 1) &&
(ksize.d[0] * ksize.d[1] * ksize.d[2] < MAX_KERNEL_DIMS_PRODUCT(3)))) {
return errors::InvalidArgument("Window dimensions are not within bounds");
}
if (params->validation_only) return OkStatus();
ITensorProxyPtr tensor = inputs.at(0).tensor();
if (data_format == "NDHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 4, 1, 2, 3}, &tensor, node_def, "to_NCDHW"));
}
nvinfer1::IPoolingLayer* layer = params->converter->network()->addPoolingNd(
*tensor->trt_tensor(), type, ksize);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStrideNd(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
params->converter->SetLayerName(layer, node_def, "pooling");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (data_format == "NDHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 4, 1}, &output_tensor, node_def, "to_NDHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
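// Converts FusedConv2DBiasActivation. side_input and non-unit
// conv_input_scale are not yet supported; weights may be HWIO or OIHW.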
Status ConvertFusedConv2DBiasActivation(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false},
{"filter", true},
{"bias", true},
{"side_input", true},
{"conv_input_scale", true},
{"side_input_scale", true}}));
ITensorProxyPtr tensor = inputs.at(0).tensor();
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
TRT_ShapedWeights weights = inputs.at(1).weights();
if (weights.Shape().NumDims() != 4) {
return errors::InvalidArgument(
"FusedConv2DBiasActivation expects kernel of dimension 4");
}
string data_format, filter_format, activation_mode, padding_type;
std::vector<int64_t> tf_dilations, tf_stride;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "filter_format", &filter_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "activation_mode", &activation_mode));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dilations", &tf_dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
if (data_format != "NHWC" && data_format != "NCHW") {
return errors::InvalidArgument("Unsupported data_format:", data_format);
}
int c_index = (data_format == "NHWC") ? 3 : 1;
int h_index = (data_format == "NHWC") ? 1 : 2;
int w_index = (data_format == "NHWC") ? 2 : 3;
if (tf_dilations.size() != 4) {
return errors::InvalidArgument(
"Convolution dilations field must specify 4 dimensions");
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions");
}
const nvinfer1::DimsHW dilation(tf_dilations[h_index], tf_dilations[w_index]);
if (tf_stride.size() != 4) {
return errors::InvalidArgument(
"Convolution strides field must specify 4 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions");
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
auto op_pair = ActivationTypeMap()->find(activation_mode);
if (op_pair == ActivationTypeMap()->end() && activation_mode != "None") {
return errors::Unimplemented("Activation mode not supported: ",
activation_mode);
}
if (filter_format != "HWIO" && filter_format != "OIHW") {
return errors::InvalidArgument("Unsupported filter_format:", filter_format);
}
TRT_ShapedWeights side_input = inputs.at(3).weights();
if (side_input.count() != 0) {
return errors::InvalidArgument(
"FusedConv2DBiasActivation doesn't yet support side_input");
}
TRT_ShapedWeights conv_input_scale = inputs.at(4).weights();
if (conv_input_scale.count() != 1 ||
conv_input_scale.TrtDType() != nvinfer1::DataType::kFLOAT ||
conv_input_scale.GetSpan<float>()[0] != 1.0) {
return errors::InvalidArgument(
"FusedConv2DBiasActivation doesn't yet support conv_input_scale");
}
if (params->validation_only) return OkStatus();
const bool need_transpose = (data_format == "NHWC");
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 3, 1, 2}, &tensor, node_def, "to_NCHW"));
}
nvinfer1::DimsHW kernel_size;
if (filter_format == "OIHW") {
kernel_size.h() = weights.Shape().dim(2);
kernel_size.w() = weights.Shape().dim(3);
} else {
DCHECK_EQ(filter_format, "HWIO");
kernel_size.h() = weights.Shape().dim(0);
kernel_size.w() = weights.Shape().dim(1);
}
TRT_ShapedWeights biases = inputs.at(2).weights();
nvinfer1::IConvolutionLayer* conv_layer = nullptr;
if (filter_format == "OIHW") {
conv_layer = params->converter->network()->addConvolution(
*tensor->trt_tensor(), weights.Shape().dim(0), kernel_size,
weights.GetTrtWeights(), biases.GetTrtWeights());
} else {
TRT_ENSURE(filter_format == "HWIO");
StatusOr<TRT_ShapedWeights> weights_kcrs =
params->weight_store->GetTempWeights(weights);
TRT_ENSURE_OK(weights_kcrs);
ReorderRSCKToKCRS(weights, &*weights_kcrs, 1);
conv_layer = params->converter->network()->addConvolution(
*tensor->trt_tensor(), weights.Shape().dim(3), kernel_size,
weights_kcrs->GetTrtWeights(), biases.GetTrtWeights());
}
TFTRT_RETURN_ERROR_IF_NULLPTR(conv_layer, node_def.name());
conv_layer->setStride(stride);
if (padding_type == "SAME") {
conv_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
params->converter->SetLayerName(conv_layer, node_def, "conv");
conv_layer->setNbGroups(1);
conv_layer->setDilation(dilation);
ITensorProxyPtr output_tensor = conv_layer->getOutput(0);
if (op_pair != ActivationTypeMap()->end()) {
nvinfer1::IActivationLayer* activation_layer =
params->converter->network()->addActivation(
*output_tensor->trt_tensor(), op_pair->second);
TFTRT_RETURN_ERROR_IF_NULLPTR(activation_layer, node_def.name());
params->converter->SetLayerName(activation_layer, node_def, "activation");
output_tensor = activation_layer->getOutput(0);
}
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 1}, &output_tensor, node_def, "to_NHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
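// Converts 2D MaxPool/AvgPool to a TRT pooling layer. NHWC inputs are
// transposed to NCHW first and the result is transposed back afterwards.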
Status ConvertPool(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF,
DataType::DT_INT8};
TF_RETURN_IF_ERROR(AllowDataTypes(*params, allowed_types));
nvinfer1::PoolingType type;
if (node_def.op() == "MaxPool") {
type = nvinfer1::PoolingType::kMAX;
} else if (node_def.op() == "AvgPool") {
type = nvinfer1::PoolingType::kAVERAGE;
} else {
return errors::Unimplemented("Unsupported pooling type: ", node_def.op());
}
string data_format, padding_type;
std::vector<int64_t> tf_stride, tf_kernel;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &tf_kernel));
if ((padding_type != "SAME") && (padding_type != "VALID")) {
return errors::Unimplemented("Unsupported padding type: ", padding_type);
}
ITensorProxyPtr tensor = inputs.at(0).tensor();
int h_index = 2;
int w_index = 3;
if (data_format == "NHWC") {
h_index = 1;
w_index = 2;
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
const nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
if (!((ksize.h() >= 1 && ksize.w() >= 1) &&
(ksize.h() * ksize.w() < MAX_KERNEL_DIMS_PRODUCT(2)))) {
return errors::InvalidArgument("Window dimensions are not within bounds");
}
if (params->validation_only) return OkStatus();
if (data_format == "NHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 3, 1, 2}, &tensor, node_def, "to_NCHW"));
}
nvinfer1::IPoolingLayer* layer = params->converter->network()->addPooling(
*tensor->trt_tensor(), type, ksize);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
params->converter->SetLayerName(layer, node_def, "pooling");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (data_format == "NHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 1}, &output_tensor, node_def, "to_NHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
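// Converts ClipByValue to a TRT kCLIP activation; the min/max weight inputs
// become the activation's alpha/beta parameters.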
Status ConvertClipByValue(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"t", false}, {"clip_value_min", true}, {"clip_value_max", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
if (params->validation_only) return OkStatus();
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "T", &dtype));
float clip_value_min = 0.0f;
float clip_value_max = 0.0f;
if (dtype == DataType::DT_FLOAT) {
clip_value_min = inputs.at(1).weights().GetSpan<float>()[0];
clip_value_max = inputs.at(2).weights().GetSpan<float>()[0];
} else if (dtype == DataType::DT_HALF) {
clip_value_min =
static_cast<float>(inputs.at(1).weights().GetSpan<Eigen::half>()[0]);
clip_value_max =
static_cast<float>(inputs.at(2).weights().GetSpan<Eigen::half>()[0]);
}
nvinfer1::IActivationLayer* layer =
params->converter->network()->addActivation(
*inputs.at(0).tensor()->trt_tensor(),
nvinfer1::ActivationType::kCLIP);
  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
  layer->setAlpha(clip_value_min);
  layer->setBeta(clip_value_max);
params->converter->SetLayerName(layer, node_def, "activation");
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
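// Converts BiasAdd as an elementwise sum, reshaping the bias so that it
// broadcasts against the input (channel-wise for NCHW, generic broadcast
// otherwise).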
Status ConvertBiasAdd(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), 2, node_def);
if (inputs[0].is_weights() && inputs[1].is_weights()) {
return errors::InvalidArgument(
"All inputs are weights, but Grappler is expected to fold them.");
}
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
string data_format;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "data_format", &data_format));
nvinfer1::Dims input_shape = inputs.at(0).GetTrtDims();
nvinfer1::Dims bias_shape = inputs.at(1).GetTrtDims();
if (data_format == "NCHW") {
if (params->use_implicit_batch) {
bias_shape.nbDims = input_shape.nbDims;
std::fill(bias_shape.d + 1, bias_shape.d + bias_shape.nbDims, 1);
} else {
std::vector<int> bias_shape_vec(bias_shape.d,
bias_shape.d + bias_shape.nbDims);
bias_shape_vec.insert(bias_shape_vec.begin(), 1);
bias_shape_vec.insert(bias_shape_vec.end(),
input_shape.nbDims - bias_shape_vec.size(), 1);
DimsAdapter(bias_shape_vec).TrtDims(&bias_shape);
}
} else {
    TF_RETURN_IF_ERROR(GetTrtBroadcastShape(
        inputs.at(0), inputs.at(1), /*check_feasibility=*/true,
        params->use_implicit_batch, &input_shape, &bias_shape));
}
ITensorProxyPtr input_tensor{nullptr};
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, inputs.at(0), DimsAdapter(input_shape),
params->validation_only, &input_tensor, node_def,
      /*op_instance=*/0));
ITensorProxyPtr bias_tensor{nullptr};
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, inputs.at(1), DimsAdapter(bias_shape),
params->validation_only, &bias_tensor, node_def,
      /*op_instance=*/1));
VLOG(2) << "Bias shape adjusted to " << DebugString(bias_shape);
if (params->validation_only) return OkStatus();
nvinfer1::IElementWiseLayer* layer =
params->converter->network()->addElementWise(
*input_tensor->trt_tensor(), *bias_tensor->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "sum");
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
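// Returns true iff the integral value fits into int32 without overflow.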
template <typename Input>
inline bool IsIntegerInInt32Bounds(const Input& inp) {
static_assert(std::is_integral<Input>::value,
"This function is only implemented for integral types.");
if (sizeof(Input) < sizeof(int32) || std::is_same<Input, int32>::value) {
return true;
}
if (!std::numeric_limits<Input>::is_signed) {
return inp <= static_cast<Input>(std::numeric_limits<int32>::max());
}
return (inp >= static_cast<Input>(std::numeric_limits<int32>::lowest()) &&
inp <= static_cast<Input>(std::numeric_limits<int32>::max()));
}
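// Copies a TF integer tensor into an int32 destination array, rejecting any
// element that falls outside the int32 range.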
template <DataType dtype>
Status CopyToTrtInt32Array(const Tensor& tensor, int32* dst) {
typedef typename EnumToDataType<dtype>::Type CType;
const CType* src = tensor.flat<CType>().data();
for (int i = 0; i < tensor.NumElements(); ++i) {
if (!IsIntegerInInt32Bounds(src[i])) {
return errors::InvalidArgument("Value at index ", i,
" is outside the range of int32");
}
dst[i] = static_cast<int32>(src[i]);
}
return OkStatus();
}
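// Converts a TF tensor into TRT weights owned by `weight_store`. Integer
// types that TRT cannot represent natively are copied element-wise into
// int32 with range checking.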
Status TfTensorToTrtWeights(const Tensor& tensor, TrtWeightStore* weight_store,
TRT_ShapedWeights* weights) {
const DataType dtype = tensor.dtype();
DataType converted_dtype = DataTypeIsInteger(dtype) ? DT_INT32 : dtype;
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(TfTypeToTrtType(converted_dtype, &trt_dtype));
if (tensor.NumElements() == 0) {
*weights = TRT_ShapedWeights(trt_dtype);
return OkStatus();
}
StatusOr<DimsAdapter> weight_dims = DimsAdapter::Create(tensor.shape());
TRT_ENSURE_OK(weight_dims);
auto tmp = weight_store->GetTempWeights(trt_dtype, weight_dims->AsTrtDims());
TRT_ENSURE_OK(tmp);
*weights = std::move(tmp).value();
if (converted_dtype == dtype) {
std::copy_n(tensor.tensor_data().data(), tensor.TotalBytes(),
weights->GetPointer<int8>());
return OkStatus();
}
Status status = OkStatus();
int32* dst = weights->GetPointer<int32>();
switch (dtype) {
case DT_INT8:
status = CopyToTrtInt32Array<DT_INT8>(tensor, dst);
break;
case DT_UINT8:
status = CopyToTrtInt32Array<DT_UINT8>(tensor, dst);
break;
case DT_INT16:
status = CopyToTrtInt32Array<DT_INT16>(tensor, dst);
break;
case DT_UINT16:
status = CopyToTrtInt32Array<DT_UINT16>(tensor, dst);
break;
case DT_UINT32:
status = CopyToTrtInt32Array<DT_UINT32>(tensor, dst);
break;
case DT_INT64:
status = CopyToTrtInt32Array<DT_INT64>(tensor, dst);
break;
case DT_UINT64:
status = CopyToTrtInt32Array<DT_UINT64>(tensor, dst);
break;
default:
return errors::Internal("Unexpected DataType: ", DataTypeString(dtype));
}
return status;
}
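// Converts a Const node into TRT weights, verifying that the dtype attribute
// matches the tensor proto's type.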
Status ConvertConst(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (!inputs.empty()) {
return errors::InvalidArgument(
"Constant node is expected to have empty input list");
}
const auto& tensor_proto = node_def.attr().at("value").tensor();
Tensor tensor;
if (!tensor.FromProto(tensor_proto)) {
return errors::Internal("Cannot parse weight tensor proto: ",
node_def.name());
}
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "dtype", &dtype));
if (dtype != tensor.dtype()) {
return errors::InvalidArgument("DataType mismatch between attr (",
DataTypeString(dtype), ") and tensor (",
DataTypeString(tensor.dtype()), ")");
}
TRT_ShapedWeights weights;
TF_RETURN_IF_ERROR(
TfTensorToTrtWeights(tensor, params->weight_store, &weights));
if (params->outputs != nullptr) {
params->outputs->push_back(TRT_TensorOrWeights(weights));
}
return OkStatus();
}
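// Forwards all inputs unchanged; used for identity-like ops.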
Status ConvertIdentity(const OpConverterParams* params) {
if (params->validation_only) return OkStatus();
for (int i = 0; i < params->inputs.size(); i++) {
params->outputs->push_back(params->inputs.at(i));
}
return OkStatus();
}
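// Placeholder converter that passes validation but always fails at build
// time, forcing a native segment fallback for the op.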
Status ConvertFake(const OpConverterParams* params) {
if (params->validation_only) return OkStatus();
return errors::Unimplemented(
"This converter is not valid after graph "
"segmentation. Building an engine using this "
"converter will trigger a native segment "
"fallback.");
}
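// Converts Square as x^2 via an elementwise kPOW against a broadcastable
// constant 2.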
Status ConvertSquare(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
if (params->validation_only) return OkStatus();
ITensorProxyPtr const2_tensor = nullptr;
TF_RETURN_IF_ERROR(CreateBroadcastableScalarConstant(
params, 2.0f, inputs.at(0).GetTrtDims(), &const2_tensor));
nvinfer1::IElementWiseLayer* layer =
params->converter->network()->addElementWise(
*inputs.at(0).tensor()->trt_tensor(), *const2_tensor->trt_tensor(),
nvinfer1::ElementWiseOperation::kPOW);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
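// Converts Sum/Prod/Max/Min/Mean reductions to a TRT reduce layer, mapping
// the TF axes (int32 only) to a TRT axis bitmask.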
Status ConvertReduce(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"axis", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
ITensorProxyPtr tensor = inputs.at(0).tensor();
auto tf_axes_list = inputs.at(1).weights().GetSpan<int>();
DataType idx_dtype{DataType::DT_INT32};
bool keep_dims{false};
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "Tidx", &idx_dtype));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "keep_dims", &keep_dims));
if (idx_dtype != DataType::DT_INT32) {
return errors::Unimplemented("Tidx supports only DT_INT32");
}
int axes = 0;
if (tf_axes_list.size() == 0) {
return errors::InvalidArgument(
"TRT cannot support reduce on all (batch) dimensions");
}
for (int i = 0; i < tf_axes_list.size(); i++) {
int trt_axis;
TF_RETURN_IF_ERROR(
ConvertAxis(tf_axes_list[i], tensor->getDimensions().nbDims,
node_def.name(), params->use_implicit_batch, &trt_axis));
axes |= (1 << trt_axis);
}
nvinfer1::ReduceOperation reduce_operation;
if (node_def.op() == "Sum") {
reduce_operation = nvinfer1::ReduceOperation::kSUM;
} else if (node_def.op() == "Prod") {
reduce_operation = nvinfer1::ReduceOperation::kPROD;
} else if (node_def.op() == "Max") {
reduce_operation = nvinfer1::ReduceOperation::kMAX;
} else if (node_def.op() == "Min") {
reduce_operation = nvinfer1::ReduceOperation::kMIN;
} else if (node_def.op() == "Mean") {
reduce_operation = nvinfer1::ReduceOperation::kAVG;
} else {
return errors::Unimplemented("Op not supported ", node_def.op());
}
if (params->validation_only) return OkStatus();
nvinfer1::ILayer* layer = params->converter->network()->addReduce(
*tensor->trt_tensor(), reduce_operation, axes, keep_dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
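// Converts Pack: each input gains a unit dimension at the pack axis and the
// expanded tensors are concatenated along that axis.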
Status ConvertPack(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
int num_inputs{0};
int64_t tf_axis{0};
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "N", &num_inputs));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "axis", &tf_axis));
if (num_inputs != inputs.size()) {
return errors::InvalidArgument(
"Number of inputs for Pack is inconsistent with N attribute");
}
TrtInputArg expected_arg =
params->use_implicit_batch ? TrtInputArg::kTensor : TrtInputArg::kBoth;
std::vector<std::pair<string, TrtInputArg>> inputs_is_weight;
inputs_is_weight.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs_is_weight.push_back({StrCat("values_", i), expected_arg});
}
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, inputs_is_weight));
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF,
DataType::DT_INT32};
TF_RETURN_IF_ERROR(AllowDataTypes(*params, allowed_types));
if (num_inputs > 1) {
TF_RETURN_IF_ERROR(
        VerifyShapesMatch(inputs, /*masked_dim=*/-1, node_def.name()));
}
int idx = 0;
for (int i = 1; i < inputs.size(); i++) {
if (HasStaticShape(inputs.at(i).GetTrtDims())) {
idx = i;
}
}
DimsAdapter dims(inputs.at(idx).GetTrtDims());
int trt_axis{0};
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.NumDims() + 1, node_def.name(),
params->use_implicit_batch, &trt_axis));
std::vector<int64_t> tensor_dims(dims.begin(), dims.end());
tensor_dims.insert(tensor_dims.begin() + trt_axis, 1);
std::vector<ITensorProxyPtr> expanded_tensors;
int input_index = 0;
for (const TRT_TensorOrWeights& input : inputs) {
ITensorProxyPtr expanded_tensor = nullptr;
if (input.is_tensor() && !params->use_implicit_batch &&
!HasStaticShape(dims)) {
if (!params->validation_only) {
TF_RETURN_IF_ERROR(params->converter->DynamicExpandDims(
          /*input=*/input.tensor(),
          /*dims=*/dims.AsTrtDims(),
          /*axis=*/trt_axis,
          params,
          /*output=*/&expanded_tensor,
          /*op_instance=*/input_index));
}
} else {
TF_RETURN_IF_ERROR(PrepareTensorForShape(
          params->converter,
          /*input=*/input,
          /*dims=*/DimsAdapter(tensor_dims),
          /*validation_only=*/params->validation_only,
          /*tensor=*/&expanded_tensor,
          node_def,
          /*op_instance=*/input_index));
}
if (!params->validation_only) {
expanded_tensors.push_back(expanded_tensor);
}
input_index++;
}
if (params->validation_only) return OkStatus();
if (num_inputs == 1) {
params->outputs->push_back(TRT_TensorOrWeights(expanded_tensors[0]));
return OkStatus();
}
std::vector<nvinfer1::ITensor*> trt_expanded_tensors;
for (const auto& t : expanded_tensors) {
trt_expanded_tensors.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_expanded_tensors.data()),
expanded_tensors.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "concat");
layer->setAxis(trt_axis);
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
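// Converts Pad via a TRT padding layer. TRT pads only the two innermost
// dimensions, so padded axes are transposed into those positions if needed
// and transposed back afterwards.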
Status ConvertPad(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"tensor", false}, {"paddings", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT8}));
ITensorProxyPtr tensor = inputs.at(0).tensor();
const auto dims = tensor->getDimensions();
const int nb_dims =
params->use_implicit_batch ? dims.nbDims + 1 : dims.nbDims;
if (nb_dims < 4) {
return errors::InvalidArgument("Convertpad requires at least 4D input");
}
TRT_ShapedWeights pads = inputs.at(1).weights();
DataType padding_dtype{DataType::DT_INT32};
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "Tpaddings", &padding_dtype));
if (pads.Shape().dim(0) != nb_dims || pads.Shape().dim(1) != 2) {
return errors::InvalidArgument("Paddings must be a weight with shape ",
"[n, 2], where n is the rank of input ",
"tensor");
}
if (padding_dtype != DataType::DT_INT32) {
return errors::Unimplemented("Tpaddings supports only DT_INT32");
}
auto pad_data = pads.GetPointer<int>();
std::vector<int32_t> tf_pad_index;
for (int i = 0; i < nb_dims; i++) {
if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0) {
tf_pad_index.push_back(i);
}
}
if (tf_pad_index.empty()) {
params->outputs->push_back(inputs.at(0));
return OkStatus();
}
if (tf_pad_index.size() > 2) {
return errors::InvalidArgument(
"Padding layer does not support padding on > 2");
}
if (params->use_implicit_batch && tf_pad_index[0] == 0) {
return errors::InvalidArgument(
"Padding layer does not support padding on batch dimension");
}
if (params->validation_only) return OkStatus();
bool transposed_pad = false;
std::vector<int> transpose_idx(nb_dims);
std::iota(transpose_idx.begin(), transpose_idx.end(), 0);
std::vector<int> trt_pad_index{nb_dims - 2, nb_dims - 1};
nvinfer1::DimsHW pre_padding(0, 0);
nvinfer1::DimsHW post_padding(0, 0);
std::vector<int> trt_pre_post_padding_index{0, 1};
if (tf_pad_index.size() == 1 && tf_pad_index[0] == nb_dims - 1) {
trt_pad_index[0] = nb_dims - 1;
trt_pre_post_padding_index[0] = 1;
}
if (tf_pad_index.size() == 2 && tf_pad_index[1] == nb_dims - 2) {
std::swap(trt_pad_index[0], trt_pad_index[1]);
std::swap(trt_pre_post_padding_index[0], trt_pre_post_padding_index[1]);
}
for (int i = 0; i < tf_pad_index.size(); i++) {
const int tf_index = tf_pad_index[i];
const int trt_index = trt_pad_index[i];
const int k = trt_pre_post_padding_index[i];
pre_padding.d[k] = pad_data[tf_index * 2];
post_padding.d[k] = pad_data[tf_index * 2 + 1];
if (tf_index != trt_index) {
transposed_pad = true;
std::swap(transpose_idx[tf_index], transpose_idx[trt_index]);
}
}
if (transposed_pad) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, transpose_idx, &tensor, node_def, "to_pad"));
}
nvinfer1::IPaddingLayer* layer = params->converter->network()->addPadding(
*tensor->trt_tensor(), pre_padding, post_padding);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (transposed_pad) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, transpose_idx, &output_tensor, node_def, "from_pad"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
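// Shared implementation for Split and Unpack: slices `input` into
// `num_splits` equal pieces along `tf_axis`, optionally squeezing the split
// axis from each output (Unpack).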
Status ConvertSplitHelper(const OpConverterParams* params,
const TRT_TensorOrWeights& input, int tf_axis,
int num_splits, bool squeeze_after) {
const auto& node_def = params->node_def;
const nvinfer1::Dims dims = input.GetTrtDims();
int trt_axis;
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
if (dims.d[trt_axis] < 0) {
return errors::InvalidArgument("Dimension ", tf_axis,
" must have statically defined dimensions");
}
if (squeeze_after && dims.d[trt_axis] != num_splits) {
return errors::InvalidArgument(
"Dimension ", tf_axis, " has size ", dims.d[trt_axis],
" which is not equal to num of ", num_splits);
}
if (dims.d[trt_axis] % num_splits != 0) {
return errors::InvalidArgument("Dimension ", tf_axis, " of size ",
dims.d[trt_axis],
" is not evenly divisible by ", num_splits);
}
std::vector<int> begin(dims.nbDims, 0);
std::vector<int64> input_dims(dims.d, dims.d + dims.nbDims);
std::vector<int> size(dims.d, dims.d + dims.nbDims);
const int split_size_on_axis = dims.d[trt_axis] / num_splits;
size[trt_axis] = split_size_on_axis;
std::vector<int> stride(dims.nbDims, 1);
if (params->use_implicit_batch) {
begin.insert(begin.begin(), 0);
size.insert(size.begin(), 1);
stride.insert(stride.begin(), 1);
input_dims.insert(input_dims.begin(), std::max(-1, input.batch_size()));
}
PartialTensorShape input_shape(input_dims);
std::optional<nvinfer1::Dims> final_shape_for_unpack = std::nullopt;
const bool is_dynamic_shape = !HasStaticShape(dims);
if (squeeze_after && !is_dynamic_shape) {
std::vector<int> size_after_squeeze(size);
const int tf_axis = trt_axis + (params->use_implicit_batch ? 1 : 0);
size_after_squeeze.erase(size_after_squeeze.begin() + tf_axis);
DimsAdapter adap(size_after_squeeze);
if (params->use_implicit_batch)
TF_RETURN_IF_ERROR(adap.RemoveBatchDimension());
final_shape_for_unpack = adap.AsTrtDims();
}
for (int i = 0; i < num_splits; ++i) {
const int tf_axis = trt_axis + (params->use_implicit_batch ? 1 : 0);
begin[tf_axis] = i * split_size_on_axis;
absl::InlinedVector<int64, 4> stride_v(begin.size(), 1);
absl::InlinedVector<int64, 4> begin_v;
absl::InlinedVector<int64, 4> end_v;
    for (int j = 0; j < begin.size(); j++) {
      end_v.push_back(begin[j] + size[j]);
      begin_v.push_back(begin[j]);
    }
TF_RETURN_IF_ERROR(ConvertStridedSliceHelper(
params, input, input_shape, begin_v, stride_v, end_v,
        /*final_shape=*/final_shape_for_unpack,
        /*op_instance=*/i, /*strided_slice_spec=*/std::nullopt));
}
if (params->validation_only) return OkStatus();
if (squeeze_after && is_dynamic_shape) {
for (int i = 0; i < params->outputs->size(); i++) {
ITensorProxyPtr output_tensor = nullptr;
std::vector<int> in_dims(dims.d, dims.d + dims.nbDims);
      in_dims[trt_axis] = 0;
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
          /*input=*/params->outputs->at(i).tensor(),
          /*input_dims=*/&in_dims,
          /*params=*/params,
          /*output=*/&output_tensor,
          /*op_instance=*/i));
(*params->outputs)[i] = TRT_TensorOrWeights(output_tensor);
}
}
return OkStatus();
}
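// Converts Split: the axis comes from the first (weight) input and the split
// count from the num_split attribute.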
Status ConvertSplit(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"axis", true}, {"value", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
int tf_axis = inputs.at(0).weights().GetSpan<int>()[0];
int num_split;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "num_split", &num_split));
return ConvertSplitHelper(params, inputs.at(1), tf_axis, num_split, false);
}
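// Converts Unpack as a split along `axis` into `num` pieces with the split
// axis squeezed from each output.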
Status ConvertUnpack(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"value", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
if (inputs.at(0).GetTrtDims().nbDims == 0) {
return errors::Unimplemented(
"Input \"value\" for Unpack must be rank 2 or greater");
}
int tf_axis = 0, num = 0;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "axis", &tf_axis));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "num", &num));
return ConvertSplitHelper(params, inputs.at(0), tf_axis, num, true);
}
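// Converts Cast, accepting only float32<->float16; the node is then
// forwarded as an identity.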
Status ConvertCast(const OpConverterParams* params) {
auto unsupport_cast_error = [&](string msg) {
return errors::Unimplemented("Cast op is not supported - ", msg);
};
if (isExperimentalFeatureActivated("reject_all_fp_cast_ops")) {
    LOG(WARNING) << "`TF_TRT_EXPERIMENTAL_FEATURES=reject_all_fp_cast_ops` is "
<< "meant as a workaround. If the Cast converter leads to any "
<< "performance or accuracy regression, please open an issue "
<< "on GitHub.";
return unsupport_cast_error(
"TF_TRT_EXPERIMENTAL_FEATURES=reject_all_fp_cast_ops has been defined");
}
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF};
DataType input_type;
TF_RETURN_IF_ERROR(GetInputTfType(*params, &input_type, 0));
if (allowed_types.find(input_type) == allowed_types.end()) {
return unsupport_cast_error(
StrCat("Allowed input dtypes: [", DataTypeString(DataType::DT_FLOAT),
", ", DataTypeString(DataType::DT_HALF),
"]. Received: ", DataTypeString(input_type)));
}
DataType output_type;
TF_RETURN_IF_ERROR(GetNodeDefTfType(params->node_def, &output_type,
kCastOutputTypeAttrName));
if (allowed_types.find(output_type) == allowed_types.end()) {
return unsupport_cast_error(
StrCat("Allowed output dtypes: [", DataTypeString(DataType::DT_FLOAT),
", ", DataTypeString(DataType::DT_HALF),
"]. Received: ", DataTypeString(output_type)));
}
return ConvertIdentity(params);
}
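// Converts ConcatV2: weight inputs are materialized as constant layers so
// all operands can feed a single TRT concatenation layer.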
Status ConvertConcat(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
int num_inputs{0};
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "N", &num_inputs));
if (num_inputs != static_cast<int>(inputs.size()) - 1) {
return errors::InvalidArgument(
"Number of inputs for ConcatV2 is inconsistent with N attributes.");
}
std::vector<std::pair<string, TrtInputArg>> inputs_kinds;
TrtInputArg expected_input =
params->use_implicit_batch ? TrtInputArg::kTensor : TrtInputArg::kBoth;
inputs_kinds.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs_kinds.push_back({StrCat("values_", i), expected_input});
}
inputs_kinds.push_back({"axis", TrtInputArg::kWeight});
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, inputs_kinds));
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF,
DataType::DT_INT32};
TF_RETURN_IF_ERROR(AllowDataTypes(*params, allowed_types));
const auto axis = inputs.at(num_inputs).weights().GetSpan<int>();
if (axis.size() != 1) {
return errors::InvalidArgument("Axis for ConcatV2 must be a scalar");
}
int trt_axis = 0;
const auto dim = inputs.at(0).GetTrtDims();
TF_RETURN_IF_ERROR(ConvertAxis(axis[0], dim.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
TF_RETURN_IF_ERROR(VerifyShapesMatch(
absl::Span<const TRT_TensorOrWeights>(inputs).first(num_inputs), trt_axis,
node_def.name()));
if (params->validation_only) return OkStatus();
std::vector<ITensorProxyPtr> input_tensors;
input_tensors.reserve(num_inputs);
for (int i = 0; i < num_inputs; i++) {
if (inputs.at(i).is_tensor()) {
input_tensors.push_back(inputs.at(i).tensor());
} else {
input_tensors.push_back(params->converter->CreateConstantLayer(
inputs.at(i).weights(), inputs.at(i).GetTrtDims()));
}
}
std::vector<nvinfer1::ITensor*> trt_input_tensors;
for (const auto& t : input_tensors) {
trt_input_tensors.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_input_tensors.data()),
input_tensors.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
layer->setAxis(trt_axis);
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
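// Converts FusedBatchNorm (inference only): scale, offset, mean and variance
// are folded into a combined scale/offset, applied via a scale layer for
// NCHW or an explicit multiply/add for NHWC.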
Status ConvertFusedBatchNorm(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false},
{"scale", true},
{"offset", true},
{"mean", true},
{"variance", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
float epsilon{0.1f};
string data_format;
bool is_training{false};
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "epsilon", &epsilon));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "is_training", &is_training));
if (is_training) {
LOG_WARNING_WITH_PREFIX
<< node_def.op() << " only supports is_training=false. If you "
<< "are using Keras, please call "
<< "keras.backend.set_learning_phase(0) before constructing "
<< "your model. At " << node_def.name();
return errors::Unimplemented(node_def.op(),
" only supports is_training=false");
}
ITensorProxyPtr tensor = inputs.at(0).tensor();
if (!params->use_implicit_batch) {
int channel_dim = (data_format == "NCHW" ? 1 : 3);
if (tensor->getDimensions().d[channel_dim] == -1) {
return errors::InvalidArgument("Channel dimension must be static");
}
}
auto parameter_type = inputs.at(1).weights().TrtDType();
if ((parameter_type != nvinfer1::DataType::kFLOAT) &&
(parameter_type != nvinfer1::DataType::kHALF)) {
return errors::Unimplemented(
"Only float32 or float16 weight data type is supported,", " got ",
DebugString(parameter_type));
}
for (int i = 1; i < 5; i++) {
if (inputs.at(i).weights().TrtDType() != parameter_type) {
return errors::Unimplemented(
"Inconsistent parameter type for batchnorm is not supported");
}
}
TRT_ShapedWeights dummy_power_weights(parameter_type);
size_t nweight = 0;
for (int i = 1; i < 5; i++) {
nweight = std::max<size_t>(nweight, inputs.at(i).weights().count());
}
const TRT_ShapedWeights* ptr_shape_weights = nullptr;
for (int i = 1; i < 5; i++) {
if (inputs.at(i).weights().count() == nweight) {
ptr_shape_weights = &(inputs.at(i).weights());
} else if (inputs.at(i).weights().count() != 1) {
return errors::InvalidArgument("Inconsistent batchnorm parameter count");
}
}
if (params->validation_only) return OkStatus();
StatusOr<TRT_ShapedWeights> combined_scale_weights =
params->weight_store->GetTempWeights(*ptr_shape_weights);
TRT_ENSURE_OK(combined_scale_weights);
StatusOr<TRT_ShapedWeights> combined_offset_weights =
params->weight_store->GetTempWeights(*ptr_shape_weights);
TRT_ENSURE_OK(combined_offset_weights);
const Eigen::half* cast_vals_array[4];
const float* vals_array[4];
for (int j = 0; j < 4; j++) {
cast_vals_array[j] = inputs.at(j + 1).weights().GetPointer<Eigen::half>();
vals_array[j] = inputs.at(j + 1).weights().GetPointer<float>();
}
Eigen::half* cast_combined_scale_vals =
combined_scale_weights->GetPointer<Eigen::half>();
Eigen::half* cast_combined_offset_vals =
combined_offset_weights->GetPointer<Eigen::half>();
float* combined_scale_vals = combined_scale_weights->GetPointer<float>();
float* combined_offset_vals = combined_offset_weights->GetPointer<float>();
for (size_t i = 0; i < nweight; ++i) {
float batchnorm_data[4];
for (int j = 0; j < 4; j++) {
if (inputs.at(j + 1).weights().count() != 1) {
if (parameter_type == nvinfer1::DataType::kFLOAT) {
batchnorm_data[j] = vals_array[j][i];
} else if (parameter_type == nvinfer1::DataType::kHALF) {
batchnorm_data[j] = static_cast<float>(cast_vals_array[j][i]);
}
} else {
if (parameter_type == nvinfer1::DataType::kFLOAT) {
batchnorm_data[j] = vals_array[j][0];
} else if (parameter_type == nvinfer1::DataType::kHALF) {
batchnorm_data[j] = static_cast<float>(cast_vals_array[j][0]);
}
}
}
float scale = batchnorm_data[0];
float offset = batchnorm_data[1];
float mean = batchnorm_data[2];
float variance = batchnorm_data[3];
float combined_scale_val = scale / sqrtf(variance + epsilon);
float combined_offset_val = offset - mean * combined_scale_val;
if (parameter_type == nvinfer1::DataType::kFLOAT) {
combined_scale_vals[i] = combined_scale_val;
combined_offset_vals[i] = combined_offset_val;
} else if (parameter_type == nvinfer1::DataType::kHALF) {
cast_combined_scale_vals[i] = Eigen::half(combined_scale_val);
cast_combined_offset_vals[i] = Eigen::half(combined_offset_val);
}
}
ITensorProxyPtr output_tensor;
if (data_format == "NCHW") {
nvinfer1::ScaleMode mode = nvinfer1::ScaleMode::kCHANNEL;
nvinfer1::IScaleLayer* layer = params->converter->network()->addScale(
*tensor->trt_tensor(), mode, combined_offset_weights->GetTrtWeights(),
combined_scale_weights->GetTrtWeights(),
nvinfer1::Weights{nvinfer1::DataType::kFLOAT, nullptr, 0});
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
output_tensor = layer->getOutput(0);
}
if (data_format == "NHWC") {
nvinfer1::Dims dims = tensor->getDimensions();
for (int i = 0; i < dims.nbDims - 1; i++) {
dims.d[i] = 1;
}
dims.d[dims.nbDims - 1] = nweight;
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
TRT_ENSURE_OK(builder);
auto scale_constant_layer = builder->WeightsToConstant(
combined_scale_weights->GetTrtWeights(), dims);
ITensorProxyPtr scale_constant = (*scale_constant_layer)->getOutput(0);
auto scale_layer =
builder->Mul(tensor->trt_tensor(), scale_constant->trt_tensor());
auto offset_constant_layer = builder->WeightsToConstant(
combined_offset_weights->GetTrtWeights(), dims);
ITensorProxyPtr offset_constant = (*offset_constant_layer)->getOutput(0);
auto offset_layer = builder->Add((*scale_layer)->getOutput(0),
offset_constant->trt_tensor());
output_tensor = (*offset_layer)->getOutput(0);
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
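// Converts GatherV2 to a TRT gather layer, with extra reshapes in implicit
// batch mode to compensate for the batch dimensions TRT does not see.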
Status ConvertGather(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"params", TrtInputArg::kBoth},
{"indices", TrtInputArg::kBoth},
{"axis", TrtInputArg::kWeight}}));
const auto& params_input = inputs.at(0);
const auto& indices_input = inputs.at(1);
const auto& axis_input = inputs.at(2);
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32},
"Tparams"));
TF_RETURN_IF_ERROR(AllowDataTypes(*params, {DataType::DT_INT32},
"Tindices"));
absl::Span<const int> axis = axis_input.weights().GetSpan<int>();
if (axis.size() != 1) {
return errors::InvalidArgument("Axis for GatherV2 must be a scalar");
}
int trt_axis = 0;
TF_RETURN_IF_ERROR(ConvertAxis(
axis[0], params_input.GetTrtDims().nbDims, node_def.name(),
params->use_implicit_batch && params_input.is_tensor(), &trt_axis));
if (params->use_implicit_batch && params_input.is_weights() &&
trt_axis != 0) {
return errors::Unimplemented(
"The input axis must be zero when params is a weight.");
}
if (params->use_implicit_batch &&
(params_input.is_tensor() == indices_input.is_tensor()) &&
(indices_input.batch_size() != 1 || params_input.batch_size() != 1)) {
return errors::Unimplemented(
"Params and indices must have a batch size of 1 when params and indices"
" are both tensors or both constants.");
}
auto get_rank = [params](const auto& input) {
return input.GetTrtDims().nbDims +
(params->use_implicit_batch && input.is_tensor() ? 1 : 0);
};
const int params_tf_rank = get_rank(params_input);
const int indices_tf_rank = get_rank(indices_input);
const int tf_gather_output_rank = params_tf_rank + indices_tf_rank - 1;
if (tf_gather_output_rank >
nvinfer1::Dims::MAX_DIMS + (params->use_implicit_batch ? 1 : 0)) {
return errors::InvalidArgument(
"Result of gather has dimension greater than ",
nvinfer1::Dims::MAX_DIMS + 1);
}
int32 batch_dims;
TF_RETURN_IF_ERROR(GetNodeAttr(node_def, "batch_dims", &batch_dims));
if (params->use_implicit_batch && batch_dims) {
return errors::InvalidArgument(
"batch_dims must be zero in implicit batch mode");
}
if (!params->use_implicit_batch && batch_dims > 1) {
return errors::InvalidArgument(
"batch_dims cannot exceed 1 in dynamic shape mode");
}
if (params->validation_only) return OkStatus();
auto populate_tensor = [params](const auto& input) -> ITensorProxyPtr {
ITensorProxyPtr result_tensor = nullptr;
if (input.is_weights()) {
result_tensor = params->converter->CreateConstantLayer(
input.weights(), input.GetTrtDims());
} else {
result_tensor = input.tensor();
}
return result_tensor;
};
ITensorProxyPtr params_tensor = populate_tensor(params_input);
ITensorProxyPtr indices_tensor = populate_tensor(indices_input);
nvinfer1::IGatherLayer* layer = params->converter->network()->addGather(
*params_tensor->trt_tensor(), *indices_tensor->trt_tensor(), trt_axis);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
layer->setNbElementWiseDims(batch_dims);
ITensorProxyPtr output_tensor = layer->getOutput(0);
nvinfer1::Dims trt_gather_output_dims = output_tensor->getDimensions();
if (params->use_implicit_batch) {
const int expected_trt_output_rank = tf_gather_output_rank -
(params_input.is_tensor() ? 1 : 0) -
(indices_input.is_tensor() ? 1 : 0);
if (trt_gather_output_dims.nbDims != expected_trt_output_rank) {
return errors::Internal(
"Get unexpected output dimensions of IGatherLayer. Expect nbDims: ",
expected_trt_output_rank,
", actual nbDims: ", trt_gather_output_dims.nbDims);
}
}
if (params->use_implicit_batch && params_input.is_tensor() &&
indices_input.is_tensor()) {
for (int i = trt_gather_output_dims.nbDims; i > trt_axis; --i) {
trt_gather_output_dims.d[i] = trt_gather_output_dims.d[i - 1];
}
trt_gather_output_dims.d[trt_axis] = 1;
++trt_gather_output_dims.nbDims;
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(output_tensor),
trt_gather_output_dims,
        /*validation_only=*/false, &output_tensor, node_def));
}
if (params->use_implicit_batch && params_input.is_weights() &&
indices_input.is_weights()) {
for (int i = trt_axis; i < trt_gather_output_dims.nbDims - 1; ++i) {
trt_gather_output_dims.d[i] = trt_gather_output_dims.d[i + 1];
}
--trt_gather_output_dims.nbDims;
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(output_tensor),
trt_gather_output_dims,
        /*validation_only=*/false, &output_tensor, node_def));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
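// Tries to lower a MatMul-like op to a TRT fully connected layer. Returns a
// null tensor (not an error) when the inputs do not match the FC pattern so
// the caller can fall back to a matrix multiply layer.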
StatusOr<ITensorProxyPtr> ConvertFullyConnectedImpl(
const OpConverterParams* params, TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b, bool transpose_a, bool transpose_b) {
if (!(!transpose_a && input_a.is_tensor() && input_b.is_weights())) {
VLOG(2) << "Not FC compatible, A must be non transposed tensor, and B "
"must be constant.";
return ITensorProxyPtr(nullptr);
}
if (!params->use_implicit_batch && input_b.GetTrtDims().nbDims > 2 &&
input_b.GetTrtDims().d[0] != 1) {
VLOG(2) << "Not FC compatible, if B has an explicit batch dimension, then "
"it must be 1.";
return ITensorProxyPtr(nullptr);
}
nvinfer1::Dims input_dim = input_a.GetTrtDims();
if (input_dim.d[input_dim.nbDims - 1] == -1) {
VLOG(2) << "Not FC compatible, last dim of A must be static.";
return ITensorProxyPtr(nullptr);
}
if (input_dim.nbDims + 2 > nvinfer1::Dims::MAX_DIMS) {
VLOG(2) << "Not FC compatible, cannot expand A's shape.";
return ITensorProxyPtr(nullptr);
}
ITensorProxyPtr tensor_a = nullptr;
auto reshape_dim = DimsAdapter(input_dim.nbDims,
DimsAdapter::StorageType(input_dim.nbDims, 0))
.Append(1)
.Append(1);
const NodeDef& node_def = params->node_def;
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, input_a, reshape_dim,
      /*validation_only=*/false, &tensor_a, node_def, /*op_instance=*/0,
      /*origin_node_name=*/"FULLY_CONNECTED"));
VLOG(2) << "New shape of A " << DebugString(tensor_a->getDimensions());
TRT_ShapedWeights weights_b = input_b.weights();
TRT_ShapedWeights weights_2D(weights_b);
if (weights_b.Shape().NumDims() > 2) {
if (std::any_of(weights_b.Shape().begin(),
weights_b.Shape().begin() + weights_b.Shape().NumDims() - 2,
[](int d) { return d != 1; })) {
VLOG(2) << "Not FC compatible, B has a batch dim larger than 1";
return ITensorProxyPtr(nullptr);
}
int k = weights_b.Shape().dim(weights_b.Shape().NumDims() - 1);
nvinfer1::Dims dims{2, {static_cast<int>(weights_b.count() / k), k}};
TF_RETURN_IF_ERROR(weights_2D.SetShape(dims));
}
TRT_ShapedWeights weights(weights_2D.TrtDType());
if (!transpose_b) {
auto tmp = params->weight_store->GetTempWeights(weights_2D);
TRT_ENSURE_OK(tmp);
weights = std::move(tmp).value();
ReorderCKtoKC(weights_2D, &weights);
} else {
weights = weights_2D;
}
TRT_ShapedWeights biases(weights.TrtDType());
int k = weights.Shape().dim(weights.Shape().NumDims() - 1);
const int noutput = weights.count() / k;
VLOG(2) << "Using fully connected layer with k=" << k
<< ", n_output=" << noutput
<< " weights shape: " << weights.Shape().DebugString()
<< " to convert " << node_def.op();
nvinfer1::IFullyConnectedLayer* layer =
params->converter->network()->addFullyConnected(
*tensor_a->trt_tensor(), noutput, weights.GetTrtWeights(),
biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_tensor = layer->getOutput(0);
auto output_dim = output_tensor->getDimensions();
output_dim.nbDims -= 2;
std::fill(output_dim.d, output_dim.d + output_dim.nbDims, 0);
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(output_tensor), output_dim,
      /*validation_only=*/false, &output_tensor, node_def,
      /*op_instance=*/1, /*origin_node_name=*/"FULLY_CONNECTED"));
return output_tensor;
}
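// Lowers a (possibly transposed) matrix multiply: the fully connected path
// is attempted first, otherwise a TRT matrix multiply layer is emitted.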
StatusOr<ITensorProxyPtr> ConvertMatMulImpl(const OpConverterParams* params,
TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b,
bool transpose_a,
bool transpose_b) {
if (params->use_implicit_batch) {
if ((input_a.GetTrtDims().nbDims < 2 &&
(transpose_a || !input_b.is_weights())) ||
(input_b.GetTrtDims().nbDims < 2)) {
return errors::InvalidArgument(
"MatMul with 2D tensors requires explicit batch mode, or that tensor"
" A is not transposed and B is a constant tensor.");
}
}
if (params->validation_only) return ITensorProxyPtr(nullptr);
StatusOr<ITensorProxyPtr> result = ConvertFullyConnectedImpl(
params, input_a, input_b, transpose_a, transpose_b);
TF_RETURN_IF_ERROR(result.status());
ITensorProxyPtr output = result.value();
if (*output) {
return output;
}
const auto convert_to_itensor =
      [&params](TRT_TensorOrWeights operand) -> ITensorProxyPtr {
if (operand.is_tensor()) {
return operand.tensor();
} else {
return params->converter->CreateConstantLayer(operand.weights(),
operand.GetTrtDims());
}
};
ITensorProxyPtr tensor_a = convert_to_itensor(input_a);
ITensorProxyPtr tensor_b = convert_to_itensor(input_b);
const auto get_matrix_op = [](ITensorProxyPtr in,
bool transpose) -> nvinfer1::MatrixOperation {
return (transpose) ? nvinfer1::MatrixOperation::kTRANSPOSE
: nvinfer1::MatrixOperation::kNONE;
};
nvinfer1::MatrixOperation op_a, op_b;
op_a = (tensor_a->getDimensions().nbDims < 2)
? nvinfer1::MatrixOperation::kVECTOR
: get_matrix_op(tensor_a, transpose_a);
op_b = get_matrix_op(tensor_b, transpose_b);
nvinfer1::IMatrixMultiplyLayer* layer =
params->converter->network()->addMatrixMultiply(
*tensor_a->trt_tensor(), op_a, *tensor_b->trt_tensor(), op_b);
const auto& node_def = params->node_def;
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
return ITensorProxyPtr(layer->getOutput(0));
}
Status ConvertMatMulHelper(const OpConverterParams* params,
TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b, bool transpose_a,
bool transpose_b) {
StatusOr<ITensorProxyPtr> result =
ConvertMatMulImpl(params, input_a, input_b, transpose_a, transpose_b);
TF_RETURN_IF_ERROR(result.status());
if (!params->validation_only) {
params->outputs->push_back(TRT_TensorOrWeights(result.value()));
}
return OkStatus();
}
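// Converts MatMul using the transpose_a/transpose_b attributes.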
Status ConvertMatMul(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), 2, node_def);
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
bool transpose_a = false, transpose_b = false;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "transpose_b", &transpose_b));
return ConvertMatMulHelper(params, inputs.at(0), inputs.at(1), transpose_a,
transpose_b);
}
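// Converts BatchMatMul(V2): adj_x/adj_y map to transpose flags and the
// operands are broadcast to a common rank before multiplication.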
Status ConvertBatchMatMul(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), 2, node_def);
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params, {{"x", TrtInputArg::kBoth}, {"y", TrtInputArg::kBoth}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
if (inputs.at(0).is_weights() && inputs.at(1).is_weights()) {
return errors::InvalidArgument(
"All inputs are weights, but Grappler is expected to fold them.");
}
bool transpose_a = false, transpose_b = false;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "adj_x", &transpose_a));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "adj_y", &transpose_b));
const auto check_weight_is_not_batched =
[](const TRT_TensorOrWeights& input_l,
const TRT_TensorOrWeights& input_r) {
if (input_l.is_weights() &&
input_l.GetTrtDims().nbDims > input_r.GetTrtDims().nbDims &&
input_l.GetTrtDims().d[0] != 1) {
return errors::Unimplemented(
"TensorRT does not support batched constants in implicit batch "
"mode.");
}
return OkStatus();
};
if (params->use_implicit_batch) {
TF_RETURN_IF_ERROR(check_weight_is_not_batched(inputs.at(0), inputs.at(1)));
TF_RETURN_IF_ERROR(check_weight_is_not_batched(inputs.at(1), inputs.at(0)));
}
auto input_l = std::make_unique<TRT_TensorOrWeights>(inputs.at(0));
auto input_r = std::make_unique<TRT_TensorOrWeights>(inputs.at(1));
TF_RETURN_IF_ERROR(BroadcastTensors(input_l, input_r,
                                      /*check_feasibility=*/false, params));
if (params->validation_only) return OkStatus();
return ConvertMatMulHelper(params, *input_l, *input_r, transpose_a,
transpose_b);
}
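// Converts ArgMin/ArgMax via a TRT TopK layer with k=1, then squeezes the
// reduced axis out of the index output.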
Status ConvertArgMinMax(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"dimension", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
DataType output_dtype{DataType::DT_INT32};
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "output_type", &output_dtype));
if (output_dtype != DataType::DT_INT32) {
return errors::Unimplemented("Output type ", DataTypeString(output_dtype),
" is not supported");
}
int tf_axis = inputs.at(1).weights().GetSpan<int>()[0];
int trt_axis;
nvinfer1::Dims dims = inputs.at(0).GetTrtDims();
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
nvinfer1::TopKOperation topk_op;
if (node_def.op() == "ArgMin") {
topk_op = nvinfer1::TopKOperation::kMIN;
} else if (node_def.op() == "ArgMax") {
topk_op = nvinfer1::TopKOperation::kMAX;
} else {
return errors::InvalidArgument("Unsupported ArgMin/Max operation");
}
#if !IS_TRT_VERSION_GE(7, 0, 0, 11)
const nvinfer1::Dims trt_dims = params->inputs.at(0).GetTrtDims();
if (trt_dims.nbDims >= 4) {
string trt_dim_str = DebugString(trt_dims);
return errors::Unimplemented(node_def.op(), "op is not able to support",
" tensors with 4+ dimensions (excluding batch",
" size). Received: ", trt_dim_str);
}
#endif
if (params->validation_only) return OkStatus();
const uint32_t reduce_axes = 1 << trt_axis;
nvinfer1::ITopKLayer* layer = params->converter->network()->addTopK(
*inputs.at(0).tensor()->trt_tensor(), topk_op, 1, reduce_axes);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "topk");
ITensorProxyPtr output_indices_tensor = layer->getOutput(1);
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
input_dims[trt_axis] = 0;
ITensorProxyPtr output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
      /*input=*/output_indices_tensor,
      /*input_dims=*/&input_dims,
      /*params=*/params,
      /*output=*/&output_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
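// Converts TopKV2 (sorted=true only) to a TRT TopK layer over the innermost
// dimension, producing both values and indices.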
Status ConvertTopK(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"k", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
bool sorted{false};
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "sorted", &sorted));
if (!sorted) {
return errors::InvalidArgument("Only sorted=True is supported");
}
ITensorProxyPtr tensor = inputs.at(0).tensor();
const int num_dims = tensor->getDimensions().nbDims;
if (num_dims == 0) {
return errors::InvalidArgument(
"TensorRT TopK cannot apply on batch dimension");
}
TRT_ShapedWeights k_w = inputs.at(1).weights();
if (k_w.count() != 1) {
return errors::InvalidArgument("k value of TopK should be a scalar");
}
if (params->validation_only) return OkStatus();
const nvinfer1::TopKOperation op = nvinfer1::TopKOperation::kMAX;
const int k = *(k_w.GetPointer<int>());
const uint32_t reduce_axes = 1 << (num_dims - 1);
nvinfer1::ITopKLayer* layer = params->converter->network()->addTopK(
*tensor->trt_tensor(), op, k, reduce_axes);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_value_tensor = layer->getOutput(0);
ITensorProxyPtr output_indices_tensor = layer->getOutput(1);
params->outputs->push_back(TRT_TensorOrWeights(output_value_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_indices_tensor));
return OkStatus();
}
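// Builds the two reshape-shape tensors needed by DepthToSpace/SpaceToDepth
// when the input shape is only known at runtime, by slicing the dynamic
// shape and combining it with the block size.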
StatusOr<std::pair<ITensorProxyPtr, ITensorProxyPtr>>
CalcDepthSpaceDynamicShape(const OpConverterParams* params, int block_size,
string data_format) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
const int channels_axis = data_format == "NCHW" ? 1 : 3;
const int h_axis = data_format == "NCHW" ? 2 : 1;
const int w_axis = data_format == "NCHW" ? 3 : 2;
ITensorProxyPtr shape = params->converter->network()
->addShape(*inputs.at(0).tensor()->trt_tensor())
->getOutput(0);
ITensorProxyPtr batch_size =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {0}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr num_channels =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {channels_axis}}, {1, {1}},
{1, {1}})
->getOutput(0);
ITensorProxyPtr h =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {h_axis}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr w =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {w_axis}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr r;
TF_RETURN_IF_ERROR(CreateScalarConstant(params, block_size, &r));
ITensorProxyPtr r_squared;
TF_RETURN_IF_ERROR(
CreateScalarConstant(params, block_size * block_size, &r_squared));
std::vector<ITensorProxyPtr> first_shuffle_tensors(6, nullptr);
std::vector<ITensorProxyPtr> second_shuffle_tensors(4, nullptr);
if (node_def.op() == "DepthToSpace") {
first_shuffle_tensors[0] = batch_size;
first_shuffle_tensors[1] = r;
first_shuffle_tensors[2] = r;
first_shuffle_tensors[3] =
params->converter->network()
->addElementWise(*num_channels->trt_tensor(),
*r_squared->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
first_shuffle_tensors[4] = h;
first_shuffle_tensors[5] = w;
second_shuffle_tensors[0] = batch_size;
second_shuffle_tensors[1] =
params->converter->network()
->addElementWise(*num_channels->trt_tensor(),
*r_squared->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
second_shuffle_tensors[2] =
params->converter->network()
->addElementWise(*h->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
second_shuffle_tensors[3] =
params->converter->network()
->addElementWise(*w->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
} else if (node_def.op() == "SpaceToDepth") {
first_shuffle_tensors[0] = batch_size;
first_shuffle_tensors[1] = num_channels;
first_shuffle_tensors[2] =
params->converter->network()
->addElementWise(*h->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
first_shuffle_tensors[3] = r;
first_shuffle_tensors[4] =
params->converter->network()
->addElementWise(*w->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
first_shuffle_tensors[5] = r;
second_shuffle_tensors[0] = batch_size;
second_shuffle_tensors[1] =
params->converter->network()
->addElementWise(*num_channels->trt_tensor(),
*r_squared->trt_tensor(),
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
second_shuffle_tensors[2] =
params->converter->network()
->addElementWise(*h->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
second_shuffle_tensors[3] =
params->converter->network()
->addElementWise(*w->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
}
StatusOr<ITensorProxyPtr> result =
ConcatenateTensors(params, first_shuffle_tensors, 0);
TF_RETURN_IF_ERROR(result.status());
ITensorProxyPtr first_shuffle_shape = result.value();
result = ConcatenateTensors(params, second_shuffle_tensors, 1);
TF_RETURN_IF_ERROR(result.status());
ITensorProxyPtr second_shuffle_shape = result.value();
return std::make_pair(first_shuffle_shape, second_shuffle_shape);
}
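// Converts DepthToSpace/SpaceToDepth as reshape + transpose + reshape, using
// static dimensions when available and shape tensors otherwise.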
Status ConvertDepthSpaceShuffle(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
string data_format;
int block_size;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "block_size", &block_size));
if (block_size < 2) {
return errors::InvalidArgument("Block size must be 2 or greater");
}
if (data_format != "NCHW" && data_format != "NHWC") {
return errors::Unimplemented("Data format ", data_format,
" is not supported");
}
int idx_offset = params->use_implicit_batch ? 0 : 1;
nvinfer1::Dims dims = inputs.at(0).GetTrtDims();
const int required_rank = 3 + idx_offset;
if (dims.nbDims != required_rank) {
return errors::InvalidArgument("The input to ", node_def.op(),
" must be rank 4");
}
const int num_channels =
data_format == "NCHW" ? dims.d[0 + idx_offset] : dims.d[2 + idx_offset];
const int h =
data_format == "NCHW" ? dims.d[1 + idx_offset] : dims.d[0 + idx_offset];
const int w =
data_format == "NCHW" ? dims.d[2 + idx_offset] : dims.d[1 + idx_offset];
nvinfer1::Dims first_shuffle_shape;
nvinfer1::Permutation transpose_perm;
nvinfer1::Dims second_shuffle_shape;
if (node_def.op() == "DepthToSpace") {
if (num_channels != -1 && num_channels % (block_size * block_size) != 0) {
return errors::InvalidArgument(
"Number of channels must be divisible by block_size*block_size");
}
first_shuffle_shape = {
5,
{block_size, block_size, num_channels / (block_size * block_size),
h, w}};
transpose_perm = {2, 3, 0, 4, 1};
second_shuffle_shape =
nvinfer1::Dims3(num_channels / (block_size * block_size),
h * block_size, w * block_size);
} else {
if (node_def.op() != "SpaceToDepth")
return errors::InvalidArgument("Incorrect op type ", node_def.op());
if ((h != -1 && h % block_size != 0) || (w != -1 && w % block_size != 0)) {
return errors::InvalidArgument(
"Width and height must be divisible by block_size");
}
first_shuffle_shape = {5,
{num_channels, h / block_size, block_size,
w / block_size, block_size}};
transpose_perm = {2, 4, 0, 1, 3};
second_shuffle_shape = nvinfer1::Dims3(
num_channels * block_size * block_size, h / block_size, w / block_size);
}
if (params->validation_only) return OkStatus();
nvinfer1::IShuffleLayer* first_shuffle =
params->converter->network()->addShuffle(
*inputs.at(0).tensor()->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(first_shuffle, node_def.name());
  params->converter->SetLayerName(first_shuffle, node_def, "shuffle",
                                  /*op_instance=*/0);
ITensorProxyPtr second_shuffle_shape_tensor;
if (HasStaticShape(inputs.at(0).GetTrtDims())) {
auto adjust_reshape = [](int N, nvinfer1::Dims dims,
bool use_implicit_batch) {
if (use_implicit_batch) return dims;
for (int i = dims.nbDims; i > 0; i--) {
dims.d[i] = dims.d[i - 1];
}
dims.d[0] = N;
dims.nbDims++;
return dims;
};
first_shuffle_shape = adjust_reshape(dims.d[0], first_shuffle_shape,
params->use_implicit_batch);
second_shuffle_shape = adjust_reshape(dims.d[0], second_shuffle_shape,
params->use_implicit_batch);
first_shuffle->setReshapeDimensions(first_shuffle_shape);
} else {
StatusOr<std::pair<ITensorProxyPtr, ITensorProxyPtr>> result =
CalcDepthSpaceDynamicShape(params, block_size, data_format);
TF_RETURN_IF_ERROR(result.status());
first_shuffle->setInput(1, *result.value().first->trt_tensor());
second_shuffle_shape_tensor = result.value().second;
}
auto adjust_perm = [](int n, nvinfer1::Permutation perm,
bool use_implicit_batch) {
if (use_implicit_batch) return perm;
for (int i = n; i > 0; i--) {
perm.order[i] = perm.order[i - 1] + 1;
}
perm.order[0] = 0;
return perm;
};
transpose_perm = adjust_perm(5, transpose_perm, params->use_implicit_batch);
if (data_format == "NHWC") {
nvinfer1::Permutation layout_transpose =
adjust_perm(3, {2, 0, 1}, params->use_implicit_batch);
first_shuffle->setFirstTranspose(layout_transpose);
}
first_shuffle->setSecondTranspose(transpose_perm);
nvinfer1::IShuffleLayer* second_shuffle =
params->converter->network()->addShuffle(*first_shuffle->getOutput(0));
TFTRT_RETURN_ERROR_IF_NULLPTR(second_shuffle, node_def.name());
  params->converter->SetLayerName(second_shuffle, node_def, "shuffle",
                                  /*op_instance=*/1);
if (HasStaticShape(inputs.at(0).GetTrtDims())) {
second_shuffle->setReshapeDimensions(second_shuffle_shape);
} else {
second_shuffle->setInput(1, *second_shuffle_shape_tensor->trt_tensor());
}
if (data_format == "NHWC") {
nvinfer1::Permutation layout_transpose =
adjust_perm(3, {1, 2, 0}, params->use_implicit_batch);
second_shuffle->setSecondTranspose(layout_transpose);
}
params->outputs->push_back(TRT_TensorOrWeights(second_shuffle->getOutput(0)));
return OkStatus();
}
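// Converts SquaredDifference by lowering it to (x - y) * (x - y): both inputs
// are broadcast to a common shape, then an elementwise kSUB layer feeds both
// operands of an elementwise kPROD layer.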
Status ConvertSquaredDifference(const OpConverterParams* params) {
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false}, {"y", false}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
nvinfer1::Dims broadcasted_dims_l, broadcasted_dims_r;
TF_RETURN_IF_ERROR(GetTrtBroadcastShape(
      inputs.at(0), inputs.at(1), /*check_feasibility=*/true,
params->use_implicit_batch, &broadcasted_dims_l, &broadcasted_dims_r));
ITensorProxyPtr tensor_l = nullptr;
ITensorProxyPtr tensor_r = nullptr;
TF_RETURN_IF_ERROR(
PrepareTensorForShape(params->converter, inputs.at(0), broadcasted_dims_l,
params->validation_only, &tensor_l, node_def));
TF_RETURN_IF_ERROR(
PrepareTensorForShape(params->converter, inputs.at(1), broadcasted_dims_r,
params->validation_only, &tensor_r, node_def));
if (params->validation_only) return OkStatus();
nvinfer1::IElementWiseLayer* sub =
params->converter->network()->addElementWise(
*tensor_l->trt_tensor(), *tensor_r->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUB);
TFTRT_RETURN_ERROR_IF_NULLPTR(sub, node_def.name());
params->converter->SetLayerName(sub, node_def, "sub");
nvinfer1::IElementWiseLayer* mul =
params->converter->network()->addElementWise(
*sub->getOutput(0), *sub->getOutput(0),
nvinfer1::ElementWiseOperation::kPROD);
TFTRT_RETURN_ERROR_IF_NULLPTR(mul, node_def.name());
params->converter->SetLayerName(mul, node_def, "mul");
params->outputs->push_back(TRT_TensorOrWeights(mul->getOutput(0)));
return OkStatus();
}
#if IS_TRT_VERSION_GE(7, 1, 3, 0) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
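// Converts CombinedNonMaxSuppression. Depending on the TensorRT version this
// lowers to the EfficientNMS_TFTRT_TRT plugin (TRT >= 8.2.1.6) or the legacy
// BatchedNMS_TRT plugin. All size and threshold inputs must be scalar
// weights; the boxes input must be rank 4 including the batch dimension.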
Status ConvertCombinedNMS(const OpConverterParams* params) {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params, {{"boxes", TrtInputArg::kTensor},
{"scores", TrtInputArg::kTensor},
{"max_output_size_per_class", TrtInputArg::kWeight},
{"max_total_size", TrtInputArg::kWeight},
{"iou_threshold", TrtInputArg::kWeight},
{"score_threshold", TrtInputArg::kWeight}}));
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
const auto& node_name = node_def.name();
const ITensorProxyPtr boxes_tensor = inputs.at(0).tensor();
const ITensorProxyPtr scores_tensor = inputs.at(1).tensor();
const auto boxes_dims = boxes_tensor->getDimensions();
const auto scores_dims = scores_tensor->getDimensions();
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
const auto flag = true;
const auto* plugin_name = "NMS TRT Plugin ";
const auto* pluginName = "EfficientNMS_TFTRT_TRT";
#else
const auto flag = false;
const auto* plugin_name = "TensorRT BatchedNMS Plugin ";
const auto* pluginName = "BatchedNMS_TRT";
auto AllowNmsTopkOverride = []() {
static bool result = [] {
bool value;
const Status status = ReadBoolFromEnvVar("TF_TRT_ALLOW_NMS_TOPK_OVERRIDE",
                                               /*default_val=*/false, &value);
if (!status.ok()) {
LOG(ERROR) << status;
}
return value;
}();
return result;
};
#endif
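  // With the EfficientNMS plugin (flag == true) implicit batch mode is not
  // supported; with the legacy BatchedNMS plugin (flag == false) explicit
  // batch mode additionally requires statically shaped boxes and scores.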
if (params->use_implicit_batch == flag) {
if (flag) {
return errors::Unimplemented(
convert_not_supported_implicit(node_def.op(), node_name));
} else {
if (!HasStaticShape(boxes_dims) || !HasStaticShape(scores_dims)) {
return errors::Unimplemented(plugin_name,
"requires input with static shape");
}
}
}
const auto& output_size_per_class = inputs.at(2).weights();
const auto& total_size = inputs.at(3).weights();
const auto& iou_threshold = inputs.at(4).weights();
const auto& score_threshold = inputs.at(5).weights();
const int offset = params->use_implicit_batch ? 0 : 1;
if (boxes_dims.nbDims != 3 + offset) {
return errors::InvalidArgument(
plugin_name, "input boxes must be 4-D including batch, at ", node_name);
}
AttrSlice attrs(node_def);
bool clip_boxes = false, pad_per_class = false;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "clip_boxes", &clip_boxes));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "pad_per_class", &pad_per_class));
const int class_idx = 1 + offset;
const int num_classes = scores_dims.d[class_idx];
const bool box_check =
boxes_dims.d[class_idx] == 1 || boxes_dims.d[class_idx] == num_classes;
if (!box_check) {
return errors::InvalidArgument(
plugin_name,
"third dimension of boxes must be either 1"
"or match the num_classes dimension of scores, at ",
node_name);
}
if (output_size_per_class.count() != 1) {
return errors::InvalidArgument(
plugin_name, "max_output_size_per_class must be scalar, at ",
node_name);
}
const int max_size_per_class = *(output_size_per_class.GetPointer<int>());
if (max_size_per_class <= 0) {
return errors::InvalidArgument(
plugin_name, "max_output_size_per_class should be > 0, at ", node_name);
}
if (total_size.count() != 1) {
return errors::InvalidArgument(
plugin_name, "max_total_size must be scalar, at ", node_name);
}
int max_total_size = *(total_size.GetPointer<int>());
if (max_total_size <= 0) {
return errors::InvalidArgument(
plugin_name, "max_total_size should be > 0, at ", node_name);
}
if (iou_threshold.count() != 1) {
return errors::InvalidArgument(
plugin_name, "iou_threshold must be scalar, at ", node_name);
}
const auto iou_thresh = *(iou_threshold.GetPointer<float>());
if (iou_thresh < 0.0 || iou_thresh > 1.0) {
return errors::InvalidArgument(
plugin_name, "iou_threshold must be in [0, 1], at", node_name);
}
if (score_threshold.count() != 1) {
return errors::InvalidArgument(
plugin_name, "score_threshold must be scalar, at ", node_name);
}
#if !IS_TRT_VERSION_GE(8, 2, 1, 6) && !defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
const bool is_normalized = true;
const int backgrnd_id = -1;
const bool share_location = (boxes_dims.d[class_idx] == 1);
int keep_top_k =
pad_per_class ? std::min(max_size_per_class * num_classes, max_total_size)
: max_total_size;
const int num_boxes = boxes_dims.d[offset];
int top_k = std::max(num_boxes, keep_top_k);
if (top_k > 4096) {
if (AllowNmsTopkOverride()) {
top_k = 4096;
keep_top_k = std::min(top_k, keep_top_k);
} else {
return errors::InvalidArgument(
"TRT NMS plugin allow top_k<=4096, where top_k = max(num_boxes, "
"max_total_size). You can override this by setting "
"TF_TRT_ALLOW_NMS_TOPK_OVERRIDE=1 environment variable, but this can "
"result in a loss of accuracy.");
}
}
#endif
if (params->validation_only) return OkStatus();
float score_thresh = *(score_threshold.GetPointer<float>());
nvinfer1::PluginField fields[] = {
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
{"max_output_size_per_class", &max_size_per_class,
nvinfer1::PluginFieldType::kINT32, 1},
{"max_total_size", &max_total_size, nvinfer1::PluginFieldType::kINT32, 1},
{"iou_threshold", &iou_thresh, nvinfer1::PluginFieldType::kFLOAT32, 1},
{"score_threshold", &score_thresh, nvinfer1::PluginFieldType::kFLOAT32,
1},
{"pad_per_class", &pad_per_class, nvinfer1::PluginFieldType::kINT32, 1},
{"clip_boxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
#else
{"shareLocation", &share_location, nvinfer1::PluginFieldType::kINT32, 1},
{"backgroundLabelId", &backgrnd_id, nvinfer1::PluginFieldType::kINT32, 1},
{"numClasses", &num_classes, nvinfer1::PluginFieldType::kINT32, 1},
{"topK", &top_k, nvinfer1::PluginFieldType::kINT32, 1},
{"keepTopK", &keep_top_k, nvinfer1::PluginFieldType::kINT32, 1},
{"scoreThreshold", &score_thresh, nvinfer1::PluginFieldType::kFLOAT32, 1},
{"iouThreshold", &iou_thresh, nvinfer1::PluginFieldType::kFLOAT32, 1},
{"isNormalized", &is_normalized, nvinfer1::PluginFieldType::kINT32, 1},
{"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
#endif
};
nvinfer1::PluginFieldCollection fc{sizeof(fields) / sizeof(fields[0]),
fields};
auto creator = getPluginRegistry()->getPluginCreator(pluginName, "1", "");
TFTRT_RETURN_ERROR_IF_NULLPTR(creator, node_name);
TrtUniquePtrType<nvinfer1::IPluginV2> plugin(
creator->createPlugin(node_name.c_str(), &fc));
TFTRT_RETURN_ERROR_IF_NULLPTR(plugin, node_name);
std::vector<nvinfer1::ITensor*> trt_plugin_inputs;
trt_plugin_inputs.push_back(boxes_tensor->trt_tensor());
trt_plugin_inputs.push_back(scores_tensor->trt_tensor());
nvinfer1::IPluginV2Layer* layer = params->converter->network()->addPluginV2(
&trt_plugin_inputs[0], static_cast<int>(trt_plugin_inputs.size()),
*plugin);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
params->converter->SetLayerName(layer, node_def, "plugin");
const ITensorProxyPtr output_detection_boxes = layer->getOutput(1);
const ITensorProxyPtr output_detection_scores = layer->getOutput(2);
ITensorProxyPtr output_num_detections = layer->getOutput(0);
ITensorProxyPtr output_detection_classes = layer->getOutput(3);
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
nvinfer1::IIdentityLayer* layer_detection_classes =
params->converter->network()->addIdentity(
*output_detection_classes->trt_tensor());
layer_detection_classes->setOutputType(0, nvinfer1::DataType::kFLOAT);
output_detection_classes = layer_detection_classes->getOutput(0);
std::vector<int> input_dims{output_num_detections->getDimensions().d[0], 0};
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
      /*input=*/output_num_detections,
      /*input_dims=*/&input_dims,
      /*params=*/params,
      /*output=*/&output_num_detections));
#endif
params->outputs->push_back(TRT_TensorOrWeights(output_detection_boxes));
params->outputs->push_back(TRT_TensorOrWeights(output_detection_scores));
params->outputs->push_back(TRT_TensorOrWeights(output_detection_classes));
params->outputs->push_back(TRT_TensorOrWeights(output_num_detections));
return OkStatus();
}
#endif
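// Converts ResizeBilinear and ResizeNearestNeighbor to an IResizeLayer. The
// input is transposed to NCHW first and the result is transposed back to
// NHWC. The output shape is set statically when both the input shape and the
// size argument are known; otherwise it is assembled at runtime from
// shape/slice/concat layers.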
Status ConvertResize(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input", TrtInputArg::kTensor}, {"size", TrtInputArg::kBoth}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
ITensorProxyPtr inputs_tensor = inputs.at(0).tensor();
TFTRT_RETURN_ERROR_IF_NULLPTR(inputs_tensor, params->node_def.name());
const bool const_output_size = inputs.at(1).is_weights();
if (const_output_size) {
if (inputs.at(1).weights().count() != 2) {
return errors::Unimplemented("Resize requires 2D values for the size");
}
} else {
if (params->use_implicit_batch) {
return errors::Unimplemented(
"Resize requires constant size in implicit batch mode");
}
TF_RETURN_IF_ERROR(ExpectShapeTensor(inputs.at(1)));
if (inputs.at(1).tensor()->getDimensions().d[0] != 2) {
return errors::Unimplemented("Resize requires 2D values for the size");
}
}
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "align_corners", &align_corners));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
nvinfer1::ResizeMode resize_mode;
if (node_def.op() == "ResizeBilinear") {
#if IS_TRT_VERSION_GE(7, 1, 0, 0)
if (!align_corners) {
return errors::InvalidArgument(
"Cannot Convert Bilinear Resize when align_corners=False");
}
#endif
resize_mode = nvinfer1::ResizeMode::kLINEAR;
} else if (node_def.op() == "ResizeNearestNeighbor") {
resize_mode = nvinfer1::ResizeMode::kNEAREST;
} else {
return errors::Unimplemented(node_def.op(), " is not yet implemented");
}
if (params->validation_only) return OkStatus();
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
inputs_tensor, {0, 3, 1, 2}, &inputs_tensor, node_def, "to_NCHW"));
nvinfer1::Dims output_shape_dims;
ITensorProxyPtr output_shape_tensor;
const bool static_output_shape =
HasStaticShape(inputs_tensor->getDimensions()) && const_output_size;
if (static_output_shape) {
output_shape_dims.nbDims = inputs_tensor->getDimensions().nbDims;
for (int i = 0; i < output_shape_dims.nbDims; ++i) {
output_shape_dims.d[i] = inputs_tensor->getDimensions().d[i];
}
const int* weights_ptr = inputs.at(1).weights().GetPointer<int>();
output_shape_dims.d[output_shape_dims.nbDims - 2] = weights_ptr[0];
output_shape_dims.d[output_shape_dims.nbDims - 1] = weights_ptr[1];
} else {
ITensorProxyPtr shape = params->converter->network()
->addShape(*inputs_tensor->trt_tensor())
->getOutput(0);
ITensorProxyPtr batch_size =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {0}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr num_channels =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {1}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr height, width;
if (const_output_size) {
const int* weights_ptr = inputs.at(1).weights().GetPointer<int>();
TF_RETURN_IF_ERROR(CreateScalarConstant(params, weights_ptr[0], &height));
TF_RETURN_IF_ERROR(CreateScalarConstant(params, weights_ptr[1], &width));
} else {
ITensorProxyPtr size = inputs.at(1).tensor();
height = params->converter->network()
->addSlice(*size->trt_tensor(), {1, {0}}, {1, {1}}, {1, {1}})
->getOutput(0);
width = params->converter->network()
->addSlice(*size->trt_tensor(), {1, {1}}, {1, {1}}, {1, {1}})
->getOutput(0);
}
StatusOr<ITensorProxyPtr> result = ConcatenateTensors(
params, {batch_size, num_channels, height, width}, 0);
TF_RETURN_IF_ERROR(result.status());
output_shape_tensor = result.value();
}
nvinfer1::IResizeLayer* layer =
params->converter->network()->addResize(*inputs_tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
layer->setResizeMode(resize_mode);
layer->setAlignCorners(align_corners);
if (static_output_shape) {
layer->setOutputDimensions(output_shape_dims);
} else {
layer->setInput(1, *output_shape_tensor->trt_tensor());
}
ITensorProxyPtr output = layer->getOutput(0);
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output, {0, 2, 3, 1}, &output, node_def, "to_NHWC"));
params->outputs->push_back(TRT_TensorOrWeights(output));
return OkStatus();
}
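// Converts AddN by left-folding the N inputs through a chain of elementwise
// kSUM layers. Weight inputs are materialized as constant layers first; in
// implicit batch mode their batch dimension (which must be 1) is stripped.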
Status ConvertAddN(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
int num_inputs;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "N", &num_inputs));
if (num_inputs < 2) {
return errors::InvalidArgument("AddN requires at least two inputs");
}
TFTRT_CHECK_INPUT_SIZE(inputs.size(), num_inputs, node_def);
for (const auto& input : inputs) {
if (!input.is_tensor() && input.weights().Shape().dim(0) != 1) {
return errors::InvalidArgument(
"Weights input to AddN is required to have batch dimension 1.");
}
}
if (params->validation_only) return OkStatus();
std::vector<ITensorProxyPtr> tensor_inputs;
tensor_inputs.reserve(inputs.size());
for (const auto& input : inputs) {
if (input.is_tensor()) {
tensor_inputs.push_back(input.tensor());
} else {
auto dims = input.weights().Shape();
if (params->use_implicit_batch) {
TF_RETURN_IF_ERROR(dims.RemoveBatchDimension());
}
tensor_inputs.push_back(params->converter->CreateConstantLayer(
input.weights(), dims.AsTrtDims()));
}
}
ITensorProxyPtr lhs = tensor_inputs[0];
for (int i = 1; i < num_inputs; ++i) {
ITensorProxyPtr rhs = tensor_inputs[i];
nvinfer1::ILayer* layer = params->converter->network()->addElementWise(
*lhs->trt_tensor(), *rhs->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, std::to_string(i));
lhs = layer->getOutput(0);
}
params->outputs->push_back(TRT_TensorOrWeights(lhs));
return OkStatus();
}
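// Registration of the default op converters. Each entry maps one or more TF
// op names to the converter function that Converter::ConvertNode dispatches
// to.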
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertBiasAdd, "BiasAdd");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertClipByValue, "ClipByValue");
#if IS_TRT_VERSION_GE(7, 1, 3, 0) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertCombinedNMS,
"CombinedNonMaxSuppression");
#endif
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertAddN, "AddN");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertCast, "Cast");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConcat, "ConcatV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConst, "Const");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv2D, "Conv2D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv2DBackpropInput,
"Conv2DBackpropInput");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertDepthSpaceShuffle, "DepthToSpace");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv2DDepthwise,
"DepthwiseConv2dNative");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertExpandDims, "ExpandDims");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertFusedConv2DBiasActivation,
"FusedConv2DBiasActivation");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertGather, "GatherV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertMatMul, "MatMul");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPack, "Pack");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPad, "Pad");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertReshape, "Reshape");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv3D, "Conv3D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv3DBackpropInputV2,
"Conv3DBackpropInputV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertResize, "ResizeBilinear");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertResize, "ResizeNearestNeighbor");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPool3D, "AvgPool3D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPool3D, "MaxPool3D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertShape, "Shape");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSlice, "Slice");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertDepthSpaceShuffle, "SpaceToDepth");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSplit, "Split");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSquare, "Square");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSquaredDifference,
"SquaredDifference");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSqueeze, "Squeeze");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertStridedSlice, "StridedSlice");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertTopK, "TopKV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertTranspose, "Transpose");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertUnpack, "Unpack");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPool, {"MaxPool", "AvgPool"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertFusedBatchNorm,
{"FusedBatchNorm", "FusedBatchNormV2",
"FusedBatchNormV3"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertReduce,
{"Sum", "Prod", "Max", "Min", "Mean"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertArgMinMax, {"ArgMin", "ArgMax"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertIdentity,
{"Identity", "IdentityN", "Snapshot",
"StopGradient", "_CopyFromHostToGpu"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertBatchMatMul,
{"BatchMatMul", "BatchMatMulV2"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertFake, "FakeOp");
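// Stamps every node in the GraphDef with the given device string so that the
// grappler layout optimizer run below has placement information to work with.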
static Status SetDeviceInfoInNodes(GraphDef* graph_def, const string& device) {
for (auto& node : *(graph_def->mutable_node())) {
*node.mutable_device() = device;
}
return OkStatus();
}
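// Converts a GraphDef (the body of a TRTEngineOp) into a TensorRT engine.
// Optionally runs the generic layout optimizer and constant folding first,
// then walks the nodes: engine inputs (Placeholder/_Arg) and outputs
// (Identity/_Retval) are registered with the Converter, every other node is
// dispatched to its registered op converter, and BuildCudaEngine finalizes
// the engine.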
Status ConvertGraphDefToEngine(
const GraphDef& gdef, OpKernelContext* ctx, TrtPrecisionMode precision_mode,
int max_batch_size, size_t max_workspace_size_bytes,
const std::vector<PartialTensorShape>& input_shapes,
nvinfer1::ILogger* trt_logger, nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator,
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine, bool use_calibration,
const bool use_implicit_batch, bool* convert_successfully,
TrtShapeOptimizationProfile* profiles, absl::string_view engine_name,
bool use_explicit_precision, tensorflow::grappler::Cluster* cluster,
const string& device) {
engine->reset();
if (convert_successfully) *convert_successfully = false;
auto statusor = Converter::Create(precision_mode, use_calibration, trt_logger,
use_implicit_batch, engine_name,
use_explicit_precision, ctx);
TF_RETURN_IF_ERROR(statusor.status());
std::unique_ptr<Converter> converter = std::move(statusor.value());
GraphDef graph = gdef;
if (cluster != nullptr) {
bool apply_layout_optim;
Status status =
ReadBoolFromEnvVar("TF_TRT_ENABLE_LAYOUT_OPTIMIZER",
                           /*default_val=*/true, &apply_layout_optim);
if (!status.ok()) {
LOG(ERROR) << status;
}
if (apply_layout_optim) {
tensorflow::grappler::GrapplerItem grappler_item;
grappler_item.graph = gdef;
TF_RETURN_IF_ERROR(SetDeviceInfoInNodes(&grappler_item.graph, device));
tensorflow::grappler::GenericLayoutOptimizer layout_optimizer("NCHW");
TF_RETURN_IF_ERROR(
layout_optimizer.Optimize(cluster, grappler_item, &graph));
grappler_item.graph = graph;
tensorflow::grappler::ConstantFolding const_optimizer(
          /*cpu_device=*/nullptr,
          /*disable_compressed_tensor_optimization=*/false,
          /*fold_quantization_emulation=*/false);
TF_RETURN_IF_ERROR(
const_optimizer.Optimize(cluster, grappler_item, &graph));
Graph g(OpRegistry::Global());
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(GraphConstructorOptions(), graph, &g));
g.ToGraphDef(&graph);
}
}
VLOG(1) << "Starting to convert TensorFlow ops to TensorRT layers";
std::vector<Converter::EngineOutputInfo> output_tensors;
int num_layers = converter->network()->getNbLayers();
absl::flat_hash_set<const char*> layer_names;
for (const auto& node_def : graph.node()) {
const string& node_name = node_def.name();
VLOG(2) << "Converting node " << node_name << ", op=" << node_def.op();
if (IsEngineInput(node_name)) {
int32 slot_number = -1;
string type_key;
if (node_def.op() == "Placeholder") {
if (!strings::safe_strto32(
node_name.c_str() + strlen(IONamePrefixes::kInputPHName),
&slot_number)) {
return errors::InvalidArgument("Failed to parse slot number from ",
node_name);
}
type_key = "dtype";
} else if (tensorflow::grappler::IsArg(node_def)) {
slot_number = node_def.attr().at("index").i();
type_key = "T";
} else {
return errors::InvalidArgument(
"Node ", node_name,
" with is neither Placeholder nor Arg, instead ", node_def.op());
}
DataType tf_dtype = node_def.attr().at(type_key).type();
if (tf_dtype == DT_RESOURCE) {
VLOG(2) << "Adding engine input resource " << node_name;
if (ctx == nullptr) {
return errors::InvalidArgument(
"Variable resource type conversion requires a valid ctx");
}
if (ctx->input(slot_number).NumElements() == 0) {
return errors::InvalidArgument("Resource input ", node_name,
" is empty.");
}
TF_RETURN_IF_ERROR(converter->AddInputResource(
node_name, ctx->input(slot_number).flat<ResourceHandle>()(0)));
} else {
nvinfer1::DataType trt_dtype;
nvinfer1::Dims trt_dims;
int batch_size = -1;
const auto shape = input_shapes.at(slot_number);
const auto status = ValidateTensorProperties(
node_def.op(), node_def.attr().at(type_key).type(), shape,
            use_implicit_batch, /*validation_only=*/false, &trt_dtype,
&trt_dims, &batch_size);
if (!status.ok()) {
const string error_message =
StrCat("Validation failed for ", node_name, " and input slot ",
slot_number, ": ", status.message());
LOG_WARNING_WITH_PREFIX << error_message;
return errors::CreateWithUpdatedMessage(status, error_message);
}
VLOG(2) << "Adding engine input tensor " << node_name << " with shape "
<< DebugString(trt_dims);
TF_RETURN_IF_ERROR(converter->AddInputTensor(node_name, trt_dtype,
trt_dims, batch_size));
}
} else if (IsEngineOutput(node_name)) {
int32 slot_number = -1;
if (node_def.op() == "Identity") {
if (!strings::safe_strto32(
node_name.c_str() + strlen(IONamePrefixes::kOutputPHName),
&slot_number)) {
return errors::InvalidArgument("Failed to parse slot number from ",
node_name);
}
} else if (tensorflow::grappler::IsRetval(node_def)) {
slot_number = node_def.attr().at("index").i();
} else {
return errors::InvalidArgument(
"Node with name ", node_name,
" starting with IONamePrefixes::kOutputPHName is "
"neither Identity nor Retval, instead ",
node_def.op());
}
string out_type_key;
if (node_def.op() == "ReadVariableOp" ||
node_def.op() == "ResourceGather") {
out_type_key = "dtype";
} else {
out_type_key = "T";
}
DataType tf_dtype;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), out_type_key, &tf_dtype));
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(TfTypeToTrtType(tf_dtype, &trt_dtype));
if (output_tensors.size() <= slot_number) {
output_tensors.resize(slot_number + 1);
}
output_tensors.at(slot_number) = {node_def.input(0), node_name,
trt_dtype};
} else {
TF_RETURN_IF_ERROR(converter->ConvertNode(node_def));
}
int new_num_layers = converter->network()->getNbLayers();
for (int i = num_layers; i < new_num_layers; i++) {
auto layer = converter->network()->getLayer(i);
if (layer->getName() == nullptr ||
!layer_names.insert(layer->getName()).second) {
std::string error_message = absl::StrCat(
"Converting node ", node_name, ", op=", node_def.op(),
layer->getName() ? " creates a layer with name collision"
: " creates a layer without a name");
LOG_WARNING_WITH_PREFIX << error_message;
return errors::Internal(error_message);
}
}
num_layers = new_num_layers;
}
TF_RETURN_IF_ERROR(converter->RenameAndMarkOutputTensors(output_tensors));
if (convert_successfully) *convert_successfully = true;
if (!use_explicit_precision) {
converter->MaybeApplyQuantizationRanges();
}
TF_RETURN_IF_ERROR(converter->BuildCudaEngine(
engine, max_batch_size, max_workspace_size_bytes, allocator, calibrator,
profiles));
VLOG(1) << "Finished conversion";
return OkStatus();
}
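// Rewrites the nodes of a TRT segment into a standalone GraphDef: boundary
// input edges become _Arg nodes, boundary output edges become _Retval nodes,
// inputs inside the segment are rewired to the new _Arg names, and control
// inputs coming from outside the segment are dropped.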
Status ConvertSegmentToGraphDef(
const Graph* graph, const grappler::GraphProperties& graph_properties,
const std::vector<const Node*>& subgraph_nodes,
EngineInfo* engine_info) {
tensorflow::profiler::TraceMe activity(
"ConvertSegmentToGraphDef", tensorflow::profiler::TraceMeLevel::kInfo);
std::vector<EngineConnection>* connections = &engine_info->connections;
GraphDef* segment_def = &engine_info->segment_graph_def;
std::set<string> marker_nodes;
for (size_t i = 0; i < connections->size(); ++i) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Constructing TRTEngine IO: ", i + 1, "/",
connections->size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
auto& connection = connections->at(i);
if (connection.is_control_edge()) continue;
auto outside_node = graph->FindNodeId(connection.outside_id);
if (!outside_node) {
return errors::NotFound("Cannot find node with id ",
connection.outside_id, " in the graph.");
}
DataType dtype;
PartialTensorShape partial_shape;
if (connection.is_input_edge) {
GetOutputProperties(graph_properties,
graph->FindNodeId(connection.outside_id),
connection.outside_port, &partial_shape, &dtype);
connection.outside_shape = partial_shape;
} else {
GetInputProperties(graph_properties,
graph->FindNodeId(connection.outside_id),
connection.outside_port, &partial_shape, &dtype);
connection.inside_shape = partial_shape;
}
connection.connection_type = dtype;
if (connection.is_input_edge) {
const string node_name =
StrCat(IONamePrefixes::kInputPHName, connection.port_number);
if (marker_nodes.count(node_name)) {
VLOG(1) << "Reusing input " << node_name << " for the edge "
<< connection.outside_node_name << ":"
<< connection.outside_port << " -> "
<< connection.inside_node_name << ":" << connection.inside_port;
continue;
}
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
NodeDefBuilder builder(node_name, "_Arg");
auto status = builder.Attr("shape", partial_shape)
.Attr("T", dtype)
.Attr("index", connection.port_number)
.Finalize(seg_node);
VLOG(1) << "Constructing input " << node_name << " for the edge "
<< connection.outside_node_name << ":" << connection.outside_port
<< " -> " << connection.inside_node_name << ":"
<< connection.inside_port;
} else {
const string node_name =
StrCat(IONamePrefixes::kOutputPHName, connection.port_number);
if (marker_nodes.count(node_name)) {
VLOG(1) << "Reusing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
<< " -> " << connection.outside_node_name << ":"
<< connection.outside_port;
continue;
}
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
NodeDefBuilder builder(node_name, "_Retval");
auto status =
builder.Attr("T", dtype)
.Attr("index", connection.port_number)
.Input(connection.inside_node_name, connection.inside_port, dtype)
.Finalize(seg_node);
VLOG(1) << "Constructing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
<< " -> " << connection.outside_node_name << ":"
<< connection.outside_port;
}
}
std::unordered_map<int, int> old_to_new_id_map;
string local_scope = subgraph_nodes.front()->name();
int i = 0;
for (const Node* node : subgraph_nodes) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Copy Node to Subgraph: ", ++i, "/",
subgraph_nodes.size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
local_scope = GetCommonNameScope(local_scope, node->name());
old_to_new_id_map[node->id()] = segment_def->node_size();
auto snode = segment_def->add_node();
*snode = node->def();
VLOG(2) << "Copying " << snode->name() << " to subgraph";
}
for (int i = 0; i < connections->size(); ++i) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Updating Subgraph Input: ", i + 1, "/",
connections->size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
auto& connection = connections->at(i);
if (connection.is_control_edge() || !connection.is_input_edge) continue;
auto snode =
segment_def->mutable_node(old_to_new_id_map[connection.inside_id]);
const string arg_name =
StrCat(IONamePrefixes::kInputPHName, connection.port_number);
VLOG(1) << "Updating " << snode->name() << ":" << connection.inside_port
<< " from " << snode->input(connection.inside_port) << " to "
<< arg_name;
snode->set_input(connection.inside_port, arg_name);
}
std::set<string> subgraph_node_names;
{
tensorflow::profiler::TraceMe activity(
"Constructing subgraph_node_names set: ",
tensorflow::profiler::TraceMeLevel::kInfo);
for (const Node* node : subgraph_nodes) {
subgraph_node_names.insert(node->name());
}
}
for (int i = 0; i < segment_def->node_size(); ++i) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Removing outside to subgraph control inputs: ", i + 1,
"/", segment_def->node_size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
auto snode = segment_def->mutable_node(i);
const int input_size = snode->input_size();
int input_idx = 0;
int actual_input_idx = 0;
while (input_idx < input_size) {
TensorId input = ParseTensorName(snode->input(input_idx));
if (!subgraph_node_names.count(
string(input.first.data(), input.first.size())) &&
!IsEngineInput(input.first)) {
if (input.second == Graph::kControlSlot) {
VLOG(1) << "... removing control inputs " << input.first
<< " from subgraph.";
++input_idx;
continue;
}
}
if (actual_input_idx != input_idx) {
snode->set_input(actual_input_idx, snode->input(input_idx));
}
++input_idx;
++actual_input_idx;
}
for (int remove = input_size - actual_input_idx; remove > 0; --remove) {
snode->mutable_input()->RemoveLast();
}
}
return OkStatus();
}
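// Returns true if the given output edge may leave a TRT segment. Const
// producers are rejected as segment outputs because they are converted to
// weights rather than tensors.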
bool OutputEdgeValidator::operator()(const Edge* out_edge) const {
if (out_edge->IsControlEdge()) return true;
if (out_edge->src()->type_string() == "Const") {
VLOG(1) << "--> Need to remove output node " << out_edge->src()->name()
<< " which is a Const.";
return false;
}
return true;
}
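// Returns the value as a tensor, materializing weights through a constant
// layer when necessary.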
ITensorProxyPtr TRT_TensorOrWeights::as_tensor(
const OpConverterParams* params) {
if (is_tensor()) {
return tensor();
} else {
return params->converter->CreateConstantLayer(weights(), GetTrtDims());
}
}
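// Helpers for building uniform error messages about unexpected input types
// and mismatched batch sizes; check_type wraps the type comparison in a
// Status.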
std::string unexpected_type_error_msg(nvinfer1::DataType type_being_checked,
nvinfer1::DataType type_expected,
const NodeDef& node_def, int idx) {
return "The '" + node_def.input(idx) + "' parameter of " + node_def.op() +
" operation in " + node_def.name() + " is expected to be of type " +
         DebugString(type_expected) + ", got " +
DebugString(type_being_checked) + ".";
}
string batch_size_error(absl::string_view name, absl::string_view comment) {
return StrCat("Batch size doesn't match for tensor '", name, "' : ", comment);
}
Status check_type(nvinfer1::DataType type_being_checked,
nvinfer1::DataType type_expected, const NodeDef& node_def,
int idx) {
if (type_being_checked == type_expected) return OkStatus();
return errors::InvalidArgument(unexpected_type_error_msg(
type_being_checked, type_expected, node_def, idx));
}
std::string convert_not_supported_implicit(const std::string& pOpName,
const std::string& pNodeName,
const char* pOpType) {
const auto oper = pOpType ? absl::StrCat(pOpType, " ") : string("");
return absl::StrCat("Convertion for ", oper, "op: '", pOpName,
"' is not supported in implicit batch mode, at ",
pNodeName);
}
}
}
}
#endif
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <cmath>
#include <functional>
#include <iterator>
#include <memory>
#include <numeric>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "absl/time/civil_time.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/base/call_once.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2tensorrt/common/datavec.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
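// The tests below are parameterized over the three TensorRT conversion modes:
// implicit batch, explicit batch with static shapes, and explicit batch with
// dynamic (runtime) shapes.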
enum class TrtTestMode {
kImplicitBatch = 0,
kExplicitBatch = 1,
kDynamicShape = 2
};
string DebugString(const TrtTestMode mode) {
switch (mode) {
case TrtTestMode::kImplicitBatch:
return "kImplicitBatch";
case TrtTestMode::kExplicitBatch:
return "kExplicitBatch";
case TrtTestMode::kDynamicShape:
return "kDynamicShape";
default:
return "Invalid TrtTestMode";
}
}
namespace convert {
using absl::StrCat;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::Matcher;
using ::testing::PrintToString;
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
constexpr std::array<TrtTestMode, 3> ValidTrtModes = {
TrtTestMode::kImplicitBatch, TrtTestMode::kExplicitBatch,
TrtTestMode::kDynamicShape};
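// Compares shape, dtype, and the underlying buffer pointer (not the element
// values) of two TRT_ShapedWeights.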
bool TrtShapedWeightsEquals(const TRT_ShapedWeights& lhs,
const TRT_ShapedWeights& rhs) {
return lhs.Shape() == rhs.Shape() && lhs.TrtDType() == rhs.TrtDType() &&
lhs.GetPointer<int8>() == rhs.GetPointer<int8>();
}
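// Checks that the given weights have the expected dimensions and element
// values.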
template <typename T>
void ValidateWeights(const TRT_ShapedWeights& weights,
const std::vector<int>& expected_dims,
const std::vector<T>& expected_value) {
EXPECT_EQ(weights.Shape(), DimsAdapter(expected_dims));
ASSERT_EQ(expected_value.size(), weights.count()) << weights.DebugString();
const T* actual_values = weights.GetPointer<T>();
for (int i = 0; i < expected_value.size(); ++i) {
EXPECT_EQ(expected_value[i], actual_values[i]);
}
}
TEST(TRT_ShapedWeights_Test, Basic) {
{
TRT_ShapedWeights weights;
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_EQ(nullptr, trt_weights.values);
EXPECT_EQ(0, trt_weights.count);
EXPECT_EQ(nullptr, ptr->GetPointer<int8>());
EXPECT_EQ(0, ptr->count());
EXPECT_EQ(0, ptr->size_bytes());
}
}
{
TRT_ShapedWeights weights(nvinfer1::DataType::kFLOAT);
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_EQ(nullptr, trt_weights.values);
EXPECT_EQ(0, trt_weights.count);
EXPECT_EQ(nullptr, ptr->GetPointer<int8>());
EXPECT_EQ(0, ptr->count());
EXPECT_EQ(0, ptr->size_bytes());
}
}
{
TrtWeightStore store;
TRT_ShapedWeights weights =
store.GetTempWeights(nvinfer1::DataType::kFLOAT, CreateDims({2, 5}))
.value();
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_NE(nullptr, trt_weights.values);
EXPECT_EQ(10, trt_weights.count);
EXPECT_EQ(trt_weights.values, ptr->GetPointer<int8>());
EXPECT_EQ(10, ptr->count());
EXPECT_EQ(40, ptr->size_bytes());
}
EXPECT_EQ(weights.GetPointer<int8>(), copy.GetPointer<int8>());
}
}
TEST(TRT_TensorOrWeights_Test, Basic) {
{
TRT_TensorOrWeights tw;
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
for (auto ptr : {&tw, ©, &assigned}) {
EXPECT_EQ(false, ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
EXPECT_EQ(-1, ptr->batch_size());
}
}
{
nvinfer1::Dims dims;
dims.nbDims = 1;
dims.d[0] = 1;
ITensorProxyPtr itensor(dims);
TRT_TensorOrWeights tw(itensor);
TRT_TensorOrWeights tw1(itensor, 1);
for (auto original_ptr : {&tw, &tw1}) {
TRT_TensorOrWeights copy(*original_ptr);
TRT_TensorOrWeights assigned;
assigned = *original_ptr;
for (auto ptr : {original_ptr, ©, &assigned}) {
ASSERT_TRUE(ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
if (original_ptr == &tw) {
EXPECT_EQ(-1, ptr->batch_size());
} else {
EXPECT_EQ(1, ptr->batch_size());
}
EXPECT_EQ(itensor->simple_tensor(), ptr->tensor()->simple_tensor());
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray({1}));
}
}
}
{
nvinfer1::Dims dims;
dims.nbDims = 1;
dims.d[0] = 1;
TRT_TensorOrWeights tw(nvinfer1::DataType::kFLOAT, dims, 1);
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
for (auto ptr : {&tw, ©, &assigned}) {
ASSERT_TRUE(ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
EXPECT_EQ(1, ptr->batch_size());
EXPECT_NE(nullptr, ptr->tensor()->simple_tensor());
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray({1}));
}
}
{
TRT_ShapedWeights weights;
TRT_TensorOrWeights tw(weights);
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
for (auto ptr : {&tw, ©, &assigned}) {
EXPECT_EQ(false, ptr->is_tensor());
EXPECT_EQ(true, ptr->is_weights());
EXPECT_TRUE(TrtShapedWeightsEquals(weights, ptr->weights()));
std::vector<int> empty_dims;
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray(empty_dims));
}
}
}
class ValidatorTest : public ::testing::Test {
public:
ValidatorTest() {}
Status ConvertToTensorOrWeights(const Scope& scope, const Node* node,
int output_port,
TRT_TensorOrWeights* tensor_or_weights) {
grappler::GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
    TF_EXPECT_OK(graph_properties.InferStatically(/*assume_valid_feeds=*/true));
TrtNodeValidator validator(graph_properties, TrtPrecisionMode::FP32,
                               /*use_calibration=*/false,
                               /*use_implicit_batch=*/true,
                               /*use_explicit_precision=*/false);
return validator.ConvertToTensorOrWeights(node->def(), output_port,
tensor_or_weights);
}
};
TEST_F(ValidatorTest, ConvertToTensorOrWeights) {
{
Scope s = Scope::NewRootScope();
auto node =
ops::Const(s.WithOpName("my_const"), {1.0f, 2.0f}, TensorShape({2}));
TRT_TensorOrWeights output;
EXPECT_THAT(ConvertToTensorOrWeights(s, node.op().node(),
                                         /*output_port=*/0, &output),
IsOk());
ValidateWeights<float>(output.weights(), {2}, {1.0, 2.0});
}
auto convert_to_tensor_or_weights = [this](const std::vector<int64_t>& dims,
TRT_TensorOrWeights* output) {
Scope s = Scope::NewRootScope();
const auto attrs = ops::Placeholder::Shape(PartialTensorShape{dims});
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT, attrs);
auto add = ops::Add(s.WithOpName("add"), feed, feed);
return this->ConvertToTensorOrWeights(s, add.operation.node(),
                                          /*output_port=*/0, output);
};
{
TRT_TensorOrWeights output;
EXPECT_THAT(
convert_to_tensor_or_weights(
std::vector<int64_t>(nvinfer1::Dims::MAX_DIMS + 2, 1), &output),
StatusIs(absl::StatusCode::kOutOfRange,
HasSubstr("Input tensor rank is greater than 9")));
}
{
TRT_TensorOrWeights output;
EXPECT_THAT(convert_to_tensor_or_weights({}, &output),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Scalar input tensor is not supported since "
"the first dimension "
"is treated as batch dimension by TRT")));
}
for (const int32 non_batch_dim : {-1, 2}) {
const int32 batch_size = 12;
TRT_TensorOrWeights output;
EXPECT_THAT(
convert_to_tensor_or_weights({batch_size, non_batch_dim}, &output),
IsOk());
ASSERT_TRUE(output.is_tensor());
EXPECT_EQ(batch_size, output.batch_size());
EXPECT_NE(nullptr, output.tensor()->simple_tensor());
EXPECT_THAT(output.GetTrtDims(), DimsAreArray({non_batch_dim}));
}
}
TEST_F(ValidatorTest, IsTensorRTCandidate_Basics) {
Scope s = Scope::NewRootScope();
auto input =
ops::Const(s.WithOpName("const"), {1.0f, 2.0f}, TensorShape({2}));
auto add = ops::Add(s.WithOpName("add"), input, input);
const Node* add_node = add.operation.node();
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
  TF_EXPECT_OK(graph_properties.InferStatically(/*assume_valid_feeds=*/true));
TrtNodeValidator validator(graph_properties, TrtPrecisionMode::FP32,
                             /*use_calibration=*/false,
                             /*use_implicit_batch=*/true,
                             /*use_explicit_precision=*/false);
bool start_conversion = false;
bool should_fail = false;
auto op_converter = [&start_conversion, &should_fail](
const OpConverterParams* params) -> Status {
if (should_fail) return errors::InvalidArgument("");
if (!params->validation_only) start_conversion = true;
return OkStatus();
};
auto original_op_converter = GetOpConverterRegistry()->LookUp("Add");
ASSERT_TRUE(original_op_converter.ok());
GetOpConverterRegistry()->Clear("Add");
EXPECT_THAT(validator.IsTensorRTCandidate(add_node),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Op type Add is not supported.")));
GetOpConverterRegistry()->Register("Add", kDefaultConverterPriority + 1,
op_converter);
TF_EXPECT_OK(validator.IsTensorRTCandidate(add_node));
EXPECT_EQ(false, start_conversion);
should_fail = true;
EXPECT_THAT(validator.IsTensorRTCandidate(add_node),
StatusIs(absl::StatusCode::kInvalidArgument));
GetOpConverterRegistry()->Clear("Add");
GetOpConverterRegistry()->Register("Add", kDefaultConverterPriority,
*original_op_converter);
}
TEST(TrtNodeValidator, IsTensorRTCandidate) {
const std::vector<int32> input_shape_array{2, 2};
TensorShape input_shape;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(input_shape_array, &input_shape));
Scope s = Scope::NewRootScope();
ops::Placeholder::Attrs feed_attrs;
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(input_shape_array, &feed_attrs.shape_));
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT, feed_attrs);
auto const_1 = ops::Const(s.WithOpName("const_1"), 1.0f, input_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), feed, const_1);
ops::MatMul::Attrs matmul_attrs;
matmul_attrs.transpose_a_ = true;
auto incompatible_matmul = ops::MatMul(s.WithOpName("incompatible_matmul"),
feed, const_1, matmul_attrs);
auto unsupported_op = ops::Erfc(s.WithOpName("sin"), feed);
auto incompatible_feed = ops::Placeholder(s.WithOpName("feed"), DT_DOUBLE);
auto const_2 = ops::Const(s.WithOpName("const_2"), 1.0, input_shape);
auto matmul_with_incompatible_input =
ops::MatMul(s.WithOpName("matmul_with_incompatible_input"),
incompatible_feed, const_2);
auto quantize_attrs = ops::FakeQuantWithMinMaxArgs::Min(-6.0f).Max(6.0f);
auto quantize = ops::FakeQuantWithMinMaxArgs(s.WithOpName("quantize"), feed,
quantize_attrs);
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
Tensor feed_tensor(DT_FLOAT, input_shape);
item.feed.push_back(std::make_pair("feed", feed_tensor));
grappler::GraphProperties graph_properties(item);
  TF_EXPECT_OK(graph_properties.InferStatically(/*assume_valid_feeds=*/true));
for (const TrtPrecisionMode precision_mode :
{TrtPrecisionMode::FP32, TrtPrecisionMode::INT8}) {
TrtNodeValidator validator(graph_properties, precision_mode,
                               /*use_calibration=*/false,
                               /*use_implicit_batch=*/true,
                               /*use_explicit_precision=*/false);
TF_EXPECT_OK(validator.IsTensorRTCandidate(matmul.operation.node()));
EXPECT_THAT(
validator.IsTensorRTCandidate(incompatible_matmul.operation.node()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("MatMul with 2D tensors requires explicit batch "
"mode, or that tensor A "
"is not transposed and B is a constant tensor.")));
EXPECT_THAT(validator.IsTensorRTCandidate(unsupported_op.operation.node()),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Op type Erfc is not supported")));
EXPECT_THAT(validator.IsTensorRTCandidate(
matmul_with_incompatible_input.operation.node()),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to convert at least one input to a "
"TRT_TensorOrWeights:")));
if (precision_mode == TrtPrecisionMode::INT8) {
TF_EXPECT_OK(validator.IsTensorRTCandidate(quantize.operation.node()));
} else {
EXPECT_THAT(
validator.IsTensorRTCandidate(quantize.operation.node()),
StatusIs(
absl::StatusCode::kUnimplemented,
HasSubstr("Op type FakeQuantWithMinMaxArgs is not supported")));
}
}
}
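// Test fixture that exposes the private state of Converter (batch size,
// weight store, quantization ranges) needed by the tests below.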
class ConverterTest : public ::testing::Test {
public:
ConverterTest() { Reset(); }
void Reset() {
GetOpConverterRegistry()->Clear("MyOp");
GetOpConverterRegistry()->Clear("DummyOp");
converter_ =
std::move(Converter::Create(TrtPrecisionMode::FP32,
                                    /*use_calibration=*/false, &logger_,
                                    /*use_implicit_batch=*/true,
                                    /*engine_name=*/"TRTEngineOp_000_000",
                                    /*use_explicit_precision=*/false)
.value());
weight_store_ = &converter_->weight_store_;
}
Status MaybeUpdateBatchSize(int batch_size) {
return converter_->MaybeUpdateBatchSize(batch_size);
}
Status AddTensorOrWeights(const string& name, TRT_TensorOrWeights input) {
return converter_->AddTensorOrWeights(name, input);
}
Status GetTensorOrWeights(const string& name, TRT_TensorOrWeights* output) {
return converter_->GetTensorOrWeights(name, output);
}
Status GetInputs(const NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const {
return converter_->GetInputs(node_def, inputs);
}
Status GetWeightRange(const TRT_ShapedWeights& weights, float* out_min,
float* out_max) const {
return converter_->GetWeightRange(weights, out_min, out_max);
}
int batch_size() const { return converter_->batch_size_; }
std::unordered_map<ITensorProxyPtr*, float>& quantization_ranges_proxy() {
return converter_->quantization_ranges_proxy_;
}
std::unordered_map<nvinfer1::ITensor*, float>& quantization_ranges() {
return converter_->quantization_ranges_;
}
private:
Logger& logger_ = *Logger::GetLogger();
protected:
std::unique_ptr<Converter> converter_;
TrtWeightStore* weight_store_;
};
TEST_F(ConverterTest, ConvertNode) {
ITensorProxyPtr output_tensors[2];
auto op_converter =
[&output_tensors](const OpConverterParams* params) -> Status {
nvinfer1::Dims dims = params->inputs[0].tensor()->getDimensions();
for (int i = 0; i < 2; ++i) {
dims.d[0] += 1;
output_tensors[i]->setDimensions(dims);
params->outputs->push_back(TRT_TensorOrWeights(output_tensors[i]));
}
return OkStatus();
};
NodeDef node_def = MakeNodeDef("my_op", "MyOp", {"my_input"});
TF_ASSERT_OK(converter_->AddInputTensor(
"my_input", nvinfer1::DataType::kFLOAT, CreateDims({123}), 1));
EXPECT_THAT(converter_->ConvertNode(node_def),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("No converter for op MyOp")));
GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
op_converter);
TF_ASSERT_OK(converter_->ConvertNode(node_def));
TRT_TensorOrWeights actual_output_1;
TF_EXPECT_OK(GetTensorOrWeights("my_op", &actual_output_1));
EXPECT_EQ(output_tensors[0]->simple_tensor(),
actual_output_1.tensor()->simple_tensor());
EXPECT_EQ(124, actual_output_1.tensor()->getDimensions().d[0]);
TRT_TensorOrWeights actual_output_2;
TF_EXPECT_OK(GetTensorOrWeights("my_op:1", &actual_output_2));
EXPECT_EQ(output_tensors[1]->simple_tensor(),
actual_output_2.tensor()->simple_tensor());
EXPECT_EQ(125, actual_output_2.tensor()->getDimensions().d[0]);
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, AddAndGetInputs) {
NodeDef node_def;
node_def.add_input("^control_input");
node_def.add_input("input");
node_def.add_input("input:0");
node_def.add_input("input:1");
node_def.add_input("weird_input:2:3:4:0");
TF_EXPECT_OK(converter_->AddInputTensor("input", nvinfer1::DataType::kFLOAT,
CreateDims({1}), 1));
TF_EXPECT_OK(converter_->AddInputTensor("input:1", nvinfer1::DataType::kINT32,
CreateDims({2, 3}), 1));
TF_EXPECT_OK(converter_->AddInputTensor(
"weird_input:2:3:4", nvinfer1::DataType::kHALF, CreateDims({5, 3}), 1));
std::vector<TRT_TensorOrWeights> inputs;
TF_EXPECT_OK(GetInputs(node_def, &inputs));
EXPECT_EQ(4, inputs.size());
EXPECT_EQ(inputs[0].tensor()->trt_tensor(), inputs[1].tensor()->trt_tensor());
EXPECT_EQ(nvinfer1::DataType::kFLOAT, inputs[0].tensor()->getType());
EXPECT_EQ(nvinfer1::DataType::kINT32, inputs[2].tensor()->getType());
EXPECT_EQ(nvinfer1::DataType::kHALF, inputs[3].tensor()->getType());
EXPECT_THAT(inputs[0].tensor()->getDimensions(), DimsAreArray({1}));
EXPECT_THAT(inputs[2].tensor()->getDimensions(), DimsAreArray({2, 3}));
EXPECT_THAT(inputs[3].tensor()->getDimensions(), DimsAreArray({5, 3}));
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, RenameAndMarkOutputTensors) {
std::vector<ITensorProxyPtr> output_tensors;
auto op_converter =
[&output_tensors](const OpConverterParams* params) -> Status {
nvinfer1::Permutation perm;
perm.order[0] = 1;
perm.order[1] = 0;
for (int i = 0; i < 2; ++i) {
ITensorProxyPtr input_tensor = params->inputs[0].tensor();
nvinfer1::IShuffleLayer* layer =
params->converter->network()->addShuffle(*input_tensor->trt_tensor());
layer->setFirstTranspose(perm);
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->emplace_back(output_tensor);
output_tensors.push_back(output_tensor);
}
TRT_ShapedWeights output_weights(nvinfer1::DataType::kFLOAT);
params->outputs->emplace_back(output_weights);
return OkStatus();
};
GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
op_converter);
NodeDef node_def = MakeNodeDef("my_op", "MyOp", {"my_input"});
TF_EXPECT_OK(converter_->AddInputTensor(
"my_input", nvinfer1::DataType::kFLOAT, CreateDims({1, 2}), 1));
TF_EXPECT_OK(converter_->ConvertNode(node_def));
EXPECT_THAT(
converter_->RenameAndMarkOutputTensors({{"my_op:2", "my_output"}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output my_op:2 is weights not tensor")));
TF_EXPECT_OK(converter_->RenameAndMarkOutputTensors(
{{"my_op", "my_output"}, {"my_op:1", "my_output_1"}}));
EXPECT_EQ(2, output_tensors.size());
for (auto output_tensor : output_tensors) {
EXPECT_THAT(output_tensor->getDimensions(), DimsAreArray({2, 1}));
}
EXPECT_EQ("my_output", string(output_tensors[0]->getName()));
EXPECT_EQ("my_output_1", string(output_tensors[1]->getName()));
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, TransposeTensor) {
ITensorProxyPtr input_tensor = converter_->network()->addInput(
"", nvinfer1::DataType::kFLOAT, CreateDims({2, 3, 5}));
ITensorProxyPtr output_tensor = nullptr;
NodeDef dummy_node_def = MakeNodeDef("dummy_op", "DummyOp", {});
EXPECT_THAT(converter_->TransposeTensor(input_tensor, {0, 1}, &output_tensor,
dummy_node_def, "sub1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Rank of perm for transpose does not match "
"with that of the input")));
EXPECT_THAT(
converter_->TransposeTensor(input_tensor, {1, 0, 2, 3}, &output_tensor,
dummy_node_def, "sub2"),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Transpose at batch dimension is not supported.")));
TF_EXPECT_OK(converter_->TransposeTensor(
input_tensor, {0, 3, 1, 2}, &output_tensor, dummy_node_def, "sub3"));
EXPECT_THAT(output_tensor->getDimensions(), DimsAreArray({5, 2, 3}));
EXPECT_THAT(
converter_->network(),
LayerNamesAreArray({"TRTEngineOp_000_000/dummy_op-sub3:SHUFFLE"}));
}
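// Reshapes `input_dims` to `reshape_dims` via PrepareTensorForShape and
// checks the resulting dimensions (or the expected error), for both a tensor
// and a weights input, in validation-only and conversion modes.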
void TestPrepareTensorForShape(
const std::vector<int>& input_dims, const std::vector<int>& reshape_dims,
const std::vector<int>& expected_tensor_dims, bool input_is_tensor,
Converter* converter, TrtWeightStore* weight_store,
absl::StatusCode expected_code = absl::StatusCode::kOk,
const char* expected_error_msg_substr = nullptr) {
TRT_TensorOrWeights input;
if (input_is_tensor) {
input = TRT_TensorOrWeights(converter->network()->addInput(
"", nvinfer1::DataType::kFLOAT, CreateDims(input_dims)));
} else {
input = TRT_TensorOrWeights(
weight_store
->GetTempWeights(nvinfer1::DataType::kFLOAT, CreateDims(input_dims))
.value());
}
ITensorProxyPtr output_tensor = nullptr;
NodeDef dummy_node_def = MakeNodeDef("dummy_op", "DummyOp", {});
for (bool validation_only : {false, true}) {
const Status status =
PrepareTensorForShape(converter, input, DimsAdapter(reshape_dims),
validation_only, &output_tensor, dummy_node_def);
if (expected_code == absl::StatusCode::kOk) {
TF_EXPECT_OK(status);
if (validation_only) {
EXPECT_EQ(nullptr, *output_tensor);
} else {
EXPECT_THAT(output_tensor->getDimensions(),
DimsAreArray(expected_tensor_dims));
}
} else {
EXPECT_THAT(status, StatusIs(expected_code,
HasSubstr(expected_error_msg_substr)));
}
}
}
TEST_F(ConverterTest, PrepareTensorForShape) {
for (bool input_is_tensor : {true, false}) {
Reset();
TestPrepareTensorForShape({2, 3, 5}, {2, 3, 6}, {}, input_is_tensor,
converter_.get(), weight_store_,
absl::StatusCode::kInvalidArgument,
"Incompatible shapes");
Reset();
TestPrepareTensorForShape({2, 3, 5}, {10, 3}, {10, 3}, input_is_tensor,
converter_.get(), weight_store_);
Reset();
TestPrepareTensorForShape({1, 1}, {}, {}, input_is_tensor, converter_.get(),
weight_store_);
}
  Reset();
  TestPrepareTensorForShape({}, {1, 1}, {1, 1}, /*input_is_tensor=*/true,
                            converter_.get(), weight_store_);
  Reset();
  TestPrepareTensorForShape({2, 3, 5}, {-1, 2}, {15, 2},
                            /*input_is_tensor=*/true, converter_.get(),
                            weight_store_);
  Reset();
  TestPrepareTensorForShape({2, 3, 5}, {-1, 2}, {15, 2},
                            /*input_is_tensor=*/false, converter_.get(),
                            weight_store_, absl::StatusCode::kInvalidArgument,
                            "Shape is not fully defined");
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
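// The converter's batch size can be set once to a concrete value; -1 is a
// no-op and a conflicting value is rejected.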
TEST_F(ConverterTest, MaybeUpdateBatchSize) {
EXPECT_EQ(-1, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(-1));
EXPECT_EQ(-1, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
EXPECT_EQ(123, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
EXPECT_EQ(123, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(-1));
EXPECT_EQ(123, batch_size());
EXPECT_THAT(
MaybeUpdateBatchSize(124),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(
"Provided batch size does not match converter batch size")));
}
TEST_F(ConverterTest, AddAndGetTensorOrWeights) {
ITensorProxyPtr simple_tensor;
TRT_TensorOrWeights tensor(simple_tensor);
EXPECT_EQ(-1, tensor.batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
TF_EXPECT_OK(AddTensorOrWeights("my_tensor", tensor));
TRT_TensorOrWeights added_tensor;
TF_EXPECT_OK(GetTensorOrWeights("my_tensor", &added_tensor));
EXPECT_EQ(123, added_tensor.batch_size());
EXPECT_THAT(AddTensorOrWeights("my_tensor", tensor),
StatusIs(absl::StatusCode::kAlreadyExists,
HasSubstr("tensor/weights my_tensor already exist")));
}
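// Checks that GetWeightRange returns the min/max element of a weights blob
// for the given element type T.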
template <typename T>
void TestGetWeightRange(ConverterTest* test, TrtWeightStore* weight_store) {
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(DataTypeToEnum<T>::v(), &trt_type));
TRT_ShapedWeights weights =
weight_store->GetTempWeights(trt_type, CreateDims({2, 3})).value();
const std::vector<T> values = {T(3), T(1), T(2), T(6), T(5), T(4)};
absl::c_copy(values, weights.GetPointer<T>());
float out_min = 0.0f;
float out_max = 0.0f;
TF_EXPECT_OK(test->GetWeightRange(weights, &out_min, &out_max));
EXPECT_EQ(1.0f, out_min);
EXPECT_EQ(6.0f, out_max);
}
TEST_F(ConverterTest, GetWeightRange) {
TestGetWeightRange<float>(this, weight_store_);
TestGetWeightRange<Eigen::half>(this, weight_store_);
TestGetWeightRange<int32>(this, weight_store_);
}
TEST_F(ConverterTest, ProvideQuantizationRange) {
ITensorProxyPtr simple_tensor;
converter_->ProvideQuantizationRange(&simple_tensor, 0.0f, 6.0f);
EXPECT_EQ(6.0f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, 1.0f, 6.0f);
EXPECT_EQ(6.0f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, -8.0f, 6.0f);
EXPECT_EQ(8.0f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, -8.123f, -6.123f);
EXPECT_EQ(8.123f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, -6.123f, 6.123f);
EXPECT_EQ(6.123f, quantization_ranges_proxy()[&simple_tensor]);
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, MaybeApplyQuantizationRanges) {
ITensorProxyPtr input;
ITensorProxyPtr not_infer;
Logger& logger = *Logger::GetLogger();
  auto int8_converter = Converter::Create(TrtPrecisionMode::INT8,
                                          /*use_calibration=*/true, &logger,
                                          /*use_implicit_batch=*/true,
                                          /*engine_name=*/"")
                            .value();
int8_converter->ProvideQuantizationRange(&input, -5.0f, 5.0f);
int8_converter->ProvideQuantizationRange(¬_infer, -100.0f, 100.0f);
int8_converter->MaybeApplyQuantizationRanges();
EXPECT_EQ(input->getDynamicRangeMax(), 5.0f);
EXPECT_EQ(not_infer->getDynamicRangeMax(), 100.0f);
EXPECT_THAT(int8_converter->network(), LayerNamesNonEmpty());
}
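// Tests GetTrtBroadcastShape for tensor/weights operand combinations.
// symmetric_test runs each case in both argument orders, since the broadcast
// rules are expected to be symmetric.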
TEST_F(ConverterTest, GetTrtBroadcastShape) {
const bool kIsTensor = true;
const bool kIsNotTensor = false;
auto symmetric_test = [this](const std::vector<int>& operand_1_shape,
const std::vector<int>& operand_2_shape,
const bool operand_1_is_tensor,
const bool operand_2_is_tensor,
const std::vector<int>& expected_operand_1_shape,
const std::vector<int>& expected_operand_2_shape,
absl::StatusCode expected_code =
absl::StatusCode::kOk,
const char* expected_error_msg_substr = "",
const int operand_1_batch_size = -1,
const int operand_2_batch_size = -1) {
auto create_tensor_or_weights = [](const std::vector<int>& shape,
bool is_tensor, int batch_size = -1) {
if (is_tensor) {
return TRT_TensorOrWeights(nvinfer1::DataType::kFLOAT,
CreateDims(shape), batch_size);
}
TRT_ShapedWeights weights;
weights.Shape() = CreateDims(shape);
return TRT_TensorOrWeights(weights);
};
nvinfer1::Dims operand_1_new_dims, operand_2_new_dims;
TRT_TensorOrWeights operand_1 = create_tensor_or_weights(
operand_1_shape, operand_1_is_tensor, operand_1_batch_size);
TRT_TensorOrWeights operand_2 = create_tensor_or_weights(
operand_2_shape, operand_2_is_tensor, operand_2_batch_size);
    EXPECT_THAT(
        GetTrtBroadcastShape(operand_1, operand_2, /*check_feasibility=*/true,
                             /*use_implicit_batch=*/true, &operand_1_new_dims,
                             &operand_2_new_dims),
        StatusIs(expected_code, HasSubstr(expected_error_msg_substr)));
if (expected_code == absl::StatusCode::kOk) {
EXPECT_THAT(operand_1_new_dims, DimsAreArray(expected_operand_1_shape));
EXPECT_THAT(operand_2_new_dims, DimsAreArray(expected_operand_2_shape));
}
    EXPECT_THAT(
        GetTrtBroadcastShape(operand_2, operand_1, /*check_feasibility=*/true,
                             /*use_implicit_batch=*/true, &operand_2_new_dims,
                             &operand_1_new_dims),
        StatusIs(expected_code, HasSubstr(expected_error_msg_substr)));
if (expected_code == absl::StatusCode::kOk) {
EXPECT_THAT(operand_1_new_dims, DimsAreArray(expected_operand_1_shape));
EXPECT_THAT(operand_2_new_dims, DimsAreArray(expected_operand_2_shape));
}
};
symmetric_test(
{1}, {1}, kIsNotTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting requires at least one of the operands be tensors");
symmetric_test({1, 1, 1}, {2}, kIsTensor, kIsNotTensor, {1, 1, 1}, {1, 1, 2});
symmetric_test({1, 1, 2}, {2}, kIsTensor, kIsNotTensor, {1, 1, 2}, {1, 1, 2});
symmetric_test({1, 3, 2}, {1}, kIsTensor, kIsNotTensor, {1, 3, 2}, {1, 1, 1});
symmetric_test({1, 1, 1}, {2, 3}, kIsTensor, kIsNotTensor, {1, 1, 1},
{1, 2, 3});
symmetric_test({1, 1, 1}, {2, 3, 4}, kIsTensor, kIsNotTensor, {1, 1, 1},
{2, 3, 4});
symmetric_test({1, 1, 1}, {1, 2, 3, 4}, kIsTensor, kIsNotTensor, {1, 1, 1},
{2, 3, 4});
symmetric_test({1, 3, 4}, {1, 2, 1, 4}, kIsTensor, kIsNotTensor, {1, 3, 4},
{2, 1, 4});
symmetric_test({1, 1, 1}, {2, 1, 1, 1}, kIsTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Infeasible broadcast scheme");
  symmetric_test({1, 1, 1}, {2, 1, 1, 1}, kIsTensor, kIsNotTensor, {}, {},
                 absl::StatusCode::kInvalidArgument,
                 "Infeasible broadcast scheme",
                 /*operand_1_batch_size=*/2);
symmetric_test({1, 1, 1}, {1, 1, 1, 1, 1}, kIsTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 4 vs broadcast #dims 5)");
  symmetric_test({3}, {1, 1, 3}, kIsTensor, kIsNotTensor, {}, {},
                 absl::StatusCode::kInvalidArgument,
                 "Broadcasting beyond batch dimension is not supported "
                 "(tensor #dims 2 vs broadcast #dims 3)",
                 /*operand_1_batch_size=*/2);
symmetric_test({1, 1, 1}, {1, 1}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 3 vs broadcast #dims 4)");
symmetric_test({1, 3}, {3}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 2 vs broadcast #dims 3)");
symmetric_test({1, 3, 4}, {2, 1, 4}, kIsTensor, kIsTensor, {1, 3, 4},
{2, 1, 4});
symmetric_test({1, 1, 1}, {1, 1, 1, 1}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 4 vs broadcast #dims 5)");
symmetric_test({2, 3}, {7, 5}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Infeasible broadcast scheme");
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, CreateConstantLayer) {
for (auto dtype : {nvinfer1::DataType::kFLOAT, nvinfer1::DataType::kINT32}) {
TRT_ShapedWeights weights =
weight_store_->GetTempWeights(dtype, CreateDims({2, 3, 5})).value();
ITensorProxyPtr tensor =
converter_->CreateConstantLayer(weights, CreateDims({3, 10}));
ASSERT_NE(nullptr, tensor->trt_tensor());
EXPECT_EQ(dtype, tensor->getType())
<< "Expected " << DebugString(dtype) << " vs. actual "
<< DebugString(tensor->getType());
EXPECT_THAT(tensor->getDimensions(), DimsAreArray({3, 10}));
}
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
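// Fixture that converts a Scope's GraphDef to a TensorRT engine, deriving
// the input shapes and batch size from the graph's input placeholders.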
class ConvertGraphDefToEngineTest : public ::testing::Test {
public:
Status RunConvertGraphDefToEngine(Scope* s) {
GraphDef gdef;
TF_EXPECT_OK(s->ToGraphDef(&gdef));
std::vector<PartialTensorShape> input_shapes;
int batch_size = -1;
for (const NodeDef& node : gdef.node()) {
absl::string_view node_name(node.name());
if (absl::ConsumePrefix(&node_name, IONamePrefixes::kInputPHName)) {
int port = -1;
EXPECT_TRUE(absl::SimpleAtoi(node_name, &port)) << node.name();
if (input_shapes.size() < port + 1) input_shapes.resize(port + 1);
input_shapes[port] =
PartialTensorShape(node.attr().at("shape").shape());
if (batch_size == -1) {
batch_size = input_shapes[port].dim_size(0);
} else {
EXPECT_EQ(batch_size, input_shapes[port].dim_size(0));
}
}
}
    return ConvertGraphDefToEngine(
        gdef, /*ctx=*/nullptr, TrtPrecisionMode::FP32, /*max_batch_size=*/1,
        /*max_workspace_size_bytes=*/64 << 20, input_shapes, &logger_,
        /*allocator=*/nullptr, /*calibrator=*/nullptr, &engine_,
        /*use_calibration=*/false, /*use_implicit_batch=*/true,
        /*convert_successfully=*/nullptr, /*profiles=*/nullptr,
        "TRTEngineOp_000_000", /*use_explicit_precision=*/false);
}
protected:
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
private:
Logger& logger_ = *Logger::GetLogger();
};
TEST_F(ConvertGraphDefToEngineTest, IdentityGraph) {
Scope s = Scope::NewRootScope();
auto input =
ops::Placeholder(s.WithOpName(StrCat(IONamePrefixes::kInputPHName, 0)),
DT_FLOAT, ops::Placeholder::Shape({1, 1}));
auto output = ops::Identity(s.WithOpName("identity1"), input);
output = ops::Identity(s.WithOpName("identity2"), output);
output = ops::Identity(s.WithOpName(StrCat(IONamePrefixes::kOutputPHName, 0)),
output);
TF_EXPECT_OK(RunConvertGraphDefToEngine(&s));
}
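// Collects the TensorShape of every entry in `input_data`.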
Status GetShapeFromDataVec(const DataVec& input_data,
                           std::vector<TensorShape>* shape_vec) {
  shape_vec->reserve(input_data.size());
  std::transform(input_data.begin(), input_data.end(),
                 std::back_inserter(*shape_vec),
                 [](const InputOutputData& x) { return x.tensor.shape(); });
  return OkStatus();
}
}
template <typename T>
inline absl::Span<const T> GetSpanForData(const InputOutputData& data) {
const auto& tensor_map = data.tensor.flat<T>();
return absl::Span<const T>(tensor_map.data(), tensor_map.size());
}
std::vector<float> GetDataAsFloat(InputOutputData& data) {
const auto dType = data.tensor.dtype();
if (dType == DT_FLOAT) {
auto span = GetSpanForData<float>(data);
return std::vector<float>(span.begin(), span.end());
}
if (dType == DT_HALF) {
return CastVector<Eigen::half, float>(GetSpanForData<Eigen::half>(data));
}
if (dType == DT_INT32) {
return CastVector<int32, float>(GetSpanForData<int32>(data));
}
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
if (dType == DT_BOOL) {
return CastVector<bool, float>(GetSpanForData<bool>(data));
}
#endif
LOG(FATAL) << "DataType not supported for testing " << DataTypeString(dType);
return {};
}
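// Base fixture for single-op conversion tests: builds a TF graph whose
// inputs are test tensors or weights, runs validation and conversion, and
// can build and execute the resulting engine on a CUDA stream.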
class OpConverterTest : public ::testing::Test {
public:
OpConverterTest()
: tensor_buffer_allocator_(new GpuManagedAllocator()),
scope_(Scope::NewRootScope()) {
QCHECK_EQ(0, cudaStreamCreate(&stream_));
Reset();
}
~OpConverterTest() noexcept override {
QCHECK_EQ(0, cudaStreamDestroy(stream_));
}
Status GetTensorOrWeights(const string& name, TRT_TensorOrWeights* output) {
return converter_->GetTensorOrWeights(name, output);
}
void Reset(TrtPrecisionMode precision_mode_to_test = TrtPrecisionMode::FP32,
TrtTestMode trt_mode = TrtTestMode::kImplicitBatch,
OpKernelContext* ctx = nullptr) {
converter_.reset(nullptr);
engine_.reset(nullptr);
    converter_ =
        std::move(Converter::Create(precision_mode_to_test,
                                    /*use_calibration=*/false, &logger_,
                                    /*use_implicit_batch=*/trt_mode ==
                                        TrtTestMode::kImplicitBatch,
                                    /*engine_name=*/"",
                                    /*use_explicit_precision=*/false, ctx)
                      .value());
scope_ = Scope::NewRootScope();
}
template <typename T>
Tensor AsTensor(gtl::ArraySlice<T> vals) {
Tensor ret(tensor_buffer_allocator_.get(), DataTypeToEnum<T>::value,
{static_cast<int64_t>(vals.size())});
std::copy_n(vals.data(), vals.size(), ret.flat<T>().data());
return ret;
}
template <typename T>
Tensor AsTensor(gtl::ArraySlice<T> vals,
const TensorShape& shape) {
Tensor ret(tensor_buffer_allocator_.get(), DataTypeToEnum<T>::value,
{static_cast<int64_t>(vals.size())});
CHECK(ret.CopyFrom(AsTensor(vals), shape));
return ret;
}
template <typename T, typename S>
void transformTensor(const std::vector<T>& vals, Tensor& ret) {
std::transform(vals.begin(), vals.end(), ret.flat<S>().data(),
[](const T in_val) -> S { return static_cast<S>(in_val); });
}
template <typename T, typename S>
void transformWeights(const std::vector<T>& vals,
TRT_ShapedWeights& weights) {
std::transform(vals.begin(), vals.end(), weights.GetPointer<S>(),
[](const T in_val) -> S { return static_cast<S>(in_val); });
}
template <typename T>
Tensor AsTensor(const std::vector<T>& vals,
const std::vector<int>& input_dims, DataType tf_type) {
Tensor ret(tensor_buffer_allocator_.get(), tf_type,
{static_cast<int64_t>(vals.size())});
if (tf_type == DT_FLOAT) {
transformTensor<T, float>(vals, ret);
} else if (tf_type == DT_HALF) {
transformTensor<T, Eigen::half>(vals, ret);
} else if (tf_type == DT_INT32) {
transformTensor<T, int32>(vals, ret);
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
} else if (tf_type == DT_BOOL) {
transformTensor<T, bool>(vals, ret);
#endif
} else {
LOG(FATAL) << "Cannot create tensor with type "
<< DataTypeString(tf_type);
}
TensorShape shape;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(input_dims, &shape));
CHECK(ret.CopyFrom(ret, shape));
return ret;
}
template <typename T>
Tensor AsTensor(const std::vector<int>& vals,
const std::vector<int>& input_dims, DataType tf_type) {
const auto& conv_vals = CastVector<int, T>(vals);
return AsTensor(conv_vals, input_dims, tf_type);
}
template <typename T>
Tensor ConstructTensor(int data_size, const T& value = T()) {
std::vector<T> values(data_size, value);
return AsTensor<T>(values);
}
template <typename T>
Tensor ConstructTensor(int data_size, const T& value, DataType tf_type) {
std::vector<T> values(data_size, value);
return AsTensor<T>(values, {data_size}, tf_type);
}
void CheckDataTypeMatches(const DataVec& datas) {
if (VLOG_IS_ON(2)) {
int nbBindings = engine_->getNbBindings();
VLOG(2) << "Number of engine bindings: " << nbBindings;
for (int i = 0; i < nbBindings; i++) {
VLOG(2) << "Binding " << i << " name: " << engine_->getBindingName(i);
}
}
for (const auto& data : datas) {
VLOG(2) << "Checking if data type matches for tensor " << data.name;
const int input_index = engine_->getBindingIndex(data.name.c_str());
ASSERT_NE(-1, input_index);
const nvinfer1::DataType trt_dtype =
engine_->getBindingDataType(input_index);
DataType tf_type;
TF_ASSERT_OK(TrtTypeToTfType(trt_dtype, &tf_type));
ASSERT_EQ(data.tensor.dtype(), tf_type)
<< DataTypeString(data.tensor.dtype()) << " vs. "
<< DataTypeString(tf_type);
}
}
Status BuildAndRun(const DataVec& input_data, DataVec* output_data,
const int batch_size = 1) {
std::vector<Converter::EngineOutputInfo> output_info;
for (const auto& data : *output_data) {
nvinfer1::DataType trt_type;
TF_RETURN_IF_ERROR(TfTypeToTrtType(data.tensor.dtype(), &trt_type));
output_info.push_back({data.name, data.name, trt_type});
}
TF_RETURN_IF_ERROR(converter_->RenameAndMarkOutputTensors(output_info));
if (engine_.get() != nullptr) {
return errors::Internal("Engine already exists");
}
TrtShapeOptimizationProfile profiles;
if (!converter_->use_implicit_batch()) {
std::vector<bool> input_mask(input_data.size());
for (int i = 0; i < input_data.size(); i++) {
input_mask[i] = (input_data[i].tensor.dtype() != DataType::DT_RESOURCE);
}
profiles.SetInputMask(input_mask);
profiles.SetShapeTensorMask(converter_->network());
TF_RETURN_IF_ERROR(profiles.CollectShapeValues(input_data));
std::vector<TensorShape> input_shapes;
TF_RETURN_IF_ERROR(GetShapeFromDataVec(input_data, &input_shapes));
profiles.AddShape(input_shapes);
std::vector<PartialTensorShape> input_partial_shapes;
TF_RETURN_IF_ERROR(
GetNetworkInputShapes(converter_->network(), &input_partial_shapes));
profiles.InitProfiles(input_partial_shapes, ProfileStrategy::kRange);
}
    TF_RETURN_IF_ERROR(
        converter_->BuildCudaEngine(&engine_,
                                    /*max_batch_size=*/batch_size,
                                    /*max_workspace_size_bytes=*/1 << 26,
                                    /*allocator=*/nullptr,
                                    /*calibrator=*/nullptr,
                                    &profiles));
CHECK_NOTNULL(engine_.get());
CheckDataTypeMatches(input_data);
CheckDataTypeMatches(*output_data);
const int num_bindings = input_data.size() + output_data->size();
std::vector<void*> buffers(num_bindings);
    if (engine_->getNbBindings() != num_bindings) {
      return errors::Internal("Number of bindings does not match");
    }
TrtUniquePtrType<nvinfer1::IExecutionContext> execution_context(
engine_->createExecutionContext());
TF_RETURN_IF_ERROR(
SetTrtEngineInputs(engine_.get(), execution_context.get(), 0, buffers,
converter_->use_implicit_batch(), batch_size,
profiles, nullptr, &input_data));
TF_RETURN_IF_ERROR(SetTrtEngineOutputs(
engine_.get(), execution_context.get(), 0, buffers,
converter_->use_implicit_batch(), batch_size, nullptr, output_data));
TF_RETURN_IF_ERROR(TrtEnqueue(execution_context.get(), buffers, stream_,
converter_->use_implicit_batch(),
batch_size));
cudaStreamSynchronize(stream_);
return OkStatus();
}
void AddTestTensorWithTFDims(
const string& name, const std::vector<int32>& dims,
nvinfer1::DataType trt_type = nvinfer1::DataType::kFLOAT,
Status add_input_status = OkStatus()) {
DataType tf_type;
TF_ASSERT_OK(TrtTypeToTfType(trt_type, &tf_type));
ops::Placeholder::Attrs attrs;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(dims, &attrs.shape_));
auto input = ops::Placeholder(scope_.WithOpName(name), tf_type, attrs);
node_inputs_[name] = input.output;
auto dims_adap =
DimsAdapter::Create(attrs.shape_, converter_->use_implicit_batch());
if (converter_->use_implicit_batch() && !dims_adap.ok()) {
ASSERT_EQ(add_input_status, dims_adap.status());
return;
} else {
TF_EXPECT_OK(dims_adap.status());
}
if (!converter_->use_implicit_batch() || dims_adap->IsStatic()) {
int batch_size = dims.size() > 0 ? dims[0] : 0;
Status status = converter_->AddInputTensor(
name, trt_type, dims_adap->AsTrtDims(), batch_size);
ASSERT_EQ(add_input_status, status);
}
}
Status AddTensorOrWeights(const string& name, TRT_TensorOrWeights input) {
return converter_->AddTensorOrWeights(name, input);
}
void AddTestTensor(
const string& name, const std::vector<int32>& dims, int batch_size = 1,
nvinfer1::DataType trt_dtype = nvinfer1::DataType::kFLOAT) {
DimsAdapter adap(dims);
std::vector<int32_t> dims_vec;
TF_CHECK_OK(adap.Prepend(batch_size).Vector(&dims_vec));
AddTestTensorWithTFDims(name, dims_vec, trt_dtype);
if (adap.IsStatic()) {
ASSERT_EQ(batch_size, converter_->batch_size_);
}
}
template <typename T = int32>
void AddTestWeights(const string& name, const std::vector<int>& dims,
const std::vector<T>& values_inp, DataType tf_type,
bool fix_values = true) {
const DimsAdapter dims_adap(dims);
const int64_t num_elements = dims_adap.Volume();
std::vector<T> values(values_inp);
if (num_elements != values.size()) {
if (fix_values) {
AdjustVectorByDims<T>(values, num_elements, name, "AddTestWeights");
} else {
FAIL() << "Unable to create test weights: "
<< (num_elements > values.size() ? "not enough" : "to many")
<< " values specified: " << values.size() << " vs. "
<< num_elements << " defined by dims";
}
}
Tensor t = AsTensor<T>(values, dims, tf_type);
node_inputs_[name] = ops::Const(scope_.WithOpName(name), t);
nvinfer1::DataType dtype;
TF_ASSERT_OK(TfTypeToTrtType(tf_type, &dtype));
QCHECK_EQ(num_elements, values.size())
<< num_elements << " vs " << values.size();
TRT_ShapedWeights weights(dtype);
if (num_elements) {
weights =
converter_->weight_store_.GetTempWeights(dtype, dims_adap.AsTrtDims())
.value();
if (tf_type == DT_FLOAT) {
transformWeights<T, float>(values, weights);
} else if (tf_type == DT_HALF) {
transformWeights<T, Eigen::half>(values, weights);
} else if (tf_type == DT_INT32) {
transformWeights<T, int32>(values, weights);
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
} else if (tf_type == DT_BOOL) {
transformWeights<T, bool>(values, weights);
#endif
} else {
LOG(FATAL) << "Cannot create tensor with type "
<< DataTypeString(tf_type);
}
}
TF_EXPECT_OK(
converter_->AddTensorOrWeights(name, TRT_TensorOrWeights{weights}));
}
template <typename T = int32>
void AddTestWeights(const string& name, const std::vector<int>& dims,
const std::vector<T>& value, bool fix_values = true) {
AddTestWeights(name, dims, value, DataTypeToEnum<T>::value, fix_values);
}
Status RunValidation(const Node* node) {
grappler::GrapplerItem item;
TF_EXPECT_OK(scope_.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
    TrtNodeValidator validator(
        graph_properties, converter_->precision_mode(),
        /*use_calibration=*/false,
        converter_->use_implicit_batch(),
        /*use_explicit_precision=*/false);
return validator.IsTensorRTCandidate(node);
}
void RunConversion(const Node* node,
absl::StatusCode expected_code = absl::StatusCode::kOk,
absl::string_view expected_msg_substr = "") {
EXPECT_THAT(converter_->ConvertNode(node->def()),
StatusIs(expected_code, HasSubstr(expected_msg_substr)));
if (expected_code == absl::StatusCode::kOk) {
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
}
void RunValidationAndConversion(
const NodeDef& node_def,
absl::StatusCode expected_code = absl::StatusCode::kOk,
absl::string_view expected_msg_substr = "",
bool should_run_conversion = true) {
Graph* graph = scope_.graph();
Status status;
Node* node = graph->AddNode(std::move(node_def), &status);
TF_EXPECT_OK(status);
for (int i = 0; i < node_def.input().size(); ++i) {
const string& input_name = node_def.input(i);
const auto& itr = node_inputs_.find(input_name);
QCHECK(itr != node_inputs_.end());
const Output& input = itr->second;
graph->AddEdge(input.node(), input.index(), node, i);
}
status = RunValidation(node);
if (should_run_conversion && status.ok()) {
RunConversion(node, expected_code, expected_msg_substr);
} else {
EXPECT_THAT(status,
StatusIs(expected_code, HasSubstr(expected_msg_substr)));
}
}
void RunValidationAndConversion(
const NodeDef& node_def, const Status& status,
const std::string& output_name,
const std::vector<std::vector<int>>& exp_out_dims) {
RunValidationAndConversion(node_def,
static_cast<absl::StatusCode>(status.code()),
status.message(), true);
if (status.ok()) {
if (converter_->use_implicit_batch()) {
for (int i = 0; i < exp_out_dims.size(); i++) {
TRT_TensorOrWeights output;
string name = i == 0 ? output_name : StrCat(output_name, ":", i);
TF_EXPECT_OK(GetTensorOrWeights(name.c_str(), &output));
ASSERT_TRUE(output.is_tensor());
if (!exp_out_dims[i].empty()) {
auto out_dims = std::vector<int>(exp_out_dims[i].begin() + 1,
exp_out_dims[i].end());
VLOG(2) << "Testing output shape for tensor " << name;
EXPECT_THAT(output.tensor()->getDimensions(),
DimsAreArray(out_dims));
}
}
}
}
}
std::unordered_map<ITensorProxyPtr*, float>& quantization_ranges_proxy() {
return converter_->quantization_ranges_proxy_;
}
std::unordered_map<nvinfer1::ITensor*, float>& quantization_ranges() {
return converter_->quantization_ranges_;
}
protected:
template <typename T>
void AdjustVectorByDims(std::vector<T>& values, size_t num_elements,
const string& name, const char* callingFunc) {
const auto old_size = values.size();
if (num_elements > old_size) {
const std::vector<T> zeros(num_elements - old_size, 0);
values.reserve(num_elements);
values.insert(values.end(), zeros.begin(), zeros.end());
VLOG(2) << "In function " << callingFunc << " the vector '" << name
<< "' was extended by " << num_elements - old_size << " zeros";
} else {
values.resize(num_elements);
VLOG(2) << "Only first " << num_elements << " out of " << old_size
<< " elements of the vector '" << name
<< "' will be used in function" << callingFunc;
}
}
public:
std::unique_ptr<Converter> converter_;
protected:
Logger& logger_ = *Logger::GetLogger();
private:
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
cudaStream_t stream_;
std::unique_ptr<Allocator> tensor_buffer_allocator_;
public:
Scope scope_;
protected:
std::unordered_map<string, Output> node_inputs_;
};
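// Extends OpConverterTest with an OpKernelContext backed by a real GPU
// device, so converters that read resource variables can be exercised.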
class VariableOpConverterTest : public OpConverterTest {
public:
void Reset(TrtPrecisionMode precision_mode_to_test = TrtPrecisionMode::FP32,
TrtTestMode trt_mode = TrtTestMode::kImplicitBatch) {
OpConverterTest::Reset(precision_mode_to_test, trt_mode, context_.get());
}
void CreateContext(const NodeDef& node_def, OpKernel** kernel,
OpKernelContext** context) {
std::unique_ptr<Device> device_(
DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
Device* device_ptr = device_.get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device_));
managed_allocator_ = std::make_unique<GpuManagedAllocator>();
Allocator* allocator = managed_allocator_.get();
step_container_ =
std::make_unique<ScopedStepContainer>(0, [](const string&) {});
slice_reader_cache_wrapper_ =
std::make_unique<checkpoint::TensorSliceReaderCacheWrapper>();
flib_def_ = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
    thread_pool_ =
        std::make_unique<thread::ThreadPool>(Env::Default(), "default",
                                             /*num_threads=*/1);
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
thread_pool_.get());
FunctionLibraryRuntime* flib = pflr_->GetFLR(device_ptr->name());
ResourceMgr* resource_mgr = device_ptr->resource_manager();
TF_CHECK_OK(NodeProperties::CreateFromNodeDef(
node_def, OpRegistry::Global(), &props_));
OpKernel* kernel_ptr = nullptr;
TF_CHECK_OK(CreateOpKernel(DEVICE_GPU, device_ptr, allocator, flib,
resource_mgr, props_, TF_GRAPH_DEF_VERSION,
&kernel_ptr));
op_kernel_ = std::unique_ptr<OpKernel>(kernel_ptr);
auto* dev_info = device_ptr->tensorflow_accelerator_device_info();
CHECK_NOTNULL(dev_info);
DeviceContext* device_context = dev_info->default_context;
params_.device = device_ptr;
params_.op_kernel = op_kernel_.get();
params_.resource_manager = resource_mgr;
params_.frame_iter = FrameAndIter(0, 0);
params_.inputs = inputs_;
params_.step_container = step_container_.get();
params_.function_library = flib;
params_.slice_reader_cache = slice_reader_cache_wrapper_.get();
params_.op_device_context = device_context;
context_ = std::make_unique<OpKernelContext>(¶ms_);
*kernel = op_kernel_.get();
*context = context_.get();
}
void AddTestResource(const string& name, const ResourceHandle& resource) {
node_inputs_[name] =
ops::Placeholder(scope_.WithOpName("my_handle"), DT_RESOURCE);
TF_EXPECT_OK(AddTensorOrWeights(name, TRT_TensorOrWeights{resource}));
}
private:
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<Allocator> managed_allocator_;
std::unique_ptr<ScopedStepContainer> step_container_;
std::unique_ptr<checkpoint::TensorSliceReaderCacheWrapper>
slice_reader_cache_wrapper_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
OpKernelContext::Params params_;
std::unique_ptr<OpKernel> op_kernel_;
std::unique_ptr<OpKernelContext> context_;
std::shared_ptr<const NodeProperties> props_;
absl::InlinedVector<TensorValue, 4> inputs_;
};
struct TestParamBase {
std::vector<int> input_dims;
std::vector<int> partial_input_dims;
std::vector<int> expected_output_dims;
std::vector<int> param;
Status status;
Status runtime_status;
};
std::ostream& operator<<(std::ostream& os, const TestParamBase& p) {
os << "input_dims" << PrintToString(p.input_dims);
if (!p.partial_input_dims.empty()) {
os << ", partial_input_dims" << PrintToString(p.partial_input_dims);
}
if (!p.expected_output_dims.empty()) {
os << ", exp_out_dims" << PrintToString(p.expected_output_dims);
}
if (!p.param.empty()) {
os << ", param" << PrintToString(p.param);
}
os << ", " << p.status;
return os;
}
template <typename T>
std::string get_debug_string_for_vector(const std::vector<T>& vector,
                                        absl::string_view pComment,
                                        absl::string_view name,
                                        absl::string_view type = "") {
const std::string t1 = absl::StrCat(pComment, " '", name, "': Dims(nbDims=");
const std::string t2 = absl::StrJoin(vector, ",");
const std::string t3 = type != "" ? absl::StrCat(") of type ", type) : ")";
std::stringstream stream;
stream << t1 << vector.size() << ", d=" << t2 << t3;
return stream.str();
}
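// Parameterized base fixture: each instantiation below fixes a TRT test
// mode, a TF data type, and a converter precision, and provides helpers to
// build, run, and compare engine outputs against expected values.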
class ParameterizedOpConverterTestBase
: public OpConverterTest,
public ::testing::WithParamInterface<
std::tuple<TrtTestMode, DataType, TrtPrecisionMode>> {
public:
ParameterizedOpConverterTestBase()
: trt_mode_(std::get<0>(GetParam())),
tf_type_(std::get<1>(GetParam())),
converter_precision_(std::get<2>(GetParam())) {
LOG(INFO) << "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%";
LOG(INFO) << "tf_type_: " << DebugString(tf_type_);
LOG(INFO) << "trt_mode_: " << DebugString(trt_mode_);
LOG(INFO) << "converter_precision_: " << DebugString(converter_precision_);
LOG(INFO) << "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%";
}
void Reset() {
OpConverterTest::Reset(converter_precision_, trt_mode_);
input_data_.clear();
}
void Reset(TrtPrecisionMode precision) {
OpConverterTest::Reset(precision, trt_mode_);
input_data_.clear();
}
DataType get_tf_type() { return tf_type_; }
TrtTestMode get_trt_mode() { return trt_mode_; }
TrtPrecisionMode get_converter_precision() { return converter_precision_; }
template <typename T = int>
void AddTestTensor(const string& name, const std::vector<int32>& dims,
DataType tf_type, const std::vector<T>& values_inp,
const std::vector<int32>& partial_input_shape_dims = {},
Status add_input_status = OkStatus(),
bool fix_values = true) {
std::vector<T> values(values_inp);
VLOG(2) << "**** AddTestTensor for " << name
<< " ***** dims empty() = " << dims.empty()
<< " tf_type = " << DebugString(tf_type);
if (!dims.empty()) {
const auto num_elements = std::accumulate(
std::begin(dims), std::end(dims), 1, std::multiplies<double>());
if (!values.empty() && num_elements != values.size()) {
if (fix_values) {
AdjustVectorByDims(values, num_elements, name, "AddTestTensor");
} else {
LOG(WARNING) << "Expected Test Tensor Shape: " << DebugString(dims)
<< ", Received Input Tensor: " << DebugString(values);
}
}
}
std::vector<int32> partial_shape;
if (!partial_input_shape_dims.empty()) {
partial_shape = partial_input_shape_dims;
} else {
if (trt_mode_ == TrtTestMode::kDynamicShape) {
partial_shape = std::vector<int32>(dims.size(), -1);
} else {
partial_shape = dims;
}
if (VLOG_IS_ON(2)) {
VLOG(2) << get_debug_string_for_vector(partial_shape,
"Using partial_shape for", name);
}
}
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(tf_type, &trt_type));
AddTestTensorWithTFDims(name, partial_shape, trt_type, add_input_status);
if (!values.empty()) {
if (VLOG_IS_ON(2)) {
VLOG(2) << get_debug_string_for_vector(values, "Adding test tensor for",
name, DataTypeString(tf_type));
}
InputOutputData data{name, AsTensor(values, dims, tf_type)};
VLOG(2) << "Added tensor: " << data.name << " with dtype "
<< DataTypeString(data.tensor.dtype());
input_data_.push_back(data);
}
}
template <typename T = int>
void AddTestTensor(const string& name, const std::vector<int32>& dims,
const std::vector<T>& values = {},
const std::vector<int32>& partial_input_shape_dims = {}) {
AddTestTensor<T>(name, dims, tf_type_, values, partial_input_shape_dims);
}
void BuildAndRun(const string& name,
const std::vector<std::vector<int>>& expected_output_dims,
const Status& expected_runtime_status,
const std::vector<Matcher<std::vector<float>>>& matcher,
const std::vector<DataType>& out_tf_types = {}) {
TensorShape shape;
const int n_output = expected_output_dims.size();
ASSERT_EQ(n_output, matcher.size());
DataVec output_data;
for (int i = 0; i < n_output; i++) {
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(expected_output_dims[i], &shape));
string out_name = (i == 0) ? name : StrCat(name, ":", i);
DataType out_tf_type =
out_tf_types.size() > i ? out_tf_types[i] : tf_type_;
InputOutputData data{
out_name, ConstructTensor(shape.num_elements(), 0, out_tf_type)};
output_data.push_back(data);
}
const int batch_size =
input_data_.empty() ||
TensorShapeUtils::IsScalar(input_data_[0].tensor.shape())
? 1
: input_data_[0].tensor.shape().dim_size(0);
Status stat =
OpConverterTest::BuildAndRun(input_data_, &output_data, batch_size);
ASSERT_EQ(expected_runtime_status.ok(), stat.ok())
<< "expected status: " << expected_runtime_status
<< ", actual status: " << stat;
if (expected_runtime_status.ok() && stat.ok()) {
for (int i = 0; i < n_output; i++) {
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(expected_output_dims[i], &shape));
EXPECT_TRUE(output_data[i].tensor.shape() == shape)
<< "Expected shape: " << shape.DebugString() << ", actual shape: "
<< output_data[i].tensor.shape().DebugString();
EXPECT_THAT(GetDataAsFloat(output_data[i]), matcher[i]);
}
}
}
void TestOpConverterMultiOut(
const NodeDef& node_def,
const std::vector<std::vector<int>>& expected_output_dims,
const Status& expected_conversion_status,
const Status& expected_runtime_status,
const std::vector<Matcher<std::vector<float>>>& matcher,
const std::vector<DataType>& out_tf_type = {}) {
const auto& name = node_def.name();
RunValidationAndConversion(node_def, expected_conversion_status, name,
expected_output_dims);
if (expected_conversion_status.ok()) {
BuildAndRun(name, expected_output_dims, expected_runtime_status, matcher,
out_tf_type);
}
}
void TestOpConverter(const NodeDef& node_def,
const std::vector<int>& expected_output_dims,
const Status& expected_conversion_status,
const Status& expected_runtime_status,
const Matcher<std::vector<float>>& matcher,
const std::vector<DataType>& out_tf_types = {}) {
TestOpConverterMultiOut(
node_def, std::vector<std::vector<int>>({expected_output_dims}),
expected_conversion_status, expected_runtime_status,
std::vector<Matcher<std::vector<float>>>({matcher}), out_tf_types);
}
protected:
const TrtTestMode trt_mode_;
const DataType tf_type_;
const TrtPrecisionMode converter_precision_;
DataVec input_data_;
};
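// Shared driver for unary op tests: for each op in `map`, converts the op,
// applies the reference function from `op_map` on the host, and compares
// the engine output element-wise.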
template <typename T>
class OpConverter_UnaryTest : public ParameterizedOpConverterTestBase {
public:
template <typename S>
void RunTests(
const string& testName, const OperationMap<S>& map,
std::map<std::string,
std::pair<std::function<NodeDef(DataType)>, T (*)(T)>>& op_map,
const std::vector<T> input_values, const std::string input_name = "input",
float max_abs_error = 0.0001, bool nan_sensitive = true) {
auto p = TestParamBase{
{1, 1, 2, 3},
{},
{1, 1, 2, 3},
};
std::vector<string> ops_to_test;
for (auto& pair : map) {
ops_to_test.push_back(pair.first);
}
for (const string& op_name : ops_to_test) {
SCOPED_TRACE(op_name);
if (!op_map.count(op_name)) {
FAIL() << testName << " op test map does not contain op " << op_name;
}
const DataType tf_type = get_tf_type();
const NodeDef& node = op_map[op_name].first(tf_type);
runExpectedToFailTest(node, input_name, input_values, op_name);
Status conv_status = OkStatus();
if (trt_mode_ == TrtTestMode::kImplicitBatch &&
(op_name == "Sign" || op_name == "Round" ||
op_name == "LogicalNot")) {
const auto& err =
convert_not_supported_implicit(op_name, node.name(), "Unary");
conv_status = errors::Unimplemented(err);
}
Reset();
const DataType input_tf_type = op_name == "Cast" ? DT_HALF : tf_type;
const DataType output_tf_type = op_name == "Cast" ? DT_FLOAT : tf_type;
AddTestTensor("input", p.input_dims, input_tf_type, input_values);
std::vector<float> output;
std::transform(input_values.begin(), input_values.end(),
std::back_inserter(output), op_map[op_name].second);
TestOpConverter(node, p.expected_output_dims, conv_status, OkStatus(),
ArrayFloatNear(output, max_abs_error, nan_sensitive),
{output_tf_type});
}
}
void runExpectedToFailTest(const NodeDef& node_def,
const std::string& input_name,
const std::vector<T>& input_values,
const std::string& op_name) {
Reset();
std::string error =
"The input \"" + input_name + "\" for " + op_name + " must be a tensor";
AddTestWeights("input", {1, 2, 3}, input_values, get_tf_type());
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
error);
Reset();
std::vector<int32> dims{};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
dims = {1};
}
error = "At least 1 dimension is required for UNARY operation '" + op_name +
"'";
AddTestTensor("input", dims);
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
error);
}
};
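// Shared driver for binary op tests: runs every tensor/weights operand
// combination; logical ops produce DT_BOOL outputs, and two constant inputs
// are expected to fall back to constant folding.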
template <typename T>
class OpConverter_BinaryTest : public ParameterizedOpConverterTestBase {
public:
template <typename S>
void RunTests(
const OperationMap<S>& map,
std::map<std::string,
std::pair<std::function<NodeDef(DataType)>, std::vector<T>>>&
op_test_info,
const std::vector<std::vector<T>>& data) {
const std::vector<DataType> bool_types{DT_BOOL}, default_types{};
std::vector<string> logical_ops{"Greater", "Less", "Equal"};
std::vector<string> combined_ops{"GreaterEqual", "LessEqual"};
const DataType tf_type = get_tf_type();
AttrValue dtype;
dtype.set_type(tf_type);
std::map<std::string, NodeDef> nodes;
    for (const auto& op_name : combined_ops) {
nodes[op_name] = MakeNodeDef("my_binary", op_name, {"input1", "input2"},
{{"T", dtype}});
}
for (auto& iter : map) {
const string& op_name = iter.first;
if (!op_test_info.count(op_name)) {
FAIL() << "Binary op test map does not contain op " << op_name;
}
const auto comb_op = find_name(op_name, combined_ops);
const auto& node_def =
comb_op ? nodes[op_name] : op_test_info[op_name].first(tf_type);
for (const bool operand_1_is_tensor : {true, false}) {
for (const bool operand_2_is_tensor : {true, false}) {
SCOPED_TRACE(StrCat(op_name, "_", operand_1_is_tensor ? "T" : "W",
operand_2_is_tensor ? "T" : "W"));
Reset();
if (!operand_1_is_tensor && !operand_2_is_tensor) {
runExpectedToFailTest(op_name, node_def);
continue;
}
const bool logical_op = comb_op || find_name(op_name, logical_ops);
auto conv_status = OkStatus();
if (tf_type == DT_BOOL || logical_op) {
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
conv_status =
errors::Unimplemented(convert_not_supported_implicit(
op_name, node_def.name(), "Binary"));
} else if (!logical_op &&
(!operand_1_is_tensor || !operand_2_is_tensor)) {
conv_status = errors::InvalidArgument(
"Both inputs of '", op_name, "' are expected to be tensors");
}
}
if (operand_1_is_tensor) {
AddTestTensor("input1", {2, 1, 2}, data[0]);
} else {
AddTestWeights("input1", {1, 2}, data[1], tf_type);
}
if (operand_2_is_tensor) {
AddTestTensor("input2", {2, 2, 1}, data[2]);
} else {
AddTestWeights("input2", {2, 1}, data[3], tf_type);
}
TestOpConverter(node_def, {2, 2, 2}, conv_status, OkStatus(),
ElementsAreArray(op_test_info[op_name].second),
logical_op ? bool_types : default_types);
}
}
}
}
void runExpectedToFailTest(const std::string& op_name, const NodeDef& node) {
AddTestWeights("input1", {1}, {1}, tf_type_);
AddTestWeights("input2", {1}, {1}, tf_type_);
const string error =
"Constant folding is falled back to TensorFlow, "
"binary op '" +
op_name + "' received both input as constant";
RunValidationAndConversion(node, absl::StatusCode::kUnimplemented, error);
}
};
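// The suffix of each alias encodes the TF data types its instantiation
// below runs with (e.g. FP32_FP16 runs with DT_FLOAT and DT_HALF).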
typedef ParameterizedOpConverterTestBase OpConverter_FP32_Test;
typedef ParameterizedOpConverterTestBase OpConverter_FP32_FP16_Test;
typedef OpConverter_BinaryTest<float> OpConverter_FP32_FP16_BinaryTest;
typedef OpConverter_BinaryTest<int> OpConverter_BOOL_BinaryTest;
typedef ParameterizedOpConverterTestBase OpConverter_FP32_FP16_INT32_Test;
typedef ParameterizedOpConverterTestBase OpConverter_INT32_Test;
typedef OpConverter_UnaryTest<float> OpConverter_FP32_UnaryTest;
typedef OpConverter_UnaryTest<int> OpConverter_BOOL_Test;
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_FP16_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_FP16_INT32_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF, DT_INT32),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_INT32_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_INT32),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_UnaryTest,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_BOOL_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_BOOL),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_FP16_BinaryTest,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_BOOL_BinaryTest,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_BOOL),
::testing::Values(TrtPrecisionMode::FP32)));
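// Copies tensor elements into a repeated proto field, collapsing the
// trailing run of equal values to a single element, mirroring the compact
// encoding TensorProto uses for constant tails.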
template <typename T>
void CopyTensorElements(const Tensor& tensor, protobuf::RepeatedField<T>* out) {
out->Clear();
if (tensor.NumElements() == 0) return;
const auto flat = tensor.flat<T>();
int64 last_index = 0;
for (int64 i = 0; i < tensor.NumElements(); ++i) {
if (flat(i) != flat(last_index)) {
last_index = i;
}
}
int num_out_elements = last_index + 1;
out->Reserve(num_out_elements);
out->AddNAlreadyReserved(num_out_elements);
const T* src = flat.data();
T* dst = out->mutable_data();
std::copy(src, src + num_out_elements, dst);
}
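// Creates a VariableV2 kernel, fills its GPU-resident output with
// 0, 1, 2, ... via cudaMemcpy, and verifies that the converter materializes
// the variable as weights with the expected dims and values.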
template <DataType dtype, typename CType>
void TestConvertVariableV2(VariableOpConverterTest* test) {
struct TestParam {
string container;
string shared_name;
std::vector<int> dims;
float epsilon;
Status conversion_status;
};
std::vector<TestParam> test_param = {
{"", "var0", {}, 0.001, OkStatus()},
{"", "var0", {64}, 0.001, OkStatus()},
{"", "var0", {8, 16}, 0.001, OkStatus()},
{"box", "var", {8, 16}, 0.001, OkStatus()}};
for (auto p : test_param) {
NodeDef node_def;
std::vector<int64_t> dims_64(p.dims.begin(), p.dims.end());
TensorShape shape = TensorShape(absl::Span<int64_t>(dims_64));
TF_CHECK_OK(NodeDefBuilder("my_var", "VariableV2")
.Attr("dtype", dtype)
.Attr("shape", shape)
.Attr("container", p.container)
.Attr("shared_name", p.shared_name)
.Finalize(&node_def));
OpKernel* kernel;
OpKernelContext* context;
test->CreateContext(node_def, &kernel, &context);
test->Reset(TrtPrecisionMode::FP32, TrtTestMode::kDynamicShape);
int var_size = std::accumulate(p.dims.begin(), p.dims.end(), 1,
std::multiplies<int>());
std::vector<CType> expected_value;
expected_value.reserve(var_size);
for (int i = 0; i < var_size; i++) {
expected_value.push_back((CType)i);
}
kernel->Compute(context);
Tensor* tensor_ptr = context->mutable_output(0);
CHECK_NOTNULL(tensor_ptr);
AllocatorAttributes attr;
attr.set_gpu_compatible(true);
attr.set_nic_compatible(true);
OP_REQUIRES_OK(context,
context->allocate_temp(dtype, shape, tensor_ptr, attr));
auto tensor_flat = tensor_ptr->flat<CType>();
CHECK_NOTNULL(tensor_flat.data());
auto ret = cudaMemcpy(tensor_flat.data(), expected_value.data(),
expected_value.size() * sizeof(CType),
cudaMemcpyHostToDevice);
CHECK_EQ(ret, 0);
test->RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(test->GetTensorOrWeights("my_var", &output));
EXPECT_THAT(output.weights(),
ShapedWeightsHasDimsAndValues<CType>(p.dims, expected_value));
}
}
TEST_F(VariableOpConverterTest, ConvertVariableV2) {
TestConvertVariableV2<DT_FLOAT, float>(this);
TestConvertVariableV2<DT_HALF, Eigen::half>(this);
}
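// Like TestConvertVariableV2, but reads the variable through a DT_RESOURCE
// handle consumed by a ReadVariableOp node.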
template <DataType dtype, typename CType>
void TestConvertReadVariableOp(VariableOpConverterTest* test) {
struct TestParam {
string container;
string name;
std::vector<int> dims;
float epsilon;
Status conversion_status;
};
std::vector<TestParam> test_param = {
{"", "var0", {}, 0.001, OkStatus()},
{"", "var0", {64}, 0.001, OkStatus()},
{"", "var0", {8, 16}, 0.001, OkStatus()},
{"box", "var", {8, 16}, 0.001, OkStatus()}};
for (auto p : test_param) {
NodeDefBuilder::NodeOut rvo_input =
NodeDefBuilder::NodeOut("my_handle", 0, DT_RESOURCE);
NodeDef node_def;
std::vector<int64_t> dims_64(p.dims.begin(), p.dims.end());
TensorShape shape =
TensorShape(gtl::ArraySlice<int64_t>(dims_64));
TF_CHECK_OK(NodeDefBuilder("my_var", "ReadVariableOp")
.Attr("dtype", dtype)
.Attr("_shape", shape)
.Input(rvo_input)
.Finalize(&node_def));
OpKernel* kernel;
OpKernelContext* context;
test->CreateContext(node_def, &kernel, &context);
test->Reset(TrtPrecisionMode::FP32, TrtTestMode::kDynamicShape);
int var_size = std::accumulate(p.dims.begin(), p.dims.end(), 1,
std::multiplies<int>());
std::vector<CType> expected_value;
expected_value.reserve(var_size);
for (int i = 0; i < var_size; i++) {
expected_value.push_back((CType)i);
}
DtypeAndPartialTensorShape dtype_and_shape;
dtype_and_shape.dtype = dtype;
TF_CHECK_OK(PartialTensorShape::BuildPartialTensorShape(
gtl::ArraySlice<int64_t>(dims_64),
&dtype_and_shape.shape));
ResourceHandle handle = MakeResourceHandle<Var>(
context, p.container, p.name,
std::vector<DtypeAndPartialTensorShape>{dtype_and_shape});
test->AddTestResource("my_handle", handle);
Var* resource = new Var(dtype);
TF_EXPECT_OK(CreateResource(context, handle, resource));
AllocatorAttributes attr_value;
attr_value.set_gpu_compatible(true);
attr_value.set_nic_compatible(true);
TF_EXPECT_OK(
context->allocate_temp(dtype, shape, resource->tensor(), attr_value));
auto tensor_flat = resource->tensor()->flat<CType>();
CHECK(tensor_flat.data());
auto ret = cudaMemcpy(tensor_flat.data(), expected_value.data(),
expected_value.size() * sizeof(CType),
cudaMemcpyHostToDevice);
CHECK_EQ(ret, 0);
test->RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(test->GetTensorOrWeights("my_var", &output));
EXPECT_THAT(output.weights(),
ShapedWeightsHasDimsAndValues<CType>(p.dims, expected_value));
}
}
TEST_F(VariableOpConverterTest, ConvertReadVariableOp) {
TestConvertReadVariableOp<DT_FLOAT, float>(this);
TestConvertReadVariableOp<DT_HALF, Eigen::half>(this);
}
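// Converts Const nodes whose value is encoded either as tensor_content or
// as typed proto fields, verifying the resulting weights, including the
// narrowing conversions (e.g. int64 -> int32) the converter applies.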
template <DataType dtype, typename InputCType, typename OutputCType>
void TestConvertConst(OpConverterTest* test) {
NodeDef node_def;
node_def.set_name("my_const");
node_def.set_op("Const");
auto reset_and_test = [&node_def, test](
const Tensor& tensor, const bool as_tensor_content,
const std::vector<int>& expected_dims,
const std::vector<OutputCType>& expected_value) {
test->Reset();
TensorProto* tensor_attr =
(*node_def.mutable_attr())["value"].mutable_tensor();
tensor_attr->Clear();
if (as_tensor_content) {
tensor.AsProtoTensorContent(tensor_attr);
} else {
tensor.shape().AsProto(tensor_attr->mutable_tensor_shape());
tensor_attr->set_dtype(tensor.dtype());
if (tensor.dtype() == DT_FLOAT) {
CopyTensorElements<float>(tensor, tensor_attr->mutable_float_val());
} else if (tensor.dtype() == DT_INT32) {
CopyTensorElements<int32>(tensor, tensor_attr->mutable_int_val());
} else {
tensor.AsProtoField(tensor_attr);
}
}
test->RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(test->GetTensorOrWeights("my_const", &output));
EXPECT_THAT(output.weights(), ShapedWeightsHasDimsAndValues<OutputCType>(
expected_dims, expected_value));
};
auto& attr = *node_def.mutable_attr();
attr["dtype"].set_type(dtype);
{
Tensor t(dtype);
reset_and_test(t, false, {}, {});
}
{
Tensor t = test::AsScalar<InputCType>(12);
    // Scalars are converted to rank-0 weights, so no dims are expected.
    std::vector<int> expected_dims;
reset_and_test(t, false, expected_dims, {12});
reset_and_test(t, true, expected_dims, {12});
}
{
Tensor t = test->AsTensor<InputCType>({1, 2});
reset_and_test(t, false, {2}, {1, 2});
reset_and_test(t, true, {2}, {1, 2});
}
{
Tensor t =
test->AsTensor<InputCType>({1, 2, 3, 4, 5, 6}, TensorShape({2, 3}));
reset_and_test(t, false, {2, 3}, {1, 2, 3, 4, 5, 6});
reset_and_test(t, true, {2, 3}, {1, 2, 3, 4, 5, 6});
}
{
Tensor t =
test->AsTensor<InputCType>({1, 1, 1, 1, 1, 1}, TensorShape({2, 3}));
reset_and_test(t, false, {2, 3}, {1, 1, 1, 1, 1, 1});
reset_and_test(t, true, {2, 3}, {1, 1, 1, 1, 1, 1});
}
{
Tensor t =
test->AsTensor<InputCType>({2, 2, 1, 1, 1, 1}, TensorShape({2, 3}));
reset_and_test(t, false, {2, 3}, {2, 2, 1, 1, 1, 1});
reset_and_test(t, true, {2, 3}, {2, 2, 1, 1, 1, 1});
}
}
TEST_F(OpConverterTest, ConvertConst) {
{
Reset();
NodeDef node_def = MakeConstNodeDef<double>("my_const", {});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Unsupported tensorflow data type double");
}
{
Reset();
Tensor tensor =
AsTensor<int64_t>({1, std::numeric_limits<int64_t>::max(), 1, 1, 1,
std::numeric_limits<int64_t>::lowest()},
TensorShape({2, 3}));
NodeDef node_def;
node_def.set_name("my_const");
node_def.set_op("Const");
(*node_def.mutable_attr())["dtype"].set_type(DT_INT64);
TensorProto* tensor_attr =
(*node_def.mutable_attr())["value"].mutable_tensor();
tensor_attr->Clear();
tensor.AsProtoTensorContent(tensor_attr);
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"outside the range of int32");
}
TestConvertConst<DT_FLOAT, float, float>(this);
TestConvertConst<DT_INT8, int8, int32>(this);
TestConvertConst<DT_UINT8, uint8, int32>(this);
TestConvertConst<DT_INT16, int16, int32>(this);
TestConvertConst<DT_UINT16, uint16, int32>(this);
TestConvertConst<DT_INT32, int32, int32>(this);
TestConvertConst<DT_UINT32, uint32, int32>(this);
TestConvertConst<DT_INT64, int64, int32>(this);
TestConvertConst<DT_UINT64, uint64, int32>(this);
}
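// Builds a FusedBatchNorm{,V2,V3} NodeDef with placeholders for all five
// inputs; the template parameter T selects the op version.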
template <typename T>
NodeDef CreateFusedBatchNormOp(DataType tf_type, std::string data_format,
bool is_training, float epsilon) {
Scope s = Scope::NewRootScope();
auto x = ops::Placeholder(s.WithOpName("x"), tf_type);
auto scale = ops::Placeholder(s.WithOpName("scale"), tf_type);
auto offset = ops::Placeholder(s.WithOpName("offset"), tf_type);
auto mean = ops::Placeholder(s.WithOpName("mean"), tf_type);
auto variance = ops::Placeholder(s.WithOpName("variance"), tf_type);
typename T::Attrs attrs;
attrs.data_format_ = data_format;
attrs.is_training_ = is_training;
if (epsilon > 0) {
attrs.epsilon_ = epsilon;
} else {
EXPECT_GE(epsilon, 0);
}
return T(s.WithOpName("my_batchnorm"), x, scale, offset, mean, variance,
attrs)
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_Test, ConvertFusedBatchNorm) {
using OpFunc = std::function<NodeDef(DataType, std::string, bool, float)>;
std::vector<OpFunc> get_node_def_vec{
CreateFusedBatchNormOp<ops::FusedBatchNorm>,
CreateFusedBatchNormOp<ops::FusedBatchNormV2>,
CreateFusedBatchNormOp<ops::FusedBatchNormV3>};
struct TestParam {
std::string data_format;
int tensor_input_idx;
bool is_training;
float epsilon;
Status conversion_status;
bool keep_channel_unknown;
};
struct NodeInput {
std::string name;
std::vector<int> dims;
std::vector<float> val;
};
std::vector<NodeInput> node_input_nchw{
{"x", {2, 3, 2, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}},
{"scale", {3}, {7, 8, 9}},
{"offset", {3}, {10, 20, 30}},
{"mean", {3}, {1, 2, 3}},
{"variance", {3}, {4, 5, 6}}};
std::vector<NodeInput> node_input_nhwc{
{"x", {2, 2, 1, 3}, {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12}},
{"scale", {3}, {7, 8, 9}},
{"offset", {3}, {10, 20, 30}},
{"mean", {3}, {1, 2, 3}},
{"variance", {3}, {4, 5, 6}}};
std::vector<float> expected_output_nchw{
10.0, 13.495633, 23.574135, 27.148273, 37.342354, 41.013527,
30.9738, 34.469433, 45.018955, 48.59309, 59.369415, 63.04059};
std::vector<float> expected_output_nhwc{
10.0, 23.574135, 37.342354, 13.495633, 27.148273, 41.013527,
30.9738, 45.018955, 59.369415, 34.469433, 48.59309, 63.04059};
for (auto get_node_def : get_node_def_vec) {
NodeDef tmp_node_def = get_node_def(tf_type_, "NCHW", true, 0);
std::string op_name = tmp_node_def.op();
std::vector<TestParam> test_param{
{"NCHW", 0, true, 0,
errors::Unimplemented(
StrCat(op_name, " only supports is_training=false"))},
{"NCHW", 1, false, 0,
errors::Unimplemented(StrCat("The input \"scale\" for ", op_name,
" must be a constant"))},
{"NCHW", 2, false, 0,
errors::Unimplemented(StrCat("The input \"offset\" for ", op_name,
" must be a constant"))},
{"NCHW", 3, false, 0,
errors::Unimplemented(StrCat("The input \"mean\" for ", op_name,
" must be a constant"))},
{"NCHW", 4, false, 0,
errors::Unimplemented(StrCat("The input \"variance\" for ", op_name,
" must be a constant"))},
{"NCHW", 0, false, 0.01},
{"NHWC", 0, false, 0.01}};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
test_param.push_back(
{"NCHW", 0, false, 0.01,
errors::InvalidArgument("Channel dimension must be static"), true});
test_param.push_back(
{"NHWC", 0, false, 0.01,
errors::InvalidArgument("Channel dimension must be static"), true});
}
for (auto p : test_param) {
Reset();
NodeDef node_def =
get_node_def(tf_type_, p.data_format, p.is_training, p.epsilon);
std::vector<NodeInput> node_input =
p.data_format == "NCHW" ? node_input_nchw : node_input_nhwc;
std::vector<float> expected_output =
p.data_format == "NCHW" ? expected_output_nchw : expected_output_nhwc;
for (int i = 0; i < node_input.size(); i++) {
if (i == 0 || i == p.tensor_input_idx) {
Status expected_status =
(i != 0 && trt_mode_ == TrtTestMode::kImplicitBatch)
? errors::InvalidArgument(
batch_size_error(node_input[i].name,
"Provided batch size does not match "
"converter batch size: 3 vs 2"))
: OkStatus();
std::vector<int> partial_input_shape;
if (i == 0 && trt_mode_ == TrtTestMode::kDynamicShape &&
!p.keep_channel_unknown) {
partial_input_shape.resize(4, -1);
int channel_dim = (p.data_format == "NCHW" ? 1 : 3);
partial_input_shape[channel_dim] = node_input[i].dims[channel_dim];
}
AddTestTensor(node_input[i].name, node_input[i].dims, tf_type_,
node_input[i].val, partial_input_shape,
expected_status);
} else {
AddTestWeights(node_input[i].name, node_input[i].dims,
node_input[i].val, tf_type_);
}
}
TestOpConverter(node_def, node_input[0].dims, p.conversion_status,
OkStatus(), ArrayFloatNear(expected_output));
}
}
}
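// Transpose: perm must be a build-time constant with rank equal to the
// input's, and in implicit batch mode the batch dimension may not move.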
TEST_P(OpConverter_FP32_Test, ConvertTranspose) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto transpose = ops::Transpose(s.WithOpName("my_transpose"), input, weights);
const NodeDef& node_def = transpose.operation.node()->def();
std::vector<TestParamBase> test_params = {
TestParamBase{{3, 1, 2, 1},
{},
{},
{},
Status(absl::StatusCode::kUnimplemented,
"The input \"perm\" for Transpose must be a "
"constant")},
TestParamBase{{1, 1, 2, 3},
{},
{},
{0, 1, 2},
Status(absl::StatusCode::kInvalidArgument,
"Rank of perm for transpose does not match with "
"that of the input.")},
TestParamBase{
{1, 1, 2, 3},
{},
{3, 2, 1, 1},
{3, 2, 1, 0},
(trt_mode_ == TrtTestMode::kImplicitBatch)
? Status(absl::StatusCode::kUnimplemented,
"Transpose at batch dimension is not supported")
: OkStatus()},
TestParamBase{{1, 1, 2, 3}, {}, {1, 3, 1, 2}, {0, 3, 1, 2}},
};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
test_params.push_back(TestParamBase{
{1, 1, 2, 3}, {-1, 1, 2, -1}, {1, 3, 1, 2}, {0, 3, 1, 2}});
}
std::vector<float> expected_values{1, 4, 2, 5, 3, 6};
for (auto p : test_params) {
SCOPED_TRACE(p);
Reset();
AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6},
p.partial_input_dims);
if (p.param.empty()) {
AddTestTensor("weights", {3});
} else {
AddTestWeights<int32>("weights", {static_cast<int>(p.param.size())},
p.param);
}
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray(expected_values));
}
}
TEST_P(OpConverter_FP32_Test, ConvertTile) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto tile = ops::Tile(s.WithOpName("my_tile"), input, weights);
const NodeDef& node_def = tile.operation.node()->def();
struct TileParam {
std::vector<int> input_dims;
std::vector<int> multiplier;
std::vector<float> tensor;
std::vector<int> expected_output_dims;
std::vector<int> expected_results;
int test_ID;
Status status;
};
std::vector<TileParam> test_params = {
TileParam{{1, 2, 3},
{1, -2, 1},
{},
{},
{},
1,
Status(absl::StatusCode::kInvalidArgument,
"All replications of the Tile operation in "
"'my_tile' should be positive, got (1, -2, 1).")},
TileParam{{1, 2, 3},
{1, 2, 1, 3},
{0, 1, 2, 3, 4, 5},
{},
{},
2,
Status(absl::StatusCode::kInvalidArgument,
"The length of the replication vector (4) of the "
"Tile operation in 'my_tile' is expected to be equal "
"to the rank of the input vector (3).")},
TileParam{{1, 2},
{1, 3},
{2, 3},
{1, 6},
{2, 3, 2, 3, 2, 3}},
TileParam{{1, 2, 3},
{1, 2, 1},
{0, 1, 2, 3, 4, 5},
{1, 4, 3},
{0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5}},
TileParam{{1, 2, 3},
{1, 1, 2},
{0, 1, 2, 3, 4, 5},
{1, 2, 6},
{0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5}},
TileParam{{1, 2, 3},
{1, 2, 2},
{0, 1, 2, 3, 4, 5},
{1, 4, 6},
{0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5,
0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5}},
TileParam{{1, 2},
{2, 3},
{2, 3},
{2, 6},
{2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3}},
TileParam{{1, 2, 3},
{2, 2, 1},
{0, 1, 2, 3, 4, 5},
{2, 4, 3},
{0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5}},
};
for (bool multiplier_is_tensor : {true, false}) {
for (bool input_is_tensor : {true, false}) {
for (auto p : test_params) {
std::vector<int> num_mults = {static_cast<int>(p.multiplier.size())};
std::vector<int> partial_input_dims = {};
if (multiplier_is_tensor) {
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
p.status =
Status(absl::StatusCode::kInvalidArgument,
"Conversion for Tile is not implemented for multipliers "
"passed as a tensor in implicit batch mode");
num_mults = {1, static_cast<int>(p.multiplier.size())};
} else {
if (p.test_ID == 1) {
continue;
}
if (trt_mode_ == TrtTestMode::kDynamicShape) {
partial_input_dims = num_mults;
p.status = OkStatus();
}
if (p.test_ID == 2) {
p.status = Status(absl::StatusCode::kInvalidArgument,
"When replications are defined as a tensor, "
"the number of its elements (4) must be equal "
"to the rank of the input tensor (3).");
}
}
} else {
if (trt_mode_ == TrtTestMode::kImplicitBatch && p.multiplier[0] > 1) {
p.status =
Status(absl::StatusCode::kUnimplemented,
"The Tile operation along "
"the batch dimension in 'my_tile' is not implemented.");
}
}
Reset();
if (input_is_tensor) {
AddTestTensor("input", p.input_dims, p.tensor);
} else {
AddTestWeights("input", p.input_dims, p.tensor, tf_type_);
}
if (multiplier_is_tensor) {
AddTestTensor<int>("weights", num_mults, DT_INT32, p.multiplier,
partial_input_dims);
} else {
AddTestWeights<int32>("weights", num_mults, p.multiplier);
}
TestOpConverter(node_def, p.expected_output_dims, p.status, OkStatus(),
ElementsAreArray(p.expected_results));
}
}
}
}
TEST_P(OpConverter_FP32_Test, ConvertReshape) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto reshape = ops::Reshape(s.WithOpName("my_reshape"), input, weights);
const NodeDef& node_def = reshape.operation.node()->def();
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
Reset();
AddTestTensor("input", {3, 2, 1});
AddTestTensor("weights", {3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"The input \"shape\" for Reshape must be a constant in implicit batch "
"mode");
} else if (!IS_TRT_VERSION_GE(7, 1, 3, 0)) {
Reset();
AddTestTensor("input", {3, 2, 1});
AddTestTensor("weights", {3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Non constant shape input tensor for Reshape requires minimum TRT "
"7.1.3");
}
Status reshape_from_scalar_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Internal(
"Failed to convert at least one input to a TRT_TensorOrWeights:"
" Scalar input tensor is not supported since the first "
"dimension is treated as batch dimension by TRT")
: OkStatus();
Status add_scalar_tensor_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::InvalidArgument(
"removing first dim requires explicit batch dimension")
: OkStatus();
Status reshape_to_scalar_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented("Reshape to shape=[] is not supported")
: OkStatus();
Status reshape_batch_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented("Reshape on batch dimension is not supported")
: OkStatus();
struct TestParams {
std::vector<int> tensor_dims;
std::vector<int> shape;
std::vector<int> expected_shape;
Status conversion_status;
Status runtime_status;
std::vector<int> shape_prof;
Status add_test_tensor_status;
};
std::vector<TestParams> params = {
TestParams{{},
{1, 1},
{},
reshape_from_scalar_status,
{},
{},
add_scalar_tensor_status},
TestParams{{1, 1}, {}, {}, reshape_to_scalar_status},
TestParams{{1, 1, 2, 3}, {3, 1, 1, 2}, {}, reshape_batch_status},
TestParams{{2, 1, 2, 3}, {-1, 1, 4}, {3, 1, 4}, reshape_batch_status},
TestParams{{1, 1, 2, 3}, {-1, 1, 3, 2}, {1, 1, 3, 2}},
TestParams{{1, 1, 2, 3}, {1, 1, -1}, {1, 1, 6}},
TestParams{{1, 1, 2, 3}, {1, 1, 3, 2}},
TestParams{{2, 1, 2, 3}, {2, 1, 3, 2}},
TestParams{{1, 1, 1}, {1}},
TestParams{{1}, {1, 1}},
TestParams{{2, 1, 1}, {2}},
TestParams{{2}, {2, 1}},
};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
params.push_back(TestParams{{},
{},
{},
reshape_from_scalar_status,
{},
{},
add_scalar_tensor_status});
}
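// The target shape is always tested as a constant (weight) input; when
// supported (explicit batch mode with TRT >= 7.1.3), it is additionally
// tested as a tensor input.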
std::vector<bool> shape_input_options(1, true);
if (trt_mode_ != TrtTestMode::kImplicitBatch &&
IS_TRT_VERSION_GE(7, 1, 3, 0)) {
shape_input_options.push_back(false);
}
for (auto p : params) {
for (auto shape_as_weight : shape_input_options) {
std::ostringstream oss;
oss << "shape " << PrintToString(p.shape);
SCOPED_TRACE(StrCat(oss.str(), shape_as_weight ? " weight" : " tensor"));
if (!shape_as_weight && p.shape.empty()) {
p.conversion_status = errors::Unimplemented(
"Reshape with dynamic input requires 1D input tensor");
}
Reset();
const int n_elements =
std::accumulate(p.tensor_dims.begin(), p.tensor_dims.end(), 1,
std::multiplies<int>());
std::vector<float> input_vec(n_elements);
std::iota(input_vec.begin(), input_vec.end(), 1);
AddTestTensor("input", p.tensor_dims, tf_type_, input_vec, {},
p.add_test_tensor_status);
if (shape_as_weight) {
AddTestWeights<int32>("weights", {static_cast<int>(p.shape.size())},
p.shape);
} else {
std::vector<int32> dims;
std::vector<int32> values{p.shape};
if (!p.shape.empty()) {
dims.push_back(p.shape.size());
} else {
values.push_back(1);
}
AddTestTensor("weights", dims, DT_INT32, values, dims);
}
std::vector<int> expected_shape =
p.expected_shape.empty() ? p.shape : p.expected_shape;
VLOG(2) << "Calling TestOpConverter";
TestOpConverter(node_def, expected_shape, p.conversion_status,
p.runtime_status, ElementsAreArray(input_vec));
}
}
}
TEST_P(OpConverter_FP32_Test, ConvertShape) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto shape = ops::Shape(s.WithOpName("my_shape"), input);
const NodeDef& node_def = shape.operation.node()->def();
Status conversion_status =
(trt_mode_ == TrtTestMode::kImplicitBatch)
? errors::Unimplemented(
"Shape is only supported for explicit batch mode.")
: OkStatus();
std::vector<TestParamBase> test_params = {
#if !IS_TRT_VERSION_GE(7, 1, 3, 0)
TestParamBase{{1, 2, 3}, {}, {3}, {}, conversion_status},
#endif
TestParamBase{{1, 2, 3}, {}, {3}, {1}, conversion_status},
};
auto input_is_weight = [](const TestParamBase& p) { return !p.param.empty(); };
for (auto p : test_params) {
SCOPED_TRACE(p);
Reset();
int n_elements = 0;
if (input_is_weight(p) || trt_mode_ != TrtTestMode::kExplicitBatch) {
n_elements = std::accumulate(p.input_dims.begin(), p.input_dims.end(), 1,
std::multiplies<int>());
}
std::vector<float> input_val(n_elements, 1);
if (!input_is_weight(p)) {
AddTestTensor("input", p.input_dims, input_val);
} else {
AddTestWeights("input", p.input_dims, input_val, tf_type_);
}
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray(p.input_dims),
{DT_INT32});
}
}
struct MatMulTestParams {
std::vector<int> shape_a;
std::vector<int> values_a;
bool transpose_a;
std::vector<int> shape_b;
std::vector<int> values_b;
bool transpose_b;
std::vector<int> expected_shape;
std::vector<int> expected_output;
};
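// Runs the MatMul-like node built by get_matmul against each entry of params,
// for every combination of A and B being a tensor or a weight (the
// weight-weight case is skipped), optionally giving A a partial (dynamic)
// shape. Also verifies up front that INT32 inputs are rejected.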
void TestMatMulHelper(
ParameterizedOpConverterTestBase* test,
const std::function<NodeDef(DataType, bool, bool)>& get_matmul,
const std::vector<MatMulTestParams>& params) {
{
test->Reset();
NodeDef node_def = get_matmul(DT_INT32, false, false);
test->AddTestTensor("input", {1, 2}, DT_INT32, {});
test->AddTestWeights<int32>("weights", {2, 1}, {3, 5});
const std::vector<DataType> allowed_types{DT_FLOAT, DT_HALF};
test->RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
convert_not_supported_dtype_msg(allowed_types, DT_INT32, node_def));
}
std::vector<bool> a_test_partial_shape_values{false};
if (test->get_trt_mode() == TrtTestMode::kDynamicShape) {
a_test_partial_shape_values.push_back(true);
}
for (auto p : params) {
for (bool a_is_tensor : {true, false}) {
for (bool b_is_tensor : {true, false}) {
for (bool a_partial_shape : a_test_partial_shape_values) {
if (a_partial_shape && !a_is_tensor) {
continue;
}
if (!a_is_tensor && !b_is_tensor) {
continue;
}
SCOPED_TRACE(StrCat("A", p.transpose_a ? ".T" : "", " is ",
a_is_tensor ? "tensor" : "weight", ", B",
p.transpose_b ? ".T" : "", " is ",
b_is_tensor ? "tensor " : "weight, rank A ",
p.shape_a.size(), ", rank B ", p.shape_b.size()));
test->Reset();
NodeDef node_def =
get_matmul(test->get_tf_type(), p.transpose_a, p.transpose_b);
const bool is_batch_matmul = node_def.op() == "BatchMatMul";
if (a_is_tensor) {
if (a_partial_shape) {
std::vector<int> partial_shape(p.shape_a.size(), -1);
int k = p.shape_a.size() - 1;
partial_shape.at(k) = p.shape_a.at(k);
test->AddTestTensor("input", p.shape_a, test->get_tf_type(),
p.values_a, partial_shape);
} else {
test->AddTestTensor("input", p.shape_a, p.values_a);
}
} else {
test->AddTestWeights("input", p.shape_a, p.values_a,
test->get_tf_type());
}
if (b_is_tensor) {
if (a_is_tensor && p.shape_a[0] != p.shape_b[0] &&
test->get_trt_mode() == TrtTestMode::kImplicitBatch) {
VLOG(2) << "Skipping test with incompatible batch dimensions";
continue;
}
test->AddTestTensor("weights", p.shape_b, p.values_b);
} else {
test->AddTestWeights("weights", p.shape_b, p.values_b,
test->get_tf_type());
}
Status conversion_status = OkStatus();
if (test->get_trt_mode() == TrtTestMode::kImplicitBatch) {
if (is_batch_matmul) {
if (a_is_tensor && p.shape_a.size() < p.shape_b.size()) {
conversion_status = errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims ",
p.shape_a.size(), " vs broadcast #dims ", p.shape_b.size(),
")");
}
if (b_is_tensor && p.shape_b.size() < p.shape_a.size()) {
conversion_status = errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims ",
p.shape_b.size(), " vs broadcast #dims ", p.shape_a.size(),
")");
}
if ((!a_is_tensor || !b_is_tensor) && p.shape_a[0] != 1) {
conversion_status = errors::Unimplemented(
"TensorRT does not support batched constants in implicit "
"batch mode.");
}
} else if ((a_is_tensor && p.shape_a.size() <= 2 &&
(p.transpose_a || b_is_tensor)) ||
(b_is_tensor && p.shape_b.size() <= 2)) {
conversion_status = errors::InvalidArgument(
"MatMul with 2D tensors requires explicit batch mode, or that"
" tensor A is not transposed and B is a constant tensor.");
}
}
test->TestOpConverter(node_def, p.expected_shape, conversion_status,
OkStatus(),
ElementsAreArray(p.expected_output));
if (!conversion_status.ok()) {
VLOG(2) << "Converted with status " << conversion_status;
}
VLOG(2) << "== Finished test iteration ==";
}
}
}
}
}
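// Scans the layers of the network built by the test's converter and checks
// whether a layer of type LayerType was added, comparing with expect_found.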
template <typename LayerType>
void CheckAddedLayers(OpConverterTest* test, bool expect_found) {
bool layer_found = false;
for (int i = 0; i < test->converter_->network()->getNbLayers(); i++) {
nvinfer1::ILayer* layer = test->converter_->network()->getLayer(i);
if (dynamic_cast<LayerType*>(layer)) {
layer_found = true;
}
}
EXPECT_EQ(expect_found, layer_found);
}
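// 2D MatMul test cases covering all four transpose_a/transpose_b combinations
// for square and non-square operands; all values are row-major.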
std::vector<MatMulTestParams> GetMatMulTestParams() {
std::vector<MatMulTestParams> params{
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {2, 3, 6, 11}},
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {1, 3, 3, 13}},
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {4, 6, 6, 10}},
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {2, 6, 3, 11}},
MatMulTestParams{{2, 3}, {0, 1, 2, 3, 4, 5}, false,
{2, 3}, {1, 2, 3, 4, 5, 6}, true,
{2, 2}, {8, 17, 26, 62}},
MatMulTestParams{{2, 3}, {0, 1, 2, 3, 4, 5}, true,
{2, 3}, {1, 2, 3, 4, 5, 6}, false,
{3, 3}, {12, 15, 18, 17, 22, 27, 22, 29, 36}},
MatMulTestParams{{3, 2}, {0, 1, 2, 3, 4, 5}, false,
{2, 3}, {1, 2, 3, 4, 5, 6}, false,
{3, 3}, {4, 5, 6, 14, 19, 24, 24, 33, 42}},
MatMulTestParams{{3, 2}, {0, 1, 2, 3, 4, 5}, true,
{2, 3}, {1, 2, 3, 4, 5, 6}, true,
{2, 2}, {16, 34, 22, 49}},
};
return params;
}
TEST_P(OpConverter_FP32_Test, ConvertMatMul) {
auto get_matmul_nodedef = [](DataType dtype, bool transpose_a,
bool transpose_b) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto weights = ops::Placeholder(s.WithOpName("weights"), dtype);
const auto matmul_attrs =
ops::MatMul::TransposeA(transpose_a).TransposeB(transpose_b);
auto matmul =
ops::MatMul(s.WithOpName("my_matmul"), input, weights, matmul_attrs);
return matmul.operation.node()->def();
};
TestMatMulHelper(this, get_matmul_nodedef, GetMatMulTestParams());
}
TEST_P(OpConverter_FP32_Test, ConvertBatchMatMul) {
auto get_batch_matmul_nodedef = [](DataType dtype, bool transpose_a,
bool transpose_b) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto weights = ops::Placeholder(s.WithOpName("weights"), dtype);
const auto matmul_attrs =
ops::BatchMatMul::AdjX(transpose_a).AdjY(transpose_b);
auto matmul = ops::BatchMatMul(s.WithOpName("my_matmul"), input, weights,
matmul_attrs);
return matmul.operation.node()->def();
};
std::vector<MatMulTestParams> params_2d = GetMatMulTestParams();
std::vector<MatMulTestParams> params;
params.reserve(params_2d.size() * 3 + 1);
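// Prepends n leading 1s to a shape, e.g. insert_ones({2, 3}, 2) == {1, 1, 2, 3}.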
auto insert_ones = [](std::vector<int> v, int n) {
std::vector<int> ones(n, 1);
ones.insert(ones.end(), v.begin(), v.end());
return ones;
};
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[](MatMulTestParams p) {
p.shape_a.insert(p.shape_a.begin(), 1);
p.shape_b.insert(p.shape_b.begin(), 1);
p.expected_shape.insert(p.expected_shape.begin(), 1);
return p;
});
params.push_back(
MatMulTestParams{{2, 2, 2}, {0, 1, 2, 3, 0, 1, 2, 3}, false,
{2, 2, 2}, {0, 1, 2, 3, 0, 1, 2, 3}, false,
{2, 2, 2}, {2, 3, 6, 11, 2, 3, 6, 11}}
);
params.push_back(
MatMulTestParams{{2, 2, 3}, {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5},
false,
{2, 2, 3}, {1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6}, true,
{2, 2, 2}, {8, 17, 26, 62, 8, 17, 26, 62}});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a = insert_ones(p.shape_a, 2);
p.shape_b = insert_ones(p.shape_b, 2);
p.expected_shape = insert_ones(p.expected_shape, 2);
return p;
});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a = insert_ones(p.shape_a, 2);
p.expected_shape = insert_ones(p.expected_shape, 2);
return p;
});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a = insert_ones(p.shape_a, 1);
p.shape_b = insert_ones(p.shape_b, 2);
p.expected_shape = insert_ones(p.expected_shape, 2);
return p;
});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a.insert(p.shape_a.begin(), 2);
p.values_a.reserve(p.values_a.size() * 2);
p.values_a.insert(p.values_a.end(), p.values_a.begin(),
p.values_a.end());
p.shape_b.insert(p.shape_b.begin(), 2);
p.values_b.reserve(p.values_b.size() * 2);
p.values_b.insert(p.values_b.end(), p.values_b.begin(),
p.values_b.end());
p.expected_shape.insert(p.expected_shape.begin(), 2);
p.expected_output.reserve(p.expected_output.size() * 2);
p.expected_output.insert(p.expected_output.end(),
p.expected_output.begin(),
p.expected_output.end());
return p;
});
params.push_back(MatMulTestParams{
{1, 2, 4, 5},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39},
false,
{1, 2, 3, 5},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30},
true,
{1, 2, 4, 3},
{40, 90, 140, 115, 290, 465, 190, 490,
790, 265, 690, 1115, 1990, 2540, 3090, 2440,
3115, 3790, 2890, 3690, 4490, 3340, 4265, 5190}});
TestMatMulHelper(this, get_batch_matmul_nodedef, params);
}
#if IS_TRT_VERSION_GE(7, 1, 3, 0)
TEST_P(OpConverter_FP32_Test, ConvertEinsum) {
auto get_einsum_nodedef = [](DataType dtype, std::string eq,
int n_inputs = 2) -> NodeDef {
Scope s = Scope::NewRootScope();
auto a = ops::Placeholder(s.WithOpName("input_a"), dtype);
std::vector<Input> input_vec{a};
if (n_inputs > 1) {
auto b = ops::Placeholder(s.WithOpName("input_b"), dtype);
input_vec.push_back(b);
}
InputList inputs(input_vec);
auto einsum = ops::Einsum(s.WithOpName("my_einsum"), inputs, eq);
return einsum.operation.node()->def();
};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
Reset();
NodeDef node = get_einsum_nodedef(tf_type_, "ab,cb->ac");
AddTestTensor("input_a", {2, 3});
AddTestTensor("input_b", {2, 3});
const auto& err = convert_not_supported_implicit(node.op(), node.name());
TestOpConverter(node, {2, 2}, errors::Unimplemented(err), OkStatus(),
ElementsAreArray({13, 16, 40, 52}));
return;
}
struct TestParams {
std::string equation;
std::vector<int> shape_a;
std::vector<int> values_a;
std::vector<int> shape_b;
std::vector<int> values_b;
std::vector<int> expected_shape;
std::vector<int> expected_output;
Status conv_status;
};
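// Expected conversion statuses. Several classes of equations (implicit
// reductions, outer products, diagonals, ellipses) are rejected, and the
// exact error depends on the TRT version.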
Status unimplemented_eq = errors::Unimplemented("");
Status internal_err = errors::Internal("");
Status internal_err_before_TRT82 =
IS_TRT_VERSION_GE(8, 2, 0, 0) ? OkStatus() : internal_err;
Status unimplemented_before_TRT82 =
IS_TRT_VERSION_GE(8, 2, 0, 0) ? OkStatus() : unimplemented_eq;
Status diagonal_error = unimplemented_eq;
Status diagonal_error_1_input =
IS_TRT_VERSION_GE(8, 2, 0, 0) ? unimplemented_eq : internal_err;
std::vector<TestParams> params{
TestParams{"i,i->", {2}, {2, 3}, {2}, {1, 2}, {}, {8}, unimplemented_eq},
TestParams{"ik,ik->",
{2, 2},
{2, 3, 4, 1},
{2, 2},
{1, 2, 1, 3},
{},
{15},
unimplemented_eq},
TestParams{"i,k->ik",
{2},
{1, 2},
{3},
{1, 2, 3},
{2, 3},
{1, 2, 3, 2, 4, 6},
unimplemented_eq},
TestParams{"ij,kl->ijkl",
{2, 1},
{1, 2},
{3, 1},
{1, 2, 3},
{2, 1, 3, 1},
{1, 2, 3, 2, 4, 6},
unimplemented_before_TRT82},
TestParams{"ik->ki",
{2, 3},
{0, 1, 2, 3, 4, 5},
{},
{},
{3, 2},
{0, 3, 1, 4, 2, 5},
internal_err_before_TRT82},
TestParams{"ii->i",
{3, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{},
{},
{3},
{0, 4, 8},
diagonal_error_1_input},
TestParams{"ii->",
{3, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{},
{},
{},
{12},
diagonal_error_1_input},
TestParams{"abbc,dc->ad",
{1, 2, 2, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{2, 3},
{1, 2, 3, 4, 5, 6},
{2, 3},
{1, 2, 3, 2, 4, 6},
diagonal_error},
TestParams{"...ik,...jk->...ij",
{1, 3, 1, 4},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 1, 1, 4},
{1, 2, 3, 4, 5, 6, 7, 8},
{2, 3, 1, 1},
{20, 60, 100, 44, 148, 252},
unimplemented_eq},
TestParams{"ab,bc->ac",
{2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2},
{1, 2, 3, 4, 5, 6},
{2, 2},
{13, 16, 40, 52}},
TestParams{"abc,cde->abde",
{1, 2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2, 2, 2},
{23, 26, 29, 32, 68, 80, 92, 104}},
TestParams{"abcd,cde->abe",
{1, 2, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 3, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2, 2},
{125, 140, 341, 392}},
TestParams{"aBAE,AEe->aBe",
{1, 2, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 3, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2, 2},
{125, 140, 341, 392}},
TestParams{"abc,cd->abd",
{1, 2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2},
{1, 2, 3, 4, 5, 6},
{1, 2, 2},
{13, 16, 40, 52}},
TestParams{"acbe,aecd->abcd",
{1, 2, 3, 4},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{1, 4, 2, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
{1, 3, 2, 3},
{90, 96, 102, 732, 786, 840, 250, 272, 294, 940, 1010, 1080,
410, 448, 486, 1148, 1234, 1320}},
TestParams{"aecd,abcd->acbe",
{1, 2, 3, 4},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{1, 2, 3, 4},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
{1, 3, 2, 2},
{20, 140, 92, 788, 148, 460, 412, 1300, 404, 908, 860, 1940}},
TestParams{"acd,dce->ae",
{1, 2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2},
{115, 130}},
TestParams{"abcd,bace->bade",
{2, 3, 2, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{3, 2, 2, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{3, 2, 1, 1},
{2, 46, 28, 128, 86, 242}},
TestParams{
"cebfad,fageb->abcdg",
{1, 1, 3, 3, 2, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35},
{3, 2, 2, 1, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36},
{2, 3, 1, 2, 2},
{252, 288, 291, 336, 768, 912, 810, 963, 1356, 1608, 1401, 1662,
438, 492, 495, 558, 1176, 1338, 1236, 1407, 1986, 2256, 2049, 2328}},
};
for (auto p : params) {
for (bool a_is_tensor : {true, false}) {
for (bool b_is_tensor : {true, false}) {
if (!a_is_tensor && !b_is_tensor) {
continue;
}
Reset();
int n_inputs = p.shape_b.empty() ? 1 : 2;
NodeDef node_def = get_einsum_nodedef(tf_type_, p.equation, n_inputs);
if (a_is_tensor) {
AddTestTensor("input_a", p.shape_a, p.values_a);
} else {
AddTestWeights("input_a", p.shape_a, p.values_a, tf_type_);
}
if (!p.shape_b.empty()) {
if (b_is_tensor) {
AddTestTensor("input_b", p.shape_b, p.values_b);
} else {
AddTestWeights("input_b", p.shape_b, p.values_b, tf_type_);
}
}
TestOpConverter(node_def, p.expected_shape, p.conv_status, OkStatus(),
ElementsAreArray(p.expected_output));
}
}
}
}
#endif
TEST_P(OpConverter_FP32_FP16_Test, ConvertBiasAdd) {
auto get_biasadd_nodedef = [](const string& data_format,
DataType tf_type) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto weights = ops::Placeholder(s.WithOpName("weights"), tf_type);
const auto biasadd_attrs = ops::BiasAdd::DataFormat(data_format);
auto biasadd =
ops::BiasAdd(s.WithOpName("my_biasadd"), input, weights, biasadd_attrs);
return biasadd.operation.node()->def();
};
for (const string& data_format : {"NHWC", "NCHW"}) {
for (const int trt_input_rank : {1, 2, 3, 4}) {
Reset();
NodeDef node_def = get_biasadd_nodedef(data_format, tf_type_);
std::vector<int32> dims_array(trt_input_rank + 1, 1);
if (trt_input_rank == 1) {
dims_array[1] = (data_format == "NHWC" ? 3 : 2);
} else {
dims_array[1] = 2;
dims_array[trt_input_rank] = 3;
}
const int64_t num_input = DimsAdapter(dims_array).Volume();
ASSERT_EQ(trt_input_rank > 1 ? 6 : (data_format == "NHWC" ? 3 : 2),
num_input);
std::vector<float> input_data(num_input, 0);
AddTestTensor("input", dims_array, input_data);
const int channel_size = (data_format == "NHWC" ? 3 : 2);
std::vector<float> bias(channel_size);
for (int i = 0; i < channel_size; ++i) {
bias[i] = i + 1;
}
AddTestWeights("weights", {channel_size}, bias, tf_type_);
std::vector<float> output_data;
if (trt_input_rank == 1) {
if (data_format == "NHWC") {
output_data = {1, 2, 3};
} else {
output_data = {1, 2};
}
} else {
if (data_format == "NHWC") {
output_data = {1, 2, 3, 1, 2, 3};
} else {
output_data = {1, 1, 1, 2, 2, 2};
}
}
TestOpConverter(node_def, dims_array, OkStatus(), OkStatus(),
ElementsAreArray(output_data));
}
}
}
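// Builds a NodeDef named "my_binary" that applies the binary op OpType to two
// placeholder inputs "input1" and "input2".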
template <typename OpType>
NodeDef GetBinaryOpNodeDef(DataType dtype) {
Scope s = Scope::NewRootScope();
auto input_l = ops::Placeholder(s.WithOpName("input1"), dtype);
auto input_r = ops::Placeholder(s.WithOpName("input2"), dtype);
auto op = OpType(s.WithOpName("my_binary"), input_l, input_r);
return op.operation.node()->def();
}
TEST_P(OpConverter_FP32_FP16_BinaryTest, ConvertBinary) {
using OpFunc = std::function<NodeDef(DataType)>;
std::map<std::string, std::pair<OpFunc, std::vector<float>>> op_test_info;
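// Each braced expected-output list below is deliberately split by the
// preprocessor into eight macro arguments (v1..v8), because braces, unlike
// parentheses, do not protect commas. The macro body rejoins them, so e.g.
//   ADD_OP("Add", ops::Add, {5, 8, 6, 9, 5, 8, 6, 9});
// expands to
//   op_test_info["Add"] = std::make_pair(
//       GetBinaryOpNodeDef<ops::Add>,
//       std::vector<float>({5, 8, 6, 9, 5, 8, 6, 9}));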
#define ADD_OP(name, op, v1, v2, v3, v4, v5, v6, v7, v8) \
op_test_info[name] = \
std::make_pair(GetBinaryOpNodeDef<op>, \
std::vector<float>(v1, v2, v3, v4, v5, v6, v7, v8))
ADD_OP("Add", ops::Add, {5, 8, 6, 9, 5, 8, 6, 9});
ADD_OP("AddV2", ops::AddV2, {5, 8, 6, 9, 5, 8, 6, 9});
ADD_OP("Sub", ops::Sub, {1, 4, 0, 3, 1, 4, 0, 3});
ADD_OP("Mul", ops::Mul, {6, 12, 9, 18, 6, 12, 9, 18});
ADD_OP("Div", ops::Div, {1.5, 3, 1, 2, 1.5, 3, 1, 2});
ADD_OP("RealDiv", ops::RealDiv, {1.5, 3, 1, 2, 1.5, 3, 1, 2});
ADD_OP("FloorDiv", ops::FloorDiv, {1, 3, 1, 2, 1, 3, 1, 2});
ADD_OP("Minimum", ops::Minimum, {2, 2, 3, 3, 2, 2, 3, 3});
ADD_OP("Maximum", ops::Maximum, {3, 6, 3, 6, 3, 6, 3, 6});
ADD_OP("Pow", ops::Pow, {9, 36, 27, 216, 9, 36, 27, 216});
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_OP("Greater", ops::Greater, {1, 1, 0, 1, 1, 1, 0, 1});
ADD_OP("Less", ops::Less, {0, 0, 0, 0, 0, 0, 0, 0});
ADD_OP("Equal", ops::Equal, {0, 0, 1, 0, 0, 0, 1, 0});
ADD_OP("GreaterEqual", ops::GreaterEqual, {1, 1, 1, 1, 1, 1, 1, 1});
ADD_OP("LessEqual", ops::LessEqual, {0, 0, 1, 0, 0, 0, 1, 0});
#endif
#undef ADD_OP
std::vector<std::vector<float>> data = {
{3, 6, 3, 6}, {3, 6}, {2, 3, 2, 3}, {2, 3}};
RunTests(*BinaryOperationMap(), op_test_info, data);
}
TEST_P(OpConverter_BOOL_BinaryTest, ConvertBooleanBinary) {
using OpFunc = std::function<NodeDef(DataType)>;
std::map<std::string, std::pair<OpFunc, std::vector<int>>> op_test_info;
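// Uses the same brace-splitting ADD_OP trick as ConvertBinary above, with
// integer expected values.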
#define ADD_OP(name, op, v1, v2, v3, v4, v5, v6, v7, v8) \
op_test_info[name] = \
std::make_pair(GetBinaryOpNodeDef<op>, \
std::vector<int>(v1, v2, v3, v4, v5, v6, v7, v8))
ADD_OP("LogicalOr", ops::LogicalOr, {1, 1, 0, 1, 1, 1, 0, 1});
ADD_OP("LogicalAnd", ops::LogicalAnd, {0, 1, 0, 0, 0, 1, 0, 0});
#undef ADD_OP
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
std::vector<std::vector<int>> data = {
{0, 1, 0, 1}, {0, 1}, {1, 0, 1, 0}, {1, 0}};
RunTests(*BinaryBooleanOperationMap(), op_test_info, data);
#endif
}
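// Builds an AddN NodeDef named "my_addn" with one placeholder input per entry
// of input_names, all of the given dtype.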
NodeDef GetAddNNodeDef(const std::vector<string>& input_names, DataType dtype) {
Scope s = Scope::NewRootScope();
OutputList inputs;
for (const string& name : input_names) {
inputs.push_back(ops::Placeholder(s.WithOpName(name), dtype));
}
auto op = ops::AddN(s.WithOpName("my_addn"), inputs);
return op.operation.node()->def();
}
struct AddNTestParams {
std::vector<float> input_values;
std::vector<string> input_names;
std::vector<int> dimensions;
std::vector<float> expected_output;
Status status;
};
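// Splits p.input_values into equal contiguous slices, one per name in
// p.input_names, feeds each slice as an input tensor of shape p.dimensions,
// and checks the converted AddN output against p.expected_output.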
void TestAddN(ParameterizedOpConverterTestBase* test, AddNTestParams& p) {
test->Reset();
const NodeDef node_def = GetAddNNodeDef(p.input_names, test->get_tf_type());
ASSERT_EQ(0, p.input_values.size() % p.input_names.size())
<< "The number of input values: `" << p.input_values.size()
<< "` is not a multiple of the number of inputs: `"
<< p.input_names.size() << "`";
DataVec input_data;
int input_offset = 0;
const int window_size = p.input_values.size() / p.input_names.size();
for (const string& name : p.input_names) {
std::vector<float>::const_iterator start_pos =
p.input_values.begin() + input_offset;
std::vector<float>::const_iterator end_pos = start_pos + window_size;
std::vector<float> sub_input_val(start_pos, end_pos);
input_offset += window_size;
test->AddTestTensor(name, p.dimensions, test->get_tf_type(), sub_input_val);
}
test->TestOpConverter(node_def, p.dimensions,
p.status,
p.status,
ElementsAreArray(p.expected_output),
{test->get_tf_type()});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertAddN) {
{
Reset();
const NodeDef node_def = GetAddNNodeDef({"tensor", "weights"}, tf_type_);
AddTestTensor("tensor", {1, 2});
AddTestWeights<float>("weights", {2, 1, 2}, {0, 1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Weights input to AddN is required to have batch dimension 1.");
}
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<AddNTestParams> params = {
{common_input,
{"inp1", "inp2", "inp3"},
{1, 1, 2, 1, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 1, 3, 1, 1},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{1, 2, 1, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 1, 3, 1},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{1, 2, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 1, 3},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{2, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 3},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{2},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{3},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3", "inp4", "inp5", "inp6"},
{1},
{15},
OkStatus()},
};
for (auto p : params) {
TestAddN(this, p);
}
}
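// Tests FakeQuantWithMinMaxArgs/Vars and QuantizeAndDequantizeV2/V3 in INT8
// dynamic-range mode: each variant should record a quantization range of 6.0f
// for its output tensor, and non-constant min/max inputs must be rejected.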
TEST_P(OpConverter_FP32_Test, ConvertQDQDynamicRangeMode) {
{
Reset(TrtPrecisionMode::INT8);
NodeDef node_def =
MakeNodeDef("my_quantize", "FakeQuantWithMinMaxArgs", {"input"});
AddTestTensor("input", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kNotFound,
"No attr named 'min'");
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto quantize_attrs = ops::FakeQuantWithMinMaxArgs::Min(-6.0f).Max(6.0f);
auto quantize = ops::FakeQuantWithMinMaxArgs(s.WithOpName("my_quantize"),
input, quantize_attrs);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto quantize = ops::FakeQuantWithMinMaxVars(
s.WithOpName("my_quantize"), input, weights_min, weights_max);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestWeights<float>("weights_min", {1}, {-6.0f});
AddTestWeights<float>("weights_max", {1}, {6.0f});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto quantize = ops::QuantizeAndDequantizeV2(
s.WithOpName("my_quantize"), input, weights_min, weights_max);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestWeights<float>("weights_min", {1}, {-6.0f});
AddTestWeights<float>("weights_max", {1}, {6.0f});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto quantize = ops::QuantizeAndDequantizeV2(
s.WithOpName("my_quantize"), input, weights_min, weights_max);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestTensor("weights_min", {1});
AddTestTensor("weights_max", {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input_min\" for "
"QuantizeAndDequantizeV2 must be a constant");
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto num_bits = ops::Placeholder(s.WithOpName("num_bits"), DT_INT32);
auto quantize = ops::QuantizeAndDequantizeV3(
s.WithOpName("my_quantize"), input, weights_min, weights_max, num_bits);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestWeights<float>("weights_min", {1}, {-6.0f});
AddTestWeights<float>("weights_max", {1}, {6.0f});
AddTestWeights<int>("num_bits", {1}, {8});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertSquare) {
{
Reset();
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto square = ops::Square(s.WithOpName("my_square"), input);
NodeDef node_def = square.operation.node()->def();
AddTestWeights("input", {1, 2, 3}, {1, 2, 3, 4, -5, 6}, tf_type_);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"x\" for Square must be a tensor");
}
Reset();
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto square = ops::Square(s.WithOpName("my_square"), input);
NodeDef node_def = square.operation.node()->def();
const int num_inputs = 20;
std::vector<float> inputs(num_inputs);
std::vector<float> expected_outputs(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
const float value = (i - 9);
inputs[i] = value;
expected_outputs[i] = value * value;
}
AddTestTensor("input", {1, 1, 20}, tf_type_, inputs);
TestOpConverter(node_def, {1, 1, 20}, OkStatus(), OkStatus(),
ArrayFloatNear(expected_outputs, 0));
}
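// Advances config like a binary counter whose least-significant element is
// last. Each element toggles between 0 (pass the corresponding input as
// weights) and 1 (pass it as a tensor). Returns false once the counter wraps
// back to all zeros, i.e. after all tensor/weight combinations were visited.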
bool nextTensorWeightConfiguration(std::vector<int>& config) {
for (int i = config.size(); i-- > 0;) {
if ((config[i] = 1 - config[i])) return true;
}
return false;
}
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertFill) {
Scope s = Scope::NewRootScope();
auto dims = ops::Placeholder(s.WithOpName("dims"), DT_INT32);
auto value = ops::Placeholder(s.WithOpName("value"), tf_type_);
auto fill = ops::Fill(s.WithOpName("my_fill"), dims, value);
const NodeDef& node_def = fill.operation.node()->def();
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
Reset();
AddTestWeights("dims", {2}, {2, 2}, DT_INT32);
AddTestWeights("value", {1}, {42}, tf_type_);
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
convert_not_supported_implicit(node_def.op(), node_def.name()));
return;
}
std::vector<std::vector<int>> output_dims_params = {
{8}, {8, 2, 4}, {32, 32, 3200}};
std::vector<std::vector<int>> value_dims_params = {{}, {1}};
float val = 42.0;
Status status = OkStatus();
for (bool dims_is_tensor : {true, false}) {
for (bool value_is_tensor : {true, false}) {
for (auto output_dims : output_dims_params) {
for (auto value_dims : value_dims_params) {
Reset();
std::vector<int32_t> dims_dims = {
static_cast<int32_t>(output_dims.size())};
if (dims_is_tensor) {
AddTestTensor("dims", dims_dims, DT_INT32, output_dims, dims_dims);
} else {
AddTestWeights("dims", dims_dims, output_dims, DT_INT32);
}
if (value_is_tensor) {
AddTestTensor("value", value_dims, tf_type_,
{static_cast<int>(val)});
} else {
AddTestWeights("value", value_dims, {static_cast<int>(val)},
tf_type_);
}
size_t nb_el = 1;
for (auto d : output_dims) {
nb_el *= d;
}
std::vector<float> expected_output(nb_el, val);
TestOpConverter(node_def, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertRange) {
auto get_casted_value = [this](const float value, const DataType dtype) {
return dtype == DT_INT32 ? static_cast<int32>(value) : value;
};
auto set_parameters = [this](const std::array<const char*, 3>& name,
const std::array<std::vector<float>, 3>& value,
const std::array<DataType, 3>& type,
const std::vector<int>& config,
int shape_idx = -1) {
Reset();
for (int i = 0; i < 3; i++) {
if (config[i]) {
std::vector<int32> partial_shape_dims = {};
if (shape_idx > 3 || (shape_idx >= 0 && shape_idx != i)) {
partial_shape_dims = {1};
}
AddTestTensor(name[i], {1}, type[i], value[i], partial_shape_dims);
} else {
AddTestWeights(name[i], {1}, value[i], type[i]);
}
}
};
const float start = 1.0;
const float limit = 43.0;
const float delta = 2.0;
const std::array<const char*, 3> param_name = {"start", "limit", "delta"};
std::array<std::vector<float>, 3> param_value;
param_value[0] = {start};
param_value[1] = {limit};
param_value[2] = {delta};
const auto start_type = tf_type_;
std::array<DataType, 3> param_type = {tf_type_, tf_type_, tf_type_};
Scope s = Scope::NewRootScope();
const auto range =
ops::Range(s.WithOpName("my_range"),
ops::Placeholder(s.WithOpName(param_name[0]), param_type[0]),
ops::Placeholder(s.WithOpName(param_name[1]), param_type[1]),
ops::Placeholder(s.WithOpName(param_name[2]), param_type[2]));
const NodeDef& ndef = range.operation.node()->def();
const std::vector<DataType> param_types{DT_FLOAT, DT_HALF, DT_INT32};
std::vector<int> config(3, 0);
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
const auto& err = convert_not_supported_implicit(ndef.op(), ndef.name());
do {
set_parameters(param_name, param_value, param_type, config);
RunValidationAndConversion(ndef, absl::StatusCode::kUnimplemented, err);
} while (nextTensorWeightConfiguration(config));
return;
}
const auto& expect_msg = convert_range_expected_msg(ndef);
bool all_weights = true;
do {
for (auto limit_type : param_types) {
param_type[1] = limit_type;
for (auto delta_type : param_types) {
param_type[2] = delta_type;
const auto all_integers = start_type == DT_INT32 &&
limit_type == DT_INT32 &&
delta_type == DT_INT32;
if (all_weights || (all_integers && !config[2])) {
param_value[2] = {0};
set_parameters(param_name, param_value, param_type, config);
RunValidationAndConversion(
ndef, absl::StatusCode::kInvalidArgument,
"The delta parameter of Range operation cannot be equal to 0");
if (!all_weights && !config[2]) {
param_value[2] = {-1};
set_parameters(param_name, param_value, param_type, config);
const string err = StrCat(
"The delta parameter of Range operation "
"cannot be negative, when one of (start, limit) is passed as "
"a tensor, but got ",
param_value[2][0]);
RunValidationAndConversion(ndef, absl::StatusCode::kInvalidArgument,
err);
}
}
if (all_weights) {
for (int j = 0; j <= 1; j++) {
param_value[j] = {get_casted_value(start, tf_type_)};
param_value[1 - j] = {get_casted_value(limit, limit_type)};
param_value[2] = {(2 * j - 1) *
get_casted_value(delta, delta_type)};
set_parameters(param_name, param_value, param_type, config);
const auto error = convert_range_error_msg(
param_value[0][0], param_value[1][0], param_value[2][0]);
RunValidationAndConversion(ndef, absl::StatusCode::kInvalidArgument,
error);
}
}
param_value[0] = {start};
param_value[2] = {delta};
if (all_integers) {
if (trt_mode_ == TrtTestMode::kDynamicShape) {
for (int j = 0; j < 3; j++) {
if (!config[j]) continue;
const string err =
StrCat("Dimension for '", param_name[j],
"' of Range operator should be equal to 1");
set_parameters(param_name, param_value, param_type, config, j);
RunValidationAndConversion(
ndef, absl::StatusCode::kInvalidArgument, err);
}
}
} else {
if (!all_weights) {
set_parameters(param_name, param_value, param_type, config);
RunValidationAndConversion(ndef, absl::StatusCode::kUnimplemented,
expect_msg);
}
}
}
}
all_weights = false;
} while (nextTensorWeightConfiguration(config));
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(DT_BOOL, &trt_type));
const std::string error_msg =
"Unsupported data type " + DebugString(trt_type) + " used for '";
do {
for (auto limit_type : param_types) {
param_type[1] = limit_type;
for (auto delta_type : param_types) {
param_type[2] = delta_type;
for (int i = 0; i < 3; i++) {
if (!config[i]) {
const auto saved_type = param_type[i];
param_type[i] = DT_BOOL;
set_parameters(param_name, param_value, param_type, config);
param_type[i] = saved_type;
RunValidationAndConversion(ndef, absl::StatusCode::kInvalidArgument,
error_msg + param_name[i] + "'");
}
}
}
}
} while (nextTensorWeightConfiguration(config));
const Status status = OkStatus();
const std::vector<DataType> int_type{DT_INT32};
int partial_shape_idx = -1;
all_weights = true;
do {
const auto& types = all_weights ? param_types : int_type;
const auto jEnd = all_weights ? 1 : 0;
for (auto limit_type : types) {
param_type[1] = limit_type;
for (auto delta_type : types) {
param_type[2] = delta_type;
for (int j = 0; j <= jEnd; j++) {
const int mult = (1 - 2 * j);
param_value[j] = {get_casted_value(start, tf_type_)};
param_value[1 - j] = {get_casted_value(limit, limit_type)};
param_value[2] = {mult * get_casted_value(delta, delta_type)};
std::vector<float> expected_output;
const float limit_curr = param_value[1][0];
const float delta_curr = param_value[2][0];
float value = param_value[0][0];
int num_values = 0;
while (mult * (limit_curr - value) > 0) {
num_values++;
expected_output.push_back(value);
value += delta_curr;
}
set_parameters(param_name, param_value, param_type, config,
partial_shape_idx);
const std::vector<int> output_dims = {num_values};
TestOpConverter(ndef, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
if (all_weights) {
if (start_type != DT_INT32) break;
if (trt_mode_ == TrtTestMode::kDynamicShape) partial_shape_idx = 3;
all_weights = false;
}
} while (nextTensorWeightConfiguration(config));
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertLikeOps) {
auto get_node = [&](int value) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
if (value == 0) {
auto zeros_like = ops::ZerosLike(s.WithOpName("Zeros"), input);
return zeros_like.operation.node()->def();
}
auto ones_like = ops::OnesLike(s.WithOpName("Ones"), input);
return ones_like.operation.node()->def();
};
for (int value : {0, 1}) {
Reset();
const NodeDef& node_def = get_node(value);
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
std::vector<float> input_data(8, 42.0f);
AddTestTensor("input", {8}, tf_type_, input_data);
const auto& err = convert_not_supported_implicit(node_def.name() + "Like",
node_def.name());
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
err);
continue;
}
std::vector<std::vector<int>> output_dims_params = {
{8}, {8, 2, 4}, {32, 32, 3200}};
float val = 42.0;
Status status = OkStatus();
for (bool input_is_tensor : {true, false}) {
for (auto output_dims : output_dims_params) {
Reset();
size_t nb_el = 1;
for (auto d : output_dims) {
nb_el *= d;
}
std::vector<float> input_data(nb_el, val);
if (input_is_tensor) {
AddTestTensor("input", output_dims, tf_type_, input_data);
} else {
AddTestWeights("input", output_dims, input_data, tf_type_);
}
std::vector<float> expected_output(nb_el, value);
TestOpConverter(node_def, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
}
#endif
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
TEST_P(OpConverter_FP32_Test, ConvertCombinedNMS) {
auto get_nms_nodedef = [](DataType tf_type, bool clip_boxes = true,
bool pad_per_class = false) -> NodeDef {
Scope s = Scope::NewRootScope();
auto boxes_tensor = ops::Placeholder(s.WithOpName("boxes"), tf_type);
auto scores_tensor = ops::Placeholder(s.WithOpName("scores"), tf_type);
auto max_output_size_per_class =
ops::Placeholder(s.WithOpName("max_output_size_per_class"), DT_INT32);
auto max_total_size =
ops::Placeholder(s.WithOpName("max_total_size"), DT_INT32);
auto iou_threshold =
ops::Placeholder(s.WithOpName("iou_threshold"), tf_type);
auto score_threshold =
ops::Placeholder(s.WithOpName("score_threshold"), tf_type);
auto nms_attrs = ops::CombinedNonMaxSuppression::Attrs()
.PadPerClass(pad_per_class)
.ClipBoxes(clip_boxes);
auto nms_op = ops::CombinedNonMaxSuppression(
s.WithOpName("my_nms"), boxes_tensor, scores_tensor,
max_output_size_per_class, max_total_size, iou_threshold,
score_threshold, nms_attrs);
return nms_op.operation.node()->def();
};
struct TestParams {
const std::string description;
const std::vector<int32> boxes_tensor_dims;
const std::vector<int32> scores_tensor_dims;
const std::vector<float> boxes_values;
const std::vector<float> scores_values;
const int32 max_output_size_per_class;
const int32 max_total_size;
const float iou_threshold;
const float score_threshold;
const bool pad_per_class;
const bool clip_boxes;
const std::vector<std::vector<int32>> expected_output_dims;
const std::vector<float> exp_boxes;
const std::vector<float> exp_scores;
const std::vector<float> exp_classes;
const std::vector<float> exp_num_detections;
Status conversion_status;
Status runtime_status;
};
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
Status conv_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(convert_not_supported_implicit(
"CombinedNonMaxSuppression", "my_nms"))
: OkStatus();
std::vector<TestParams> params = {
TestParams{"Test 1: clip boxes",
{1, 1, 3, 4},
{1, 1, 3},
{0, 0, 0.3, 1.4, 0, 0, 0.3, 1.4, 0, 0, 0.3, 1.4},
{0.4, 0.7, 0.3},
3,
2,
0.1,
0,
false,
true,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{0, 0, 0.3, 1.0, 0, 0, 0.3, 1.0},
{0.7, 0.4},
{1, 0},
{2},
conv_status},
TestParams{
"Test 2: iou threshold",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 1, 5, 11, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.7,
0,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 3, 2, 1},
{0, 0, 0, 0},
{4},
conv_status},
TestParams{
"Test 3: score threshold",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 1, 5, 11, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.1,
2,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 0, 0, 0, 0, 0, 0, 0, 0},
{5, 3, 0, 0},
{0, 0, 0, 0},
{2},
conv_status},
TestParams{
"Test 4: per class size and pad",
{1, 5, 1, 4},
{1, 5, 2},
{0, 0, 5, 10, 0, 1, 5, 11, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 0, 0, 4, 3, 0, 2, 0, 1, 0},
1,
4,
0.1,
0,
true,
false,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{0, 0, 5, 10, 0, 1, 5, 11},
{5, 4},
{0, 1},
{2},
conv_status},
TestParams{
"Test 5: different box coordinate order",
{1, 5, 1, 4},
{1, 5, 2},
{5, 10, 0, 0, 5, 11, 0, 1, 12, 4, 8, 0, 10, 6, 6, 2, 11, 12, 8, 9},
{5, 0, 0, 4, 3, 0, 2, 0, 1, 0},
1,
4,
0.1,
0,
true,
false,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{5, 10, 0, 0, 5, 11, 0, 1},
{5, 4},
{0, 1},
{2},
conv_status},
};
#else
Status conv_status =
trt_mode_ == TrtTestMode::kDynamicShape
? errors::Unimplemented(
"TensorRT BatchedNMS Plugin requires input with static shape")
: OkStatus();
std::vector<TestParams> params = {
TestParams{
"Test 1: Original test",
{1, 1, 3, 4},
{1, 1, 3},
{0, 0, 0.3, 0.4, 0, 0, 0.3, 0.4, 0, 0, 0.3, 0.4},
{0.4, 0.7, 0.3},
3,
2,
.5f,
0,
false,
true,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{0, 0, 0.3, 0.4, 0, 0, 0.3, 0.4},
{0.7, 0.4},
{1, 0},
{2},
conv_status},
TestParams{
"Test 2: clip_boxes",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 4, 5, 14, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.1,
0,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 8, 9, 11, 12, 0, 0, 0, 0},
{5, 3, 1, 0},
{0, 0, 0, -1},
{3},
conv_status},
TestParams{
"Test 3: score threshold",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 4, 5, 14, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.1,
2,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 0, 0, 0, 0, 0, 0, 0, 0},
{5, 3, 0, 0},
{0, 0, -1, -1},
{2},
conv_status},
TestParams{
"Test 4: max coord first",
{1, 5, 1, 4},
{1, 5, 1},
{5, 10, 0, 0, 5, 14, 0, 4, 12, 4, 8, 0, 10, 6, 6, 2, 11, 12, 8, 9},
{5, 4, 3, 2, 1},
4,
4,
0.1,
0,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{5, 10, 0, 0, 12, 4, 8, 0, 11, 12, 8, 9, 0, 0, 0, 0},
{5, 3, 1, 0},
{0, 0, 0, -1},
{3},
conv_status},
TestParams{"Test 5: TopK error",
{1, 5000, 1, 4},
{1, 5000, 1},
{},
{},
4,
4,
0.1,
0,
false,
false,
{},
{},
{},
{},
{},
conv_status.ok()
? errors::InvalidArgument(
"TRT NMS plugin allow top_k<=4096, where top_k = "
"max(num_boxes, max_total_size). You can override "
"this by setting TF_TRT_ALLOW_NMS_TOPK_OVERRIDE=1 "
"environment variable, but this can result in a "
"loss of accuracy.")
: conv_status},
};
#endif
for (auto p : params) {
Reset();
SCOPED_TRACE(p.description);
AddTestTensor("boxes", p.boxes_tensor_dims, p.boxes_values);
AddTestTensor("scores", p.scores_tensor_dims, p.scores_values);
AddTestWeights<int32>("max_output_size_per_class", {1},
{p.max_output_size_per_class});
AddTestWeights<int32>("max_total_size", {1}, {p.max_total_size});
AddTestWeights<float>("iou_threshold", {1}, {p.iou_threshold}, tf_type_);
AddTestWeights<float>("score_threshold", {1}, {p.score_threshold},
tf_type_);
auto node_def = get_nms_nodedef(tf_type_, p.clip_boxes, p.pad_per_class);
TestOpConverterMultiOut(node_def, p.expected_output_dims,
p.conversion_status, p.runtime_status,
{
ElementsAreArray(p.exp_boxes),
ElementsAreArray(p.exp_scores),
ElementsAreArray(p.exp_classes),
ElementsAreArray(p.exp_num_detections),
},
{tf_type_, tf_type_, tf_type_, DT_INT32});
}
}
#endif
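// Builds a NodeDef named "my_unary" that applies the unary op T to a single
// placeholder input; specialized below for LeakyRelu to set its alpha attr.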
template <typename T>
NodeDef CreateUnaryOp(DataType tf_type) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
return T(s.WithOpName("my_unary"), input).operation.node()->def();
}
constexpr float kLeakyReluAlpha = 0.2f;
template <>
NodeDef CreateUnaryOp<ops::internal::LeakyRelu>(DataType tf_type) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
return ops::internal::LeakyRelu(
s.WithOpName("my_unary"), input,
ops::internal::LeakyRelu::Alpha(kLeakyReluAlpha))
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_UnaryTest, ConvertActivation) {
constexpr float kSeluAlpha = 1.7580993408473768599402175208123f;
constexpr float kSeluScale = 1.0507009873554804934193349852946f;
using OpFunc = std::function<NodeDef(DataType)>;
using ValFunc = float (*)(float);
std::map<std::string, std::pair<OpFunc, ValFunc>> op_map;
#define ADD_OP(name, op, compute) \
op_map[name] = std::make_pair(CreateUnaryOp<op>, compute)
ADD_OP("LeakyRelu", ops::internal::LeakyRelu,
[](float x) { return (x > 0.0f) ? x : x * kLeakyReluAlpha; });
ADD_OP("Relu", ops::Relu, [](float x) { return (x > 0.0f) ? x : 0.0f; });
ADD_OP("Relu6", ops::Relu6,
[](float x) { return std::min(std::max(x, 0.0f), 6.0f); });
ADD_OP("Sigmoid", ops::Sigmoid,
[](float x) { return 1.0f / (1.0f + std::exp(-x)); });
ADD_OP("Tanh", ops::Tanh, static_cast<ValFunc>(std::tanh));
ADD_OP("Elu", ops::Elu,
[](float x) { return (x > 0.0f) ? x : std::exp(x) - 1; });
ADD_OP("Selu", ops::Selu, [](float x) {
return (x > 0.0f) ? kSeluScale * x
: kSeluScale * kSeluAlpha * (std::exp(x) - 1);
});
ADD_OP("Softsign", ops::Softsign,
[](float x) { return x / (std::abs(x) + 1); });
ADD_OP("Softplus", ops::Softplus,
[](float x) { return std::log(std::exp(x) + 1); });
#undef ADD_OP
const std::vector<float> input = {-100, -2, -1, 0, 1, 88};
const bool nan_sensitive = false;
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
const float max_abs_error = 1e-4;
#else
const float max_abs_error = 0.;
#endif
RunTests("Activation", *ActivationTypeMap(), op_map, input, "input",
max_abs_error, nan_sensitive);
}
TEST_P(OpConverter_FP32_Test, ConvertExpandDims) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto expanddims =
ops::ExpandDims(s.WithOpName("my_expanddims"), input, weights);
const NodeDef& node_def = expanddims.operation.node()->def();
{
Reset();
AddTestWeights<int32>("input", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<int32>("weights", {1}, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for ExpandDims must be a "
"tensor");
}
{
Reset();
AddTestTensor("input", {3, 2, 1});
AddTestTensor("weights", {3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for ExpandDims must be a "
"constant");
}
std::vector<TestParamBase> test_params = {
TestParamBase{{1, 1, 2, 3},
{},
{1, 1, 1, 2, 3},
{0},
trt_mode_ == TrtTestMode::kImplicitBatch
? Status(absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension")
: OkStatus()},
TestParamBase{{1, 1, 2, 3},
{},
{1, 1, 1, 2, 3},
{-5},
trt_mode_ == TrtTestMode::kImplicitBatch
? Status(absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension")
: OkStatus()},
TestParamBase{{1, 1, 2, 3},
{},
{},
{5},
Status(absl::StatusCode::kInvalidArgument,
"Axis value of 5 is out of bounds, must be in range"
" [-5, 5)")},
TestParamBase{{1, 1, 2, 3},
{},
{},
{-6},
Status(absl::StatusCode::kInvalidArgument,
"Axis value of -6 is out of bounds, must be in range"
" [-5, 5)")},
TestParamBase{{1, 2, 3}, {}, {1, 1, 2, 3}, {1}},
TestParamBase{{1, 2, 3}, {}, {1, 1, 2, 3}, {-3}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 3, 1}, {3}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 3, 1}, {-1}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 1, 3}, {2}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 1, 3}, {-2}},
TestParamBase{{1, 6}, {}, {1, 1, 6}, {1}},
TestParamBase{{1, 6}, {}, {1, 6, 1}, {-1}},
};
for (auto p : test_params) {
Reset();
AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6});
AddTestWeights<int32>("weights", {1}, {p.param[0]});
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertSoftmax) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("logits"), tf_type_);
auto softmax = ops::Softmax(s.WithOpName("my_softmax"), input);
const NodeDef& node_def = softmax.operation.node()->def();
struct TestParams {
std::vector<int> input_dims;
std::vector<float> expected_values;
};
std::vector<TestParams> test_params = {
TestParams{{2, 3},
{0.09003057, 0.24472848, 0.66524094,
0.09003057, 0.24472848, 0.66524094}},
TestParams{{6, 1},
{1, 1, 1, 1, 1, 1}},
TestParams{{1, 6},
{0.00426978, 0.01160646, 0.03154963,
0.08576079, 0.23312202, 0.6336913}}};
std::vector<float> input_values{1, 2, 3, 4, 5, 6};
for (auto p : test_params) {
Reset();
AddTestTensor("logits", p.input_dims, input_values);
TestOpConverter(node_def, p.input_dims, OkStatus(), OkStatus(),
ArrayFloatNear(p.expected_values, 1e-3));
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertLogSoftmax) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("logits"), tf_type_);
auto logsoftmax = ops::LogSoftmax(s.WithOpName("my_logsoftmax"), input);
const NodeDef& node_def = logsoftmax.operation.node()->def();
struct TestParams {
std::vector<int> input_dims;
std::vector<float> expected_values;
};
std::vector<TestParams> test_params = {
TestParams{{2, 3},
{-2.4076061, -1.407606, -0.40760604,
-2.4076061, -1.407606, -0.40760604}},
TestParams{{1, 6},
{-5.4561934, -4.4561934, -3.4561934,
-2.4561934, -1.4561933, -0.45619333}},
TestParams{{6, 1},
{0, 0, 0, 0, 0, 0}}};
std::vector<float> input_values{1, 2, 3, 4, 5, 6};
for (auto p : test_params) {
Reset();
AddTestTensor("logits", p.input_dims, input_values);
TestOpConverter(node_def, p.input_dims, OkStatus(), OkStatus(),
ArrayFloatNear(p.expected_values, 1e-3));
}
}
TEST_P(OpConverter_FP32_Test, ConvertSqueeze) {
const bool use_implicit_batch = (trt_mode_ == TrtTestMode::kImplicitBatch);
auto get_squeeze_nodedef = [](std::vector<int> axes,
DataType tf_type) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
if (!axes.empty()) {
ops::Squeeze::Attrs squeeze_attrs;
squeeze_attrs.axis_ = gtl::ArraySlice<int>(axes);
auto squeeze =
ops::Squeeze(s.WithOpName("my_squeeze"), input, squeeze_attrs);
return squeeze.operation.node()->def();
} else {
auto squeeze = ops::Squeeze(s.WithOpName("my_squeeze"), input);
return squeeze.operation.node()->def();
}
};
std::vector<TestParamBase> test_params = {
TestParamBase{
{1, 2, 1, 3},
{},
{2, 3},
{},
trt_mode_ == TrtTestMode::kExplicitBatch
? OkStatus()
: Status{absl::StatusCode::kUnimplemented,
"Squeeze is not implemented for empty squeeze_dims"}},
TestParamBase{{1, 2, 1, 3},
{},
{2, 1, 3},
{0},
use_implicit_batch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension"}
: OkStatus()},
TestParamBase{{1, 2, 1, 3},
{},
{2, 1, 3},
{-4},
use_implicit_batch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension"}
: OkStatus()},
TestParamBase{
{1, 1, 2, 3},
{},
{},
{4},
Status{absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in range [-4, 4)"}},
TestParamBase{
{1, 1, 2, 3},
{},
{},
{-5},
Status{
absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be in range [-4, 4)"}},
TestParamBase{{1, 1, 2, 3}, {}, {1, 2, 3}, {1}},
TestParamBase{{1, 1, 2, 3}, {}, {1, 2, 3}, {-3}},
TestParamBase{{1, 2, 3, 1}, {}, {1, 2, 3}, {3}},
TestParamBase{{1, 2, 3, 1}, {}, {1, 2, 3}, {-1}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {1, 3, 5}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {3, 1, 5}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {-1, -3, -5}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {1, -3, 5}},
TestParamBase{{1, 1, 6}, {}, {1, 6}, {1}},
TestParamBase{{1, 6, 1}, {}, {1, 6}, {2}},
};
auto squeeze_non_singleton = TestParamBase{
{1, 1, 2, 3},
{},
{},
{2},
Status{absl::StatusCode::kInvalidArgument,
"Dimension 2 with size 2 cannot be squeezed because it must be "
"size 1"}};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
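    // The non-singleton check needs the actual dimension size, which is not
    // available at conversion time in dynamic shape mode, so the failure
    // surfaces at runtime instead. Also exercise squeezing a dimension whose
    // size is only known at runtime.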
squeeze_non_singleton.status = OkStatus();
squeeze_non_singleton.runtime_status =
errors::InvalidArgument("Negative number of dimensions -1");
test_params.push_back(TestParamBase{{2, 1, 3}, {2, -1, 3}, {2, 3}, {1}});
test_params.push_back(TestParamBase{{2, 1, 3}, {2, 1, -1}, {2, 3}, {1}});
}
test_params.push_back(squeeze_non_singleton);
for (TestParamBase p : test_params) {
SCOPED_TRACE(p);
Reset();
NodeDef node_def = get_squeeze_nodedef(p.param, tf_type_);
AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6},
p.partial_input_dims);
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertStridedSlice) {
auto get_strided_slice_nodedef =
[](DataType tf_type, int64 begin_mask = 0, int64 end_mask = 0,
int64 ellipsis_mask = 0, int64 new_axis_mask = 0,
int64 shrink_axis_mask = 0) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto begin = ops::Placeholder(s.WithOpName("begin"), DT_INT32);
auto end = ops::Placeholder(s.WithOpName("end"), DT_INT32);
auto strides = ops::Placeholder(s.WithOpName("strides"), DT_INT32);
ops::StridedSlice::Attrs attrs = ops::StridedSlice::Attrs()
.BeginMask(begin_mask)
.EndMask(end_mask)
.EllipsisMask(ellipsis_mask)
.NewAxisMask(new_axis_mask)
.ShrinkAxisMask(shrink_axis_mask);
auto strided_slice = ops::StridedSlice(s.WithOpName("my_strided_slice"),
input, begin, end, strides, attrs);
return strided_slice.operation.node()->def();
};
{
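    // Input is weights, should fail.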
Reset();
NodeDef node_def = get_strided_slice_nodedef(tf_type_);
AddTestWeights<int32>("input", {1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<int32>("begin", {4}, {0, 0, 0, 0});
AddTestWeights<int32>("end", {4}, {1, 1, 2, 3});
AddTestWeights<int32>("strides", {4}, {1, 1, 1, 1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for StridedSlice must "
"be a tensor");
}
{
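    // Begin, end and strides are tensors, should fail.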
Reset();
NodeDef node_def = get_strided_slice_nodedef(tf_type_);
AddTestTensor("input", {4, 1, 1, 1});
AddTestTensor("begin", {4});
AddTestTensor("end", {4});
AddTestTensor("strides", {4});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"begin\" for StridedSlice must be a constant");
}
struct TestParams {
std::vector<int> input_dims;
std::vector<int> begin;
std::vector<int> end;
std::vector<int> strides;
int begin_mask;
int end_mask;
int ellipsis_mask;
int new_axis_mask;
int shrink_axis_mask;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status conversion_status;
Status runtime_status;
    std::vector<int> partial_input_dims;  // Symbolic input shape used in
                                          // dynamic shape mode.
};
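  // Builds a bitmask from 0/1 flags: bit i is set iff mask[i] is nonzero.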
auto get_mask = [](const std::vector<int>& mask) {
int result = 0;
for (int i = 0; i < mask.size(); i++) {
if (mask[i]) result += (1 << i);
}
return result;
};
const std::vector<float> ok_input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
Status modified_batch_dim_status =
(trt_mode_ == TrtTestMode::kImplicitBatch)
? errors::Unimplemented(
"TensorRT does not allow modifications to "
"the batch dimension")
: OkStatus();
std::vector<TestParams> params = {
TestParams{{2, 1, 1, 3},
{0, 0, 0, 0},
{1, 1, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
modified_batch_dim_status,
OkStatus(),
{}},
TestParams{
{2, 1, 1, 3},
{0, 0, 0, 0},
{1, 1, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
modified_batch_dim_status,
OkStatus(),
{-1, 1, 1, 3},
},
TestParams{
{2, 1, 1, 3},
{0, 0, 0, 0},
{0, 1, 1, 2},
{1, 1, 1, 1},
get_mask({1, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{2, 1, 1, 2},
{1, 2, 4, 5},
OkStatus(),
OkStatus(),
{-1, 1, 1, 3},
},
TestParams{{1, 1, 2, 3},
{0, 0, 2, 0},
{1, 1, 0, 3},
{1, 1, 1, 1},
0,
0,
0,
0,
0,
{},
{},
errors::InvalidArgument("\"size\" cannot be negative for "
"StridedSlice"),
OkStatus(),
{}},
TestParams{
{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
},
TestParams{
{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
OkStatus(),
OkStatus(),
{1, 1, -1, -1},
},
TestParams{
{1, 1, 2, 3},
{0, 0, 1, 1},
{0, 0, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 1, 2},
{5, 6},
OkStatus(),
OkStatus(),
{1, 1, -1, -1},
},
TestParams{
{1, 1, 2, 3},
{0, 0, 1, 1},
{0, 1, 2, 3},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{5, 6},
},
TestParams{{1, 1, 2, 3},
{0, 0, 1, 2},
{0, 0, 0, 0},
{1, 1, -1, -1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{6, 5},
OkStatus(),
OkStatus(),
{1, 1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 1, 1},
{0, 0, 0, 0},
{1, 1, -1, -1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 2, 2},
{5, 4, 2, 1},
OkStatus(),
OkStatus(),
{1, 1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 0, 0},
{1, 1, -1, -1},
get_mask({0, 0, 1, 1}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{6, 5},
OkStatus(),
OkStatus(),
{1, 1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 0, 0},
{1, -1, -1, -1},
get_mask({1, 1, 1, 1}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 2, 3},
{6, 5, 4, 3, 2, 1},
OkStatus(),
OkStatus(),
{1, -1, -1, -1}},
TestParams{
{1, 2, 3, 1},
{0, 0, 0, 0},
{0, 1, 2, 1},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 2, 1},
{1, 2},
},
TestParams{
{1, 2, 3, 1},
{0, 1, 1, 0},
{0, 2, 3, 1},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 2, 1},
{5, 6},
},
TestParams{
{1, 2, 1, 3},
{0, 0, 0, 0},
{0, 1, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
},
TestParams{
{1, 2, 1, 3},
{0, 1, 0, 1},
{0, 2, 1, 3},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{5, 6},
},
TestParams{
{1, 2, 3},
{0, 0, 0},
{0, 1, 2},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 0}),
0,
0,
0,
{1, 1, 2},
{1, 2},
},
TestParams{{1, 2, 3},
{0, 1, 1},
{0, 0, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 1, 1}),
0,
0,
0,
{1, 1, 2},
{5, 6},
OkStatus(),
OkStatus(),
{-1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 0, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 0}),
0,
0,
0,
{1, 1, 2, 2},
{1, 2, 4, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{
{1, 1, 2, 3},
{0, 0, 1, 0},
{0, 0, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 1, 3},
{4, 5, 6},
},
TestParams{{1, 2, 3, 1},
{0, 0, 0, 0},
{0, 1, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 1, 1}),
0,
0,
0,
{1, 1, 3, 1},
{1, 2, 3},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{
{1, 2, 3, 1},
{0, 1, 0, 0},
{0, 0, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 3, 1},
{4, 5, 6},
},
TestParams{{1, 6},
{0, 0},
{0, 3},
{1, 1},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{1, 2, 3},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{
{1, 1, 6},
{0, 0, 2},
{0, 0, 5},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 1, 0}),
0,
0,
0,
{1, 1, 3},
{3, 4, 5},
},
TestParams{
{1, 6, 1},
{0, 2, 0},
{0, 5, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 1}),
0,
0,
0,
{1, 3, 1},
{3, 4, 5},
},
TestParams{
{1, 6, 1},
{0, -6, 0},
{0, -3, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 1}),
0,
0,
0,
{1, 3, 1},
{1, 2, 3},
},
TestParams{
{1, 6, 1},
{0, 0, 0},
{0, -1, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 1}),
0,
0,
0,
{1, 5, 1},
{1, 2, 3, 4, 5},
},
TestParams{
{1, 1, 2, 3},
{0, 0, -9999, -9},
{0, 1, 1000, 4},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6},
},
TestParams{{1, 6},
{0, 0},
{0, 5},
{1, 2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{1, 3, 5},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 0},
{0, 6},
{1, 2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{1, 3, 5},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 1},
{0, 6},
{1, 2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{2, 4, 6},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 2},
{0, 6},
{1, 3},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 2},
{3, 6},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 5},
{0, 0},
{1, -2},
get_mask({0, 0}),
get_mask({1, 1}),
0,
0,
0,
{1, 3},
{6, 4, 2},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 5},
{0, 0},
{1, -2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{6, 4, 2},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 5},
{0, 1},
{1, -3},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 2},
{6, 3},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 1, 2, 3},
{0, 1},
{0, 2},
{1, 1},
get_mask({0, 0}),
get_mask({0, 0}),
get_mask({1, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{
{1, 1, 2, 3},
{0, 0, 1},
{0, 0, 2},
{1, 1, 1},
get_mask({1, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
get_mask({0, 1, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 1},
{0, 1, 2, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 1, 0, 1},
{1, 1, 2, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
get_mask({0, 1, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0, 1},
{0, 1, 1, 2, 2},
{1, 1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 1},
{0, 0, 0, 2},
{1, 1, 1, 1},
get_mask({1, 1, 1, 0}),
get_mask({1, 1, 1, 0}),
0,
0,
get_mask({0, 0, 0, 1}),
{1, 1, 2},
{2, 5},
OkStatus(),
OkStatus(),
{1, 1, 2, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 1},
{0, 1, 2, 2},
{1, 1, 1, 1},
get_mask({1, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
get_mask({0, 1, 0, 1}),
{1, 2},
{2, 5},
OkStatus(),
OkStatus(),
{1, 1, 2, -1}},
TestParams{{6, 1, 1},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
get_mask({1, 1, 1}),
get_mask({1, 1, 1}),
0,
0,
get_mask({0, 1, 1}),
{6},
{1, 2, 3, 4, 5, 6},
OkStatus(),
OkStatus(),
{-1, -1, -1}},
TestParams{{1, 6},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
get_mask({1, 0, 0}),
get_mask({0, 0, 0}),
{1, 1, 6},
{1, 1, 6},
errors::Unimplemented(
"new_axis_mask is not supported for StridedSlice"),
OkStatus(),
{1, 6}},
TestParams{{1, 3, 2},
{0, 0, 0},
{0, 0, 3},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
0,
1,
{3, 2},
{1, 2, 3, 4, 5, 6},
modified_batch_dim_status, OkStatus(),
{-1, -1, -1}},
TestParams{{2, 3, 2},
{0, 0, 0},
{0, 0, 3},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
0,
1,
{3, 2},
{1, 2, 3, 4, 5, 6},
modified_batch_dim_status, OkStatus(),
{-1, -1, 2}},
TestParams{{2, 3, 2},
{0, 0, 0},
{0, 0, 3},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
0,
3,
{2},
{1, 2},
modified_batch_dim_status, OkStatus(),
{-1, -1, 2}},
};
int i = 0;
for (auto p : params) {
Reset();
NodeDef node_def = get_strided_slice_nodedef(
tf_type_, p.begin_mask, p.end_mask, p.ellipsis_mask, p.new_axis_mask,
p.shrink_axis_mask);
VLOG(2) << "Preparing test case " << i++ << " with dims "
<< DebugString(p.input_dims);
switch (trt_mode_) {
case TrtTestMode::kImplicitBatch: {
AddTestTensor("input", p.input_dims, ok_input);
break;
}
case TrtTestMode::kExplicitBatch: {
AddTestTensor("input", p.input_dims, ok_input);
break;
}
case TrtTestMode::kDynamicShape: {
if (p.partial_input_dims.size() > 0) {
AddTestTensor("input", p.input_dims, tf_type_, ok_input,
p.partial_input_dims);
} else {
AddTestTensor("input", p.input_dims, tf_type_, ok_input,
p.input_dims);
}
break;
}
}
VLOG(2) << "Adding weights begin: " << DebugString(p.begin)
<< ", end: " << DebugString(p.end)
<< ", strides: " << DebugString(p.strides);
AddTestWeights<int32>("begin", {static_cast<int>(p.begin.size())}, p.begin);
AddTestWeights<int32>("end", {static_cast<int>(p.end.size())}, p.end);
AddTestWeights<int32>("strides", {static_cast<int>(p.strides.size())},
p.strides);
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
p.runtime_status, ElementsAreArray(p.expected_output));
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertSlice) {
auto get_slice_nodedef = [](DataType tf_type) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto begin = ops::Placeholder(s.WithOpName("begin"), DT_INT32);
auto size = ops::Placeholder(s.WithOpName("size"), DT_INT32);
auto slice = ops::Slice(s.WithOpName("my_slice"), input, begin, size);
return slice.operation.node()->def();
};
struct TestParams {
std::vector<int> input_dims;
    std::vector<int> partial_input_dims;  // Symbolic input shape used in
                                          // dynamic shape mode.
std::vector<int> begin;
std::vector<int> size;
std::vector<int> expected_output_dims;
std::vector<int> expected_output;
Status conversion_status;
Status runtime_status;
};
std::vector<TestParams> params = {
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, -1, 0},
{1, 1, 2, 3},
{},
{},
errors::InvalidArgument("\"begin\" in Slice "
"is out of range")},
TestParams{{2, 1, 1, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 1, 3},
{1, 1, 1, 3},
{1, 2, 3},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"TensorRT does not allow modifications to the batch "
"dimension in implicit batch mode")
: OkStatus()},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{-1, 1, 2, 2},
{1, 1, 2, 2},
{1, 2, 4, 5},
OkStatus()},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{-1, -1, -1, -1},
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6},
OkStatus()},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 2, 3},
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6}},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, -1, 2, 2},
{1, 1, 2, 2},
{1, 2, 4, 5},
OkStatus()},
TestParams{{1, 6},
{-1, -1},
{0, 1},
{1, 5},
{1, 5},
{2, 3, 4, 5, 6}},
TestParams{{1, 6},
{-1, -1},
{0, 1},
{-1, 3},
{1, 3},
{2, 3, 4}, OkStatus()},
TestParams{
{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 3, 0},
{1, 1, 2, 3},
{},
{},
trt_mode_ == TrtTestMode::kDynamicShape
? OkStatus()
: errors::InvalidArgument("\"begin\" + \"size\" for dimension "
"2 in Slice is out of range"),
errors::Internal("Internal: Failed to build TensorRT engine")},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 2, -2},
{},
{},
errors::InvalidArgument("\"size\" in Slice is out of range")},
TestParams{
{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 3, 2},
{},
{},
trt_mode_ == TrtTestMode::kDynamicShape
? OkStatus()
: errors::InvalidArgument("\"begin\" + \"size\" for dimension "
"2 in Slice is out of range"),
errors::Internal("Internal: Failed to build TensorRT engine")},
};
logger_.unsuppressAllLoggerMsgs();
int i = 0;
for (auto p : params) {
Reset();
NodeDef node_def = get_slice_nodedef(tf_type_);
VLOG(2) << "Preparing test case " << i++ << " with dims "
<< DebugString(p.input_dims);
std::vector<int> input_vals = {1, 2, 3, 4, 5, 6};
switch (trt_mode_) {
case TrtTestMode::kImplicitBatch: {
AddTestTensor("input", p.input_dims, input_vals);
break;
}
case TrtTestMode::kExplicitBatch: {
AddTestTensor("input", p.input_dims, input_vals);
break;
}
case TrtTestMode::kDynamicShape: {
if (p.partial_input_dims.size() > 0) {
AddTestTensor("input", p.input_dims, tf_type_, input_vals,
p.partial_input_dims);
} else {
AddTestTensor("input", p.input_dims, tf_type_, input_vals,
p.input_dims);
}
break;
}
}
AddTestWeights<int32>("begin", {static_cast<int>(p.begin.size())}, p.begin);
AddTestWeights<int32>("size", {static_cast<int>(p.size.size())}, p.size);
    // `i` was post-incremented in the VLOG above, so i == 9 and i == 11 refer
    // to test cases 8 and 10, which are expected to fail engine construction
    // in dynamic shape mode; suppress the resulting TensorRT error messages
    // around the conversion.
    const bool suppress_errors =
        trt_mode_ == TrtTestMode::kDynamicShape && (i == 9 || i == 11);
    if (suppress_errors)
      logger_.suppressLoggerMsgs(nvinfer1::ILogger::Severity::kERROR);
    TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
                    p.runtime_status, ElementsAreArray(p.expected_output));
    if (suppress_errors)
      logger_.unsuppressLoggerMsgs(nvinfer1::ILogger::Severity::kERROR);
}
}
TEST_P(OpConverter_FP32_Test, ConvertConv2D) {
DataType tf_type = tf_type_;
auto get_conv2d_nodedef =
[tf_type](std::vector<int> strides = {1, 1, 1, 1},
string padding = "SAME", string data_format = "NCHW",
std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto filter = ops::Placeholder(s.WithOpName("weights"), tf_type);
ops::Conv2D::Attrs attrs =
ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter, strides,
padding, attrs);
return conv2d.operation.node()->def();
};
{
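    // Input is weights, should fail.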
Reset();
NodeDef node_def = get_conv2d_nodedef();
AddTestWeights<float>("input", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for Conv2D must be a tensor");
}
{
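    // Filter is a tensor, should fail.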
Reset();
NodeDef node_def = get_conv2d_nodedef();
AddTestTensor("input", {3, 1, 2, 1});
AddTestTensor("weights", {3, 3, 1, 1});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"filter\" for Conv2D must be a constant");
}
{
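    // Filter is not 4D, should fail.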
Reset();
NodeDef node_def = get_conv2d_nodedef();
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Conv2D expects kernel of dimension 4");
}
{
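    // Dilations is not 4D, should fail.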
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NCHW", {1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution dilations field must specify 4 dimensions");
}
{
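    // Dilation != 1 for the channel dimension (NCHW), should fail.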
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NCHW", {1, 2, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
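    // Dilation != 1 for the channel dimension (NHWC), should fail.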
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NHWC", {1, 1, 1, 2});
AddTestTensor("input", {1, 2, 3, 1});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
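    // Strides is not 4D, should fail.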
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1}, "SAME", "NCHW", {1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution strides field must specify 4 dimensions");
}
{
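    // Stride != 1 for the channel dimension, should fail.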
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 2, 1, 1}, "SAME", "NCHW", {1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"Stride must be 1 for batch and channel dimensions");
}
if (trt_mode_ == TrtTestMode::kDynamicShape) {
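    // In dynamic shape mode the channel dimension must be static, should fail.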
Reset();
NodeDef node_def = get_conv2d_nodedef();
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(tf_type_, &trt_type));
AddTestTensorWithTFDims("input", {-1, -1, -1, -1}, trt_type);
AddTestWeights<float>("weights", {1, 2, 1, 1}, {-1, 1});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Channel dimension must be static");
}
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> filter_dims;
std::vector<float> filter;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> dilations;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
};
std::vector<TestParams> ok_params = {
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"VALID",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 2},
{1, 1, 0, 1}},
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 3},
{1, 1, -2, 0, 1, -4}},
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 3, 1, 1},
{-1, 0, 1},
{1, 1, 1, 1},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 3},
{1, 2, -1, 3, 1, -3}},
TestParams{{1, 2, 3, 1},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"VALID",
"NHWC",
{1, 1, 1, 1},
{1, 2, 2, 1},
{1, 1, 0, 1}},
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"VALID",
"NCHW",
{1, 1, 1, 2},
{1, 1, 2, 1},
{2, 1}},
TestParams{{1, 1, 2, 4},
{0, 1, 2, 2, 3, 4, 4, 7},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"VALID",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 2},
{1, 0, 1, 3}},
};
for (int i = 0; i < ok_params.size(); i++) {
Reset();
NodeDef node_def =
get_conv2d_nodedef(ok_params[i].strides, ok_params[i].padding,
ok_params[i].data_format, ok_params[i].dilations);
std::vector<int> partial_input_shape;
if (trt_mode_ == TrtTestMode::kDynamicShape) {
partial_input_shape.resize(ok_params[i].input_dims.size(), -1);
int channel_id = (ok_params[i].data_format == "NCHW") ? 1 : 3;
partial_input_shape[channel_id] = ok_params[i].input_dims[channel_id];
}
AddTestTensor("input", ok_params[i].input_dims, tf_type_,
ok_params[i].input, partial_input_shape);
AddTestWeights<float>("weights", ok_params[i].filter_dims,
ok_params[i].filter);
TestOpConverter(node_def, ok_params[i].expected_output_dims, OkStatus(),
OkStatus(), ElementsAreArray(ok_params[i].expected_output));
}
}
TEST_P(OpConverter_FP32_Test, ConvertConv2DBackpropInput) {
auto get_conv2d_backprop_input_nodedef =
[](DataType tf_type, std::vector<int> strides = {1, 1, 1, 1},
string padding = "SAME", string data_format = "NCHW",
std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto filter = ops::Placeholder(s.WithOpName("weights"), tf_type);
auto input_sizes = ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
ops::Conv2DBackpropInput::Attrs attrs = ops::Conv2DBackpropInput::Attrs()
.DataFormat(data_format)
.Dilations(dilations);
auto conv2d = ops::Conv2DBackpropInput(
s.WithOpName("my_conv2d_backprop_input"), input_sizes, filter, input,
strides, padding, attrs);
return conv2d.operation.node()->def();
};
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> filter_dims;
std::vector<float> filter;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> dilations;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status conversion_status;
std::vector<int> partial_input_dims;
};
std::vector<TestParams> params = {
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3}},
TestParams{{1, 2, 2, 1},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 2, 1},
"SAME",
"NHWC",
{1, 1, 1, 1},
{1, 2, 4, 1},
{0, 0, -1, 1, -2, 2, -3, 3}},
TestParams{{1, 3, 1, 1},
{0, 1, 2},
{2, 1, 1, 1},
{-1, 1},
{1, 2, 1, 1},
"VALID",
"NHWC",
{1, 1, 1, 1},
{1, 7, 1, 1},
{0, 0, -1, 1, -2, 2, 0}},
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"EXPLICIT",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3},
errors::Unimplemented("EXPLICIT padding type not "
"implemented, only VALID and SAME are"
" supported")},
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"SAME",
"NCHW",
{1, 1, 1, 2},
{1, 1, 2, 2},
{},
errors::Unimplemented("Dilation with Conv2DBackpropInput "
"(conv2d_transpose) is not supported")},
};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
params.push_back(
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3},
errors::InvalidArgument("Channel dimension must be static"),
{1, -1, 2, 2}});
params.push_back(TestParams{{2, 1, 2, 2},
{0, 1, 2, 3,
3, 2, 1, 0},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{2, 1, 2, 4},
{ 0, 0, -1, 1, -2, 2, -3, 3,
-3, 3, -2, 2, -1, 1, 0, 0},
OkStatus(),
{-1, 1, 2, 2}});
params.push_back(TestParams{
{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3},
errors::Unimplemented(
"Conv2dBackpropInput does not support input with unknown spatial "
"shape"),
{1, 1, -1, -1}});
}
for (auto p : params) {
for (int input_sizes_length : {2, 4}) {
Reset();
NodeDef node_def = get_conv2d_backprop_input_nodedef(
tf_type_, p.strides, p.padding, p.data_format, p.dilations);
switch (trt_mode_) {
case TrtTestMode::kImplicitBatch: {
AddTestTensor("input", p.input_dims, p.input);
break;
}
case TrtTestMode::kExplicitBatch: {
AddTestTensor("input", p.input_dims, p.input);
break;
}
case TrtTestMode::kDynamicShape: {
AddTestTensor("input", p.input_dims, tf_type_, p.input,
p.partial_input_dims.size() > 0 ? p.partial_input_dims
: p.input_dims);
break;
}
default: {
ASSERT_TRUE(false) << "unknown test mode";
}
}
AddTestWeights<float>("weights", p.filter_dims, p.filter, tf_type_);
if (input_sizes_length == 4) {
AddTestWeights<int>("input_sizes", {4}, p.expected_output_dims);
} else {
std::vector<int> tf_input_sizes(2);
if (p.data_format == "NHWC") {
std::copy(p.expected_output_dims.begin() + 1,
p.expected_output_dims.end() - 1, tf_input_sizes.begin());
} else {
std::copy(p.expected_output_dims.begin() + 2,
p.expected_output_dims.end(), tf_input_sizes.begin());
}
QCHECK_EQ(2, tf_input_sizes.size());
AddTestWeights<int>("input_sizes", {2}, tf_input_sizes);
}
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
OkStatus(), ElementsAreArray(p.expected_output));
}
}
}
NodeDef GetConv3DNodeDef(std::vector<int> strides = {1, 1, 1, 1, 1},
string padding = "SAME", string data_format = "NCDHW",
std::vector<int> dilations = {1, 1, 1, 1, 1},
bool is_conv3d_backprop_input = false) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto filter = ops::Placeholder(s.WithOpName("weights"), DT_FLOAT);
if (is_conv3d_backprop_input) {
auto input_sizes = ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
ops::Conv3DBackpropInputV2::Attrs attrs =
ops::Conv3DBackpropInputV2::Attrs()
.DataFormat(data_format)
.Dilations(dilations);
auto conv3d =
ops::Conv3DBackpropInputV2(s.WithOpName("my_conv3d"), input_sizes,
filter, input, strides, padding, attrs);
return conv3d.operation.node()->def();
} else {
ops::Conv3D::Attrs attrs =
ops::Conv3D::Attrs().DataFormat(data_format).Dilations(dilations);
auto conv3d = ops::Conv3D(s.WithOpName("my_conv3d"), input, filter, strides,
padding, attrs);
return conv3d.operation.node()->def();
}
}
struct Conv3DTestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> filter_dims;
std::vector<float> filter;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> dilations;
bool is_conv3d_backprop;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
bool allow_dynamic_channel_dim;
Status validation_status;
};
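// Runs a single Conv3D / Conv3DBackpropInputV2 test case. In dynamic shape
// mode the channel dimension is kept static unless the case explicitly opts
// into a dynamic channel dimension via allow_dynamic_channel_dim.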
void TestConv3D(ParameterizedOpConverterTestBase* test, Conv3DTestParams& p) {
test->Reset();
NodeDef node_def = GetConv3DNodeDef(p.strides, p.padding, p.data_format,
p.dilations, p.is_conv3d_backprop);
std::vector<int> partial_input_shape;
if (!p.allow_dynamic_channel_dim &&
test->get_trt_mode() == TrtTestMode::kDynamicShape) {
partial_input_shape.resize(p.input_dims.size(), -1);
int channel_id = (p.data_format == "NCDHW") ? 1 : 4;
partial_input_shape[channel_id] = p.input_dims[channel_id];
}
test->AddTestTensor("input", p.input_dims, test->get_tf_type(), p.input,
partial_input_shape);
test->AddTestWeights<float>("weights", p.filter_dims, p.filter);
if (p.is_conv3d_backprop) {
test->AddTestWeights<float>("input_sizes",
{static_cast<int>(p.expected_output.size())},
p.expected_output);
}
test->TestOpConverter(node_def, p.expected_output_dims,
p.validation_status,
OkStatus(),
ElementsAreArray(p.expected_output),
{test->get_tf_type()});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertConv3D) {
{
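    // Input is weights, should fail.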
Reset();
NodeDef node_def = GetConv3DNodeDef();
AddTestWeights<float>("input", {1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<float>("weights", {1, 3, 3, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for Conv3D must be a tensor");
}
{
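    // Filter is a tensor, should fail.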
Reset();
NodeDef node_def = GetConv3DNodeDef();
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestTensor("weights", {1, 3, 3, 1}, tf_type_,
CreateVectorIota<float>(9));
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"filter\" for Conv3D must be a constant");
}
{
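    // Filter is not 5D, should fail.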
Reset();
NodeDef node_def = GetConv3DNodeDef();
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Conv3D expects kernel of dimension 5");
}
{
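    // Dilations is not 5D, should fail.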
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NCDHW", {1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>(
"weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution dilations field must specify 5 dimensions");
}
{
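    // Dilation != 1 for the channel dimension (NCDHW), should fail.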
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NCDHW", {1, 2, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
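    // Dilation != 1 for the channel dimension (NDHWC), should fail.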
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NDHWC", {1, 1, 1, 1, 2});
AddTestTensor("input", {1, 2, 3, 1}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
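    // Dilation with Conv3DBackpropInputV2 (conv3d_transpose), should fail.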
Reset();
NodeDef node_def = GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NDHWC",
{1, 1, 2, 1, 1}, true);
AddTestTensor("input", {1, 2, 3, 1}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddTestWeights<int>("input_sizes", {4}, {1, 2, 3, 1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation with Conv3DBackpropInputV2 "
"(conv3d_transpose) is not supported");
}
{
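    // Asymmetric padding with Conv3DBackpropInputV2 (conv3d_transpose),
    // should fail.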
Reset();
NodeDef node_def = GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NDHWC",
{1, 1, 1, 1, 1}, true);
AddTestTensor("input", {1, 2, 2, 2}, tf_type_, CreateVectorIota<float>(8));
AddTestWeights<float>("weights", {1, 1, 2, 1, 1}, {1, 1});
AddTestWeights<int>("input_sizes", {8}, {1, 2, 3, 4, 5, 6, 7, 8});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Asymmetric padding with Conv3DBackpropInputV2 "
"(conv3d_transpose) is not supported");
}
{
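    // Strides is not 5D, should fail.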
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1, 1}, "SAME", "NCDHW", {1, 1, 1, 1, 1});
AddTestTensor("input", {1, 2, 2, 2}, tf_type_, CreateVectorIota<float>(8));
AddTestWeights<float>("weights", {1, 1, 2, 1, 1}, {1, 1});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution strides field must specify 5 dimensions");
}
{
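    // Stride != 1 for the channel dimension, should fail.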
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 2, 1, 1, 1}, "SAME", "NCDHW", {1, 1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"Stride must be 1 for batch and channel dimensions");
}
std::vector<Conv3DTestParams> ok_params = {
{{1, 1, 3, 3, 3},
{1, 2, 15, 3, 6, -3, 22, 1, 88, 56, 36, 1, 1, 105,
1, 16, -28, 1, 42, 9, 3, 1, 7, 1, 11, 61, 5},
{1, 1, 1, 1, 1},
{1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 3, 3, 3},
{1, 2, 15, 3, 6, -3, 22, 1, 88,
56, 36, 1, 1, 105, 1, 16, -28, 1,
42, 9, 3, 1, 7, 1, 11, 61, 5},
false,
OkStatus()},
{{1, 1, 3, 3, 3},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6},
{2, 1, 1, 1, 1},
{1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 3, 3},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 7},
false,
OkStatus()},
{{1, 1, 2, 3, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 1, 1, 1, 1},
{-1, 1},
{1, 1, 1, 1, 1},
"SAME",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 3, 2},
{6, 6, 6, 6, 6, 6, -6, -7, -8, -9, -10, -11},
false,
OkStatus()},
{{1, 1, 2, 3, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{3, 1, 1, 1, 1},
{-1, 0, 1},
{1, 1, 1, 1, 1},
"SAME",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 3, 2},
{6, 7, 8, 9, 10, 11, 0, -1, -2, -3, -4, -5},
false,
OkStatus()
},
{{1, 2, 3, 2, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 1, 1, 2, 1},
{-1, 1, 1, -1},
{1, 1, 1, 1, 1},
"VALID",
"NDHWC",
{1, 1, 1, 1, 1},
false,
{1, 1, 3, 2, 1},
{0, 0, 0, 0, 0, 0},
false,
OkStatus()},
{{1, 1, 3, 3, 3},
{1, 1, 1, 1, 1, 1, 1, 1, 1, -10, -10, -10, -10, -10,
-10, -10, -10, -10, 7, 7, 7, 7, 7, 7, 7, 7, 7},
{2, 1, 1, 1, 1},
{1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 2, 1, 1},
false,
{1, 1, 1, 3, 3},
{8, 8, 8, 8, 8, 8, 8, 8, 8},
false,
OkStatus()},
{{1, 1, 3, 3, 3},
{1, 0, 2, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 5, 0, 6, 0, 0, 0, 7, 0, 8},
{1, 1, 1, 1, 1},
{1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
false,
OkStatus()},
{{1, 1, 2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
{1, 1, 1, 1, 1},
{1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
true,
{1, 1, 3, 3, 3},
{1, 0, 2, 0, 0, 0, 3, 0, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 0, 6, 0, 0, 0, 7, 0, 8},
false,
OkStatus()},
};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
ok_params.reserve(ok_params.size() + 2);
const std::vector<float> common_input = CreateVectorIota<float>(3 * 3 * 3);
ok_params.push_back(Conv3DTestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, 1},
{1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{},
{},
true,
Status{absl::StatusCode::kInvalidArgument,
"Channel dimension must be static"}});
ok_params.push_back(Conv3DTestParams{
{1, 3, 3, 3, 1},
common_input,
{1, 1, 1, 1, 1},
{1},
{1, 2, 2, 2, 1},
"VALID",
"NDHWC",
{1, 1, 1, 1, 1},
false,
{},
{},
true,
Status{absl::StatusCode::kInvalidArgument,
"Channel dimension must be static"}});
}
for (auto p : ok_params) {
TestConv3D(this, p);
}
}
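// Builds a NodeDef for one of the pooling ops (MaxPool/AvgPool, 2D or 3D)
// with the given kernel size, strides, padding and data format.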
template <typename T>
NodeDef CreatePoolOp(DataType tf_type, std::vector<int> ksize,
std::vector<int> strides, string padding,
string data_format) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
typename T::Attrs attrs;
attrs.data_format_ = data_format;
return T(s.WithOpName("my_pool"), input, ksize, strides, padding, attrs)
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_Test, ConvertPool) {
auto get_pool_nodedef =
[](DataType tf_type, int nDim, std::vector<int> ksize = {},
std::vector<int> strides = {}, string padding = "SAME",
string data_format = "", const bool is_max_pooling = true) -> NodeDef {
if (ksize.empty()) {
ksize = nDim == 2 ? std::vector<int>{1, 1, 1, 1}
: std::vector<int>{1, 1, 1, 1, 1};
}
if (strides.empty()) {
strides = nDim == 2 ? std::vector<int>{1, 1, 1, 1}
: std::vector<int>{1, 1, 1, 1, 1};
}
    if (data_format.empty()) {
data_format = nDim == 2 ? "NCHW" : "NCDHW";
}
if (is_max_pooling) {
if (nDim == 3) {
return CreatePoolOp<ops::MaxPool3D>(tf_type, ksize, strides, padding,
data_format);
} else {
return CreatePoolOp<ops::MaxPool>(tf_type, ksize, strides, padding,
data_format);
}
} else {
if (nDim == 3) {
return CreatePoolOp<ops::AvgPool3D>(tf_type, ksize, strides, padding,
data_format);
} else {
return CreatePoolOp<ops::AvgPool>(tf_type, ksize, strides, padding,
data_format);
}
}
};
std::vector<int> test_nDims{2, 3};
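  // Input is weights, should fail for both the 2D and 3D pooling ops.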
for (int nDim : test_nDims) {
Reset();
NodeDef node_def = get_pool_nodedef(tf_type_, nDim);
AddTestWeights<float>("input", {1, 1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
StrCat("The input \"input\" for ", node_def.op(), " must be a tensor"));
}
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> ksize;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> expected_output_dims;
std::vector<std::vector<float>> expected_outputs;
Status status;
std::set<int> skip_dims;
};
const std::vector<float> common_input{-4, 2, 15, 3, 6, -3, 22, 1, 88,
56, 36, 1, 1, 105, 1, 16, -28, 1,
42, 9, 3, 1, 7, 1, 11, 61, 5};
const std::vector<float> common_2d_output{-4, 2, 15, 3, 6, -3, 22, 1, 88};
std::vector<TestParams> test_params = {
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1000, 1000, 1000},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds")},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, -1, 1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds"),
{2}},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, -1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds")},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, -1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds")},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input}},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
"SAME",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input}},
TestParams{{1, 1, 3, 3, 3},
common_input,
{1, 1, 3, 3, 3},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
{{88}, {14.444445}, {105}, {17}}},
TestParams{{1, 3, 3, 3, 1},
common_input,
{1, 3, 3, 3, 1},
{1, 1, 1, 1, 1},
"VALID",
"NDHWC",
{1, 1, 1, 1, 1},
{{88}, {14.444445}, {105}, {17}}},
TestParams{{1, 1, 3, 3, 3},
{1, 0, 2, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 5, 0, 6, 0, 0, 0, 7, 0, 8},
{1, 1, 1, 1, 1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 2, 2, 2},
{{1, 2, 3, 4},
{1, 2, 3, 4},
{1, 2, 3, 4, 5, 6, 7, 8},
{1, 2, 3, 4, 5, 6, 7, 8}}},
};
for (auto p : test_params) {
int test_counter = 0;
for (int nDim : test_nDims) {
if (p.skip_dims.find(nDim) != p.skip_dims.end()) {
continue;
}
auto input = p.input;
auto input_dims = p.input_dims;
auto ksize = p.ksize;
auto strides = p.strides;
auto expected_output_dims = p.expected_output_dims;
std::string data_format = p.data_format;
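      // Adapt the common 3D test data to the 2D variant by dropping the
      // depth dimension.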
if (nDim == 2) {
input.resize(9);
data_format = p.data_format == "NDHWC" ? "NHWC" : "NCHW";
input_dims.erase(input_dims.begin() + 2);
ksize.erase(ksize.begin() + 2);
strides.erase(strides.begin() + 2);
expected_output_dims.erase(expected_output_dims.begin() + 2);
}
for (bool is_max_pooling : {true, false}) {
Reset();
NodeDef node = get_pool_nodedef(tf_type_, nDim, ksize, strides,
p.padding, data_format, is_max_pooling);
AddTestTensor("input", input_dims, input);
TestOpConverter(node, expected_output_dims, p.status, OkStatus(),
ElementsAreArray(p.expected_outputs.at(test_counter)));
test_counter++;
}
}
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertTopK) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto topk = ops::TopK(s.WithOpName("my_topk"), input, weights);
const NodeDef& node_def = topk.operation.node()->def();
{
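    // K is a tensor, should fail.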
Reset();
AddTestTensor("input", {1, 1, 2, 3});
AddTestTensor("weights", {1}, DT_INT32, {});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"k\" for TopKV2 must be a constant");
}
{
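    // Ok: take the top 2 values (and their indices) along the last dimension.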
Reset();
AddTestTensor("input", {1, 1, 2, 5}, {-9, 3, 5, 1, 6, -5, 7, 1, 0, -1});
AddTestWeights<int32>("weights", {1}, {2});
std::vector<std::vector<int>> expected_output_dims{{1, 1, 2, 2},
{1, 1, 2, 2}};
TestOpConverterMultiOut(node_def, expected_output_dims, OkStatus(),
OkStatus(),
{ElementsAre(6, 5, 7, 1), ElementsAre(4, 2, 1, 2)},
{tf_type_, DT_INT32});
}
}
struct DataFormatVecPermuteTestParams {
string dst_format;
string src_format;
std::vector<int> x_shape;
std::vector<int> x;
bool x_is_tensor;
std::vector<int> expected_output;
Status conversion_status;
};
NodeDef GetDataFormatVecPermuteNodeDef(string dst_format, string src_format,
std::vector<int>& x_shape) {
Scope s = Scope::NewRootScope();
auto x = ops::Placeholder(s.WithOpName("x"), DT_INT32);
const auto attrs = ops::DataFormatVecPermute::Attrs()
.DstFormat(dst_format)
.SrcFormat(src_format);
auto dfvp = ops::DataFormatVecPermute(s.WithOpName("my_dfvp"), x, attrs);
return dfvp.operation.node()->def();
}
TEST_P(OpConverter_INT32_Test, ConvertDataFormatVecPermute) {
const auto& error = convert_not_supported_implicit(
string("DataFormatVecPermute"), string("my_dfvp"));
const Status implicit_error = Status{absl::StatusCode::kUnimplemented, error};
const auto conversion_status =
trt_mode_ == TrtTestMode::kImplicitBatch ? implicit_error : OkStatus();
std::vector<DataFormatVecPermuteTestParams> test_params = {
DataFormatVecPermuteTestParams{"NCHW",
"NHWC",
{4},
{1, 2, 3, 4},
true,
{1, 4, 2, 3},
conversion_status},
DataFormatVecPermuteTestParams{"NCHW",
"NHWC",
{4},
{1, 2, 3, 4},
false,
{1, 4, 2, 3},
conversion_status},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{4, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
true,
{1, 2, 7, 8, 3, 4, 5, 6},
conversion_status},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{4, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
false,
{1, 2, 7, 8, 3, 4, 5, 6},
conversion_status},
DataFormatVecPermuteTestParams{
"NCDHW",
"NDHWC",
{5, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
true,
{1, 2, 9, 10, 3, 4, 5, 6, 7, 8},
conversion_status},
DataFormatVecPermuteTestParams{"NCWH",
"NHWC",
{2, 2},
{1, 2, 3, 4},
true,
{3, 4, 1, 2},
conversion_status},
DataFormatVecPermuteTestParams{"NCHWD",
"NDHWC",
{3},
{1, 2, 3},
true,
{2, 3, 1},
conversion_status},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"Input must be a vector or matrix, but got rank 3, at "
"my_dfvp"}},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{3},
{1, 2, 3},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"1D input must be of size 2 or 4, but got size 3, at "
"my_dfvp"}},
DataFormatVecPermuteTestParams{
"NCDHW",
"NDHWC",
{4, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"First dimension of 2D input must be of size 3 or 5, "
"but got shape (4, 2), at my_dfvp"}},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{4, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"Second dimension of 2D input must be of size 2, but "
"got shape (4, 3), at my_dfvp"}},
};
for (auto p : test_params) {
Reset();
const NodeDef node_def =
GetDataFormatVecPermuteNodeDef(p.dst_format, p.src_format, p.x_shape);
if (p.x_is_tensor) {
AddTestTensor("x", p.x_shape, DT_INT32, p.x, p.x_shape);
} else {
AddTestWeights("x", p.x_shape, p.x, DT_INT32);
}
TestOpConverter(node_def, p.x_shape, p.conversion_status, OkStatus(),
ElementsAreArray(p.expected_output));
}
}
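// Builds a GatherV2 NodeDef with the given batch_dims attribute.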
NodeDef CreateGatherOp(DataType tf_type, int batch_dims) {
Scope s = Scope::NewRootScope();
auto params = ops::Placeholder(s.WithOpName("params"), tf_type);
auto indices = ops::Placeholder(s.WithOpName("indices"), DT_INT32);
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
ops::GatherV2::Attrs op_attrs;
op_attrs.batch_dims_ = batch_dims;
auto gather =
ops::GatherV2(s.WithOpName("my_gather"), params, indices, axis, op_attrs);
const NodeDef& node_def = gather.operation.node()->def();
return node_def;
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertGather) {
auto node_def = CreateGatherOp(tf_type_, 0);
{
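    // Axis is a tensor, should fail.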
Reset();
AddTestTensor("params", {1, 1, 2, 3}, tf_type_, {});
AddTestTensor("indices", {1, 2}, DT_INT32, {});
AddTestTensor("axis", {1}, DT_INT32, {});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for GatherV2 must be a constant");
}
{
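    // Axis is out of bounds, should fail.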
Reset();
AddTestTensor("params", {1, 1, 2, 3});
AddTestTensor("indices", {1, 2}, DT_INT32, {});
AddTestWeights<int32>("axis", {1}, {4});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
struct TestParams {
std::vector<int> params_shape;
std::vector<int> indices_shape;
std::vector<int> indices;
int axis;
int batch_dims;
std::vector<int> expected_output_shape;
std::vector<int> expected_output;
bool params_is_tensor;
bool indices_is_tensor;
Status conversion_status;
Status runtime_status;
Status add_index_status;
};
const std::vector<int> params_input = {1, 2, 3, 4, 5, 6};
std::vector<TestParams> test_params = {
TestParams{{2, 1, 1, 3},
{2},
{1, 0},
0,
0,
{2, 1, 1, 3},
{4, 5, 6, 1, 2, 3},
true,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow "
"manipulation of the batch dimension"}
: OkStatus()},
TestParams{{2, 1, 3},
{2, 1},
{2, 0},
2,
0,
{2, 1, 2, 1},
{3, 1, 6, 4},
true,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"Params and indices must have a"
" batch size of 1 when params and indices are "
"both tensors or both"
" constants."}
: OkStatus()},
TestParams{{2, 1, 3},
{2, 1},
{2, 0},
2,
0,
{2, 1, 2, 1},
{3, 1, 6, 4},
true,
false,
OkStatus()},
TestParams{{2, 1, 3},
{2},
{1, 2},
2,
0,
{2, 1, 2},
{2, 3, 5, 6},
false,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"The input axis must be zero when "
"params is a weight."}
: OkStatus()},
TestParams{
{6},
{2},
{1, 3},
0,
0,
{2},
{2, 4},
true,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow "
"manipulation of the batch dimension"}
: OkStatus(),
OkStatus(),
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kInvalidArgument,
batch_size_error("indices",
"Provided batch size does not match "
"converter batch size: 2 vs 6")}
: OkStatus()},
TestParams{
{1, 1, 2, 3},
{1},
{0},
3,
0,
{1, 1, 2, 1},
{1, 4},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1},
{1},
2,
0,
{1, 1, 1, 3},
{4, 5, 6},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{0},
3,
0,
{1, 1, 2, 1, 1},
{1, 4},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{1},
3,
0,
{1, 1, 2, 1, 1},
{2, 5},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{2},
-1,
0,
{1, 1, 2, 1, 1},
{3, 6},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 3},
{2, 0, 1},
3,
0,
{1, 1, 2, 1, 3},
{3, 1, 2, 6, 4, 5},
true,
true,
},
TestParams{
{1, 3, 2},
{1, 2, 2},
{0, 0, 1, 0},
2,
0,
{1, 3, 1, 2, 2},
{1, 1, 2, 1, 3, 3, 4, 3, 5, 5, 6, 5},
true,
true,
},
TestParams{
{1, 2, 3},
{1},
{0},
0,
0,
{1, 2, 3},
{1, 2, 3, 4, 5, 6},
false,
true,
},
TestParams{
{3, 2},
{1, 2},
{0, 1},
0,
0,
{1, 2, 2},
{1, 2, 3, 4},
false,
true,
},
TestParams{
{2, 3},
{1, 1, 2},
{0, 1},
0,
0,
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6},
false,
true,
},
TestParams{
{3, 2},
{2, 2},
{0, 2, 1, 0},
0,
0,
{2, 2, 2},
{1, 2, 5, 6, 3, 4, 1, 2},
false,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{0},
3,
0,
{1, 1, 2, 1, 1},
{1, 4},
true,
false,
},
TestParams{{1, 2, 3},
{1},
{0},
0,
0,
{1, 2, 3},
{1, 2, 3, 4, 5, 6},
false,
false,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"Params and indices must have a"
" batch size of 1 when params and indices are "
"both tensors or both"
" constants."}
: OkStatus()},
TestParams{{3, 2},
{2, 2},
{0, 2, 1, 0},
0,
0,
{2, 2, 2},
{1, 2, 5, 6, 3, 4, 1, 2},
false,
false,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"Params and indices must have a"
" batch size of 1 when params and indices are "
"both tensors or both"
" constants."}
: OkStatus()},
TestParams{
{2, 3},
{2, 2},
{0, 1, 1, 2},
1,
1,
{2, 2},
{1, 2, 5, 6},
false,
false,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"The input axis must be zero when params is a weight."}
: OkStatus()},
};
for (auto p : test_params) {
Reset();
auto node_def = CreateGatherOp(tf_type_, p.batch_dims);
if (p.params_is_tensor) {
AddTestTensor("params", p.params_shape, params_input);
} else {
AddTestWeights("params", p.params_shape, params_input, tf_type_);
}
if (p.indices_is_tensor) {
AddTestTensor("indices", p.indices_shape, DT_INT32, p.indices, {},
p.add_index_status);
} else {
std::vector<int> indices_shape(p.indices_shape);
AddTestWeights("indices", indices_shape, p.indices, DT_INT32);
}
AddTestWeights<int32>("axis", {1}, {p.axis});
TestOpConverter(node_def, p.expected_output_shape, p.conversion_status,
p.runtime_status, ElementsAreArray(p.expected_output));
}
}
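// Builds a NodeDef for a reduce op (Sum/Prod/Mean/Min/Max) with the given
// keep_dims attribute.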
template <typename OpType>
NodeDef CreateReduceOp(DataType tf_type, bool keep_dims) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
typename OpType::Attrs op_attrs;
op_attrs.keep_dims_ = keep_dims;
auto op = OpType(s.WithOpName("my_reduce"), input, axis, op_attrs);
return op.operation.node()->def();
}
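// Reference implementation for the reduce ops: folds every contiguous group
// of `m` elements of `input` with `op` (seeded with `init`); Mean
// additionally divides by the group size.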
std::vector<float> CalcReduce(string op_name, std::vector<float> input, int m,
float (*op)(float, float), float init) {
std::vector<float> output(input.size() / m);
for (int i = 0; i < output.size(); i++) {
auto begin = input.begin() + i * m;
auto end = input.begin() + (i + 1) * m;
output[i] = std::accumulate(begin, end, init, op);
if (op_name == "Mean") {
output[i] /= m;
}
}
return output;
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertReduce) {
{
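    // Input is weights, should fail.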
Reset();
const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type_, false);
AddTestWeights<float>("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
AddTestWeights<int32>("axis", {1}, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for Sum must be a tensor");
}
{
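    // Axis is a tensor, should fail.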
Reset();
const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type_, false);
AddTestTensor("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
AddTestTensor("axis", {1}, DT_INT32, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for Sum must be a constant");
}
using OpFunc = std::function<NodeDef(DataType, bool)>;
using ValFunc = float (*)(float, float);
struct ReduceTestDescriptor {
string name;
OpFunc get_node;
ValFunc val_func;
float init_val;
};
std::vector<ReduceTestDescriptor> op_test_info{
{"Sum", CreateReduceOp<ops::Sum>, [](float x, float y) { return x + y; },
0},
{"Prod", CreateReduceOp<ops::Prod>,
[](float x, float y) { return x * y; }, 1},
{"Mean", CreateReduceOp<ops::Mean>,
[](float x, float y) { return x + y; }, 0},
{"Min", CreateReduceOp<ops::Min>,
[](float x, float y) { return y < x ? y : x; }, 1000},
{"Max", CreateReduceOp<ops::Max>,
[](float x, float y) { return x < y ? y : x; }, -1000}};
std::vector<float> input_values{1, 2, 3, 4, 5, 6};
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input_values;
std::vector<float> helper_array;
std::vector<int> axis;
int stride;
Status conversion_status;
};
std::vector<TestParams> params{
TestParams{{2, 3, 1}, input_values, input_values, {3}, 3},
TestParams{{2, 3, 1}, input_values, input_values, {-4}, 3},
TestParams{{2, 3, 1}, input_values, {1, 4, 2, 5, 3, 6}, {0}, 2},
TestParams{{2, 3, 1}, input_values, input_values, {1}, 3},
TestParams{{2, 3, 1}, input_values, input_values, {2}, 1},
TestParams{{2, 3, 1}, input_values, input_values, {0, 1}, 6},
TestParams{{2, 3, 1}, input_values, {1, 4, 2, 5, 3, 6}, {-3}, 2},
TestParams{{2, 3, 1}, input_values, input_values, {-2}, 3},
TestParams{{2, 3, 1}, input_values, input_values, {-1}, 1},
TestParams{{2, 3, 1}, input_values, input_values, {-3, 1}, 6},
};
for (bool keep_dims : {false, true}) {
for (auto& op : op_test_info) {
VLOG(2) << "Processing " << op.name << " with keep_dims=" << keep_dims;
for (auto p : params) {
SCOPED_TRACE(StrCat(op.name, keep_dims ? " & keep_dims" : ""));
Reset();
NodeDef node_def = op.get_node(tf_type_, keep_dims);
AddTestTensor("input", p.input_dims, p.input_values);
AddTestWeights<int32>("axis", {static_cast<int>(p.axis.size())},
p.axis);
std::vector<int> expected_output_dims(p.input_dims);
for (int ax : p.axis) {
int rank = p.input_dims.size();
if (ax >= rank || ax < -rank) {
p.conversion_status =
errors::InvalidArgument("Axis value of ", ax,
" is out of bounds, must be in "
"range [",
-rank, ", ", rank, ")");
} else {
int ax_positive = ax >= 0 ? ax : ax + rank;
expected_output_dims[ax_positive] = keep_dims ? 1 : 0;
if (trt_mode_ == TrtTestMode::kImplicitBatch &&
(ax == 0 || ax == -rank)) {
p.conversion_status = errors::Unimplemented(
"TensorRT does not allow manipulation of the batch "
"dimension");
}
}
}
expected_output_dims.erase(std::remove(expected_output_dims.begin(),
expected_output_dims.end(), 0),
expected_output_dims.end());
VLOG(2) << "out dims "
<< absl::StrCat("[", absl::StrJoin(expected_output_dims, ","),
"]");
std::vector<float> expected_values = CalcReduce(
op.name, p.helper_array, p.stride, op.val_func, op.init_val);
if (tf_type_ == DT_INT32) {
std::for_each(expected_values.begin(), expected_values.end(),
[](float& _n) { _n = std::floor(_n); });
}
TestOpConverter(node_def, expected_output_dims, p.conversion_status,
OkStatus(), ArrayFloatNear(expected_values));
}
}
}
}
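// Note: the tf_type parameter is ignored here so that CreateCastOp matches
// the OpFunc signature used by op_map below; the test always exercises a
// DT_HALF -> DT_FLOAT cast.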
NodeDef CreateCastOp(DataType tf_type) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_HALF);
return ops::Cast(s.WithOpName("my_unary"), input, DT_FLOAT)
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_UnaryTest, ConvertUnary) {
using OpFunc = std::function<NodeDef(DataType)>;
using ValFunc = float (*)(float);
std::map<std::string, std::pair<OpFunc, ValFunc>> op_map;
#define ADD_OP(name, op, compute) \
op_map[name] = \
std::make_pair(CreateUnaryOp<op>, static_cast<ValFunc>(compute))
ADD_OP("Abs", ops::Abs, std::abs);
ADD_OP("Acos", ops::Acos, std::acos);
ADD_OP("Acosh", ops::Acosh, std::acosh);
ADD_OP("Asin", ops::Asin, std::asin);
ADD_OP("Asinh", ops::Asinh, std::asinh);
ADD_OP("Atan", ops::Atan, std::atan);
ADD_OP("Atanh", ops::Atanh, std::atanh);
op_map["Cast"] = std::make_pair(CreateCastOp, [](float x) { return x; });
ADD_OP("Ceil", ops::Ceil, std::ceil);
ADD_OP("Cos", ops::Cos, std::cos);
ADD_OP("Cosh", ops::Cosh, std::cosh);
ADD_OP("Exp", ops::Exp, std::exp);
ADD_OP("Erf", ops::Erf, std::erf);
ADD_OP("Floor", ops::Floor, std::floor);
ADD_OP("Log", ops::Log, std::log);
ADD_OP("Neg", ops::Neg, [](float x) { return -x; });
ADD_OP("Reciprocal", ops::Reciprocal, [](float x) { return 1.0f / x; });
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_OP("Round", ops::Round, [](float x) { return (float)std::round(x); });
ADD_OP("Sign", ops::Sign,
[](float x) { return x > 0 ? 1.0f : (x < 0 ? -1.0f : 0.0f); });
#endif
ADD_OP("Rsqrt", ops::Rsqrt, [](float x) { return 1.0f / std::sqrt(x); });
ADD_OP("Sin", ops::Sin, std::sin);
ADD_OP("Sinh", ops::Sinh, std::sinh);
ADD_OP("Sqrt", ops::Sqrt, std::sqrt);
ADD_OP("Tan", ops::Tan, std::tan);
#undef ADD_OP
std::vector<float> input_values{-0.9f, 0.6f, 0.0f, -3.5f, 100.0f, 2.9f};
RunTests("Unary", *UnaryOperationMap(), op_map, input_values, "x");
}
TEST_P(OpConverter_BOOL_Test, ConvertBoolean) {
std::vector<int> input_values{1, 0, 1, 0, 0, 1};
using OpFunc = std::function<NodeDef(DataType)>;
using ValFunc = int (*)(int);
std::map<std::string, std::pair<OpFunc, ValFunc>> op_map;
#define ADD_OP(name, op, compute) \
op_map[name] = \
std::make_pair(CreateUnaryOp<op>, static_cast<ValFunc>(compute))
ADD_OP("LogicalNot", ops::LogicalNot, [](int x) { return 1 - x; });
#undef ADD_OP
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
RunTests("LogicalUnary", *UnaryBooleanOperationMap(), op_map, input_values,
"x");
#endif
}
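// Builds a ConcatV2 NodeDef with num_inputs placeholders named
// values_0..values_{num_inputs-1} plus a trailing int32 "axis" input
// (ops::Concat emits a ConcatV2 node, hence the ConcatV2 error strings
// checked below).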
auto get_concat_nodedef = [](DataType dtype, int num_inputs) -> NodeDef {
Scope s = Scope::NewRootScope();
std::vector<Input> values;
values.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
const string input_name = StrCat("values_", i);
values.push_back(ops::Placeholder(s.WithOpName(input_name), dtype));
}
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
auto concat = ops::Concat(s.WithOpName("my_concat"),
absl::Span<const Input>(values), axis);
return concat.operation.node()->def();
};
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertConcat) {
{
Reset();
NodeDef node_def = get_concat_nodedef(tf_type_, 2);
AddTestTensor("values_0", {1, 1, 2, 3});
AddTestTensor("values_1", {1, 1, 2, 3});
AddTestTensor("axis", {1});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for ConcatV2 must be a constant");
}
{
Reset();
NodeDef node_def = get_concat_nodedef(tf_type_, 2);
AddTestTensor("values_0", {1, 1, 2, 3});
AddTestTensor("values_1", {1, 1, 2, 3});
AddTestWeights<int32>("axis", {1}, {4});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_concat_nodedef(tf_type_, 2);
AddTestTensor("values_0", {1, 1, 2, 3});
AddTestTensor("values_1", {1, 1, 6});
AddTestWeights<int32>("axis", {1}, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Received inputs with inconsistent rank");
}
struct TestParams {
std::vector<std::vector<int>> input_shapes;
std::vector<std::vector<int>> input_values;
std::vector<bool> inputs_are_tensors;
int axis;
std::vector<int> expected_output_dims;
std::vector<int> expected_output;
Status conversion_status;
Status run_status;
};
const std::vector<std::vector<int>> common_input{CreateVectorIota<int>(6),
CreateVectorIota<int>(6, 6)};
std::vector<TestParams> params = {
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
1,
{1, 2, 2, 3},
CreateVectorIota<int>(12),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
2,
{1, 1, 4, 3},
CreateVectorIota<int>(12),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
3,
{1, 1, 2, 6},
{0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11},
},
{
{{1, 1}, {1, 2}, {1, 3}, {1, 1}, {1, 1}, {1, 2}},
{{1}, {2, 3}, {4, 5, 6}, {7}, {8}, {9, 10}},
{true, true, true, true, true, true},
1,
{1, 10},
CreateVectorIota<int>(10, 1),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, false},
1,
{1, 2, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"The input \"values_1\" for ConcatV2 must be a tensor")
: OkStatus(),
OkStatus(),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{false, false},
1,
{1, 2, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"The input \"values_0\" for ConcatV2 must be a tensor")
: OkStatus(),
OkStatus(),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
0,
{2, 1, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"TensorRT does not allow manipulation of the "
"batch dimension")
: OkStatus(),
},
{
{{1, 1, 2, 3}, {1, 1, 3, 2}},
common_input,
{true, true},
1,
{2, 1, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ != TrtTestMode::kDynamicShape
? errors::InvalidArgument(
"Received inputs with inconsistent shape")
: OkStatus(),
errors::InvalidArgument(""),
}};
for (auto p : params) {
Reset();
const int num_inputs = p.input_shapes.size();
EXPECT_EQ(num_inputs, p.input_values.size());
NodeDef node_def = get_concat_nodedef(tf_type_, num_inputs);
for (int j = 0; j < num_inputs; ++j) {
string name = StrCat("values_", j);
if (!p.inputs_are_tensors[j]) {
AddTestWeights(name, p.input_shapes[j], p.input_values[j], tf_type_);
} else {
AddTestTensor(name, p.input_shapes[j], p.input_values[j]);
}
}
AddTestWeights<int32>("axis", {1}, {p.axis});
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
p.run_status, ElementsAreArray(p.expected_output));
}
}
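// Builds a Split NodeDef; unlike ConcatV2, TF's Split takes the axis as its
// first input, followed by the value to split into num_split equal pieces.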
auto get_split_nodedef = [](DataType dtype, int num_split) -> NodeDef {
Scope s = Scope::NewRootScope();
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
auto value = ops::Placeholder(s.WithOpName("value"), dtype);
auto split = ops::Split(s.WithOpName("my_split"), axis, value, num_split);
return split.operation.node()->def();
};
template <DataType dtype>
void TestConvertSplit(OpConverterTest* test) {
typedef typename EnumToDataType<dtype>::Type CType;
struct TestParams {
std::vector<int> input_shape;
std::vector<CType> value;
int axis;
int num_split;
std::vector<int> expected_output_dims;
std::vector<std::vector<CType>> expected_outputs;
};
const std::vector<CType> common_input = CreateVectorIota<CType>(6);
std::vector<TestParams> ok_params = {
      {{1, 2, 3},
       common_input,
       1,
       1,
       {1, 2, 3},
       {CreateVectorIota<CType>(6)}},
{{1, 2, 3},
common_input,
3,
3,
{1, 2, 1},
{{CType(0), CType(3)}, {CType(1), CType(4)}, {CType(2), CType(5)}}},
{{1, 6},
common_input,
2,
6,
{1, 1},
{{CType(0)},
{CType(1)},
{CType(2)},
{CType(3)},
{CType(4)},
{CType(5)}}},
{{1, 6},
common_input,
-1,
2,
{1, 3},
{CreateVectorIota<CType>(3), CreateVectorIota<CType>(3, CType(3))}},
};
for (int i = 0; i < ok_params.size(); ++i) {
test->Reset();
NodeDef node_def = get_split_nodedef(dtype, ok_params[i].num_split);
test->AddTestWeights<int32>("axis", {1}, {ok_params[i].axis});
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(dtype, &trt_type));
test->AddTestTensor("value", ok_params[i].input_shape, 1, trt_type);
test->RunValidationAndConversion(node_def);
EXPECT_EQ(ok_params[i].expected_outputs.size(), ok_params[i].num_split);
std::vector<TRT_TensorOrWeights> outputs(ok_params[i].num_split);
DataVec output_data;
for (int j = 0; j < outputs.size(); ++j) {
      const string name = j == 0 ? "my_split" : StrCat("my_split:", j);
TF_EXPECT_OK(test->GetTensorOrWeights(name, &outputs[j]));
EXPECT_TRUE(outputs[j].is_tensor());
EXPECT_THAT(outputs[j].tensor()->getDimensions(),
DimsAreArray(ok_params[i].expected_output_dims));
output_data.push_back(
{name, test->ConstructTensor<CType>(
ok_params[i].expected_outputs[j].size())});
}
const DataVec input_data{
{"value", test->AsTensor<CType>(ok_params[i].value)}};
TF_EXPECT_OK(test->BuildAndRun(input_data, &output_data));
for (int j = 0; j < outputs.size(); ++j) {
EXPECT_THAT(GetSpanForData<CType>(output_data[j]),
ElementsAreArray(ok_params[i].expected_outputs[j]));
}
}
}
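// Error cases first (non-constant axis, out-of-range axis, splitting the
// batch dimension, weight input, indivisible dimension), then
// TestConvertSplit above is instantiated for float, half, and int32.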
TEST_F(OpConverterTest, ConvertSplit) {
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestTensor("axis", {1});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for Split must be a constant");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {4});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {-5});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {0});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {1});
AddTestWeights<float>("value", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"value\" for Split must be a tensor");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 2);
AddTestWeights<int32>("axis", {1}, {3});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Dimension 3 of size 3 is not evenly divisible by 2");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 4);
AddTestWeights<int32>("axis", {1}, {3});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Dimension 3 of size 3 is not evenly divisible by 4");
}
TestConvertSplit<DT_FLOAT>(this);
TestConvertSplit<DT_HALF>(this);
TestConvertSplit<DT_INT32>(this);
}
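// "Unpack" is the op name emitted by ops::Unstack: it splits `value` into
// `num` outputs along `axis`, so `num` must equal the size of that axis.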
auto get_unpack_nodedef = [](DataType dtype, int num, int axis) -> NodeDef {
Scope s = Scope::NewRootScope();
auto value = ops::Placeholder(s.WithOpName("value"), dtype);
auto unstack_attrs = ops::Unstack::Axis(axis);
auto unstack =
ops::Unstack(s.WithOpName("my_unpack"), value, num, unstack_attrs);
return unstack.operation.node()->def();
};
struct UnpackTestParams {
std::vector<int> input_shape;
std::vector<float> input_value;
int axis;
int num;
std::vector<int> expected_output_dims;
std::vector<std::vector<float>> expected_outputs;
Status run_status;
};
void TestConvertUnpack(ParameterizedOpConverterTestBase* test,
UnpackTestParams& p) {
test->Reset();
NodeDef node_def = get_unpack_nodedef(test->get_tf_type(), p.num, p.axis);
test->AddTestTensor("value", p.input_shape, test->get_tf_type(),
p.input_value);
std::vector<Matcher<std::vector<float>>> matcher_vec;
std::vector<DataType> datatype_vec;
std::vector<std::vector<int>> expected_output_dims;
for (int j = 0; j < p.expected_outputs.size(); ++j) {
matcher_vec.push_back(ElementsAreArray(p.expected_outputs[j]));
datatype_vec.push_back(test->get_tf_type());
expected_output_dims.push_back(p.expected_output_dims);
}
test->TestOpConverterMultiOut(node_def,
expected_output_dims,
p.run_status,
p.run_status,
matcher_vec,
datatype_vec);
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertUnpack) {
if (trt_mode_ != TrtTestMode::kDynamicShape) {
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 3, 3);
AddTestWeights<float>("value", {1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"value\" for Unpack must be a tensor");
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, 4);
AddTestTensor("value", {1, 1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, -5);
AddTestTensor("value", {1, 1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be "
"in range [-4, 4)");
}
{
if (trt_mode_ != TrtTestMode::kExplicitBatch) {
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, 0);
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of "
"the batch dimension");
}
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 5, 2);
AddTestTensor("value", {1, 1, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Dimension 2 has size 6 which is not equal to num of 5");
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, 0);
AddTestTensor(
"value", {}, tf_type_, {}, {},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::InvalidArgument(
"removing first dim requires explicit batch dimension")
: OkStatus());
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
RunValidationAndConversion(
node_def, absl::StatusCode::kInternal,
"Failed to convert at least one input to a TRT_TensorOrWeights: "
"Scalar input tensor is not supported since the first dimension is "
"treated as batch dimension by TRT");
} else {
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Input \"value\" for Unpack must be rank 2 "
"or greater");
}
}
}
const std::vector<float> common_input = CreateVectorIota<float>(6);
Status run_status =
trt_mode_ == TrtTestMode::kDynamicShape
? errors::InvalidArgument(
"The argument `strided_slice_spec` is "
"`std::nullopt` with `dynamic_input_size_indices` non empty.")
: OkStatus();
std::vector<UnpackTestParams> params = {
{{1, 1, 2, 1, 3, 1},
common_input,
4,
3,
{1, 1, 2, 1, 1},
{{0, 3}, {1, 4}, {2, 5}},
run_status},
{{1, 1, 2, 1, 3},
common_input,
4,
3,
{1, 1, 2, 1},
{{0, 3}, {1, 4}, {2, 5}},
run_status},
{{1, 1, 2, 3},
common_input,
1,
1,
{1, 2, 3},
{CreateVectorIota<float>(6)},
run_status},
{{1, 6, 1},
common_input,
-2,
6,
{1, 1},
{{0}, {1}, {2}, {3}, {4}, {5}},
run_status},
{{1, 6},
common_input,
1,
6,
{1},
{{0}, {1}, {2}, {3}, {4}, {5}},
run_status},
};
for (auto p : params) {
TestConvertUnpack(this, p);
}
}
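// "Pack" is the op name emitted by ops::Stack: it stacks the inputs along a
// new `axis` dimension, passed through the op attributes.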
NodeDef GetPackNodeDef(DataType dtype, int num_inputs, int axis) {
Scope s = Scope::NewRootScope();
std::vector<Input> values;
values.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
const string input_name = StrCat("values_", i);
values.push_back(ops::Placeholder(s.WithOpName(input_name), dtype));
}
auto pack =
ops::Stack(s.WithOpName("my_pack"), absl::Span<const Input>(values),
ops::Stack::Axis(axis));
return pack.operation.node()->def();
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertPack) {
struct TestParams {
std::vector<std::vector<int>> input_shapes;
std::vector<std::vector<int>> partial_input_shapes;
std::vector<std::vector<float>> input_values;
int axis;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status conversion_status;
Status runtime_status;
bool input_1_is_weight;
};
const std::vector<std::vector<float>> common_input{
CreateVectorIota<float>(6),
CreateVectorIota<float>(6, 6)};
std::vector<TestParams> params = {
{{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
1,
{1, 2, 2, 3},
CreateVectorIota<float>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"The input \"values_1\" for Pack must be a tensor"}
: OkStatus(),
OkStatus(),
true},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
-5,
{},
{},
Status{absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be in"
" range [-4, 4)"},
},
{{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
-4,
{2, 1, 2, 3},
CreateVectorIota<float>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the batch "
"dimension"}
: OkStatus()},
{
{{1, 2, 3}, {1, 6}},
{{}, {}},
common_input,
1,
{},
{},
Status{absl::StatusCode::kInvalidArgument,
"Received inputs with inconsistent rank"},
},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
1,
{1, 2, 2, 3},
CreateVectorIota<float>(12),
},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
2,
{1, 2, 2, 3},
{0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11},
},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
3,
{1, 2, 3, 2},
{0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11},
},
{
{{1, 2, 3}},
{{}},
{CreateVectorIota<float>(6)},
1,
{1, 1, 2, 3},
CreateVectorIota<float>(6),
},
{
{{1, 2, 3}},
{{}},
{CreateVectorIota<float>(6)},
2,
{1, 2, 1, 3},
CreateVectorIota<float>(6),
},
};
if (trt_mode_ != TrtTestMode::kDynamicShape) {
params.push_back(
TestParams{{{1, 2, 3}, {1, 3, 2}},
{{}, {}},
common_input,
1,
{},
CreateVectorIota<float>(12),
Status{absl::StatusCode::kInvalidArgument,
"Received inputs with inconsistent shape"}});
  }
if (trt_mode_ == TrtTestMode::kDynamicShape) {
params.push_back(
TestParams{{{1, 2, 3}, {1, 2, 3}},
{{-1, -1, -1}, {1, 2, 3}},
common_input,
2,
{1, 2, 2, 3},
{0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11}});
}
for (auto p : params) {
Reset();
const int num_inputs = p.input_shapes.size();
EXPECT_EQ(num_inputs, p.input_values.size());
NodeDef node_def = GetPackNodeDef(tf_type_, num_inputs, p.axis);
for (int j = 0; j < num_inputs; ++j) {
if (j == 1 && p.input_1_is_weight) {
AddTestWeights(StrCat("values_", j), p.input_shapes[j],
p.input_values[j], tf_type_);
} else {
AddTestTensor(StrCat("values_", j), p.input_shapes[j], tf_type_,
p.input_values[j], p.partial_input_shapes[j]);
}
}
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
p.runtime_status, ElementsAreArray(p.expected_output));
}
}
template <typename OpType>
NodeDef GetArgMinMaxNodeDef(DataType input_dtype, DataType output_dtype) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), input_dtype);
auto dimension = ops::Placeholder(s.WithOpName("dimension"), DT_INT32);
auto attrs = OpType::OutputType(output_dtype);
auto arg = OpType(s.WithOpName("my_arg"), input, dimension, attrs);
return arg.operation.node()->def();
}
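// ArgMin/ArgMax conversion only supports DT_INT32 output indices (DT_INT64
// is rejected below), and in implicit-batch mode the reduced axis may not
// be the batch dimension.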
struct ArgMinMaxTestParams {
std::vector<int> input_shape;
std::vector<float> input_value;
int axis;
std::vector<int> expected_output_dims;
std::vector<int> expected_argmax_output;
std::vector<int> expected_argmin_output;
Status status;
};
template <typename OpType>
void TestConvertArgMinMax(ParameterizedOpConverterTestBase* test,
DataType _tf_type, ArgMinMaxTestParams& p) {
test->Reset();
NodeDef node_def = GetArgMinMaxNodeDef<OpType>(_tf_type,
DT_INT32);
std::vector<int> expected_out;
if (node_def.op() == "ArgMax") {
expected_out = p.expected_argmax_output;
} else if (node_def.op() == "ArgMin") {
expected_out = p.expected_argmin_output;
} else {
ASSERT_TRUE(false);
}
test->AddTestTensor("input", p.input_shape, _tf_type, p.input_value);
test->AddTestWeights("dimension", {1}, {p.axis}, DT_INT32);
test->TestOpConverter(node_def, p.expected_output_dims,
p.status,
OkStatus(),
ElementsAreArray(expected_out), {DT_INT32});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertArgMinMax) {
{
Reset();
NodeDef node_def =
GetArgMinMaxNodeDef<ops::ArgMax>(tf_type_,
DT_INT32);
AddTestTensor("input", {1, 2, 3});
AddTestTensor("dimension", {1});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"dimension\" for ArgMax must be a constant");
}
{
Reset();
NodeDef node_def =
GetArgMinMaxNodeDef<ops::ArgMax>(tf_type_,
DT_INT64);
AddTestTensor("input", {1, 2, 3});
AddTestWeights("dimension", {1}, {3}, DT_INT32);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Output type int64 is not supported");
}
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<ArgMinMaxTestParams> params = {
{{2, 3},
common_input,
0,
{3},
{1, 1, 1},
{0, 0, 0},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented("TensorRT does not allow manipulation of "
"the batch dimension")
: OkStatus()},
{
{1, 6},
common_input,
1,
{1},
{5},
{0},
},
{
{1, 10},
{-5.0f, 3.0f, 5.0f, 1.0f, 6.0f, -9.0f, 7.0f, 1.0f, 0.0f, -1.0f},
-1,
{1},
{6},
{5},
},
{
{1, 2, 3},
common_input,
2,
{1, 2},
{2, 2},
{0, 0},
},
{
{1, 2, 3},
common_input,
-2,
{1, 3},
{1, 1, 1},
{0, 0, 0},
},
{
{1, 2, 1, 3},
common_input,
3,
{1, 2, 1},
{2, 2},
{0, 0},
},
{
{1, 2, 1, 3},
common_input,
-3,
{1, 1, 3},
{1, 1, 1},
{0, 0, 0},
},
{{1, 2, 1, 1, 3},
common_input,
4,
{1, 2, 1, 1},
{2, 2},
{0, 0},
#if !IS_TRT_VERSION_GE(7, 0, 0, 11)
errors::Unimplemented("op is not able to support tensors with 4+"
" dimensions (excluding batch size)")
#else
OkStatus()
#endif
},
{{1, 2, 1, 1, 3},
common_input,
-4,
{1, 1, 1, 3},
{1, 1, 1},
{0, 0, 0},
#if !IS_TRT_VERSION_GE(7, 0, 0, 11)
errors::Unimplemented("op is not able to support tensors with 4+"
" dimensions (excluding batch size)")
#else
OkStatus()
#endif
},
};
for (auto p : params) {
TestConvertArgMinMax<ops::ArgMin>(this, tf_type_, p);
TestConvertArgMinMax<ops::ArgMax>(this, tf_type_, p);
}
}
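// Builds DepthToSpace/SpaceToDepth NodeDefs. E.g. DepthToSpace with
// block_size=2 on an NCHW input of shape {1, 4, 2, 2} rearranges the four
// channels into a {1, 1, 4, 4} output, as in the first test params below.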
template <typename OpType>
NodeDef GetDepthSpaceShuffleNodeDef(DataType dtype, int block_size,
string data_format) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto attrs = OpType::DataFormat(data_format);
auto shuffle = OpType(s.WithOpName("my_shuffle"), input, block_size, attrs);
return shuffle.operation.node()->def();
}
struct DepthSpaceShuffleTestParams {
std::vector<int> input_dims;
std::vector<int> input_value;
int block_size;
string data_format;
std::vector<int> expected_output_dims;
std::vector<int> expected_output;
};
template <typename OpType>
void TestConvertDepthSpaceShuffle(
ParameterizedOpConverterTestBase* test,
const std::vector<DepthSpaceShuffleTestParams>& params) {
Status status = OkStatus();
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 2, "NCHW");
test->AddTestWeights<float>("input", {1, 4, 1, 1}, {1, 2, 3, 4});
test->RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
StrCat("The input \"input\" for ", node_def.op(), " must be a tensor"));
}
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 2, "NCHW");
test->AddTestTensor("input", {1, 16, 32});
test->RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
StrCat("The input to ", node_def.op(), " must be rank 4"));
}
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 2, "NCHW_VECT_C");
test->AddTestTensor("input", {1, 16, 32, 32});
test->RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"Data format NCHW_VECT_C is not supported");
}
if (test->get_trt_mode() != TrtTestMode::kDynamicShape) {
if (std::is_same<OpType, ops::DepthToSpace>::value) {
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 3, "NCHW");
test->AddTestTensor("input", {1, 16, 32, 32});
test->RunValidationAndConversion(node_def,
absl::StatusCode::kInvalidArgument,
"Number of channels must be divisible by"
" block_size*block_size");
} else {
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::SpaceToDepth>(
test->get_tf_type(), 3, "NCHW");
test->AddTestTensor("input", {1, 16, 9, 32});
test->RunValidationAndConversion(node_def,
absl::StatusCode::kInvalidArgument,
"Width and height must be divisible by"
" block_size");
}
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::SpaceToDepth>(
test->get_tf_type(), 3, "NCHW");
test->AddTestTensor("input", {1, 16, 32, 9});
test->RunValidationAndConversion(node_def,
absl::StatusCode::kInvalidArgument,
"Width and height must be divisible by"
" block_size");
}
}
}
for (auto p : params) {
test->Reset();
const NodeDef node = GetDepthSpaceShuffleNodeDef<OpType>(
test->get_tf_type(), p.block_size, p.data_format);
test->AddTestTensor("input", p.input_dims, p.input_value);
test->TestOpConverter(node, p.expected_output_dims, status, OkStatus(),
ElementsAreArray(p.expected_output));
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertDepthToSpace) {
const std::vector<int> common_input = CreateVectorIota<int>(16);
std::vector<DepthSpaceShuffleTestParams> params = {
{
{1, 4, 2, 2},
common_input,
2,
"NCHW",
{1, 1, 4, 4},
{0, 4, 1, 5, 8, 12, 9, 13, 2, 6, 3, 7, 10, 14, 11, 15},
},
{
{1, 2, 2, 4},
common_input,
2,
"NHWC",
{1, 4, 4, 1},
{0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15},
},
{
{1, 16, 1, 1},
common_input,
4,
"NCHW",
{1, 1, 4, 4},
CreateVectorIota<int>(16),
},
{
{1, 2, 2, 8},
CreateVectorIota<int>(32),
2,
"NHWC",
{1, 4, 4, 2},
{0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6,
7, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25,
26, 27, 20, 21, 22, 23, 28, 29, 30, 31},
}};
TestConvertDepthSpaceShuffle<ops::DepthToSpace>(this, params);
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertSpaceToDepth) {
const std::vector<int> common_input = CreateVectorIota<int>(16);
std::vector<DepthSpaceShuffleTestParams> params = {
{
{1, 1, 4, 4},
common_input,
2,
"NCHW",
{1, 4, 2, 2},
{0, 2, 8, 10, 1, 3, 9, 11, 4, 6, 12, 14, 5, 7, 13, 15},
},
{
{1, 4, 4, 1},
common_input,
2,
"NHWC",
{1, 2, 2, 4},
{0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15},
},
{
{1, 1, 4, 4},
common_input,
4,
"NCHW",
{1, 16, 1, 1},
CreateVectorIota<int>(16),
},
{
{1, 4, 4, 2},
CreateVectorIota<int>(32),
2,
"NHWC",
{1, 2, 2, 8},
{0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6,
7, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25,
26, 27, 20, 21, 22, 23, 28, 29, 30, 31},
},
};
TestConvertDepthSpaceShuffle<ops::SpaceToDepth>(this, params);
}
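// ClipByValue: `t` must be a tensor while clip_value_min/clip_value_max
// must be constants; every element is clamped into
// [clip_value_min, clip_value_max].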
TEST_P(OpConverter_FP32_FP16_Test, ConvertClipByValue) {
Scope s = Scope::NewRootScope();
auto t = ops::Placeholder(s.WithOpName("t"), tf_type_);
auto clip_value_min =
ops::Placeholder(s.WithOpName("clip_value_min"), tf_type_);
auto clip_value_max =
ops::Placeholder(s.WithOpName("clip_value_max"), tf_type_);
auto clip = ops::ClipByValue(s.WithOpName("my_clip"), t, clip_value_min,
clip_value_max);
const NodeDef& node_def = clip.operation.node()->def();
nvinfer1::DataType trt_type_;
TF_ASSERT_OK(TfTypeToTrtType(tf_type_, &trt_type_));
{
Reset();
AddTestWeights("t", {1, 2, 3}, {1, 2, 3, 4, 5, 6}, tf_type_);
AddTestWeights("clip_value_min", {1}, {1}, tf_type_);
AddTestWeights("clip_value_max", {1}, {5}, tf_type_);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"t\" for ClipByValue must be a "
"tensor");
}
{
Reset();
AddTestTensor("t", {1, 2, 3});
AddTestTensor("clip_value_min", {1});
AddTestWeights("clip_value_max", {1}, {1}, tf_type_);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"clip_value_min\" for ClipByValue "
"must be a constant");
}
{
Reset();
AddTestTensor("t", {1, 2, 3});
AddTestWeights("clip_value_min", {1}, {1}, tf_type_);
AddTestTensor("clip_value_max", {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"clip_value_max\" for ClipByValue "
"must be a constant");
}
struct TestParams {
std::vector<int> dims;
int clip_value_min;
int clip_value_max;
std::vector<float> expected_output;
};
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<TestParams> params = {{
{6},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 6},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 2, 3},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 2, 3, 1},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 1, 3, 1, 2},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 1, 3, 1, 2, 1},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{2, 1, 3},
-1,
8,
common_input,
}};
for (auto p : params) {
Reset();
AddTestTensor("t", p.dims, tf_type_, common_input);
AddTestWeights("clip_value_min", {1}, {p.clip_value_min}, tf_type_);
AddTestWeights("clip_value_max", {1}, {p.clip_value_max}, tf_type_);
TestOpConverter(node_def, p.dims,
OkStatus(),
OkStatus(),
ElementsAreArray(p.expected_output));
}
}
NodeDef GetSquaredDifferenceNodeDef(DataType dtype) {
Scope s = Scope::NewRootScope();
auto x = ops::Placeholder(s.WithOpName("x"), dtype);
auto y = ops::Placeholder(s.WithOpName("y"), dtype);
auto squared_diff =
ops::SquaredDifference(s.WithOpName("my_squared_diff"), x, y);
return squared_diff.operation.node()->def();
}
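// SquaredDifference computes (x - y)^2 elementwise with standard
// broadcasting, e.g. x = {0,...,5}, y = {0,-1,3,0,10,-7} yields
// {0, 4, 1, 9, 36, 144} as in the second test params below.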
TEST_P(OpConverter_FP32_FP16_Test, ConvertSquaredDifference) {
{
Reset();
NodeDef node_def = GetSquaredDifferenceNodeDef(tf_type_);
AddTestWeights<float>("x", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestTensor("y", {1, 1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"x\" for SquaredDifference must be "
"a tensor");
}
struct TestParams {
std::vector<int> dims_x;
std::vector<int> dims_y;
std::vector<float> value_x;
std::vector<float> value_y;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status status;
Status runtime_status;
};
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<TestParams> params = {
{{1, 2, 3},
{1, 7, 5},
common_input,
std::vector<float>(7 * 5, 0),
{1, 1, 2, 3},
common_input,
trt_mode_ == TrtTestMode::kDynamicShape
? OkStatus()
: errors::InvalidArgument("Infeasible broadcast scheme"),
errors::Internal(
"Binding index out of range. This can happen if profile is not set, "
"or the network is invalid for the current profile.")},
{
{1, 1, 2, 3},
{1, 1, 2, 3},
common_input,
{0, -1, 3, 0, 10, -7},
{1, 1, 2, 3},
{0, 4, 1, 9, 36, 144},
},
{
{1, 1, 2, 3},
{1, 1, 1, 3},
common_input,
{0, 1, 2},
{1, 1, 2, 3},
{0, 0, 0, 9, 9, 9},
},
};
for (auto p : params) {
Reset();
const NodeDef node = GetSquaredDifferenceNodeDef(tf_type_);
AddTestTensor("x", p.dims_x, p.value_x);
AddTestTensor("y", p.dims_y, p.value_y);
TestOpConverter(node, p.expected_output_dims, p.status, p.runtime_status,
ElementsAreArray(p.expected_output));
}
}
template <typename OpType>
NodeDef MakeResizeNodeDef(DataType dtype, bool align_corners) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto size = ops::Placeholder(s.WithOpName("size"), DT_INT32);
auto attrs = typename OpType::Attrs().AlignCorners(align_corners);
auto resize = OpType(s.WithOpName("my_resize"), input, size, attrs);
return resize.operation.node()->def();
}
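// Builds ResizeBilinear/ResizeNearestNeighbor NodeDefs; `size` holds the
// new {height, width} and may be supplied as weights or (outside
// implicit-batch mode) as a tensor, with the align_corners attribute
// controlling corner sampling.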
struct ResizeTestParams {
std::vector<int> input_dims;
std::vector<int> output_resize_dims;
std::vector<float> input_value;
bool size_as_tensor;
bool align_corners;
std::vector<int> expected_output_dims;
std::vector<float> expected_nearest_output_values;
std::vector<float> expected_bilinear_output_values;
Status status;
};
template <typename OpType>
void TestConvertResize(ParameterizedOpConverterTestBase* test,
ResizeTestParams& p) {
test->Reset();
NodeDef node_def =
MakeResizeNodeDef<OpType>(test->get_tf_type(), p.align_corners);
test->AddTestTensor("input", p.input_dims, test->get_tf_type(),
p.input_value);
if (p.size_as_tensor) {
std::vector<int32> size_dims{2};
std::vector<int32> size_values{p.output_resize_dims};
test->AddTestTensor("size", size_dims, DT_INT32, size_values, size_dims);
} else {
test->AddTestWeights("size", {2}, p.output_resize_dims, DT_INT32);
}
std::vector<float> expected_out;
if (node_def.op() == "ResizeBilinear") {
expected_out = p.expected_bilinear_output_values;
} else if (node_def.op() == "ResizeNearestNeighbor") {
expected_out = p.expected_nearest_output_values;
} else {
ASSERT_TRUE(false);
}
test->TestOpConverter(node_def, p.expected_output_dims,
p.status,
p.status,
ElementsAreArray(expected_out),
{DT_FLOAT});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertResize) {
{
Reset();
NodeDef node_def = MakeResizeNodeDef<ops::ResizeBilinear>(tf_type_,
true);
AddTestWeights<float>("input", {1, 2}, {1, 2});
AddTestWeights<int>("size", {1, 2}, {1, 2});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for ResizeBilinear must be a "
"tensor");
}
std::vector<ResizeTestParams> params{
{{1, 1, 2, 1},
{2, 3},
{2.0f, -1.0f},
false,
false,
{1, 2, 3, 1},
{2.0f, 2.0f, -1.0f, 2.0f, 2.0f, -1.0f},
{2.0f, 0.f, -1.0f, 2.0f, 0.f, -1.0f},
OkStatus()},
{{1, 1, 2, 1},
{2, 3},
{2.0f, -1.0f},
false,
true,
{1, 2, 3, 1},
{2.0f, 2.0f, -1.0f, 2.0f, 2.0f, -1.0f},
{2.0f, 0.5f, -1.0f, 2.0f, 0.5f, -1.0f},
OkStatus()}};
if (trt_mode_ != TrtTestMode::kImplicitBatch) {
params.push_back({{1, 1, 2, 1},
{2, 3},
{2.0f, -1.0f},
true,
true,
{1, 2, 3, 1},
{2.0f, 2.0f, -1.0f, 2.0f, 2.0f, -1.0f},
{2.0f, 0.5f, -1.0f, 2.0f, 0.5f, -1.0f},
OkStatus()});
}
for (auto p : params) {
TestConvertResize<ops::ResizeNearestNeighbor>(this, p);
#if IS_TRT_VERSION_GE(7, 1, 0, 0)
if (!p.align_corners) {
p.status = errors::InvalidArgument(
"Cannot Convert Bilinear Resize when align_corners=False");
}
#endif
TestConvertResize<ops::ResizeBilinear>(this, p);
}
}
NodeDef MakePadNodeDef(std::string name, DataType dtype) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto padding = ops::Placeholder(s.WithOpName("padding"), DT_INT32);
auto pad = ops::Pad(s.WithOpName(name), input, padding);
return pad.operation.node()->def();
}
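// Pad takes an int32 "paddings" input of shape {rank, 2}, one
// {before, after} pair per input dimension; padding the batch dimension or
// more than two dimensions is rejected by the converter (see the error
// cases below).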
struct PadTestParams {
std::vector<int> input_dims;
std::vector<int> pad_dims;
std::vector<int> pad_values;
std::vector<float> input_values;
std::vector<int> expected_output_dims;
std::vector<float> expected_output_values;
Status status;
};
TEST_P(OpConverter_FP32_FP16_Test, ConvertPad) {
{
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestWeights("input", {1, 2}, {1, 2}, tf_type_);
AddTestWeights<int>("padding", {1, 2}, {1, 2});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"tensor\" for Pad must be a "
"tensor");
}
{
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestTensor("input", {1, 2});
AddTestTensor("padding", {1, 2});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"paddings\" for Pad must be a "
"constant");
}
{
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestTensor("input", {1, 1, 2, 1});
AddTestWeights<int>("padding", {4, 2}, {0, 0, 1, 0, 0, 1, 0, 0});
TRT_TensorOrWeights input;
TRT_TensorOrWeights output;
RunValidationAndConversion(node_def);
TF_EXPECT_OK(GetTensorOrWeights("input", &input));
TF_EXPECT_OK(GetTensorOrWeights("my_pad", &output));
ITensorProxyPtr input_tensor = input.tensor();
converter_->ProvideQuantizationRange(&input_tensor, -5.0f, 5.0f);
auto ranges = quantization_ranges();
EXPECT_EQ(5.0f, ranges[input.tensor()->trt_tensor()]);
}
std::vector<PadTestParams> params{
{
{1, 1, 3, 2},
{4, 2},
{0, 0, 0, 0, 0, 1, 0, 0},
{1, 2, 3, 4, 5, 6},
{1, 1, 4, 2},
{1, 2, 3, 4, 5, 6, 0, 0},
},
{
{1, 1, 3, 2},
{4, 2},
{0, 0, 0, 0, 0, 0, 0, 1},
{1, 2, 3, 4, 5, 6},
{1, 1, 3, 3},
{1, 2, 0, 3, 4, 0, 5, 6, 0},
},
{
{1, 1, 3, 2},
{4, 2},
{0, 0, 1, 0, 0, 0, 0, 0},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 2},
{0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6},
},
{
{1, 1, 2, 1},
{4, 2},
{0, 0, 1, 0, 0, 1, 0, 0},
{2.0f, -1.0f},
{1, 2, 3, 1},
{0.0, 0.0, 0.0, 2.0f, -1.0f, 0.0},
},
PadTestParams{
{1, 1, 2, 2},
{4, 2},
{0, 0, 1, 0, 0, 1, 0, 0},
{2, -1, 3., 4},
{1, 2, 3, 2},
{0, 0, 0, 0, 0, 0, 2, -1, 3, 4, 0, 0},
},
PadTestParams{
{1, 1, 2, 1, 2},
{5, 2},
{0, 0, 1, 0, 0, 1, 0, 0, 0, 0},
{2, -1, 3., 4},
{1, 2, 3, 1, 2},
{0, 0, 0, 0, 0, 0, 2, -1, 3, 4, 0, 0},
},
PadTestParams{
{1, 1, 2, 1, 2},
{5, 2},
{0, 0, 0, 1, 0, 0, 1, 1, 0, 0},
{2, -1, 3., 4},
{1, 2, 2, 3, 2},
{0., 0., 2., -1., 0., 0., 0., 0., 3., 4., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0},
},
PadTestParams{
{1, 1, 2, 1},
{4, 2},
{1, 0, 0, 0, 0, 1, 0, 0},
{2.0f, -1.0f},
{2, 1, 3, 1},
{0.0, 0.0, 0.0, 2.0f, -1.0f, 0.0},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::InvalidArgument("Padding layer does not support "
"padding on batch dimension")
: OkStatus()},
PadTestParams{
{1, 1, 2, 1},
{4, 2},
{0, 0, 1, 0, 0, 1, 1, 1},
{2.0f, -1.0f},
{},
{},
errors::InvalidArgument("Padding layer does not support padding on "
"> 2")},
PadTestParams{
{1, 2, 2},
{3, 2},
{0, 0, 1, 0, 0, 1},
{2, -1, 3., 4},
{1, 3, 3},
{0., 0., 0., 2., -1., 0., 3., 4., 0.},
errors::InvalidArgument("Convertpad requires at least 4D input")}};
for (auto p : params) {
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestTensor("input", p.input_dims, p.input_values);
AddTestWeights<int32>("padding", p.pad_dims, p.pad_values);
TestOpConverter(node_def, p.expected_output_dims, p.status, p.status,
ElementsAreArray(p.expected_output_values));
}
}
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
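// Select/SelectV2 conversion tests (TRT >= 8.2). RunTest enumerates every
// tensor/weight combination of the cond/then/else inputs via `config`;
// SelectV2 allows broadcasting, while Select requires matching shapes or a
// rank-1 cond vector.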
class OpConverter_Select : public ParameterizedOpConverterTestBase {
public:
void RunTest(const string& opName);
};
void OpConverter_Select::RunTest(const string& opName) {
const auto testing_SelectV2 = opName == "SelectV2";
const int maxVal = 32;
const std::array<const char*, 3> par_name = {"cond", "then", "else"};
std::array<DataType, 3> par_type = {DT_BOOL, tf_type_, tf_type_};
std::vector<int> config(3, 0);
std::array<const std::vector<int>*, 3> par_dims;
std::vector<float> data_then(1, 0), data_else(1, maxVal),
expected_output(1, maxVal);
std::array<std::vector<float>*, 3> par_value = {nullptr, &data_then,
&data_else};
std::vector<int> data_cond(1, 0);
auto set_parameters = [&](DataType cond_type = DT_BOOL) {
Reset();
if (config[0]) {
AddTestTensor(par_name[0], *par_dims[0], cond_type, data_cond);
} else {
AddTestWeights(par_name[0], {1}, data_cond, cond_type);
}
for (int i = 1; i < 3; i++) {
if (config[i]) {
AddTestTensor(par_name[i], *par_dims[i], par_type[i], *par_value[i]);
} else {
AddTestWeights(par_name[i], {1}, *par_value[i], par_type[i]);
}
}
};
auto set_dimension = [this](const nvinfer1::Dims* dims,
std::vector<int>& dims_param,
std::string* comment = nullptr) {
const auto nbDims = dims->nbDims;
if (comment) {
*comment = "batch_dim: " + std::to_string(nbDims + 1) + ", " +
DebugString(*dims);
}
dims_param.resize(nbDims);
for (int i = 0; i < nbDims; i++) dims_param[i] = dims->d[i];
};
auto adjust_comments = [this](const nvinfer1::Dims* p_dims,
std::string* p_comment) {
if (p_dims[0].nbDims == p_dims[1].nbDims) return;
const int idx = p_dims[0].nbDims < p_dims[1].nbDims ? 0 : 1;
nvinfer1::Dims dims;
dims.nbDims = p_dims[1 - idx].nbDims;
int i = 0;
for (; i < dims.nbDims - p_dims[idx].nbDims; i++) dims.d[i] = 1;
for (int j = i; i < dims.nbDims; i++) dims.d[i] = p_dims[idx].d[i - j];
*(p_comment + idx) =
"batch_dim: " + std::to_string(1) + ", " + DebugString(dims);
*(p_comment + 1 - idx) =
"batch_dim: " + std::to_string(p_dims[idx].nbDims + 1) + ", " +
DebugString(p_dims[1 - idx]);
};
auto assign_values = [this](
const std::array<const std::vector<int>*, 3>& dims,
std::array<std::vector<float>*, 3> par_value,
std::vector<int>& data_cond, int use_indices = 0,
const std::vector<float>* expected_out = nullptr,
std::vector<int>* expect_dims_pntr = nullptr) {
size_t rank[3];
const auto dim_len =
dims[0]->size() > dims[1]->size() ? dims[0]->size() : dims[1]->size();
std::vector<int> exp_dims;
if (!expect_dims_pntr) expect_dims_pntr = &exp_dims;
auto& expect_dims = *expect_dims_pntr;
expect_dims.resize(dim_len);
expect_dims.assign(dim_len, 0);
for (int i = 0; i < 3; i++) {
if (dims[i]) {
const auto& dim = *dims[i];
for (auto j = 0; j < dims[i]->size(); j++) {
if (expect_dims[j] < dim[j]) expect_dims[j] = dim[j];
}
rank[i] = std::accumulate(std::begin(dim), std::end(dim), 1,
std::multiplies<int>());
} else {
assert(i >= 2);
rank[i] = rank[i - 1];
}
}
for (int k = 1; k <= 2; k++) {
auto& data = *par_value[k];
data.resize(rank[k]);
if (use_indices) {
const int mult = k == 1 ? 1 : -1;
for (int i = 0; i < rank[k]; i++) {
data[i] = mult * (i + 1);
}
} else {
for (int i = 0; i < rank[k]; i++) {
data[i] = k == 1 ? data[i >> 1] + i % 2 : maxVal - (*par_value[1])[i];
}
}
}
data_cond.resize(rank[0]);
data_cond[0] = 0;
for (int i = 0; i < rank[0]; i++) {
data_cond[i] = i % 2 ? 1 - data_cond[i >> 1] : data_cond[i >> 1];
}
if (!expected_out || expected_out->size() > 0) {
auto& expected_output = *par_value[0];
const auto rank_out =
std::accumulate(std::begin(expect_dims), std::end(expect_dims), 1,
std::multiplies<int>());
assert(rank_out == (expected_out ? expected_out->size()
: rank[use_indices >= 0 ? 0 : 1]));
expected_output.resize(rank_out);
const auto& data_then = *par_value[1];
const auto& data_else = *par_value[2];
const auto div = use_indices >= 0 ? 1 : rank_out / rank[0];
for (int i = 0; i < rank_out; i++) {
expected_output[i] = expected_out ? (*expected_out)[i]
: data_cond[i / div] ? data_then[i]
: data_else[i];
}
}
};
auto shape_error_msg = [&](const NodeDef& node, bool same_then_else = true) {
nvinfer1::Dims shape[3];
const auto j = same_then_else ? 0 : 1;
if (trt_mode_ == TrtTestMode::kDynamicShape) {
for (int i = 0; i < 2; i++) {
for (int j = shape[i].nbDims = par_dims[i]->size(); j--;) {
shape[i].d[j] = -1;
}
}
} else {
for (int i = 0; i < 2; i++) {
DimsAdapter(*par_dims[i + j]).TrtDims(&shape[i + j]);
}
}
return input_shapes_error_msg(shape[j], shape[j + 1], node,
!same_then_else);
};
auto run_test = [&](const NodeDef& node, const std::vector<int>& exp_dims) {
const bool same_then_else_shapes = *par_dims[1] == *par_dims[2];
    const bool same_cond_shape = *par_dims[0] == *par_dims[1];
const auto nMax = testing_SelectV2 ? 2 : 1;
for (int n = 0; n < nMax; n++) {
set_parameters();
      if (testing_SelectV2 || (same_then_else_shapes && same_cond_shape)) {
TestOpConverter(node, exp_dims, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
const auto err_msg = shape_error_msg(node, same_then_else_shapes);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
if (!n) {
for (auto idx = data_cond.size(); idx--;)
data_cond[idx] = 1 - data_cond[idx];
if (!same_then_else_shapes) {
for (int p = 1; p <= 2; p++) {
auto& values = *par_value[p];
const auto val = p == 1 ? 1 : -1;
for (auto idx = values.size(); idx--;) values[idx] = val;
}
for (auto idx = expected_output.size(); idx--;)
expected_output[idx] = expected_output[idx] > 0 ? -1 : 1;
} else {
for (auto idx = expected_output.size(); idx--;)
expected_output[idx] = -expected_output[idx];
}
}
}
};
std::array<DataType, 3> data_types = {DT_FLOAT, DT_HALF, DT_INT32};
NodeDef node;
TF_CHECK_OK(NodeDefBuilder("op", opName)
.Input("cond", 0, DT_BOOL)
.Input("then", 0, tf_type_)
.Input("else", 0, tf_type_)
.Finalize(&node));
const std::vector<std::vector<int>> dims_params = {
{8}, {8, 2, 4}, {32, 32, 3200}};
par_dims = {&dims_params[0], &dims_params[0], &dims_params[0]};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
const auto& err = convert_not_supported_implicit(node.op(), node.name());
do {
set_parameters();
RunValidationAndConversion(node, absl::StatusCode::kUnimplemented, err);
} while (nextTensorWeightConfiguration(config));
return;
}
do {
for (auto cond_type : {DT_INT32, DT_FLOAT, DT_HALF}) {
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(cond_type, &trt_type));
const auto error_msg =
unexpected_type_error_msg(trt_type, nvinfer1::DataType::kBOOL, node);
set_parameters(cond_type);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
error_msg);
}
} while (nextTensorWeightConfiguration(config));
std::string err_msg = bool_weight_error_msg(node);
std::vector<int> dims_const = {1};
par_dims = {&dims_const, &dims_const, &dims_const};
for (int i = 0; i < 2; i++) {
do {
set_parameters();
if (config[0]) {
TestOpConverter(node, {1}, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
} while (nextTensorWeightConfiguration(config));
data_cond[0] = 1 - data_cond[0];
expected_output[0] = (*par_value[1 + i])[0];
}
for (int i = 0; i < 3; i++) {
config[i] = 1;
}
par_value[0] = &expected_output;
if (trt_mode_ == TrtTestMode::kExplicitBatch) {
std::string bc_comment[2];
std::vector<int> dims[4];
par_dims = {dims, dims + 1, dims + 1};
const nvinfer1::Dims infeasible_dims[] = {
{3, {4, 3, 2}}, {4, {4, 3, 2, 5}}, {3, {4, 1, 3}},
{3, {4, 3, 2}}, {3, {4, 3, 2}}, {5, {4, 3, 2, 5, 2}}};
auto iMax = sizeof(infeasible_dims) / sizeof(infeasible_dims[0]);
for (int i = 0; i < iMax; i += 2) {
for (int k = 0; k < 2; k++) {
for (int j = 0; j < 2; j++) {
set_dimension(infeasible_dims + i + (j + k) % 2, dims[j],
bc_comment + (j + k) % 2);
}
if (testing_SelectV2) {
adjust_comments(infeasible_dims + i, bc_comment);
err_msg = "Infeasible broadcast scheme (" + bc_comment[k] + " vs " +
bc_comment[1 - k];
} else {
err_msg = shape_error_msg(node);
}
set_parameters();
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
}
const nvinfer1::Dims feasible_dims_2[] = {
{3, {1, 3, 2}}, {3, {4, 3, 2}}, {3, {4, 1, 2}}, {3, {4, 3, 2}},
{3, {4, 3, 1}}, {3, {4, 3, 2}}, {3, {1, 1, 2}}, {3, {4, 3, 2}},
{3, {1, 3, 1}}, {3, {4, 3, 2}}, {3, {4, 1, 1}}, {3, {4, 3, 2}},
{3, {1, 1, 1}}, {3, {4, 3, 2}}, {3, {1, 3, 2}}, {3, {4, 1, 2}},
};
const std::vector<float> expected_val_2[] = {
{-1, 2, 3, -4, 5, -6, -7, 8, 9, -10, 11, -12,
-13, 14, 15, -16, 17, -18, -19, 20, 21, -22, 23, -24},
{-1, 2, 3, -4, 5, -6, -1, 2, 3, -4, -5, 6,
-1, 2, 3, -4, 5, -6, -1, 2, -3, 4, 5, -6},
{-1, 2, -3, 4, -5, 6, 7, -8, 9, -10, 11, -12,
13, -14, 15, -16, 17, -18, -19, 20, -21, 22, -23, 24},
{-1, 2, 1, -2, 1, -2, -3, 4, 3, -4, -3, 4,
-5, 6, 5, -6, 5, -6, -7, 8, -7, 8, 7, -8},
{-1, -2, 3, 4, 5, 6, -7, -8, 9, 10, -11, -12,
-13, -14, 15, 16, 17, 18, -19, -20, -21, -22, 23, 24},
{-1, 1, 2, -2, 3, -3, -4, 4, 5, -5, -6, 6,
-7, 7, 8, -8, 9, -9, -10, 10, -11, 11, 12, -12},
{-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12,
-13, 14, -15, 16, -17, 18, -19, 20, -21, 22, -23, 24},
{-1, 2, 1, -2, 1, -2, -1, 2, 1, -2, -1, 2,
-1, 2, 1, -2, 1, -2, -1, 2, -1, 2, 1, -2},
{-1, -2, 3, 4, 5, 6, -7, -8, 9, 10, 11, 12,
-13, -14, 15, 16, 17, 18, -19, -20, 21, 22, 23, 24},
{-1, 1, 2, -2, 3, -3, -1, 1, 2, -2, -3, 3,
-1, 1, 2, -2, 3, -3, -1, 1, -2, 2, 3, -3},
{-1, -2, -3, -4, -5, -6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, -19, -20, -21, -22, -23, -24},
{-1, 1, 1, -1, 1, -1, -2, 2, 2, -2, -2, 2,
-3, 3, 3, -3, 3, -3, -4, 4, -4, 4, 4, -4},
{-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12,
-13, -14, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24},
{-1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1},
{-1, 2, 1, -2, 1, -2, -3, 4, 3, -4, 3, -4,
-5, 6, 5, -6, 5, -6, -7, 8, 7, -8, 7, -8},
{-1, 2, -3, 4, -5, 6, 1, -2, 3, -4, 5, -6,
1, -2, 3, -4, 5, -6, -1, 2, -3, 4, -5, 6},
{-1, 2, 3, -4, 5, -6, -7, 2, 3, -10, -11, 6,
-13, 2, 3, -16, 5, -18, -19, 2, -21, 4, 5, -24},
{-1, 2, 3, -4, 5, -6, -1, 8, 9, -4, 11, -6,
-1, 14, 15, -4, 17, -6, -1, 20, 21, -4, 23, -6},
{-1, 2, 1, -4, 1, -6, -7, 4, 3, -10, -11, 4,
-13, 6, 5, -16, 5, -18, -19, 8, -21, 8, 7, -24},
{-1, 2, -1, 4, -1, 6, 7, -4, 9, -4, 11, -4,
13, -6, 15, -6, 17, -6, -7, 20, -7, 22, -7, 24},
{-1, 1, 2, -4, 3, -6, -7, 4, 5, -10, -11, 6,
-13, 7, 8, -16, 9, -18, -19, 10, -21, 11, 12, -24},
{-1, -1, 3, 4, 5, 6, -4, -4, 9, 10, -6, -6,
-7, -7, 15, 16, 17, 18, -10, -10, -11, -11, 23, 24},
{-1, 2, 1, -4, 1, -6, -7, 2, 1, -10, -11, 2,
-13, 2, 1, -16, 1, -18, -19, 2, -21, 2, 1, -24},
{-1, 2, -1, 4, -1, 6, -1, 8, -1, 10, -1, 12,
-1, 14, -1, 16, -1, 18, -1, 20, -1, 22, -1, 24},
{-1, 1, 2, -4, 3, -6, -7, 1, 2, -10, -11, 3,
-13, 1, 2, -16, 3, -18, -19, 1, -21, 2, 3, -24},
{-1, -1, 3, 4, 5, 6, -1, -1, 9, 10, 11, 12,
-1, -1, 15, 16, 17, 18, -1, -1, 21, 22, 23, 24},
{-1, 1, 1, -4, 1, -6, -7, 2, 2, -10, -11, 2,
-13, 3, 3, -16, 3, -18, -19, 4, -21, 4, 4, -24},
{-1, -1, -1, -1, -1, -1, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, -4, -4, -4, -4, -4, -4},
{-1, 1, 1, -4, 1, -6, -7, 1, 1, -10, -11, 1,
-13, 1, 1, -16, 1, -18, -19, 1, -21, 1, 1, -24},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, 2, -1, 4, -1, 6, 1, -4, 3, -4, 5, -4,
1, -6, 3, -6, 5, -6, -7, 2, -7, 4, -7, 6},
{-1, 2, 1, -4, 1, -6, -1, 4, 3, -4, 3, -6,
-1, 6, 5, -4, 5, -6, -1, 8, 7, -4, 7, -6}};
const auto exp_dims = dims + 3;
const int kMax2 = 2;
iMax = sizeof(feasible_dims_2) / sizeof(feasible_dims_2[0]);
    // The loop below consumes kMax2 expected arrays per dims pair, twice
    // (once per operand order), i.e. kMax2 * iMax entries in total.
    assert(kMax2 * iMax ==
           sizeof(expected_val_2) / sizeof(expected_val_2[0]));
for (int i = 0; i < iMax; i += 2) {
for (int k = 0; k < kMax2; k++) {
for (int j = 0; j < 2; j++)
set_dimension(feasible_dims_2 + i + (j + k) % 2, dims[j]);
const std::vector<float>* expect = expected_val_2 + i + k;
for (int m = 0; m < 2; m++) {
assign_values(par_dims, par_value, data_cond, 1, expect, exp_dims);
run_test(node, *exp_dims);
const auto tmp = par_dims[0];
par_dims[0] = par_dims[1];
par_dims[1] = tmp;
expect += iMax;
}
}
}
const nvinfer1::Dims feasible_dims_3[] = {
{2, {3, 2}}, {2, {3, 1}}, {2, {1, 1}}, {3, {2, 2, 1}},
{3, {2, 1, 2}}, {3, {1, 2, 2}}, {3, {2, 1, 1}}, {3, {2, 1, 2}},
{3, {1, 2, 2}}, {3, {2, 1, 1}}, {3, {1, 1, 2}}, {3, {1, 2, 1}},
};
const std::vector<float> expected_val_3[] = {
{-1, 1, 2, -1, 3, -1}, {-1, 1, 1, -2, 1, -3},
{-1, -1, 3, 4, 5, 6}, {-1, -2, 1, 1, 1, 1},
{-1, -1, -2, -2, -3, -3}, {-1, -2, -3, -4, -5, -6},
{-1, -2, 1, 2, 3, 4, -3, -4}, {-1, -2, 3, 4, 1, 2, -3, -4},
{-1, 1, -3, 2, 3, -2, 4, -4}, {-1, 2, -2, 4, 1, -3, 3, -4},
{-1, 1, 2, -2, -3, 3, 4, -4}, {-1, 2, 1, -2, -3, 4, 3, -4},
{-1, -2, -3, -4, 3, 4, 3, 4}, {-1, -2, -1, -2, 1, 2, 3, 4},
{-1, 1, -3, 1, 2, -2, 2, -4}, {-1, 2, -1, 4, 1, -2, 3, -2},
{-1, 1, 1, -2, -3, 2, 2, -4}, {-1, 2, 1, -1, -2, 4, 3, -2},
{-1, -1, -2, -2, 1, 2, 1, 2}, {-1, -2, -1, -2, 1, 1, 2, 2},
{-1, 1, -2, 1, -1, 2, -2, 2}, {-1, 1, -1, 2, -2, 1, -2, 2},
{-1, -2, 1, 1, -1, -2, 2, 2}, {-1, -1, 1, 2, -2, -2, 1, 2},
};
const int kMax3 = 6;
const std::array<int, 3> perm[kMax3] = {{0, 1, 2}, {0, 2, 1}, {1, 0, 2},
{1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
par_dims = {dims, dims + 1, dims + 2};
iMax = sizeof(feasible_dims_3) / sizeof(feasible_dims_3[0]);
assert(kMax3 * iMax / 3 ==
sizeof(expected_val_3) / sizeof(expected_val_3[0]));
for (int i = 0; i < iMax; i += 3) {
for (int k = 0; k < kMax3; k++) {
for (int j = 0; j < 3; j++)
set_dimension(feasible_dims_3 + i + perm[k][j], dims[j]);
const auto* expect = expected_val_3 + kMax3 * (i / 3) + k;
assign_values(par_dims, par_value, data_cond, 1, expect, exp_dims);
run_test(node, *exp_dims);
}
}
if (!testing_SelectV2) {
const nvinfer1::Dims vect_dim[] = {
{1, {4}}, {3, {5, 2, 3}}, {2, {5, 2}}, {3, {5, 2, 3}},
{1, {5}}, {3, {5, 2, 3}}, {1, {4}}, {4, {4, 3, 5, 2}},
};
std::vector<int> dims[4];
par_dims = {dims, dims + 1, dims + 1};
auto iMax = sizeof(vect_dim) / sizeof(vect_dim[0]);
for (int i = 0; i < iMax; i += 2) {
err_msg =
vect_dim[i].nbDims != 1 || vect_dim[i].d[0] != vect_dim[i + 1].d[0]
? input_shapes_error_msg(vect_dim[i], vect_dim[i + 1], node)
: "";
for (int j = 0; j < 2; j++) {
set_dimension(vect_dim + i + j, dims[j]);
}
assign_values(par_dims, par_value, data_cond, -1);
set_parameters();
if (err_msg.empty()) {
TestOpConverter(node, dims[1], OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
}
}
}
for (auto dims : dims_params) {
par_dims = {&dims, &dims, &dims};
assign_values(par_dims, par_value, data_cond);
for (const auto type_else : data_types) {
par_type[2] = type_else;
set_parameters();
if ((par_type[1] == DT_INT32 || par_type[2] == DT_INT32) &&
par_type[1] != par_type[2]) {
nvinfer1::DataType trt_type[2];
for (int i = 0; i < 2; i++) {
TF_ASSERT_OK(TfTypeToTrtType(par_type[i + 1], trt_type + i));
}
err_msg = then_else_dtypes_error_msg(trt_type[0], trt_type[1], node);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
} else {
TestOpConverter(node, dims, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
}
}
par_type[2] = tf_type_;
}
if (trt_mode_ == TrtTestMode::kDynamicShape) {
std::vector<float> values_then{1, 2, 3, 4, 5, 6};
std::vector<float> values_else{-1, -2, -3, -4, -5, -6};
std::vector<float> expected_output{1, -2, 3, 4, -5, 6};
data_cond = std::vector<int>{1, 0, 1};
const std::vector<int> cond_dims{1, 3}, input_dims{1, 2, 3};
par_dims = {&cond_dims, &input_dims, &input_dims};
const auto len_cond = data_cond.size();
for (int i = 0; i < 2; i++) {
par_value[i + 1] = &values_then;
par_value[2 - i] = &values_else;
for (int j = 0; j < values_then.size(); j++) {
expected_output[j] = par_value[2 - data_cond[j % len_cond]]->at(j);
}
set_parameters();
if (testing_SelectV2) {
TestOpConverter(node, input_dims, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
const auto err_msg = shape_error_msg(node);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
for (int j = len_cond; j--;) {
data_cond[j] = 1 - data_cond[j];
}
}
}
}
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_Select,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF, DT_INT32),
::testing::Values(TrtPrecisionMode::FP32)));
TEST_P(OpConverter_Select, ConvertSelectV2) { RunTest("SelectV2"); }
TEST_P(OpConverter_Select, Convert_Select) { RunTest("Select"); }
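// Registers a temporary higher-priority "Abs" converter whose body calls
// SqueezeTensor twice on the same input, verifying that duplicate squeezes
// convert and run correctly end to end.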
TEST_F(OpConverterTest, DuplicateSqueeze) {
auto op_converter = [](const OpConverterParams* params) -> Status {
if (params->validation_only) return OkStatus();
auto input = params->inputs.at(0).tensor();
ITensorProxyPtr output;
std::vector<int> new_dims = {0, 1, 2, 3};
TF_EXPECT_OK(params->converter->SqueezeTensor(
input, &new_dims, params,
&output, 0));
new_dims = {0, 2, 3};
TF_EXPECT_OK(params->converter->SqueezeTensor(
output, &new_dims, params,
&output, 1));
params->outputs->push_back(TRT_TensorOrWeights(output));
return OkStatus();
};
NodeDef node_def = CreateUnaryOp<ops::Abs>(DataType::DT_FLOAT);
AddTestTensor("input", {1, 1, 2, 3});
GetOpConverterRegistry()->Register("Abs", kDefaultConverterPriority + 1,
op_converter);
RunValidationAndConversion(node_def);
DataVec input_data;
DataVec output_data;
InputOutputData abs_input{
"input", ConstructTensor<float>(6, 0,
DataType::DT_FLOAT)};
InputOutputData abs_output{
"my_unary", ConstructTensor<float>(6, 0,
DataType::DT_FLOAT)};
input_data.push_back(abs_input);
output_data.push_back(abs_output);
TF_EXPECT_OK(BuildAndRun(input_data, &output_data));
}
#endif
}
}
}
int main(int argc, char** argv) {
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
std::unique_ptr<nvinfer1::IBuilder> const holder{
nvinfer1::createInferBuilder(*tensorflow::tensorrt::Logger::GetLogger())};
#endif
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#else
int main(int, char**) { return 0; }
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
686813ec-b120-46dd-8c6e-b174ae9ae39a | cpp | tensorflow/tensorflow | op_converter_registry | tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.cc | tensorflow/compiler/tf2tensorrt/convert/op_converter_registry_test.cc | #include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include <set>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/env_var.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace convert {
struct OpConverterRegistration {
OpConverter converter;
int priority;
};
class OpConverterRegistry::Impl {
public:
~Impl() = default;
InitOnStartupMarker Register(const string& name, const int priority,
OpConverter converter) {
mutex_lock lock(mu_);
auto item = registry_.find(name);
if (item != registry_.end()) {
const int existing_priority = item->second.priority;
if (priority <= existing_priority) {
LOG(WARNING) << absl::StrCat(
"Ignoring TF->TRT ", name, " op converter with priority ",
existing_priority, " due to another converter with priority ",
priority);
return {};
} else {
LOG(WARNING) << absl::StrCat(
"Overwriting TF->TRT ", name, " op converter with priority ",
existing_priority, " using another converter with priority ",
priority);
registry_.erase(item);
}
}
registry_.insert({name, OpConverterRegistration{converter, priority}});
return {};
}
StatusOr<OpConverter> LookUp(string name) {
static const absl::flat_hash_set<string> tftrt_op_fakelist = [] {
string tftrt_op_fakelist_str;
TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_OP_FAKELIST",
"",
&tftrt_op_fakelist_str));
absl::flat_hash_set<string> tftrt_op_fakelist{};
for (const auto& x : str_util::Split(tftrt_op_fakelist_str, ",")) {
tftrt_op_fakelist.insert(x);
}
tftrt_op_fakelist.rehash(0);
return tftrt_op_fakelist;
}();
if (tftrt_op_fakelist.contains(name)) {
LOG_FIRST_N(INFO, 2) << "Emulating OP Converter: `" << name << "`. It "
<< "will cause TRT engine building to fail. This "
<< "feature is only intended to be used for "
<< "TF-TRT graph segmentation experiments. This "
<< "feature is controlled using: "
<< "`TF_TRT_OP_FAKELIST=OpName1,OpName2`.";
mutex_lock lock(mu_);
return registry_.find("FakeOp")->second.converter;
}
mutex_lock lock(mu_);
auto found = registry_.find(name);
if (found != registry_.end()) {
return found->second.converter;
}
return errors::NotFound("No converter for op ", name);
}
void Clear(const std::string& name) {
mutex_lock lock(mu_);
auto itr = registry_.find(name);
if (itr == registry_.end()) {
return;
}
registry_.erase(itr);
}
std::vector<std::string> ListRegisteredOps() const {
mutex_lock lock(mu_);
std::vector<std::string> result;
result.reserve(registry_.size());
for (const auto& item : registry_) {
result.push_back(item.first);
}
return result;
}
private:
mutable mutex mu_;
mutable std::unordered_map<std::string, OpConverterRegistration> registry_
TF_GUARDED_BY(mu_);
};
OpConverterRegistry::OpConverterRegistry() : impl_(std::make_unique<Impl>()) {}
StatusOr<OpConverter> OpConverterRegistry::LookUp(const string& name) {
return impl_->LookUp(name);
}
InitOnStartupMarker OpConverterRegistry::Register(const string& name,
const int priority,
OpConverter converter) {
return impl_->Register(name, priority, converter);
}
std::vector<std::string> OpConverterRegistry::ListRegisteredOps() const {
return impl_->ListRegisteredOps();
}
void OpConverterRegistry::Clear(const std::string& name) { impl_->Clear(name); }
OpConverterRegistry* GetOpConverterRegistry() {
static OpConverterRegistry* registry = new OpConverterRegistry();
return registry;
}
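// Minimal usage sketch (hypothetical op name "MyOp"; mirrors the unit test
// paired with this file): a converter is any callable taking a
// const OpConverterParams* and returning Status, registered under an op name
// with a priority; LookUp() returns the highest-priority registration.
//
//   auto noop = [](const OpConverterParams*) -> Status { return OkStatus(); };
//   GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
//                                      noop);
//   StatusOr<OpConverter> found = GetOpConverterRegistry()->LookUp("MyOp");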
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
TEST(TestOpConverterRegistry, TestOpConverterRegistry) {
bool flag{false};
auto set_true_func = [&flag](const OpConverterParams*) -> Status {
flag = true;
return OkStatus();
};
auto set_false_func = [&flag](const OpConverterParams*) -> Status {
flag = false;
return OkStatus();
};
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority,
set_true_func);
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority - 1,
set_false_func);
auto func = GetOpConverterRegistry()->LookUp("FakeFunc");
EXPECT_TRUE(func.ok());
EXPECT_TRUE(((*func)(nullptr)).ok());
EXPECT_TRUE(flag);
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority + 1,
set_false_func);
func = GetOpConverterRegistry()->LookUp("FakeFunc");
EXPECT_TRUE(func.ok());
EXPECT_TRUE((*func)(nullptr).ok());
EXPECT_FALSE(flag);
GetOpConverterRegistry()->Clear("FakeFunc");
EXPECT_FALSE(GetOpConverterRegistry()->LookUp("FakeFunc").ok());
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
00d600ed-35f3-4bc9-8f68-4886a4de5b47 | cpp | tensorflow/tensorflow | convert_graph | tensorflow/compiler/tf2tensorrt/convert/convert_graph.cc | tensorflow/compiler/tf2tensorrt/convert/convert_graph_test.cc | #include "tensorflow/compiler/tf2tensorrt/convert/convert_graph.h"
#include <fstream>
#include <list>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using absl::StrAppend;
using absl::StrCat;
using ::tensorflow::tensorrt::segment::ClusterProperty;
using ::tensorflow::tensorrt::segment::NodePtrCompare;
using ::tensorflow::tensorrt::segment::Segment;
namespace {
Status BuildNodeMap(const Graph& graph,
std::unordered_map<string, Node*>* node_map) {
for (auto* node : graph.op_nodes()) {
if (!node_map->insert({node->name(), node}).second) {
return errors::AlreadyExists("Node name is not unique in graph: " +
node->name());
}
}
return OkStatus();
}
EngineInfo::EngineType GetEngineType(
const TRTOptimizationPass::ConversionParams& params) {
return (params.is_dynamic_op || params.use_calibration)
? EngineInfo::EngineType::TRTDynamic
: EngineInfo::EngineType::TRTStatic;
}
bool AllowDynamicNonBatchDimension(
const TRTOptimizationPass::ConversionParams& params) {
return !params.use_implicit_batch ||
GetEngineType(params) == EngineInfo::EngineType::TRTDynamic;
}
struct EdgePtrCompare {
bool operator()(const Edge* lhs, const Edge* rhs) const {
return lhs->id() < rhs->id();
}
};
std::pair<TfDeviceId, PlatformDeviceId> GetFirstValidDeviceId() {
for (int tf_device_id_value = 0; tf_device_id_value < 100;
++tf_device_id_value) {
TfDeviceId tf_device_id(tf_device_id_value);
PlatformDeviceId platform_device_id;
Status s =
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id);
if (s.ok()) {
VLOG(1) << "Found TF GPU " << tf_device_id.value() << " at cuda device "
<< platform_device_id.value();
return std::make_pair(tf_device_id, platform_device_id);
}
}
LOG(ERROR) << "Could not find any TF GPUs";
return std::make_pair(TfDeviceId(-1), PlatformDeviceId(-1));
}
bool ShallKeepControlEdgeFrom(const Node* input_node) {
if (!input_node) {
LOG(ERROR) << "Node pointer is null, this should not happen";
return false;
}
return input_node->type_string() != "Const";
}
Status GetEngineInfo(const Graph* g,
const grappler::GraphProperties& graph_properties,
const Segment& segment,
const std::vector<Node*>& reverse_topo_order,
EngineInfo* info) {
std::vector<const Node*> subgraph_nodes;
std::set<const Node*> added_const_nodes;
const ClusterProperty& segment_property = segment.property;
const std::set<const Node*, NodePtrCompare>& segment_nodes = segment.nodes;
const DeviceNameUtils::ParsedName segment_device =
segment_property.DeviceName();
info->max_batch_size = segment_property.BatchSize().GetOptionalMaxBatchSize();
std::unordered_map<string, int> input_to_engine_port, output_to_engine_port;
for (auto it = reverse_topo_order.rbegin(); it != reverse_topo_order.rend();
++it) {
const Node* node = *it;
if (segment_nodes.count(node) == 0) continue;
subgraph_nodes.push_back(node);
const int node_id = node->id();
const string& node_name = node->name();
std::vector<const Edge*> in_edges(node->in_edges().begin(),
node->in_edges().end());
std::sort(in_edges.begin(), in_edges.end(), EdgePtrCompare());
for (const auto edge : in_edges) {
auto input_node = edge->src();
if (input_node->IsSource() || segment_nodes.count(input_node)) {
continue;
}
if (edge->IsControlEdge()) {
if (ShallKeepControlEdgeFrom(input_node)) {
info->connections.emplace_back(input_node->name(), input_node->id(),
node_name, node_id,
true);
}
} else if (input_node->type_string() == "Const") {
if (!added_const_nodes.insert(input_node).second) {
continue;
}
VLOG(1) << "Adding const node " << input_node->name();
} else {
int port = Graph::kControlSlot - 1;
const string s = StrCat(input_node->name(), ":", edge->src_output());
VLOG(1) << "Input edge = " << s;
if (input_to_engine_port.count(s)) {
port = input_to_engine_port.at(s);
} else {
port = input_to_engine_port.size();
input_to_engine_port.insert({s, port});
}
info->connections.emplace_back(
input_node->name(), input_node->id(), edge->src_output(), node_name,
node_id, edge->dst_input(), true, port);
}
}
std::vector<const Edge*> out_edges(node->out_edges().begin(),
node->out_edges().end());
std::sort(out_edges.begin(), out_edges.end(), EdgePtrCompare());
for (const auto edge : out_edges) {
auto output_node = edge->dst();
if (output_node->IsSink() || segment_nodes.count(output_node)) {
continue;
}
if (edge->IsControlEdge()) {
if (ShallKeepControlEdgeFrom(node)) {
info->connections.emplace_back(output_node->name(), output_node->id(),
node_name, node_id,
false);
}
} else {
int port = Graph::kControlSlot - 1;
const string s = StrCat(node_name, ":", edge->src_output());
VLOG(1) << "Output edge = " << s;
if (output_to_engine_port.count(s)) {
port = output_to_engine_port.at(s);
} else {
port = output_to_engine_port.size();
output_to_engine_port.insert({s, port});
}
info->connections.emplace_back(
output_node->name(), output_node->id(), edge->dst_input(),
node_name, node_id, edge->src_output(), false, port);
}
}
}
subgraph_nodes.insert(subgraph_nodes.begin(), added_const_nodes.begin(),
added_const_nodes.end());
TF_RETURN_IF_ERROR(
ConvertSegmentToGraphDef(g, graph_properties, subgraph_nodes, info));
VLOG(1) << "Converted TensorRT candidate segment '" << info->engine_name
<< "' to a GraphDef";
if (segment_device.has_type) {
if (segment_device.type != "GPU") {
return errors::Internal(
"segment device is not GPU: ",
DeviceNameUtils::ParsedNameToString(segment_device));
}
info->device = DeviceNameUtils::ParsedNameToString(segment_device);
} else {
TfDeviceId tf_device_id;
PlatformDeviceId platform_device_id;
std::tie(tf_device_id, platform_device_id) = GetFirstValidDeviceId();
if (tf_device_id.value() >= 0) {
DeviceNameUtils::ParsedName parsed_name;
parsed_name.type = "GPU";
parsed_name.has_type = true;
parsed_name.id = tf_device_id.value();
parsed_name.has_id = true;
info->device = DeviceNameUtils::ParsedNameToString(parsed_name);
} else {
VLOG(1) << "No device is assigned to the segment. A device will be "
"assigned during graph execution (inference).";
}
}
return OkStatus();
}
void UpdateToEngineNode(const std::vector<EngineInfo>& infos,
const size_t my_engine_id,
const std::vector<Node*>& engine_nodes,
const bool is_input_edge, const string& node_name,
Node** node, int* port) {
for (size_t t = 0; t < infos.size(); ++t) {
if (t == my_engine_id) {
continue;
}
const auto& info = infos.at(t);
for (const auto& eng_conn : info.connections) {
if (is_input_edge == eng_conn.is_input_edge) continue;
if (eng_conn.inside_node_name == node_name &&
eng_conn.inside_port == *port) {
*node = CHECK_NOTNULL(engine_nodes[t]);
QCHECK_EQ(info.engine_name, (**node).name())
<< "Engine name mismatch: " << info.engine_name << " vs "
<< (**node).name();
*port = eng_conn.port_number;
return;
}
}
}
LOG(FATAL) << "Node " << node_name << " not found in any engine.";
}
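// Note on UpdateToEngineNode above: when an edge endpoint no longer exists in
// the graph because its node was absorbed into an already-created engine, the
// helper scans the other engines' connections (on the opposite side of the
// edge) for a matching inside node name and port, then redirects *node/*port
// to that engine's TRTEngineOp node and engine port; it is a fatal error if
// no engine claims the node.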
tensorflow::TensorShapeProto ComputeTRTNodeIOShape(
std::vector<PartialTensorShape>& partial_tensorshape_vect,
std::vector<tensorflow::TensorShapeProto>& shape_proto_vect,
const PartialTensorShape& conn_shape, int port_number) {
tensorflow::TensorShapeProto tmp_shape_proto;
conn_shape.AsProto(&tmp_shape_proto);
if (partial_tensorshape_vect.size() <= port_number) {
shape_proto_vect.resize(port_number + 1);
partial_tensorshape_vect.resize(port_number + 1);
}
return tmp_shape_proto;
}
Status CreateTRTNode(const TRTOptimizationPass::ConversionParams& params,
const std::vector<EngineInfo>& infos, int pos,
int default_max_batch_size, Graph* graph,
std::vector<Node*>* engine_nodes,
grappler::Cluster* cluster) {
const auto& info = infos.at(pos);
std::vector<tensorflow::TensorShapeProto> input_shape_protos;
std::vector<tensorflow::TensorShapeProto> output_shape_protos;
std::vector<PartialTensorShape> input_shapes;
std::vector<PartialTensorShape> output_shapes;
std::vector<NodeDefBuilder::NodeOut> inputs;
std::vector<Node*> input_nodes;
std::vector<Node*> control_input_nodes;
std::unordered_set<string> control_input_names;
std::vector<DataType> out_types;
VLOG(1) << "Processing " << info.engine_name;
for (const auto& conn : info.connections) {
if (conn.is_control_edge()) {
if (!conn.is_input_edge) continue;
Node* input_node = graph->FindNodeId(conn.outside_id);
int port = Graph::kControlSlot;
if (!input_node) {
UpdateToEngineNode(infos, pos, *engine_nodes, true,
conn.outside_node_name, &input_node, &port);
QCHECK_EQ(Graph::kControlSlot, port);
}
if (!control_input_names.insert(input_node->name()).second) {
continue;
}
control_input_nodes.push_back(input_node);
VLOG(1) << "Engine Control Input " << input_node->name() << " -> "
<< info.engine_name;
} else {
if (!conn.is_input_edge) {
tensorflow::TensorShapeProto out_shape = ComputeTRTNodeIOShape(
output_shapes,
output_shape_protos,
conn.inside_shape,
conn.port_number);
output_shape_protos.at(conn.port_number) = out_shape;
output_shapes.at(conn.port_number) = conn.inside_shape;
if (out_types.size() <= conn.port_number) {
out_types.resize(conn.port_number + 1);
}
out_types.at(conn.port_number) = conn.connection_type;
VLOG(2) << "Collected output shape "
<< output_shape_protos.at(conn.port_number).DebugString();
} else {
tensorflow::TensorShapeProto in_shape = ComputeTRTNodeIOShape(
input_shapes,
input_shape_protos,
conn.outside_shape,
conn.port_number);
input_shape_protos.at(conn.port_number) = in_shape;
input_shapes.at(conn.port_number) = conn.outside_shape;
if (params.use_implicit_batch &&
info.engine_type == EngineInfo::EngineType::TRTStatic) {
for (int i = 1; i < conn.outside_shape.dims(); i++) {
if (conn.outside_shape.dim_size(i) <= 0) {
return errors::Internal(
"Not fully defined input shape when in static mode which "
"should have been excluded by the segmenter. ");
}
}
}
Node* input_node = graph->FindNodeId(conn.outside_id);
int port = conn.outside_port;
if (!input_node) {
UpdateToEngineNode(infos, pos, *engine_nodes, true,
conn.outside_node_name, &input_node, &port);
}
if (std::find_if(
std::begin(inputs), std::end(inputs),
[input_node, &port](const NodeDefBuilder::NodeOut& inp) {
return inp.node == input_node->name() && inp.index == port;
}) == std::end(inputs)) {
inputs.emplace_back(input_node->name(), port, conn.connection_type);
input_nodes.push_back(CHECK_NOTNULL(input_node));
VLOG(1) << "Engine Input " << input_node->name() << ":" << port
<< " -> " << info.engine_name << ":" << inputs.size() - 1;
}
}
}
}
if (inputs.empty()) {
return errors::Internal(
"Segment has no inputs (possible constfold failure)");
}
string segment_string;
int max_batch_size = info.max_batch_size.has_value()
? info.max_batch_size.value()
: default_max_batch_size;
if (info.engine_type == EngineInfo::EngineType::TRTStatic) {
TF_RETURN_IF_ERROR(CreateStaticEngine(params, info, max_batch_size,
input_shapes, nullptr,
&segment_string, cluster));
}
string prec_string;
TF_RETURN_IF_ERROR(TrtPrecisionModeToName(info.precision_mode, &prec_string));
NodeDefBuilder node_builder(info.engine_name, "TRTEngineOp");
if (!info.device.empty()) node_builder.Device(info.device);
if (VLOG_IS_ON(1)) {
string ins = StrCat(info.engine_name, " inputs= ");
for (const auto& ii : inputs) {
StrAppend(&ins, ii.node, ":", ii.index, " ");
}
VLOG(1) << ins;
}
node_builder.Input(inputs);
for (const string& c : control_input_names) {
node_builder.ControlInput(c);
}
NodeDef trt_node;
NameAttrList function;
function.set_name(StrCat(info.engine_name, "_native_segment"));
node_builder.Attr("input_shapes", input_shape_protos)
.Attr("output_shapes", output_shape_protos)
.Attr("static_engine",
info.engine_type == EngineInfo::EngineType::TRTStatic)
.Attr("segment_func", function)
.Attr("serialized_segment", segment_string)
.Attr("calibration_data", "")
.Attr("max_cached_engines_count", info.maximum_cached_engines)
.Attr("workspace_size_bytes", info.max_workspace_size_bytes)
.Attr("max_batch_size", max_batch_size)
.Attr("precision_mode", prec_string)
.Attr("use_calibration", info.use_calibration)
.Attr("_use_implicit_batch", params.use_implicit_batch)
.Attr("use_explicit_precision", params.use_explicit_precision)
.Attr("_allow_build_at_runtime", info.allow_build_at_runtime)
.Attr("OutT", out_types);
if (!params.use_implicit_batch) {
node_builder.Attr("profile_strategy",
ProfileStrategyToName(params.profile_strategy));
}
Status status = node_builder.Finalize(&trt_node);
if (!status.ok()) {
LOG(ERROR) << "Node construction failed with" << status;
return status;
}
VLOG(1) << "Adding TRTEngine " << info.engine_name << " to graph";
TF_ASSIGN_OR_RETURN(Node * engine_node, graph->AddNode(trt_node));
(*engine_nodes)[pos] = engine_node;
for (const auto in : control_input_nodes) {
VLOG(1) << "Connecting control edge from " << in->name() << " to "
<< engine_node->name();
graph->AddControlEdge(in, engine_node);
}
VLOG(1) << "input_nodes size = " << input_nodes.size();
for (int i = 0; i < input_nodes.size(); ++i) {
Node* n = CHECK_NOTNULL(input_nodes[i]);
const auto& in = inputs[i];
VLOG(1) << "Connecting data edge from " << n->name() << ":" << in.index
<< " to " << engine_node->name() << ":" << i;
graph->AddEdge(n, in.index, engine_node, i);
}
for (auto& conn : info.connections) {
if (conn.is_input_edge) {
continue;
}
Node* output_node = graph->FindNodeId(conn.outside_id);
int port = conn.outside_port;
if (!output_node) {
UpdateToEngineNode(infos, pos, *engine_nodes, false,
conn.outside_node_name, &output_node, &port);
}
if (conn.is_control_edge()) {
VLOG(1) << "Updating control edge from " << engine_node->name() << " to "
<< output_node->name();
QCHECK_EQ(Graph::kControlSlot, port);
graph->AddControlEdge(engine_node, output_node);
} else {
VLOG(1) << "Updating data edge from " << engine_node->name() << ":"
<< conn.port_number << " to " << output_node->name() << ":"
<< port;
TF_CHECK_OK(
graph->UpdateEdge(engine_node, conn.port_number, output_node, port));
}
}
return OkStatus();
}
int64 GetNextGraphSequenceNumber() {
static std::atomic<int64_t> graph_sequence_num;
return graph_sequence_num++;
}
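// The sequence number above feeds the engine name prefix built in
// ConvertGraph() below: the first converted graph yields the prefix
// "TRTEngineOp_000_", and each segment appends its own zero-padded index,
// e.g. "TRTEngineOp_000_001" for segment 1. The paired convert_graph test
// strips the graph sequence number with a regex before comparing names.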
constexpr char kCastInputTypeAttrName[] = "SrcT";
Status MaybeRewriteCastToFp32(GraphDef* graph_def, NodeDef* node_def) {
if (node_def->op() != "Cast") {
return OkStatus();
}
DataTypeVector input_types;
DataTypeVector output_types;
TF_RETURN_IF_ERROR(
graph_transforms::GetInOutTypes(*node_def, &input_types, &output_types));
if (input_types.size() != 1 || output_types.size() != 1) {
return errors::Internal("Bad cast operation");
}
if (input_types[0] == DT_HALF || output_types[0] != DT_FLOAT) {
return OkStatus();
}
VLOG(2) << "Rewriting cast to FP32 " << node_def->DebugString();
NodeDef* castToFp16 = graph_def->add_node();
for (auto attr_value : node_def->attr()) {
(*castToFp16->mutable_attr())[attr_value.first] = attr_value.second;
}
castToFp16->set_name(node_def->name() + "_split");
castToFp16->set_op("Cast");
castToFp16->set_device(node_def->device());
castToFp16->add_input(node_def->input(0));
(*castToFp16->mutable_attr())[kCastOutputTypeAttrName].set_type(DT_HALF);
node_def->set_input(0, castToFp16->name() + ":0");
(*node_def->mutable_attr())[kCastInputTypeAttrName].set_type(DT_HALF);
VLOG(2) << castToFp16->DebugString();
VLOG(2) << node_def->DebugString();
return OkStatus();
}
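// Illustration of the rewrite above (assumed dtypes, hypothetical node name
// "c"), applied when converting in FP16 mode to a Cast producing DT_FLOAT
// from anything other than DT_HALF:
//
//   before:  x (DT_DOUBLE) -> Cast[SrcT=DT_DOUBLE, DstT=DT_FLOAT] "c"
//   after:   x -> Cast[SrcT=DT_DOUBLE, DstT=DT_HALF]  "c_split"
//              -> Cast[SrcT=DT_HALF,   DstT=DT_FLOAT] "c"
//
// A plausible reading: splitting at an fp16 boundary lets the first Cast and
// the fp16 tensor fall inside a TensorRT segment while preserving the fp32
// output contract of the original node.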
}
Status RegisterGraphToFunctionLibrary(const GraphDef& segment_graph_def,
Graph* graph, const string& engine_name) {
Graph segment_graph(graph->flib_def());
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(GraphConstructorOptions(),
segment_graph_def, &segment_graph));
FunctionDefLibrary library;
auto segment_func = library.add_function();
TF_RETURN_IF_ERROR(GraphToFunctionDef(
segment_graph, StrCat(engine_name, "_native_segment"), segment_func));
if (VLOG_IS_ON(7)) {
VLOG(7) << engine_name << " Function_Def ";
VLOG(7) << segment_func->DebugString();
}
VLOG(1) << "Adding funcdef " << segment_func->signature().name()
<< " to graphlib";
TF_RETURN_IF_ERROR(graph->AddFunctionLibrary(library));
return OkStatus();
}
std::pair<int, Allocator*> GetDeviceAndAllocator(
const grappler::Cluster* cluster, const EngineInfo& engine) {
int cuda_device_id = -1;
Allocator* dev_allocator = nullptr;
if (cluster == nullptr || cluster->GetDeviceSet() == nullptr ||
engine.device.empty()) {
TfDeviceId tf_device_id;
PlatformDeviceId platform_device_id;
std::tie(tf_device_id, platform_device_id) = GetFirstValidDeviceId();
cuda_device_id = platform_device_id.value();
if (cuda_device_id >= 0) {
GPUOptions gpu_options;
dev_allocator = GPUProcessState::singleton()->GetGPUAllocator(
gpu_options, tf_device_id, 1, {});
}
return std::make_pair(cuda_device_id, dev_allocator);
}
auto device_set = cluster->GetDeviceSet();
std::vector<Device*> devices;
DeviceNameUtils::ParsedName parsed_name;
if (DeviceNameUtils::ParseFullName(engine.device, &parsed_name) &&
parsed_name.has_id) {
device_set->FindMatchingDevices(parsed_name, &devices);
}
if (!devices.empty()) {
if (devices.size() > 1) {
string msg = "Found multiple matching devices using name '";
StrAppend(&msg, engine.device, "': ");
for (auto d : devices) StrAppend(&msg, d->name(), ", ");
StrAppend(&msg, ". Will get the allocator from first one.");
LOG_WARNING_WITH_PREFIX << msg;
}
AllocatorAttributes alloc_attr;
cuda_device_id = devices[0]->tensorflow_accelerator_device_info()->gpu_id;
dev_allocator = devices[0]->GetAllocator(alloc_attr);
VLOG(1) << "Using allocator " << dev_allocator->Name()
<< " and cuda_device_id " << cuda_device_id;
} else {
LOG_WARNING_WITH_PREFIX << "Cluster is set but device '" << engine.device
<< "' is not found in the cluster";
}
return std::make_pair(cuda_device_id, dev_allocator);
}
Status CreateStaticEngine(const TRTOptimizationPass::ConversionParams& params,
const EngineInfo& info, int max_batch_size,
const std::vector<PartialTensorShape>& input_shapes,
TrtShapeOptimizationProfile* profile,
string* segment_string, grappler::Cluster* cluster) {
std::pair<int, Allocator*> device_allocator =
GetDeviceAndAllocator(cluster, info);
int cuda_device_id = 0;
std::unique_ptr<TRTBaseAllocator> trt_allocator;
if (device_allocator.first >= 0) {
cuda_device_id = device_allocator.first;
trt_allocator.reset(new TRTDeviceAllocator(device_allocator.second));
} else {
LOG_WARNING_WITH_PREFIX << "Can't identify the cuda device. Running on "
"device 0 and use cudamalloc as an allocator";
}
cudaSetDevice(cuda_device_id);
auto trt_logger = GetLoggerRegistry()->LookUp(params.trt_logger_name);
const bool calibrate_int8 =
(info.precision_mode == TrtPrecisionMode::INT8 && info.use_calibration);
TrtUniquePtrType<nvinfer1::ICudaEngine> engine;
TF_RETURN_IF_ERROR(ConvertGraphDefToEngine(
info.segment_graph_def, nullptr,
calibrate_int8 ? TrtPrecisionMode::FP32 : info.precision_mode,
max_batch_size, info.max_workspace_size_bytes, input_shapes, trt_logger,
trt_allocator.get(), nullptr, &engine,
info.use_calibration, params.use_implicit_batch,
nullptr, profile, info.engine_name,
params.use_explicit_precision, cluster));
TrtUniquePtrType<nvinfer1::IHostMemory> engine_data(engine->serialize());
*segment_string = string(static_cast<const char*>(engine_data->data()),
engine_data->size());
return OkStatus();
}
Status ConvertGraph(const TRTOptimizationPass::ConversionParams& params,
grappler::GrapplerItem& grappler_item,
const std::vector<string>& input_output_names,
grappler::Cluster* cluster, GraphDef* output) {
  TRT_ENSURE(output != nullptr);
if (params.precision_mode != TrtPrecisionMode::INT8 &&
params.use_calibration) {
return errors::InvalidArgument(
"Calibration with FP32 or FP16 is not supported.");
}
GraphDef& graph_def = grappler_item.graph;
if (params.precision_mode == TrtPrecisionMode::FP16) {
for (int i = 0; i < graph_def.node_size(); i++) {
NodeDef* node_def = graph_def.mutable_node(i);
TF_RETURN_IF_ERROR(MaybeRewriteCastToFp32(&graph_def, node_def));
}
}
grappler::GraphProperties static_graph_properties(grappler_item);
TF_RETURN_IF_ERROR(static_graph_properties.InferStatically(true));
FunctionLibraryDefinition flib(OpRegistry::Global(), graph_def.library());
Graph graph(flib);
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(GraphConstructorOptions(), graph_def, &graph));
segment::SegmentOptions segment_options;
for (const auto& node : input_output_names) {
segment_options.exclude_node_list.insert(node);
}
segment_options.minimum_segment_size = params.minimum_segment_size;
segment_options.use_implicit_batch = params.use_implicit_batch;
if (segment_options.use_implicit_batch)
segment_options.maximum_batch_size = params.max_batch_size;
segment_options.allow_dynamic_non_batch_dim =
AllowDynamicNonBatchDimension(params);
segment::SegmentVector initial_segments;
TrtNodeValidator validator(static_graph_properties, params.precision_mode,
params.use_calibration, params.use_implicit_batch,
params.use_explicit_precision);
TF_RETURN_IF_ERROR(segment::SegmentGraph(
&graph,
&static_graph_properties,
std::bind(&TrtNodeValidator::IsTensorRTCandidate, &validator,
std::placeholders::_1),
[](const Edge* edge) { return true; },
OutputEdgeValidator(),
segment_options,
&initial_segments));
LOG(INFO) << "Number of TensorRT candidate segments: "
<< initial_segments.size();
std::unordered_map<string, Node*> node_map;
TF_RETURN_IF_ERROR(BuildNodeMap(graph, &node_map));
std::vector<EngineInfo> engine_segments;
engine_segments.reserve(initial_segments.size());
std::vector<Node*> reverse_topo_order;
GetPostOrder(graph, &reverse_topo_order);
segment::SegmentVector converted_segments;
converted_segments.reserve(initial_segments.size());
string engine_name_prefix =
StrCat("TRTEngineOp_",
absl::StrFormat("%0*d", 3, GetNextGraphSequenceNumber()), "_");
for (size_t t = 0; t < initial_segments.size(); t++) {
auto& curr_segment = initial_segments.at(t);
EngineInfo curr_engine;
curr_engine.engine_name =
StrCat(engine_name_prefix, absl::StrFormat("%0*d", 3, t));
bool int8_no_calib = (!params.use_calibration &&
params.precision_mode == TrtPrecisionMode::INT8);
bool has_qdq = false;
if (int8_no_calib) {
has_qdq = absl::c_any_of(reverse_topo_order, IsQuantizeAndDequantizeOp);
}
Status status = GetEngineInfo(&graph, static_graph_properties, curr_segment,
reverse_topo_order, &curr_engine);
if (!status.ok()) {
LOG_WARNING_WITH_PREFIX << "Failed to get engine info for segment " << t
<< ": " << status;
continue;
}
curr_engine.engine_type = GetEngineType(params);
curr_engine.use_calibration = params.use_calibration;
if (int8_no_calib && !has_qdq) {
LOG(WARNING) << "Set engine precision to FP16 due to missing QDQ OP";
curr_engine.precision_mode = TrtPrecisionMode::FP16;
} else {
curr_engine.precision_mode = params.precision_mode;
}
curr_engine.maximum_cached_engines = params.max_cached_engines;
curr_engine.allow_build_at_runtime = params.allow_build_at_runtime;
if (!curr_engine.max_batch_size.has_value()) {
curr_engine.max_batch_size = params.max_batch_size;
}
status = RegisterGraphToFunctionLibrary(curr_engine.segment_graph_def,
&graph, curr_engine.engine_name);
if (!status.ok()) {
LOG_WARNING_WITH_PREFIX
<< "Failed to register segment graphdef to the library " << t << ": "
<< status;
continue;
}
engine_segments.push_back(std::move(curr_engine));
converted_segments.push_back(std::move(curr_segment));
if (VLOG_IS_ON(8)) {
string fname = engine_segments.back().engine_name;
StrAppend(&fname, ".pb");
std::fstream f;
f.open(fname.c_str(), std::fstream::out | std::fstream::binary);
f << engine_segments.at(t).segment_graph_def.SerializeAsString();
f.close();
}
}
std::optional<int> old_cuda_device = std::nullopt;
if (!params.is_dynamic_op) {
int cuda_device_id;
cudaError_t cuda_error = cudaGetDevice(&cuda_device_id);
if (cuda_error != cudaSuccess) {
LOG_WARNING_WITH_PREFIX << "Couldn't get current device: "
<< cudaGetErrorString(cuda_error);
} else {
VLOG(1) << "Current cuda device is " << cuda_device_id;
old_cuda_device = cuda_device_id;
}
}
auto restore_cuda_device = gtl::MakeCleanup([old_cuda_device] {
if (old_cuda_device.has_value()) {
cudaSetDevice(old_cuda_device.value());
}
});
std::vector<Node*> engine_nodes;
engine_nodes.resize(engine_segments.size());
for (int i = 0; i < engine_segments.size(); ++i) {
auto& engine = engine_segments.at(i);
engine.max_workspace_size_bytes = params.max_workspace_size_bytes;
VLOG(1) << "Assigned " << engine.max_workspace_size_bytes << " bytes to "
<< engine.engine_name;
auto status =
CreateTRTNode(params, engine_segments, i, params.max_batch_size, &graph,
&engine_nodes, cluster);
string msg = StrCat("segment ", i, " consisting of ",
converted_segments.at(i).nodes.size(), " nodes by ",
engine.engine_name);
if (status.ok()) {
LOG(INFO) << "Replaced " << msg << ".";
} else {
LOG_WARNING_WITH_PREFIX << "Cannot replace " << msg
<< " reason: " << status.message()
<< " (keeping original segment).";
}
if (VLOG_IS_ON(1)) {
msg = "Segment consists of nodes: ";
for (const Node* node : converted_segments.at(i).nodes) {
StrAppend(&msg, node->name(), ", ");
}
VLOG(1) << msg;
}
if (status.ok()) {
for (const Node* node : converted_segments.at(i).nodes) {
graph.RemoveNode(const_cast<Node*>(node));
}
}
}
graph.ToGraphDef(output);
return OkStatus();
}
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/convert/convert_graph.h"
#include <regex>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace convert {
class FakeCluster : public grappler::Cluster {
public:
FakeCluster() : Cluster(0) {}
void SetDeviceSet(const DeviceSet* device_set) { device_set_ = device_set; }
const DeviceSet* GetDeviceSet() const override { return device_set_; }
string type() const override { return ""; }
Status Provision() override { return OkStatus(); }
Status Initialize(const grappler::GrapplerItem& item) override {
return OkStatus();
}
Status Run(const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* metadata) override {
return OkStatus();
}
private:
const DeviceSet* device_set_ = nullptr;
};
TEST(GetDeviceAndAllocatorTest, GetDeviceAndAllocator) {
TRTOptimizationPass::ConversionParams params;
EngineInfo engine_info;
{
auto result = GetDeviceAndAllocator(nullptr, engine_info);
EXPECT_EQ(-1, result.first);
EXPECT_EQ(nullptr, result.second);
}
SessionOptions options;
ConfigProto* config = &options.config;
GPUOptions* gpu_options = config->mutable_gpu_options();
auto virtual_devices =
gpu_options->mutable_experimental()->add_virtual_devices();
virtual_devices->add_memory_limit_mb(200);
virtual_devices->add_memory_limit_mb(200);
std::unique_ptr<Session> session(NewSession(options));
{
auto result = GetDeviceAndAllocator(nullptr, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_0_bfc", result.second->Name());
}
FakeCluster cluster;
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_0_bfc", result.second->Name());
}
DeviceSet device_set;
const DeviceMgr* device_mgr = nullptr;
TF_ASSERT_OK(session->LocalDeviceManager(&device_mgr));
for (auto d : device_mgr->ListDevices()) {
device_set.AddDevice(d);
}
cluster.SetDeviceSet(&device_set);
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_0_bfc", result.second->Name());
}
engine_info.device = "/GPU:1";
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(0, result.first);
EXPECT_NE(nullptr, result.second);
EXPECT_EQ("GPU_1_bfc", result.second->Name());
}
engine_info.device = "/GPU:3";
{
auto result = GetDeviceAndAllocator(&cluster, engine_info);
EXPECT_EQ(-1, result.first);
EXPECT_EQ(nullptr, result.second);
}
}
class ConvertGraphTest : public ::testing::Test {
public:
Status RunConvertGraph(Scope s, GraphDef* output_graph_def,
int maximum_batch_size = 1000) {
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
const std::vector<string> input_output_names{"output"};
TRTOptimizationPass::ConversionParams params;
params.max_batch_size = maximum_batch_size;
params.max_workspace_size_bytes = 8 << 20;
params.minimum_segment_size = 1;
params.use_calibration = false;
params.trt_logger_name = "DefaultLogger";
return ConvertGraph(params, item, input_output_names, nullptr,
output_graph_def);
}
};
TEST_F(ConvertGraphTest, DirectlyConnectedEngines) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 1}));
auto segment_root_1 = ops::Identity(s.WithOpName("segment_root_b"), input);
auto add1 = ops::Add(s.WithOpName("add1"), segment_root_1, segment_root_1);
auto incompatible =
ops::Reshape(s.WithOpName("reshape1"), add1, Input({1, 2}));
incompatible =
ops::Reshape(s.WithOpName("reshape2"), incompatible, Input({2, 1}));
auto add2 = ops::Add(s.WithOpName("add2"), incompatible, add1);
auto segment_root_2 = ops::Identity(s.WithOpName("segment_root_a"), add1);
auto add3 = ops::Add(s.WithOpName("add3"), add2, segment_root_2);
ops::Identity(s.WithOpName("output"), add3);
GraphDef output_graph_def;
TF_EXPECT_OK(RunConvertGraph(s, &output_graph_def));
auto remove_graph_sequence_number = [](std::string node_name) {
const std::regex pattern("TRTEngineOp_[0-9]+_");
return std::regex_replace(node_name, pattern, "TRTEngineOp_");
};
int num_trt_ops = 0;
for (const NodeDef& node : output_graph_def.node()) {
std::string node_name = node.name();
if (node.op() != "TRTEngineOp") continue;
node_name = remove_graph_sequence_number(node_name);
if (node_name == "TRTEngineOp_001") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("input", node.input(0));
++num_trt_ops;
} else if (node_name == "TRTEngineOp_000") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("TRTEngineOp_001", remove_graph_sequence_number(node.input(0)));
EXPECT_EQ("reshape2", node.input(1));
++num_trt_ops;
}
}
EXPECT_EQ(2, num_trt_ops);
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/convert_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/convert_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
662c17fd-2323-497a-ba1c-0d24992c1432 | cpp | tensorflow/tensorflow | quantization_ops | tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.cc | tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "absl/strings/str_format.h"
#include "tensorflow/cc/ops
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/weights.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
bool IsQuantizeAndDequantizeOp(const Node* node) {
return absl::c_find(kQuantizationOpNames, node->def().op()) !=
kQuantizationOpNames.end();
}
namespace {
template <typename T>
QuantizationScales<T, 1> ComputeQuantizationRange(bool signed_input,
int num_bits,
bool narrow_range,
T* min_range, T* max_range) {
const int64_t min_quantized =
signed_input ? narrow_range ? -(1ULL << (num_bits - 1)) + 1
: -(1ULL << (num_bits - 1))
: 0;
const int64_t max_quantized =
signed_input ? (1ULL << (num_bits - 1)) - 1 : (1ULL << num_bits) - 1;
const T scale_from_min_side = (min_quantized * *min_range > 0)
? min_quantized / *min_range
: std::numeric_limits<T>::max();
const T scale_from_max_side = (max_quantized * *max_range > 0)
? max_quantized / *max_range
: std::numeric_limits<T>::max();
QuantizationScales<T, 1> scales;
if (scale_from_min_side < scale_from_max_side) {
scales.quantize_scale[0] = scale_from_min_side;
scales.dequantize_scale[0] = *min_range / min_quantized;
*max_range = max_quantized * scales.dequantize_scale[0];
} else {
scales.quantize_scale[0] = scale_from_max_side;
scales.dequantize_scale[0] = *max_range / max_quantized;
*min_range = min_quantized * scales.dequantize_scale[0];
}
return scales;
}
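// Worked example for ComputeQuantizationRange (illustrative values):
// signed_input=true, num_bits=8, narrow_range=false, *min_range=-1.0f,
// *max_range=1.0f gives min_quantized=-128 and max_quantized=127, so
// scale_from_min_side=128 and scale_from_max_side=127. The max side is the
// smaller scale, so quantize_scale=127, dequantize_scale=1/127, and
// *min_range is rewritten to -128/127 (~ -1.0079), i.e. the float value that
// the most negative quantized code maps back to.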
StatusOr<nvinfer1::ITensor*> ExplicitQDQInputToTensor(
TRTNetworkBuilder* builder, const OpConverterParams* params,
const TRT_TensorOrWeights& input) {
if (input.is_tensor()) {
return input.tensor()->trt_tensor();
}
if (!IS_TRT_VERSION_GE(8, 0, 0, 0) && input.weights().count() > 1) {
LOG(WARNING) << absl::StrCat(
"QDQ per-channel for weights not "
"implemented, assuming uniform scaling");
}
TRT_ShapedWeights trt_weights = input.weights();
StatusOr<nvinfer1::IConstantLayer*> weights_const =
builder->WeightsToConstant(trt_weights.GetTrtWeights(),
trt_weights.Shape());
TRT_ENSURE_PTR_OK(weights_const);
params->converter->SetLayerName(*weights_const, params->node_def, "const");
nvinfer1::ITensor* qdq_input = (*weights_const)->getOutput(0);
std::string name = absl::StrCat((*weights_const)->getName(), "_output");
qdq_input->setName(name.c_str());
return qdq_input;
}
}
template <typename T>
struct QDQOpSpec {};
template <>
struct QDQOpSpec<ops::QuantizeAndDequantizeV2> {
static constexpr std::array<InputArgSpec, 3> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("input_min", TrtInputArg::kWeight),
InputArgSpec::Create("input_max", TrtInputArg::kWeight),
};
}
struct Attrs {
float min_range;
float max_range;
bool narrow_range;
std::string round_mode;
UniformQuantizationScales scales;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "round_mode", &args->round_mode));
if (args->round_mode != "HALF_TO_EVEN") {
LOG(WARNING) << node_def.op() << ": " << node_def.name()
<< " has round_mode=" << args->round_mode
<< ", but for TensorRT conversion, "
"round_mode=HALF_TO_EVEN is recommended.";
}
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "narrow_range", &args->narrow_range));
if (args->narrow_range) {
LOG(WARNING) << node_def.op() << ": " << node_def.name()
<< " has narrow_range=true, but for TensorRT conversion, "
"narrow_range=false is recommended.";
}
args->min_range = inputs.at(1).weights().template GetPointer<float>()[0];
args->max_range = inputs.at(2).weights().template GetPointer<float>()[0];
const int num_bits = 8;
args->scales = ComputeQuantizationRange<float>(
true, num_bits, args->narrow_range, &args->min_range,
&args->max_range);
TRT_ENSURE(args->scales.dequantize_scale[0] != 0);
TRT_ENSURE(args->scales.quantize_scale[0] != 0);
return OkStatus();
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
const auto& node_def = params->node_def;
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
StatusOr<nvinfer1::ITensor*> qdq_input =
      ExplicitQDQInputToTensor(&*builder, params, params->inputs.at(0));
TRT_ENSURE_PTR_OK(qdq_input);
const int required_dims = params->use_implicit_batch ? 3 : 4;
const nvinfer1::Dims idims = (*qdq_input)->getDimensions();
nvinfer1::Dims intermediate_dims = idims;
TRT_ENSURE(idims.nbDims > 0);
if (idims.nbDims < required_dims) {
const int nb_extra_dims = required_dims - idims.nbDims;
intermediate_dims.nbDims = required_dims;
std::vector<int> ones(nb_extra_dims, 1);
TRT_ENSURE(ones.size() == nb_extra_dims && nb_extra_dims > 0);
if (!params->use_implicit_batch) {
intermediate_dims.d[0] = idims.d[0];
std::copy(ones.begin(), ones.end(), intermediate_dims.d + 1);
std::copy_n(idims.d + 1, idims.nbDims - 1,
intermediate_dims.d + ones.size() + 1);
} else {
std::copy(ones.begin(), ones.end(), intermediate_dims.d);
std::copy_n(idims.d, idims.nbDims, intermediate_dims.d + ones.size());
}
LOG(WARNING) << absl::StrCat(
node_def.name(), ":", node_def.op(), ": tensor ",
(*qdq_input)->getName(), " has shape ", DebugString(idims),
" but TRT scale layer requires at least 3 dims excluding batch dim, "
"trying to recover by inserting 1's to create shape ",
DebugString(intermediate_dims));
StatusOr<nvinfer1::IShuffleLayer*> reshape =
builder->Reshape(*qdq_input, intermediate_dims);
TRT_ENSURE_PTR_OK(reshape);
*qdq_input = (*reshape)->getOutput(0);
}
VLOG(1) << "[ExplicitPrecision]" << node_def.op() << ": " << node_def.name()
<< " computed scales: " << args.scales << " from min/max ranges "
<< args.min_range << "/" << args.max_range;
StatusOr<nvinfer1::ILayer*> qdq =
builder->UniformQuantizeDequantizeExplicit(
*qdq_input, args.scales.quantize_scale[0],
args.scales.dequantize_scale[0], node_def.name());
TRT_ENSURE_PTR_OK(qdq);
ITensorProxyPtr final_output = (*qdq)->getOutput(0);
if (idims.nbDims != intermediate_dims.nbDims) {
StatusOr<nvinfer1::IShuffleLayer*> undo_reshape =
builder->Reshape(*qdq_input, idims);
TRT_ENSURE_PTR_OK(undo_reshape);
final_output = (*undo_reshape)->getOutput(0);
}
params->outputs->push_back(final_output);
return OkStatus();
}
};
template <>
struct QDQOpSpec<ops::QuantizeAndDequantizeV3> {
static constexpr std::array<InputArgSpec, 4> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("min", TrtInputArg::kWeight),
InputArgSpec::Create("max", TrtInputArg::kWeight),
InputArgSpec::Create("num_bits", TrtInputArg::kWeight),
};
}
using Attrs = QDQOpSpec<ops::QuantizeAndDequantizeV2>::Attrs;
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return QDQOpSpec<
ops::QuantizeAndDequantizeV2>::ValidateQDQForExplicitPrecision(inputs,
node_def,
args);
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return QDQOpSpec<ops::QuantizeAndDequantizeV2>::ConvertExplicit(params,
args);
}
};
template <>
struct QDQOpSpec<ops::FakeQuantWithMinMaxVars> {
static constexpr std::array<InputArgSpec, 3> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("min", TrtInputArg::kWeight),
InputArgSpec::Create("max", TrtInputArg::kWeight),
};
}
struct Attrs {
int num_bits;
bool narrow_range;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return errors::Unimplemented("");
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return errors::Unimplemented("");
}
};
template <>
struct QDQOpSpec<ops::FakeQuantWithMinMaxArgs> {
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
};
}
struct Attrs {
float min;
float max;
int num_bits;
bool narrow_range;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return errors::Unimplemented("");
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return errors::Unimplemented("");
}
};
Status ConvertDynamicRangeMode(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
float min_range = 0.0f;
float max_range = 0.0f;
const auto& op_name = node_def.op();
if (op_name == "FakeQuantWithMinMaxArgs") {
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "min", &min_range));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "max", &max_range));
} else if (op_name == "FakeQuantWithMinMaxVars" ||
op_name == "QuantizeAndDequantizeV2" ||
op_name == "QuantizeAndDequantizeV3") {
auto get_weights_value = [&inputs](int index) {
const auto* raw_weights = inputs.at(index).weights().GetPointer<float>();
return raw_weights[0];
};
min_range = get_weights_value(1);
max_range = get_weights_value(2);
} else {
return errors::InvalidArgument("Unknown quantization op ", op_name, ", at ",
node_def.name());
}
if (params->validation_only) {
return OkStatus();
}
ITensorProxyPtr input0 = inputs.at(0).tensor();
params->converter->ProvideQuantizationRange(&input0, min_range, max_range);
params->outputs->push_back(inputs.at(0));
return OkStatus();
}
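// Note: in dynamic-range (non-explicit-precision) mode the QDQ op is a
// pass-through -- the first input is forwarded unchanged and only the
// [min_range, max_range] interval is handed to ProvideQuantizationRange(),
// which the converter can later use to set the tensor's INT8 dynamic range.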
template <typename TFOpType>
class ConvertQDQ : public OpConverterBase<ConvertQDQ<TFOpType>> {
public:
explicit ConvertQDQ(const OpConverterParams* params)
: OpConverterBase<ConvertQDQ<TFOpType>>(params) {}
static constexpr auto InputSpec() { return QDQOpSpec<TFOpType>::InputSpec(); }
static constexpr const char* NodeDefDataTypeAttributeName() { return ""; }
Status ValidateDynamicRangeINT8Mode() {
if (this->params_->validation_only) {
return ConvertDynamicRangeMode(this->params_);
}
return OkStatus();
}
Status Validate() {
if (!this->params_->use_explicit_precision) {
return ValidateDynamicRangeINT8Mode();
}
return OpSpec::ValidateQDQForExplicitPrecision(
this->params_->inputs, this->params_->node_def, &attrs_);
}
Status Convert() {
if (!this->params_->use_explicit_precision) {
return ConvertDynamicRangeMode(this->params_);
}
return OpSpec::ConvertExplicit(this->params_, attrs_);
}
using OpSpec = QDQOpSpec<TFOpType>;
using OpSpecAttrs = typename QDQOpSpec<TFOpType>::Attrs;
OpSpecAttrs attrs_;
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::QuantizeAndDequantizeV2>>(),
"QuantizeAndDequantizeV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::QuantizeAndDequantizeV3>>(),
"QuantizeAndDequantizeV3");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::FakeQuantWithMinMaxVars>>(),
"FakeQuantWithMinMaxVars");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::FakeQuantWithMinMaxArgs>>(),
"FakeQuantWithMinMaxArgs");
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/linalg_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/compiler/jit/shape_inference.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
namespace tensorflow {
namespace tensorrt {
namespace convert {
namespace ops = ::tensorflow::ops;
using ::tensorflow::testing::StatusIs;
namespace {
enum class ConvEpilogueType {
kNone,
kReLU,
kBatchNorm,
kReLUBatchnorm,
kBatchnormReLU
};
std::ostream& operator<<(std::ostream& os, ConvEpilogueType epilogue) {
switch (epilogue) {
case ConvEpilogueType::kNone:
return os << "None";
case ConvEpilogueType::kReLU:
return os << "ReLU only";
case ConvEpilogueType::kBatchNorm:
return os << "BatchNorm Only";
case ConvEpilogueType::kReLUBatchnorm:
return os << "ReLU+Batchnorm";
case ConvEpilogueType::kBatchnormReLU:
return os << "BatchNorm+ReLU";
}
  return os;
}
std::string DebugString(ConvEpilogueType epilogue) {
std::stringstream ss;
ss << epilogue;
return ss.str();
}
ops::Placeholder AddInput(Scope scope, int input_idx,
const std::string data_format,
std::array<int, 3> size_chw = {1, 3, 3}) {
PartialTensorShape input_shape;
if (data_format == "NCHW") {
input_shape =
PartialTensorShape({1, size_chw[0], size_chw[1], size_chw[2]});
} else if (data_format == "NHWC") {
input_shape =
PartialTensorShape({1, size_chw[1], size_chw[2], size_chw[0]});
} else if (data_format == "NHW") {
input_shape = PartialTensorShape({1, size_chw[1], size_chw[2]});
} else {
LOG(FATAL) << "Unknown input shape type " << data_format;
}
auto input_attrs = ops::Placeholder::Attrs().Shape(input_shape);
return ops::Placeholder(scope.WithOpName(absl::StrCat("input_", input_idx)),
DT_FLOAT, input_attrs);
}
Output AddQDQV2(Scope scope, Input input) {
auto input_min =
ops::Const<float>(scope.WithOpName("in_min"), -1.0f, TensorShape{});
auto input_max =
ops::Const<float>(scope.WithOpName("in_max"), 1.0f, TensorShape{});
return ops::QuantizeAndDequantizeV2(scope.WithOpName("qdq"), input, input_min,
input_max);
}
Output AddOutput(Scope scope, Output input, int idx, bool add_qdq) {
Output out = input;
if (add_qdq) {
out = AddQDQV2(scope, input);
}
return ops::Identity(scope.WithOpName(StrCat("output_", idx)), out);
}
Output AddConv2D(Scope scope, Input input, int in_channels, int out_channels,
std::array<int, 2> filter_size = {1, 1},
std::array<int, 2> stride = {1, 1},
const std::string& data_format = "NCHW", bool with_bias = true,
ConvEpilogueType epilogue = ConvEpilogueType::kBatchnormReLU,
bool qdq_on_output = false) {
auto weights_const = ops::Const(
scope.WithOpName("weights"), 1.0f,
TensorShape({filter_size[0], filter_size[1], in_channels, out_channels}));
auto conv_input =
!qdq_on_output ? AddQDQV2(scope.WithOpName("qdq_input"), input) : input;
Output result = ops::Conv2D(
scope.WithOpName("conv2d"), conv_input, AddQDQV2(scope, weights_const),
{1, 1, 1, 1},
"SAME", ops::Conv2D::Attrs().DataFormat(data_format));
if (with_bias) {
auto bias_const = ops::Const(scope.WithOpName("bias_weights"), 1.0f,
TensorShape({
out_channels,
}));
result = ops::BiasAdd(scope.WithOpName("bias"), result, bias_const,
ops::BiasAdd::Attrs().DataFormat(data_format));
}
auto add_bn = [scope, data_format](Input input,
const int channels) -> Output {
TensorShape constant_shape = TensorShape({channels});
auto bn_scale =
ops::Const(scope.WithOpName("bn_scale"), 1.0f, constant_shape);
auto bn_offset =
ops::Const(scope.WithOpName("bn_offset"), 1.0f, constant_shape);
auto bn_mean =
ops::Const(scope.WithOpName("bn_mean"), 0.1f, TensorShape({channels}));
auto bn_var =
ops::Const(scope.WithOpName("bn_var"), 1.0f, TensorShape({channels}));
Input conv_bn_input = IS_TRT_VERSION_GE(8, 0, 1, 0)
? input
: AddQDQV2(scope.WithOpName("qdq_input"), input);
return ops::FusedBatchNormV3(
scope.WithOpName("bn"), conv_bn_input, bn_scale, bn_offset,
bn_mean, bn_var,
ops::FusedBatchNormV3::Attrs().IsTraining(false).DataFormat(
data_format))
.y;
};
switch (epilogue) {
case ConvEpilogueType::kBatchNorm: {
result = add_bn(result, out_channels);
break;
}
case ConvEpilogueType::kReLU: {
result = ops::Relu(scope.WithOpName("relu"), result);
break;
}
case ConvEpilogueType::kReLUBatchnorm: {
result = ops::Relu(scope.WithOpName("relu"), result);
result = add_bn(result, out_channels);
break;
}
case ConvEpilogueType::kBatchnormReLU: {
result = add_bn(result, out_channels);
result = ops::Relu(scope.WithOpName("relu"), result);
break;
}
case ConvEpilogueType::kNone:
break;
}
if (qdq_on_output) {
result = AddQDQV2(scope.WithOpName("qdq_out"), result);
}
return result;
}
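// AddConv2D above builds the canonical explicit-QDQ test pattern: a QDQ pair
// on the convolution input and on the constant weights, an optional bias and
// batch-norm/ReLU epilogue, and (when qdq_on_output is set) a QDQ on the
// result instead of the input, so both placements get exercised.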
ops::BatchMatMulV2 AddMatMul(Scope scope, const std::string& name,
Input input) {
auto input_qdq = AddQDQV2(scope, input);
auto weights_const =
ops::Const(scope.WithOpName(name + "_weights"),
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f},
TensorShape({3, 3}));
auto weights_qdq = AddQDQV2(scope.WithOpName("weights_qdq"), weights_const);
return ops::BatchMatMulV2(scope.WithOpName(name), input_qdq, weights_qdq);
}
}
struct QDQTestOptions {
bool conv_has_bias{true};
std::string data_format{"NCHW"};
bool qdq_on_output{false};
bool final_qdq{true};
ConvEpilogueType conv_epilogue;
TfTrtConversionParams conversion_params{};
};
std::ostream& operator<<(std::ostream& os, const QDQTestOptions opts) {
return os << absl::StrCat(
"QDQTestOptions(conv_has_bias=",
static_cast<int>(opts.conv_has_bias),
", qdq_on_output=", static_cast<int>(opts.qdq_on_output),
", data_format=", opts.data_format,
", conv_epilogue=", DebugString(opts.conv_epilogue),
", final_qdq=", opts.final_qdq, ")");
}
std::vector<QDQTestOptions> EnumerateQDQTestOptions() {
std::vector<QDQTestOptions> result;
for (const absl::string_view data_format : {"NCHW", "NHWC"}) {
for (auto use_bias : {true, false}) {
for (auto qdq_on_output : {false, true}) {
for (auto final_qdq : {true, false}) {
for (auto conv_epilogue :
{ConvEpilogueType::kReLU, ConvEpilogueType::kNone,
ConvEpilogueType::kBatchnormReLU}) {
if (data_format == "NHWC" &&
              (conv_epilogue == ConvEpilogueType::kBatchnormReLU ||
               conv_epilogue == ConvEpilogueType::kBatchNorm ||
               conv_epilogue == ConvEpilogueType::kReLUBatchnorm)) {
continue;
}
QDQTestOptions opts{};
opts.conv_has_bias = use_bias;
opts.data_format = data_format;
opts.qdq_on_output = qdq_on_output;
opts.final_qdq = final_qdq;
opts.conv_epilogue = conv_epilogue;
result.push_back(opts);
}
}
}
}
}
return result;
}
class QDQExplicitTest : public ::testing::Test,
public ::testing::WithParamInterface<QDQTestOptions> {
public:
static StatusOr<PartialTensorShape> GetShape(const std::string& name,
const GraphShapeInfo& shapes) {
TRT_ENSURE(shapes.find(name) != shapes.end());
TRT_ENSURE(shapes.at(name).size() == 1);
return shapes.at(name)[0].shape;
}
StatusOr<MetaGraphDef> GetModel(const GraphDef& graph_def,
const std::vector<const NodeDef*>& inputs,
const std::vector<const NodeDef*>& outputs,
const GraphShapeInfo& shapes) {
TRT_ENSURE(!inputs.empty());
TRT_ENSURE(!outputs.empty());
MetaGraphDef out;
out.mutable_graph_def()->CopyFrom(graph_def);
SignatureDef signature_def;
auto& mutable_inputs = *signature_def.mutable_inputs();
for (int i = 0; i < inputs.size(); i++) {
std::string input_name = inputs[i]->name();
auto& input = mutable_inputs[input_name];
input.set_name(input_name);
input.set_dtype(DT_FLOAT);
TRT_ENSURE(shapes.find(input_name) != shapes.end());
TRT_ENSURE(shapes.at(input_name).size() == 1);
PartialTensorShape input_shape = shapes.at(input_name)[0].shape;
input_shape.AsProto(input.mutable_tensor_shape());
}
auto& mutable_outputs = *signature_def.mutable_outputs();
for (int i = 0; i < outputs.size(); i++) {
std::string output_name = outputs[i]->name();
auto& output = mutable_outputs[output_name];
output.set_name(output_name);
output.set_dtype(DT_FLOAT);
TRT_ENSURE(shapes.find(output_name) != shapes.end());
TRT_ENSURE(shapes.at(output_name).size() == 1);
PartialTensorShape output_shape = shapes.at(output_name)[0].shape;
output_shape.AsProto(output.mutable_tensor_shape());
}
(*out.mutable_signature_def())["serving_default"] = signature_def;
return out;
}
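  // Verifies that conversion produced exactly one TRTEngineOp carrying a
  // non-empty serialized static engine.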
static Status CheckTrtNode(const GraphDef& converted_graph_def) {
int n_trt_ops = 0;
string op_name{"TRTEngineOp"};
for (const auto& node : converted_graph_def.node()) {
if (op_name == node.op()) {
n_trt_ops++;
const auto& attr = node.attr();
TRT_ENSURE(attr.at("static_engine").b());
VLOG(2) << "Found serialized segment with size "
<< attr.at("serialized_segment").s().size();
TRT_ENSURE(!attr.at("serialized_segment").s().empty());
}
}
TRT_ENSURE(n_trt_ops == 1);
return OkStatus();
}
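  // Builds a MetaGraphDef from `scope` (inputs and outputs are identified by
  // the "input_"/"output_" name prefixes), runs TF-TRT conversion, and
  // validates the result with CheckTrtNode.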
Status ConvertAndRun(Scope* scope) {
std::vector<const NodeDef*> inputs;
std::vector<const NodeDef*> outputs;
GraphDef gdef;
TF_RETURN_IF_ERROR(scope->ToGraphDef(&gdef));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_RETURN_IF_ERROR(scope->ToGraph(graph.get()));
GraphShapeInfo shape_info;
TF_RETURN_IF_ERROR(InferShapes(graph.get(), {},
nullptr, &shape_info));
for (const NodeDef& node : gdef.node()) {
if (absl::StartsWith(node.name(), "input_")) {
inputs.push_back(&node);
} else if (absl::StartsWith(node.name(), "output_")) {
outputs.push_back(&node);
}
}
StatusOr<MetaGraphDef> meta_graph_def =
GetModel(gdef, inputs, outputs, shape_info);
TRT_ENSURE_OK(meta_graph_def);
std::vector<Tensor> input_tensors;
std::vector<std::string> input_names;
for (const auto& input : inputs) {
input_names.push_back(input->name());
StatusOr<PartialTensorShape> input_shape =
GetShape(input->name(), shape_info);
TRT_ENSURE_OK(input_shape);
TensorShape shape;
input_shape->AsTensorShape(&shape);
Tensor tensor(DT_FLOAT, shape);
test::FillIota(&tensor, 1.0f);
input_tensors.push_back(tensor);
}
std::vector<std::string> output_names;
for (const auto& output : outputs) {
output_names.push_back(output->name());
}
TfTrtConversionParams conversion_params;
conversion_params.allow_build_at_runtime = true;
conversion_params.precision_mode = TrtPrecisionMode::INT8;
conversion_params.use_calibration = false;
conversion_params.convert_to_static_engine = true;
TRT_ENSURE(input_names.size() == input_tensors.size());
StatusOr<GraphDef> converted_gdef = tensorrt::ConvertAndBuild(
meta_graph_def->graph_def(), input_names, output_names, {input_tensors},
conversion_params);
TRT_ENSURE_OK(converted_gdef);
return CheckTrtNode(*converted_gdef);
}
protected:
TfTrtConversionParams params_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
};
class TestQDQSuite : public QDQExplicitTest {};
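// TRT 7.x is expected to fail conversion when Q/DQ sits on convolution
// outputs or when no final Q/DQ precedes the graph output; TRT 8.x accepts
// those layouts but still reports NHWC batch-norm epilogues as unimplemented.
// The macros below encode these per-version expectations.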
#define EXPECT_QDQ_ON_OUTPUT_FAILURE(params, scope) \
if ((params).qdq_on_output) { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::INTERNAL)); \
return; \
}
#define EXPECT_NO_FINAL_QDQ_FAILURE(params, scope) \
if (!(params).final_qdq) { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::INTERNAL)); \
return; \
}
#define EXPECT_BUILD_OK(scope) TF_EXPECT_OK(ConvertAndRun(&(scope)))
#define POLICY_TRT7(params, scope) \
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) { \
EXPECT_QDQ_ON_OUTPUT_FAILURE(params, scope); \
EXPECT_NO_FINAL_QDQ_FAILURE(params, scope); \
EXPECT_BUILD_OK(scope); \
}
#define POLICY_TRT8(params, scope) \
if (IS_TRT_VERSION_GE(8, 0, 0, 0)) { \
if (((params).conv_epilogue == ConvEpilogueType::kBatchNorm || \
(params).conv_epilogue == ConvEpilogueType::kBatchnormReLU || \
(params).conv_epilogue == ConvEpilogueType::kReLUBatchnorm) && \
(params).data_format == "NHWC") { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::UNIMPLEMENTED)); \
return; \
} \
EXPECT_BUILD_OK(scope); \
}
#define SKIP_TRT7(x) \
if (!IS_TRT_VERSION_GE(8, 0, 0, 0) && (x)) { \
GTEST_SKIP(); \
}
TEST_P(TestQDQSuite, TestConv2DBasic) {
SKIP_TRT7(GetParam().qdq_on_output);
SKIP_TRT7(GetParam().data_format != "NCHW");
SKIP_TRT7(!GetParam().final_qdq);
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format, {3, 28, 28});
Output out = input;
const int num_conv = 1;
std::array<int, 2> in_channels = {3, 16};
std::array<int, 2> out_channels = {16, 32};
for (int i = 0; i < num_conv; i++) {
out = AddConv2D(scope.WithOpName(absl::StrCat("conv_", i)), out,
in_channels[i], out_channels[i], {3, 3},
{1, 1}, GetParam().data_format,
GetParam().conv_has_bias, GetParam().conv_epilogue,
GetParam().qdq_on_output);
}
out = AddOutput(scope, out, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, TestMatMulBasic) {
if (GetParam().data_format != "NCHW" || !GetParam().conv_has_bias ||
GetParam().qdq_on_output ||
GetParam().conv_epilogue != ConvEpilogueType::kReLU) {
GTEST_SKIP();
}
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, "NHW");
auto matmul_op = AddMatMul(scope, "matmul", input);
auto out = AddOutput(scope, matmul_op, 0, GetParam().final_qdq);
TF_EXPECT_OK(ConvertAndRun(&scope));
}
TEST_P(TestQDQSuite, AddBothBranchesQDQConvSingleInput) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input1 = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input1, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto conv2 =
AddConv2D(scope, input1, 3, 16, {3, 3},
{1, 1}, GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto add =
ops::Add(scope.WithOpName("add"),
!GetParam().qdq_on_output ? AddQDQV2(scope, conv1) : conv1,
!GetParam().qdq_on_output ? AddQDQV2(scope, conv2) : conv2);
  // Feed the joined branches into the final convolution so the Add output
  // (and the per-branch Q/DQ nodes) participates in the converted subgraph.
  auto conv3 =
      AddConv2D(scope.WithOpName("conv3"), add, 16, 16, {1, 1}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto out =
AddOutput(scope.WithOpName("output"), conv3, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, AddBothBranchesQDQMultipleInput) {
SKIP_TRT7(true);
Scope scope = Scope::NewRootScope();
auto input1 = AddInput(scope, 0, GetParam().data_format);
auto input2 = AddInput(scope, 1, GetParam().data_format);
auto add =
ops::Add(scope.WithOpName("add"),
!GetParam().qdq_on_output ? AddQDQV2(scope, input1) : input1,
!GetParam().qdq_on_output ? AddQDQV2(scope, input2) : input2);
auto output = AddOutput(scope, add, 0, true);
TF_EXPECT_OK(ConvertAndRun(&scope));
}
TEST_P(TestQDQSuite, TestConvMaxpool) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
ops::MaxPool maxpool =
ops::MaxPool(scope.WithOpName("maxpool"),
AddQDQV2(scope.WithOpName("mp_qdq_in"), conv1), {1, 1, 1, 1},
{1, 1, 1, 1}, "SAME",
ops::MaxPool::Attrs().DataFormat(GetParam().data_format));
auto output =
AddOutput(scope.WithOpName("output"), maxpool, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, TestConvMaxpoolConv) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
ops::MaxPool maxpool =
ops::MaxPool(scope.WithOpName("maxpool"),
AddQDQV2(scope.WithOpName("mp_qdq_in"), conv1), {1, 1, 1, 1},
{1, 1, 1, 1}, "SAME",
ops::MaxPool::Attrs().DataFormat(GetParam().data_format));
auto conv2 = AddConv2D(scope, maxpool, 16, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto output =
AddOutput(scope.WithOpName("out"), conv2, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
INSTANTIATE_TEST_SUITE_P(TestQDQSuiteInst, TestQDQSuite,
::testing::ValuesIn(EnumerateQDQTestOptions()));
}
}
}
#endif
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd8bcab4-0896-4547-adfa-bf93fb8d2f73 | cpp | tensorflow/tensorflow | log_softmax | tensorflow/compiler/tf2tensorrt/convert/ops/log_softmax.cc | tensorflow/lite/kernels/log_softmax_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class ConvertLogSoftmax : public OpConverterBase<ConvertLogSoftmax> {
public:
explicit ConvertLogSoftmax(const OpConverterParams *params)
: OpConverterBase<ConvertLogSoftmax>(params) {}
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return std::array<InputArgSpec, 1>{
InputArgSpec::Create("logits", TrtInputArg::kTensor)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
if (!num_trt_dims && params.use_implicit_batch) {
return errors::InvalidArgument(
"TensorRT LogSoftmax cannot apply on the batch dimension");
}
return OkStatus();
}
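  // Lowers log_softmax(x) = x - log(sum(exp(x))), reducing over the last
  // dimension, as a chain of TensorRT exp, reduce-sum, log, and elementwise
  // subtract layers.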
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &node_def = params.node_def;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
nvinfer1::IUnaryLayer *exp = params.converter->network()->addUnary(
*logits_tensor->trt_tensor(), nvinfer1::UnaryOperation::kEXP);
TFTRT_RETURN_ERROR_IF_NULLPTR(exp, node_def.name());
params.converter->SetLayerName(exp, node_def, "exp");
nvinfer1::IReduceLayer *reduced_sum =
params.converter->network()->addReduce(
*exp->getOutput(0), nvinfer1::ReduceOperation::kSUM,
(1 << (num_trt_dims - 1)),
true );
params.converter->SetLayerName(reduced_sum, node_def, "reduced_sum");
nvinfer1::IUnaryLayer *log_reduced_sum =
params.converter->network()->addUnary(*reduced_sum->getOutput(0),
nvinfer1::UnaryOperation::kLOG);
TFTRT_RETURN_ERROR_IF_NULLPTR(log_reduced_sum, node_def.name());
params.converter->SetLayerName(log_reduced_sum, node_def,
"log_reduced_sum");
nvinfer1::IElementWiseLayer *sub =
params.converter->network()->addElementWise(
*logits_tensor->trt_tensor(), *log_reduced_sum->getOutput(0),
nvinfer1::ElementWiseOperation::kSUB);
TFTRT_RETURN_ERROR_IF_NULLPTR(sub, node_def.name());
params.converter->SetLayerName(sub, node_def, "sub");
params.outputs->push_back(TRT_TensorOrWeights(sub->getOutput(0)));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertLogSoftmax>(),
"LogSoftmax");
}
}
}
#endif | #include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
class LogSoftmaxOpModel : public SingleOpModel {
public:
LogSoftmaxOpModel(int batches, int size)
: batches_(batches), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOG_SOFTMAX, BuiltinOptions_LogSoftmaxOptions,
CreateLogSoftmaxOptions(builder_).Union());
BuildInterpreter({{batches_, input_size_}});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
int batches_;
int input_size_;
};
TEST(LogSoftmaxOpTest, SimpleTest) {
LogSoftmaxOpModel m(2, 5);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-4.45191431, -3.45191431, -2.45191431, -1.45191443, -0.4519144,
-0.4519144, -1.45191443, -2.45191431, -3.45191431, -4.45191431},
1e-6)));
}
TEST(LogSoftmaxOpTest, CompareWithTFmini) {
const int batch_size = 2;
const int input_size = 5;
static float input_buffer[] = {
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
};
LogSoftmaxOpModel m(batch_size, input_size);
m.SetInput(0, input_buffer, input_buffer + input_size * batch_size);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::unique_ptr<float[]> output_buffer(new float[input_size * batch_size]);
auto input_shape = RuntimeShape({batch_size, 1, 1, input_size});
SoftmaxParams params;
tflite::reference_ops::LogSoftmax(params, input_shape, input_buffer,
input_shape, output_buffer.get());
std::vector<float> expected;
expected.insert(expected.end(), output_buffer.get(),
output_buffer.get() + input_size * batch_size);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(expected, 1e-6)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/log_softmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/log_softmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
912ad902-faee-4d7f-9487-783e291566d2 | cpp | tensorflow/tensorflow | const_analysis | tensorflow/compiler/tf2xla/const_analysis.cc | tensorflow/compiler/tf2xla/const_analysis_test.cc | #include "tensorflow/compiler/tf2xla/const_analysis.h"
#include <unordered_map>
#include <unordered_set>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
Status GetFunctionBody(FunctionLibraryRuntime* flib_runtime,
const NodeDef& node, StringPiece func_attr_name,
const FunctionBody** fbody) {
NameAttrList name_attr_list;
TF_RETURN_IF_ERROR(GetNodeAttr(node, func_attr_name, &name_attr_list));
FunctionLibraryRuntime::Handle func_handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
name_attr_list.name(), AttrSlice(&name_attr_list.attr()), &func_handle));
*fbody = flib_runtime->GetFunctionBody(func_handle);
return absl::OkStatus();
}
Status GetFunctionBodies(FunctionLibraryRuntime* flib_runtime,
const NodeDef& node, StringPiece func_list_attr_name,
std::vector<const FunctionBody*>* fbodies) {
std::vector<NameAttrList> name_attr_lists;
TF_RETURN_IF_ERROR(GetNodeAttr(node, func_list_attr_name, &name_attr_lists));
for (const NameAttrList& name_attr_list : name_attr_lists) {
FunctionLibraryRuntime::Handle func_handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
name_attr_list.name(), AttrSlice(&name_attr_list.attr()),
&func_handle));
fbodies->push_back(flib_runtime->GetFunctionBody(func_handle));
}
return absl::OkStatus();
}
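// Runs the backwards analysis over every branch body of an If/Case node and
// records which of the node's operands must be compile-time constants. Branch
// argument i corresponds to operand i + 1, since operand 0 is the predicate
// (or branch index).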
Status CondConstInputIndices(
absl::Span<const FunctionBody* const> branch_bodies,
std::vector<int>* const_input_idxs, FunctionLibraryRuntime* flib_runtime) {
TF_RET_CHECK(!branch_bodies.empty());
TF_RET_CHECK(branch_bodies[0] != nullptr);
int num_inputs =
branch_bodies[0]->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
for (auto fbody : branch_bodies) {
TF_RET_CHECK(fbody != nullptr);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
}
for (int i = 0, end = compile_time_const_arg_indices.size(); i < end; i++) {
if (compile_time_const_arg_indices[i]) {
const_input_idxs->push_back(i + 1);
}
}
return absl::OkStatus();
}
Status GetCompileTimeConstInputs(const NodeDef& node, const OpKernel* op_kernel,
const OpDef* op_def,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
DCHECK(op_def != nullptr || op_kernel != nullptr);
if (node.op() == "While" || node.op() == "StatelessWhile") {
const FunctionBody* fcond = nullptr;
const FunctionBody* fbody = nullptr;
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "cond", &fcond));
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "body", &fbody));
TF_RET_CHECK(fcond);
TF_RET_CHECK(fbody);
int num_inputs = fbody->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fcond->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
for (int i = 0; i < num_inputs; i++) {
if (compile_time_const_arg_indices[i]) {
TF_ASSIGN_OR_RETURN(
bool is_loop_invariant,
IsLoopInvariant(fbody, i,
flib_runtime->GetFunctionLibraryDefinition()));
if (is_loop_invariant) {
const_input_idxs->push_back(i);
} else {
Node* arg_i = fbody->arg_nodes[i];
Node* ret_i = fbody->ret_nodes[i];
VLOG(1) << "Argument " << i << " to while-loop " << node.name()
<< " has to be constant, but it's not a loop invariant, "
"cluster compilation likely to fail at compile time: "
<< arg_i->DebugString() << " vs. " << ret_i->DebugString();
VLOG(1) << node.ShortDebugString();
}
}
}
return absl::OkStatus();
} else if (node.op() == "If" || node.op() == "StatelessIf") {
const FunctionBody* fthen = nullptr;
const FunctionBody* felse = nullptr;
TF_RETURN_IF_ERROR(
GetFunctionBody(flib_runtime, node, "then_branch", &fthen));
TF_RETURN_IF_ERROR(
GetFunctionBody(flib_runtime, node, "else_branch", &felse));
return CondConstInputIndices({fthen, felse}, const_input_idxs,
flib_runtime);
} else if (node.op() == "Case" || node.op() == "StatelessCase") {
std::vector<const FunctionBody*> branch_bodies;
TF_RETURN_IF_ERROR(
GetFunctionBodies(flib_runtime, node, "branches", &branch_bodies));
return CondConstInputIndices(branch_bodies, const_input_idxs, flib_runtime);
} else if (node.op() == "PartitionedCall" ||
node.op() == "StatefulPartitionedCall") {
const FunctionBody* fbody;
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "f", &fbody));
int num_inputs = fbody->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
for (int i = 0; i < num_inputs; i++) {
if (compile_time_const_arg_indices[i]) {
const_input_idxs->push_back(i);
}
}
return absl::OkStatus();
} else if (op_def != nullptr) {
return XlaOpRegistry::CompileTimeConstantInputs(node, *op_def,
const_input_idxs);
} else {
return XlaOpRegistry::CompileTimeConstantInputs(*op_kernel,
const_input_idxs);
}
}
Status GetCompileTimeConstInputs(const Node* node,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
return GetCompileTimeConstInputs(node->def(), nullptr,
&node->op_def(), const_input_idxs,
flib_runtime);
}
}
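// Backwards dataflow pass: starting from inputs that XLA requires to be
// compile-time constants (e.g. the shape operand of Reshape), marks their
// producers as must-be-constant and propagates the requirement through the
// graph back to the _Arg nodes. For instance, given
//   Reshape(arg2, Add(Shape(arg0), arg1))
// arg1 is marked constant (it transitively feeds the shape operand), while
// arg0 and arg2 stay dynamic because they only feed data inputs.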
Status BackwardsConstAnalysis(
const Graph& g, std::vector<bool>* compile_time_const_arg_indices,
std::vector<bool>* compile_time_const_nodes,
FunctionLibraryRuntime* flib_runtime,
std::function<bool(const Edge&)> edge_filter_input) {
if (!compile_time_const_nodes && g.GetConstArgIndicesCache().has_value() &&
!edge_filter_input) {
VLOG(5) << "Using cached argument indices on graph " << &g;
*compile_time_const_arg_indices = g.GetConstArgIndicesCache().value();
return absl::OkStatus();
}
auto edge_filter = [&](const Edge& e) {
return edge_filter_input ? edge_filter_input(e) : true;
};
std::vector<bool> compile_time_const_nodes_impl;
if (compile_time_const_nodes) {
CHECK_EQ(compile_time_const_nodes->size(), g.num_node_ids());
} else {
compile_time_const_nodes_impl.resize(g.num_node_ids());
compile_time_const_nodes = &compile_time_const_nodes_impl;
}
Status status;
auto visit = [&](Node* node) {
if (!status.ok()) return;
if (XlaOpRegistry::IsMetadataOp(node->type_string())) {
VLOG(3) << "must-be-const node is metadata op: " << node->name();
return;
}
if ((*compile_time_const_nodes)[node->id()]) {
VLOG(3) << "marking consts for must-be-const node " << node->name();
if (node->type_string() == "_Arg") {
int index;
status = GetNodeAttr(node->attrs(), "index", &index);
if (!status.ok()) return;
if (compile_time_const_arg_indices) {
(*compile_time_const_arg_indices)[index] = true;
}
VLOG(3) << " const _Arg " << index << ": " << node->name();
return;
}
for (const Edge* pred : node->in_edges()) {
if (!pred->IsControlEdge() && edge_filter(*pred)) {
while (edge_filter(*pred) && IsConstTraversableOpType(pred->src())) {
status = pred->src()->input_edge(pred->src_output(), &pred);
if (!status.ok()) return;
}
if (edge_filter(*pred)) {
VLOG(4) << " " << pred->src()->name() << " must be const (is "
<< pred->src()->type_string() << ")";
(*compile_time_const_nodes)[pred->src()->id()] = true;
}
}
}
return;
}
std::vector<int> const_input_idxs;
status = GetCompileTimeConstInputs(node, &const_input_idxs, flib_runtime);
if (!status.ok() || const_input_idxs.empty()) {
return;
}
VLOG(3) << "marking consts for must-be-const inputs of " << node->name();
for (Edge const* edge : node->in_edges()) {
if (!edge->IsControlEdge() &&
absl::c_binary_search(const_input_idxs, edge->dst_input()) &&
edge_filter(*edge)) {
while (edge_filter(*edge) && IsConstTraversableOpType(edge->src())) {
status = edge->src()->input_edge(edge->src_output(), &edge);
if (!status.ok()) return;
}
if (edge_filter(*edge)) {
VLOG(4) << " input " << edge->dst_input() << ": "
<< edge->src()->name() << " must be const (is "
<< edge->src()->type_string() << ")";
(*compile_time_const_nodes)[edge->src()->id()] = true;
}
}
}
};
DFS(g, {}, visit, NodeComparatorName{},
[](const Edge& edge) { return !edge.src()->IsNextIteration(); });
if (compile_time_const_arg_indices && !edge_filter_input) {
VLOG(5) << "Setting the cache on the graph: " << &g;
g.GetConstArgIndicesCache() = *compile_time_const_arg_indices;
}
return status;
}
Status GetCompileTimeConstInputs(const OpKernel* op_kernel,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
return GetCompileTimeConstInputs(op_kernel->def(), op_kernel,
nullptr, const_input_idxs,
flib_runtime);
}
} | #include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(ConstAnalysisTest, Basics) {
Scope root = Scope::NewRootScope();
auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(root.WithOpName("Arg3"), DT_INT32, 3);
auto a = ops::Shape(root, arg0);
auto b = ops::Add(root, a, arg1);
auto c = ops::Reshape(root, arg2, b);
auto d = ops::Mul(root, c, ops::Sum(root, arg3, arg3));
FixupSourceAndSinkEdges(root.graph());
std::vector<bool> const_args(4, false);
std::vector<bool> const_nodes(root.graph()->num_node_ids(), false);
TF_ASSERT_OK(BackwardsConstAnalysis(*root.graph(), &const_args, &const_nodes,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, true, false, true}));
EXPECT_FALSE(const_nodes[arg0.node()->id()]);
EXPECT_TRUE(const_nodes[arg1.node()->id()]);
EXPECT_FALSE(const_nodes[arg2.node()->id()]);
EXPECT_TRUE(const_nodes[arg3.node()->id()]);
}
TEST(ConstAnalysisTest, TopologicalOrder) {
for (bool order : {false, true}) {
Scope root = Scope::NewRootScope();
auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
auto a = ops::Reshape(root, arg0, arg1);
auto b = ops::Reshape(root, arg2, a);
if (order) {
std::swap(a, b);
}
auto c = ops::Add(root, a, b);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(3, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({true, true, false}));
}
}
void TestFunctionCall(bool is_stateful_partitioned_call) {
FunctionDef callee = FunctionDefHelper::Define(
"Callee", {"t:float", "shape:int32"}, {"result:float"}, {},
{{{"result"}, "Reshape", {"t", "shape"}, {{"T", DT_FLOAT}}}});
FunctionDefLibrary flib;
*flib.add_function() = callee;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Scope root = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(root.WithOpName("tensor"), DT_FLOAT, 0);
auto arg1 = ops::_Arg(root.WithOpName("shape"), DT_INT32, 1);
NameAttrList call_attrs;
call_attrs.set_name("Callee");
if (is_stateful_partitioned_call) {
ops::StatefulPartitionedCall b(root.WithOpName("Call"),
{Output(arg0), Output(arg1)}, {DT_FLOAT},
call_attrs);
} else {
ops::PartitionedCall b(root.WithOpName("Call"),
{Output(arg0), Output(arg1)}, {DT_FLOAT},
call_attrs);
}
Graph graph(&flib_def);
TF_ASSERT_OK(root.ToGraph(&graph));
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(nullptr, Env::Default(),
nullptr,
TF_GRAPH_DEF_VERSION, &flib_def, opts));
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
lib_runtime));
EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
TEST(ConstAnalysisTest, PartitionedCall) {
TestFunctionCall(false);
}
TEST(ConstAnalysisTest, StatefulPartitionedCall) {
TestFunctionCall(true);
}
TEST(ConstAnalysisTest, DontFollowControlDependencies) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg1, c1);
Output reshape = ops::Reshape(root, arg1, add);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
TEST(ConstAnalysisTest, RespectExplicitAttr_0) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg1, c1);
Output reshape = ops::Reshape(root, arg1, add);
reshape.node()->AddAttr(kXlaCompileTimeConstantInputsAttr,
std::vector<string>());
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, false}));
}
TEST(ConstAnalysisTest, RespectExplicitAttr_1) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg0, c1);
std::vector<string> add_constant_inputs;
add_constant_inputs.push_back("x");
add.node()->AddAttr(kXlaCompileTimeConstantInputsAttr, add_constant_inputs);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(1, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({true}));
}
static bool Initialized = [] {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
return true;
}();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/const_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/const_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b49c330-9716-4914-b911-489ee78f0187 | cpp | tensorflow/tensorflow | functionalize_cond | tensorflow/compiler/tf2xla/functionalize_cond.cc | tensorflow/compiler/tf2xla/functionalize_cond_test.cc | #include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include <algorithm>
#include <deque>
#include <stack>
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/frontend_attributes_util.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/union_find.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace functionalize_cond {
bool AncestorNode::operator<(const AncestorNode& other) const {
return (output_tensor.node->id() < other.output_tensor.node->id()) ||
(output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index < other.output_tensor.index) ||
(output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index == other.output_tensor.index &&
type < other.type);
}
bool AncestorNode::operator==(const AncestorNode& other) const {
return output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index == other.output_tensor.index && type == other.type;
}
size_t AncestorNode::Hash::operator()(const AncestorNode& ancestor) const {
size_t h = std::hash<int>()(ancestor.output_tensor.node->id());
h = Hash64Combine(h, std::hash<int>()(ancestor.output_tensor.index));
return Hash64Combine(h, std::hash<int>()(static_cast<int>(ancestor.type)));
}
typedef std::tuple<StateMap::CondId, StateMap::AncestorId, OutputTensor>
ClusterTuple;
struct ClusterTupleLessThan {
bool operator()(const ClusterTuple& a, const ClusterTuple& b) const {
if (std::tie(std::get<0>(a), std::get<1>(a)) <
std::tie(std::get<0>(b), std::get<1>(b))) {
return true;
} else if (std::tie(std::get<0>(a), std::get<1>(a)) ==
std::tie(std::get<0>(b), std::get<1>(b))) {
return StateMap::OutputTensorLess()(std::get<2>(a), std::get<2>(b));
} else {
return false;
}
}
};
string DebugString(const OutputTensor& tensor) {
return absl::StrCat(tensor.node->name(), ":", tensor.index);
}
string Branch_Name(BranchType b) {
switch (b) {
case BranchType::kElseBranch:
return "else";
case BranchType::kThenBranch:
return "then";
case BranchType::kBoth:
return "both";
case BranchType::kNeither:
return "neither";
}
}
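// Renders a CondState as "{s(<predicate>,<branch>), ...}", abbreviating
// entries on neither branch to "d".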
string DebugString(StateMap::CondId cond_state) {
if (cond_state == nullptr || cond_state->empty()) return "{}";
using value_type = StateMap::CondState::value_type;
return absl::StrCat(
"{",
absl::StrJoin(*cond_state, ", ",
[](string* output, const value_type& pred_branch) {
const OutputTensor& pred = pred_branch.first;
const BranchType& branch = pred_branch.second;
if (branch == BranchType::kNeither)
absl::StrAppend(output, "d");
else
absl::StrAppend(output, "s(", DebugString(pred), ",",
Branch_Name(branch), ")");
}),
"}");
}
Status GetSwitchPredicate(const Node& switch_node, OutputTensor* pred) {
const Edge* pred_edge;
TF_RETURN_IF_ERROR(switch_node.input_edge(1, &pred_edge));
while (pred_edge->src()->IsIdentity()) {
TF_RETURN_IF_ERROR(pred_edge->src()->input_edge(0, &pred_edge));
}
*pred = OutputTensor(pred_edge->src(), pred_edge->src_output());
return absl::OkStatus();
}
Status GetSwitchValue(const Node& switch_node, OutputTensor* val) {
const Edge* val_edge;
TF_RETURN_IF_ERROR(switch_node.input_edge(0, &val_edge));
*val = OutputTensor(val_edge->src(), val_edge->src_output());
return absl::OkStatus();
}
bool StateMap::OutputTensorLess::operator()(const OutputTensor& lhs,
const OutputTensor& rhs) const {
return (lhs.node->id() < rhs.node->id()) ||
(lhs.node->id() == rhs.node->id() && lhs.index < rhs.index);
}
struct CondStateLess {
bool operator()(const StateMap::CondState::value_type& lhs,
const StateMap::CondState::value_type& rhs) const {
if (StateMap::OutputTensorLess().operator()(lhs.first, rhs.first))
return true;
if (lhs.first.node->id() == rhs.first.node->id() &&
lhs.first.index == rhs.first.index)
return lhs.second < rhs.second;
return false;
}
};
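// The CondState holding the single sentinel entry (null tensor, kNeither) is
// reserved as dead_id_, the marker for nodes that are dead on all paths.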
StateMap::StateMap(Graph* graph) {
node_to_condid_map_.resize(graph->num_node_ids());
node_to_ancestorid_map_.resize(graph->num_node_ids());
dead_id_ = GetCondId(
{std::make_pair(OutputTensor(nullptr, -1), BranchType::kNeither)});
}
bool StateMap::IsDead(StateMap::CondId id) const { return id == dead_id_; }
bool StateMap::IsEmpty(StateMap::CondId id) const { return id == nullptr; }
size_t StateMap::Hash::operator()(const StateMap::CondState& map) const {
if (map.empty()) return 0;
auto it = map.begin();
size_t h = Hash64Combine(OutputTensor::Hash()(it->first),
hash<BranchType>()(it->second));
for (++it; it != map.end(); ++it) {
h = Hash64Combine(h, Hash64Combine(OutputTensor::Hash()(it->first),
hash<BranchType>()(it->second)));
}
return h;
}
size_t StateMap::Hash::operator()(const StateMap::AncestorState& map) const {
if (map.empty()) return 0;
auto it = map.begin();
size_t h = AncestorNode::Hash()(*it);
for (++it; it != map.end(); ++it) {
h = Hash64Combine(h, AncestorNode::Hash()(*it));
}
return h;
}
struct CondArgNode {
explicit CondArgNode(Node* src, int src_output)
: src(src), src_output(src_output) {}
string ToString() const {
return absl::StrCat("src=", src->name(), ":", src_output,
" switches=", NodesToString(switches));
}
Node* src;
int src_output;
std::array<Node*, 2> branch_copy;
std::vector<Node*> switches;
};
using CondArgNodes = std::vector<CondArgNode>;
string DebugString(const CondArgNodes& nodes) {
return absl::StrCat(
"[",
absl::StrJoin(nodes, ", ",
[](string* output, const CondArgNode& node) {
absl::StrAppend(output, node.ToString());
}),
"]");
}
StateMap::CondId StateMap::LookupCondId(const Node* node) const {
const int64_t map_size = node_to_condid_map_.size();
if (node->id() < map_size) return node_to_condid_map_[node->id()];
return added_node_condid_mapping_.at(node->id());
}
StateMap::CondId StateMap::GetCondId(const StateMap::CondState& state) {
if (state.empty()) return nullptr;
return &*condstate_set_.insert(state).first;
}
void StateMap::ResetCondId(const Node* node, StateMap::CondId id) {
const int64_t map_size = node_to_condid_map_.size();
if (node->id() < map_size)
node_to_condid_map_[node->id()] = id;
else
added_node_condid_mapping_[node->id()] = id;
}
StateMap::AncestorId StateMap::LookupAncestorId(const Node* node) const {
const int64_t map_size = node_to_ancestorid_map_.size();
if (node->id() < map_size) return node_to_ancestorid_map_[node->id()];
return added_node_ancestorid_mapping_.at(node->id());
}
StateMap::AncestorId StateMap::GetAncestorId(
const StateMap::AncestorState& state) {
if (state.empty()) return nullptr;
return &*ancestorstate_set_.insert(state).first;
}
void StateMap::ResetAncestorId(const Node* node, StateMap::AncestorId id) {
const int64_t map_size = node_to_ancestorid_map_.size();
if (node->id() < map_size)
node_to_ancestorid_map_[node->id()] = id;
else
added_node_ancestorid_mapping_[node->id()] = id;
}
void StateMap::MarkDead(const Node* node) { ResetCondId(node, dead_id_); }
string StateMap::CondStateToString(const Node* node) const {
return CondStateToString(LookupCondId(node));
}
string StateMap::CondStateToString(StateMap::CondId id) const {
return DebugString(id);
}
string StateMap::AncestorStateToString(const Node* node) const {
if (auto id = LookupAncestorId(node)) {
return absl::StrCat(
"{",
absl::StrJoin(*id, ",",
[](string* output, const AncestorNode& ancestor) {
absl::StrAppend(output,
ancestor.output_tensor.node->name(),
":", ancestor.output_tensor.index);
}),
"}");
}
return "{}";
}
FunctionalizeCond::FunctionalizeCond(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter)
: state_map_(graph),
library_(library),
graph_(graph),
node_filter_(node_filter) {}
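// Represents one functionalizable conditional: the switch and merge nodes
// guarded by a single predicate. BuildAndReplace extracts the then/else
// bodies into functions and swaps the cluster for a single "If" node.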
class Conditional {
public:
Conditional(OutputTensor predicate, FunctionalizeCond* parent,
StateMap* cond_state_map, const ShapeRefiner& refiner);
Status AddMerge(Node* m);
Status BuildAndReplace(
Graph* graph, FunctionLibraryDefinition* library,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement);
private:
Status ExtractBodies(Graph* graph);
Status BuildArgumentNodes();
Status BuildIfNode(Graph* graph, FunctionLibraryDefinition* library);
Status AddInputEdges(
Graph* graph,
const std::unordered_map<Node*, OutputTensor>& merge_to_replacement);
Status AddOutputEdges(
Graph* graph,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement);
Status AddSwitch(Node* s);
Status AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Graph* graph);
string name() const;
FunctionalizeCond* parent_;
StateMap* state_map_;
OutputTensor predicate_;
const ShapeRefiner& refiner_;
OutputTensor switch_predicate_;
std::set<Node*, NodeCmpByNameResourcesLast> switches_;
std::set<Node*, NodeCmpByNameResourcesLast> merges_;
std::vector<Node*> external_control_inputs_;
std::vector<Node*> external_control_outputs_;
std::array<std::unique_ptr<Graph>, 2> bodies_;
std::array<std::vector<Node*>, 2> node_maps_;
CondArgNodes cond_arg_nodes_;
Node* if_node_ = nullptr;
bool replaced_ = false;
};
Conditional::Conditional(OutputTensor predicate, FunctionalizeCond* parent,
StateMap* cond_state_map, const ShapeRefiner& refiner)
: parent_(parent),
state_map_(cond_state_map),
predicate_(predicate),
refiner_(refiner) {}
Status Conditional::AddMerge(Node* m) {
merges_.insert(m);
return absl::OkStatus();
}
Status Conditional::AddSwitch(Node* s) {
VLOG(5) << "Adding switch " << s->DebugString();
OutputTensor predicate;
TF_RETURN_IF_ERROR(GetSwitchPredicate(*s, &predicate));
if (switch_predicate_.node == nullptr) switch_predicate_ = predicate;
if (!(switch_predicate_ == predicate)) {
return errors::InvalidArgument(
"Merge nodes ", NodesToString(merges_),
" directly dominated by switch nodes with different predicates (",
DebugString(switch_predicate_), " vs ", DebugString(predicate), ").");
}
switches_.insert(s);
parent_->AddSwitchId(s->id());
return absl::OkStatus();
}
Status Conditional::BuildArgumentNodes() {
VLOG(1) << "Build function arguments";
struct Hash {
size_t operator()(const std::pair<Node*, int>& item) const {
return Hash64Combine(hash<Node*>()(item.first),
std::hash<int>()(item.second));
}
};
std::unordered_map<std::pair<Node*, int>, int, Hash> input_index;
for (Node* switch_node : switches_) {
const Edge* e;
TF_RETURN_IF_ERROR(switch_node->input_edge(0, &e));
std::pair<Node*, int> key = std::make_pair(e->src(), e->src_output());
if (input_index.find(key) == input_index.end()) {
input_index[key] = cond_arg_nodes_.size();
cond_arg_nodes_.emplace_back(key.first, key.second);
}
cond_arg_nodes_.at(input_index.at(key)).switches.push_back(switch_node);
}
VLOG(5) << "CondArg nodes created: " << DebugString(cond_arg_nodes_);
int arg_count = 0;
for (CondArgNode& cond_arg_node : cond_arg_nodes_) {
DataType dtype = cond_arg_node.src->output_type(cond_arg_node.src_output);
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
TF_RETURN_IF_ERROR(
NodeBuilder(absl::StrCat("_Arg", arg_count),
FunctionLibraryDefinition::kArgOp)
.Attr("T", dtype)
.Attr("index", arg_count)
.Finalize(bodies_[branch_index].get(),
&cond_arg_node.branch_copy[branch_index]));
}
for (Node* node : cond_arg_node.switches) {
for (const Edge* e : node->out_edges()) {
if (e->IsControlEdge()) continue;
int branch_index = e->src_output();
Node* src_copy = cond_arg_node.branch_copy[branch_index];
        Node* dst_copy = node_maps_[branch_index][e->dst()->id()];
        // A switch output that feeds no node inside this branch body has no
        // copy; skip it.
        if (dst_copy == nullptr) continue;
        int dst_input = IsMerge(e->dst()) ? 0 : e->dst_input();
bodies_[branch_index]->AddEdge(src_copy, 0, dst_copy, dst_input);
}
}
++arg_count;
}
for (Node* m : merges_) {
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bool has_input = false;
for (auto e : node_maps_[static_cast<int>(branch)][m->id()]->in_edges()) {
if (!e->IsControlEdge()) {
has_input = true;
break;
}
}
if (!has_input) {
return errors::Internal(
"Failed to functionalize control flow with merge ",
FormatNodeForError(*m), " that doesn't have input on ",
Branch_Name(branch), " branch.");
}
}
}
return absl::OkStatus();
}
Status Conditional::AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Graph* graph) {
Node* switch_node;
Node* src = edge->src();
int src_output = edge->src_output();
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName(absl::StrCat(src->name(), "_added_switch")),
"Switch")
.Input(src, src_output)
.Input(const_cast<Node*>(predicate_.node), predicate_.index)
.Finalize(graph, &switch_node));
state_map_->ResetCondId(switch_node, state_map_->LookupCondId(src));
state_map_->ResetAncestorId(switch_node, state_map_->LookupAncestorId(src));
Node* dst = edge->dst();
int dst_input = edge->dst_input();
graph->RemoveEdge(edge);
graph->AddEdge(switch_node, static_cast<int>(branch), dst, dst_input);
return AddSwitch(switch_node);
}
Status Conditional::ExtractBodies(Graph* graph) {
VLOG(2) << "Extracting bodies for " << name();
for (auto b : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bodies_[static_cast<int>(b)] =
std::make_unique<Graph>(graph->op_registry());
}
auto find_branch = [&](const Edge* e) {
const auto& id = state_map_->LookupCondId(e->src());
return IsSwitch(e->src()) ? BranchType(e->src_output())
: state_map_->FindBranchOf(id, predicate_);
};
std::array<std::vector<Node*>, 2> stacks;
VLOG(5) << "Merges: " << NodesToString(merges_);
for (Node* m : merges_) {
VLOG(5) << "For merge: " << m->DebugString() << " "
<< state_map_->CondStateToString(m);
for (auto e : m->in_edges()) {
if (e->IsControlEdge()) continue;
BranchType branch = find_branch(e);
TF_RET_CHECK(branch == BranchType::kThenBranch ||
branch == BranchType::kElseBranch)
<< "Error: " << e->src()->name()
<< " is not on either then or else branch (" << Branch_Name(branch)
<< ") for predicate " << DebugString(predicate_) << " ["
<< DebugString(state_map_->LookupCondId(e->src())) << "].";
Node* src = e->src();
if (IsSwitch(src)) {
TF_RETURN_IF_ERROR(AddSwitch(src));
} else {
stacks[static_cast<int>(branch)].push_back(src);
}
}
}
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto output = bodies_[branch_index].get();
auto& stack = stacks[branch_index];
VLOG(5) << "In branch: " << Branch_Name(branch) << " "
<< NodesToString(stack);
std::vector<bool> visited(graph->num_node_ids(), false);
node_maps_[branch_index].resize(graph->num_node_ids(), nullptr);
auto& node_map = node_maps_[branch_index];
while (!stack.empty()) {
Node* n = stack.back();
stack.pop_back();
if (visited.at(n->id())) continue;
visited[n->id()] = true;
for (const Edge* e : n->out_edges()) {
Node* dst = e->dst();
if (IsMerge(dst)) continue;
Node* src = e->src();
auto dst_id = state_map_->LookupCondId(dst);
auto src_id = state_map_->LookupCondId(src);
if (dst_id != src_id) {
if (e->IsControlEdge()) {
external_control_outputs_.push_back(e->src());
} else {
if (!IsConstant(src)) {
LOG(WARNING) << errors::InvalidArgument(
"Graph contains node ", FormatNodeForError(*src),
" that feeds into node ", FormatNodeForError(*dst),
" but these nodes are in different control contexts (",
DebugString(src_id), " vs ", DebugString(dst_id),
" (detected during out edge testing)");
}
}
}
}
std::vector<const Edge*> in_edges(n->in_edges().begin(),
n->in_edges().end());
std::sort(
in_edges.begin(), in_edges.end(), [](const Edge* a, const Edge* b) {
int a_src_output = a->src_output(), b_src_output = b->src_output();
StringPiece a_name(a->src()->name()), b_name(b->src()->name());
return std::tie(a_src_output, a_name) <
std::tie(b_src_output, b_name);
});
for (const Edge* e : in_edges) {
Node* src = e->src();
if (!src->IsOp()) continue;
Node* dst = e->dst();
if (IsSwitch(src)) {
TF_RETURN_IF_ERROR(AddSwitch(src));
continue;
}
auto src_id = state_map_->LookupCondId(src);
auto dst_id = state_map_->LookupCondId(dst);
if (IsMerge(dst) || src_id == dst_id) {
if (node_map.at(src->id()) == nullptr) {
node_map.at(src->id()) = output->CopyNode(src);
stack.push_back(src);
}
} else if (e->IsControlEdge()) {
bool is_external_control_input = true;
if (!state_map_->IsEmpty(src_id) && !state_map_->IsEmpty(dst_id)) {
std::vector<StateMap::CondState::value_type> diff;
std::set_symmetric_difference(
src_id->begin(), src_id->end(), dst_id->begin(), dst_id->end(),
std::back_inserter(diff), CondStateLess());
if (diff.size() == 2 && diff[0].first == diff[1].first &&
(diff[0].second == BranchType::kNeither ||
diff[1].second == BranchType::kNeither)) {
auto src_branch = src_id->find(diff[0].first);
if (src_branch != src_id->end() &&
src_branch->second == BranchType::kNeither) {
is_external_control_input = false;
}
}
}
if (is_external_control_input) {
external_control_inputs_.push_back(src);
}
} else {
if (IsConstant(src)) {
if (node_map.at(src->id()) == nullptr) {
node_map.at(src->id()) = output->CopyNode(src);
}
} else {
StateMap::CondState state = *dst_id;
state.erase(predicate_);
if (state_map_->GetCondId(state) == src_id) {
TF_RETURN_IF_ERROR(AddSwitchNodeAlongEdge(e, branch, graph));
continue;
} else {
return errors::InvalidArgument(
"Graph contains node ", FormatNodeForError(*src),
" that feeds into node ", FormatNodeForError(*dst),
" but these nodes are in different control contexts (",
DebugString(src_id), " vs ", DebugString(dst_id),
" (detected during in edge testing)");
}
}
}
Node* src_copy = node_map.at(e->src()->id());
int src_output = e->src_output();
if (node_map.at(dst->id()) == nullptr) {
node_map.at(dst->id()) = output->CopyNode(dst);
}
Node* dst_copy = node_map.at(e->dst()->id());
if (e->IsControlEdge()) {
if (src_copy != nullptr) output->AddControlEdge(src_copy, dst_copy);
} else {
output->AddEdge(src_copy, src_output, dst_copy, e->dst_input());
}
}
}
}
int index = 0;
for (Node* m : merges_) {
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto& node_map = node_maps_[branch_index];
auto output = bodies_[branch_index].get();
TF_ASSIGN_OR_RETURN(node_map[m->id()],
BuildRetvalNode(output, m->output_type(0), index));
}
++index;
for (auto e : m->in_edges()) {
if (e->IsControlEdge()) continue;
int branch_index = static_cast<int>(find_branch(e));
auto& node_map = node_maps_[branch_index];
auto output = bodies_[branch_index].get();
Node* in = e->src();
if (!IsSwitch(in)) {
if (node_map.at(in->id()) == nullptr) {
node_map[in->id()] = output->CopyNode(in);
}
output->AddEdge(node_map[in->id()], e->src_output(),
node_map.at(m->id()), 0);
}
}
}
return absl::OkStatus();
}
Status Conditional::BuildIfNode(Graph* graph,
FunctionLibraryDefinition* library) {
VLOG(2) << "Build cond function for " << name();
NodeDebugInfo debug_info((*merges_.begin())->def());
NodeDefBuilder builder(name(), "If", library, &debug_info);
const string branch_name[] = {"else_branch", "then_branch"};
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
NameAttrList body_name;
body_name.set_name(library->UniqueFunctionName(
absl::StrCat("_functionalize_if_", branch_name[branch_index], "_")));
VLOG(3) << "FunctionalizeControlFlow (" << branch_name[branch_index]
<< "): "
<< DumpGraphToFile(
"functionalize_cond_body_" + branch_name[branch_index],
*bodies_[branch_index], nullptr);
FunctionDef body_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*bodies_[branch_index],
body_name.name(), &body_fdef));
TF_RETURN_IF_ERROR(library->AddFunctionDef(body_fdef));
builder.Attr(branch_name[branch_index], body_name);
}
VLOG(3) << "Build input type";
std::vector<NodeDefBuilder::NodeOut> inputs;
DataTypeVector in_arg_types;
for (auto& kv : cond_arg_nodes_) {
bool inserted = false;
for (const Node* arg : kv.switches) {
const Edge* in_edge;
TF_RETURN_IF_ERROR(arg->input_edge(0, &in_edge));
if (in_edge->IsControlEdge()) {
builder.ControlInput(in_edge->src()->name());
} else {
if (!inserted) {
DataType dtype = arg->input_type(0);
inputs.emplace_back(NodeDefBuilder::NodeOut(
in_edge->src()->name(), in_edge->src_output(), dtype));
in_arg_types.push_back(dtype);
inserted = true;
}
}
}
}
builder.Attr("Tin", in_arg_types);
DataTypeVector out_type;
std::vector<PartialTensorShape> output_shapes;
output_shapes.reserve(merges_.size());
for (const Node* merge : merges_) {
DataType dtype = merge->output_type(0);
TensorShapeProto shape;
    if (auto* shape_ctx = refiner_.GetContext(merge)) {
      shape_ctx->ShapeHandleToProto(shape_ctx->output(0), &shape);
    }
out_type.push_back(dtype);
output_shapes.push_back(shape);
}
builder.Attr("Tout", out_type);
VLOG(3) << "Build output type: " << DataTypeVectorString(out_type);
builder.Attr("output_shapes", output_shapes);
VLOG(3) << "Build output shapes: "
<< PartialTensorShapeUtils::PartialShapeListString(output_shapes);
builder.Attr("Tcond", DT_BOOL);
for (absl::string_view attr_name : kAttrsToPropagate) {
string attr_val;
if (GetNodeAttr(predicate_.node->def(), attr_name, &attr_val).ok()) {
builder.Attr(attr_name, attr_val);
}
}
builder.Device(predicate_.node->assigned_device_name());
builder.Input(
NodeDefBuilder::NodeOut(predicate_.node->name(), predicate_.index,
predicate_.node->output_type(predicate_.index)));
builder.Input(inputs);
VLOG(3) << "Build If node";
NodeDef if_def;
TF_RETURN_IF_ERROR(builder.Finalize(&if_def));
TF_ASSIGN_OR_RETURN(if_node_,
parent_->AddIfNode(if_def, *merges_.begin(), predicate_));
return absl::OkStatus();
}
Status Conditional::AddInputEdges(
Graph* graph,
const std::unordered_map<Node*, OutputTensor>& merge_to_replacement) {
VLOG(2) << "AddInputEdges for " << if_node_->name();
int index = 0;
if (predicate_.node->IsMerge()) {
auto iter = merge_to_replacement.find(predicate_.node);
if (iter == merge_to_replacement.end()) {
return errors::Internal("Cannot find replacement for Merge node ",
predicate_.node->name());
}
graph->AddEdge(iter->second.node, iter->second.index, if_node_, index++);
} else {
graph->AddEdge(const_cast<Node*>(predicate_.node), predicate_.index,
if_node_, index++);
}
for (auto& arg : cond_arg_nodes_) {
if (arg.src_output == Graph::kControlSlot) {
graph->AddControlEdge(arg.src, if_node_);
} else {
graph->AddEdge(arg.src, arg.src_output, if_node_, index++);
}
}
for (Node* n : external_control_inputs_) {
graph->AddControlEdge(n, if_node_);
}
return absl::OkStatus();
}
Status Conditional::AddOutputEdges(
Graph* graph,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement) {
VLOG(2) << "AddOutputEdges for " << if_node_->name();
int i = 0;
for (Node* node : merges_) {
TF_RETURN_IF_ERROR(parent_->AddIdentityNode(node, if_node_, i));
std::vector<const Edge*> edges(node->out_edges().begin(),
node->out_edges().end());
for (const Edge* edge : edges) {
Node* dst = edge->dst();
int dst_input = edge->dst_input();
if (edge->src_output() > 0) {
return errors::Unimplemented("Output of index (", edge->src_output(),
") of merge node ",
FormatNodeForError(*node));
}
bool control_edge = edge->IsControlEdge();
graph->RemoveEdge(edge);
if (control_edge) {
graph->AddControlEdge(if_node_, dst);
} else {
graph->AddEdge(if_node_, i, dst, dst_input);
}
}
(*merge_to_replacement)[node] = OutputTensor{if_node_, i};
++i;
}
for (Node* n : external_control_outputs_) {
graph->AddControlEdge(if_node_, n);
}
return absl::OkStatus();
}
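// End-to-end rewrite for this conditional: extract branch bodies, build _Arg
// nodes, emit the If node, rewire input and output edges, then re-propagate
// cond states and verify no cycle was introduced.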
Status Conditional::BuildAndReplace(
Graph* graph, FunctionLibraryDefinition* library,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement) {
VLOG(1) << "Build If and replace merge nodes "
<< NodesToString(this->merges_);
if (replaced_) return absl::OkStatus();
TF_RETURN_IF_ERROR(ExtractBodies(graph));
TF_RETURN_IF_ERROR(BuildArgumentNodes());
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Extracted bodies:";
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto output = bodies_[branch_index].get();
LOG(INFO) << Branch_Name(branch) << ": "
<< DebugString(output->ToGraphDefDebug());
}
}
TF_RETURN_IF_ERROR(BuildIfNode(graph, library));
TF_RETURN_IF_ERROR(AddInputEdges(graph, *merge_to_replacement));
TF_RETURN_IF_ERROR(AddOutputEdges(graph, merge_to_replacement));
TF_RETURN_IF_ERROR(parent_->PropagateUpdatedState(if_node_));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CheckNodeNotInCycle(if_node_, graph->num_node_ids()),
"Converting to If failed.");
replaced_ = true;
return absl::OkStatus();
}
string Conditional::name() const {
CHECK(!merges_.empty());
return absl::StrCat((*merges_.begin())->name(), "_if");
}
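// Replaces `replacee` (a merge node) with an Identity that reads output
// `port` of the new If node, carrying over any outside-compilation attribute.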
Status FunctionalizeCond::AddIdentityNode(const Node* replacee, Node* if_node,
int port) {
NodeBuilder id_builder(replacee->name(), "Identity");
id_builder.Input(if_node, port);
string outside_compilation;
if (GetNodeAttr(if_node->def(), kXlaOutsideCompilationAttr,
&outside_compilation)
.ok()) {
id_builder.Attr(kXlaOutsideCompilationAttr, outside_compilation);
}
Node* id;
TF_RETURN_IF_ERROR(id_builder.Finalize(graph_, &id));
state_map_.ResetCondId(id, state_map_.LookupCondId(if_node));
state_map_.ResetAncestorId(id, state_map_.LookupAncestorId(if_node));
return absl::OkStatus();
}
absl::StatusOr<Node*> FunctionalizeCond::AddIfNode(
const NodeDef& def, const Node* replacee, const OutputTensor& predicate) {
TF_ASSIGN_OR_RETURN(Node * ret, graph_->AddNode(def));
VLOG(1) << "Adding If for " << replacee->name();
StateMap::CondId id = state_map_.LookupCondId(replacee);
if (id) {
StateMap::CondState state = *id;
state.erase(predicate);
state_map_.ResetCondId(ret, state_map_.GetCondId(state));
} else {
state_map_.ResetCondId(ret, nullptr);
}
state_map_.ResetAncestorId(ret, state_map_.LookupAncestorId(replacee));
return ret;
}
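// Recomputes the CondState of all nodes downstream of `replacee` in
// topological order, re-visiting a node's consumers whenever its state
// changed and stopping once the change set is empty.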
Status FunctionalizeCond::PropagateUpdatedState(const Node* replacee) {
VLOG(2) << "Propagating update state for " << replacee->name() << " "
<< state_map_.CondStateToString(replacee);
std::vector<Node*> rev_topo_order;
GetPostOrder(*graph_, &rev_topo_order, NodeComparatorID());
std::unordered_set<Node*> changed;
for (auto n : replacee->out_nodes())
if (n->IsOp()) changed.insert(n);
for (auto it = rev_topo_order.rbegin();
it != rev_topo_order.rend() && !changed.empty(); ++it) {
if (changed.find(*it) != changed.end()) {
Node* n = *it;
StateMap::CondId old_state = state_map_.LookupCondId(n);
state_map_.ResetCondId(n, nullptr);
TF_RETURN_IF_ERROR(DetermineCondState(n));
if (state_map_.LookupCondId(n) != old_state) {
for (auto out : n->out_nodes())
if (out->IsOp()) changed.insert(out);
}
changed.erase(n);
}
}
return absl::OkStatus();
}
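// Meet operation on branch types: equal values are kept, kNeither and kBoth
// yield to the other side, and conflicting then/else values meet at kNeither.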
BranchType MeetBranch(const BranchType& lhs, const BranchType& rhs) {
if (lhs == rhs) return lhs;
if (lhs == BranchType::kNeither) return rhs;
if (rhs == BranchType::kNeither) return lhs;
if (lhs == BranchType::kBoth) return rhs;
if (rhs == BranchType::kBoth) return lhs;
return BranchType::kNeither;
}
BranchType StateMap::FindBranchOf(CondId id, OutputTensor predicate) const {
if (IsEmpty(id)) return BranchType::kNeither;
const CondState& nodes = *id;
auto it = nodes.find(predicate);
if (it == nodes.end()) return BranchType::kNeither;
return it->second;
}
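// Joins the CondStates flowing into a non-Merge node. An empty state defers
// to the other operand and a dead state dominates; otherwise the predicate
// maps are unioned, and two inputs pinned to opposite branches of the same
// predicate are an error.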
absl::StatusOr<StateMap::CondId> FunctionalizeCond::JoinCondStatesNonMerge(
StateMap::CondId src, StateMap::CondId dst) {
VLOG(5) << "Joining src=" << DebugString(src) << " [" << src
<< "] and dst=" << DebugString(dst) << " [" << dst << "]";
if (state_map_.IsEmpty(dst) || state_map_.IsDead(src)) return src;
if (state_map_.IsDead(dst) || state_map_.IsEmpty(src)) return dst;
if (src == dst) return src;
StateMap::CondState both = *src;
for (const auto& kv : *dst) {
auto it = both.find(kv.first);
if (it == both.end()) {
both.insert(kv);
} else {
if (it->second != kv.second) {
if (it->second == BranchType::kNeither) {
it->second = kv.second;
} else if (kv.second == BranchType::kNeither) {
} else {
return errors::InvalidArgument(
"Graph contains node with inputs predicated on incompatible "
"predicates: ",
DebugString(src), " and ", DebugString(dst));
}
}
}
}
return state_map_.GetCondId(both);
}
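// Joins the CondStates flowing into a Merge node. The two states must differ
// on exactly one predicate, taken on opposite branches; that predicate is
// recorded as the Merge's predicate and the shared state is returned.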
absl::StatusOr<StateMap::CondId> FunctionalizeCond::JoinCondStatesMerge(
Node* merge, StateMap::CondId src, StateMap::CondId dst) {
VLOG(4) << "Joining (for merge) " << DebugString(src) << " and "
<< DebugString(dst);
if (state_map_.IsEmpty(dst)) return src;
if (state_map_.IsEmpty(src)) {
return errors::Internal("Merge node ", merge->name(),
" has input that's not in any CondContext.");
}
if (state_map_.IsDead(src)) return src;
if (state_map_.IsDead(dst)) return dst;
std::vector<StateMap::CondState::value_type> diff;
StateMap::CondState merged;
std::set_symmetric_difference(src->begin(), src->end(), dst->begin(),
dst->end(), std::back_inserter(diff),
CondStateLess());
std::set_intersection(src->begin(), src->end(), dst->begin(), dst->end(),
std::inserter(merged, merged.begin()), CondStateLess());
if (diff.size() == 2) {
auto pred = diff[0].first;
bool different_branches = (diff[0].second != diff[1].second) &&
(diff[0].second == BranchType::kThenBranch ||
diff[0].second == BranchType::kElseBranch) &&
(diff[1].second == BranchType::kThenBranch ||
diff[1].second == BranchType::kElseBranch);
if (!(pred == diff[1].first) || !different_branches)
return errors::InvalidArgument(
"Unable to determine predicate for merge node");
merge_to_predicate_[merge] = pred;
} else {
return errors::InvalidArgument(
"Merge of two inputs that differ on more than one predicate ",
DebugString(src), " and ", DebugString(dst));
}
return state_map_.GetCondId(merged);
}
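// Returns the CondState carried by edge `e`: for a Switch source the state is
// extended with the switch predicate on the branch the edge leaves from
// (kNeither for control edges); for any other source it is the source state.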
StateMap::CondId FunctionalizeCond::StateAlongEdge(const Edge* e) {
Node* src = e->src();
StateMap::CondId id = state_map_.LookupCondId(e->src());
if (state_map_.IsDead(id)) return id;
if (IsSwitch(src)) {
StateMap::CondState state;
if (id != nullptr) state = *id;
OutputTensor predicate;
TF_CHECK_OK(GetSwitchPredicate(*src, &predicate));
if (e->IsControlEdge()) {
state[predicate] = BranchType::kNeither;
} else {
state[predicate] = BranchType(e->src_output());
}
return state_map_.GetCondId(state);
}
return id;
}
Status FunctionalizeCond::DetermineCondStateMerge(Node* dst) {
if (state_map_.IsDead(state_map_.LookupCondId(dst))) return absl::OkStatus();
int data_inputs = 0;
for (auto e : dst->in_edges()) {
Node* src = e->src();
VLOG(5) << "Processing forward flow for merge: " << e->DebugString() << " "
<< state_map_.CondStateToString(src);
if (!src->IsOp()) continue;
if (!e->IsControlEdge()) ++data_inputs;
StateMap::CondId prop = StateAlongEdge(e);
auto id_or = JoinCondStatesMerge(dst, prop, state_map_.LookupCondId(dst));
TF_RETURN_WITH_CONTEXT_IF_ERROR(id_or.status(), "for node ",
FormatNodeForError(*dst));
state_map_.ResetCondId(dst, id_or.value());
}
if (data_inputs != 2) {
return errors::Unimplemented(
dst->name(), " only has ", data_inputs,
" inputs, while only merge nodes with two inputs supported.");
}
return absl::OkStatus();
}
Status FunctionalizeCond::DetermineCondStateNonMerge(Node* dst) {
for (auto e : dst->in_edges()) {
VLOG(4) << "Processing forward flow for: " << e->DebugString() << " "
<< state_map_.CondStateToString(dst);
Node* src = e->src();
if (!src->IsOp()) continue;
StateMap::CondId prop = StateAlongEdge(e);
auto id_or = JoinCondStatesNonMerge(prop, state_map_.LookupCondId(dst));
TF_RETURN_WITH_CONTEXT_IF_ERROR(id_or.status(), "for node ",
FormatNodeForError(*dst));
state_map_.ResetCondId(dst, id_or.value());
}
return absl::OkStatus();
}
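// For a Merge node whose CondState is dead, bypasses the Merge by forwarding
// its single remaining non-dead input directly to all of its consumers.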
Status FunctionalizeCond::RemoveRedundantMerge(Node* node) {
if (!state_map_.IsDead(state_map_.LookupCondId(node)))
return absl::OkStatus();
const Edge* non_dead_edge = nullptr;
for (auto e : node->in_edges()) {
if (e->IsControlEdge()) continue;
Node* src = e->src();
const auto& src_id = state_map_.LookupCondId(src);
if (!state_map_.IsDead(src_id)) {
non_dead_edge = e;
break;
}
}
if (non_dead_edge == nullptr) {
return errors::InvalidArgument("Merge node ", FormatNodeForError(*node),
" has no non-dead inputs.");
}
state_map_.MarkDead(node);
VLOG(5) << "removing redundant merge: " << node->name();
while (!node->out_edges().empty()) {
const Edge* oe = *node->out_edges().begin();
Node* dst_node = oe->dst();
int dst_port = oe->dst_input();
graph_->RemoveEdge(oe);
graph_->AddEdge(non_dead_edge->src(),
dst_port == Graph::kControlSlot
? Graph::kControlSlot
: non_dead_edge->src_output(),
dst_node, dst_port);
}
return absl::OkStatus();
}
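// A Switch is redundant if the branch it would take is already determined by
// the CondState reaching it, keyed either by its predicate or by its data
// input (traced through Identity nodes). Such a Switch is bypassed and
// consumers on the untaken branch are marked dead.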
Status FunctionalizeCond::RemoveRedundantSwitch(Node* node) {
StateMap::CondId dst_id = state_map_.LookupCondId(node);
if (state_map_.IsDead(dst_id)) return absl::OkStatus();
BranchType b;
OutputTensor pred;
TF_RETURN_IF_ERROR(GetSwitchPredicate(*node, &pred));
b = state_map_.FindBranchOf(dst_id, pred);
if (b != BranchType::kThenBranch && b != BranchType::kElseBranch) {
OutputTensor val;
const Edge* e;
TF_RETURN_IF_ERROR(node->input_edge(0, &e));
val = OutputTensor(e->src(), e->src_output());
while (IsIdentity(val.node)) {
TF_RETURN_IF_ERROR(val.node->input_edge(0, &e));
val = OutputTensor(e->src(), e->src_output());
}
b = state_map_.FindBranchOf(dst_id, val);
if (b != BranchType::kThenBranch && b != BranchType::kElseBranch)
return absl::OkStatus();
}
VLOG(5) << "Redundant switch " << node->name() << " " << Branch_Name(b) << " "
<< DebugString(dst_id);
const Edge* value_edge;
TF_RETURN_IF_ERROR(node->input_edge(0, &value_edge));
Node* val_node = value_edge->src();
int val_port = value_edge->src_output();
while (!node->out_edges().empty()) {
auto e = *node->out_edges().begin();
Node* dst_node = e->dst();
int dst_input = e->dst_input();
int switch_branch = e->src_output();
graph_->RemoveEdge(e);
if (switch_branch == Graph::kControlSlot) {
if (IsMerge(dst_node)) {
auto id_or = JoinCondStatesMerge(dst_node, dst_id,
state_map_.LookupCondId(dst_node));
TF_RETURN_WITH_CONTEXT_IF_ERROR(id_or.status(), "for node ",
FormatNodeForError(*dst_node));
state_map_.ResetCondId(dst_node, id_or.value());
} else {
auto id_or =
JoinCondStatesNonMerge(dst_id, state_map_.LookupCondId(dst_node));
TF_RETURN_IF_ERROR(id_or.status());
state_map_.ResetCondId(dst_node, id_or.value());
}
} else if (BranchType(switch_branch) != b) {
state_map_.MarkDead(dst_node);
continue;
}
graph_->AddEdge(
val_node,
switch_branch == Graph::kControlSlot ? Graph::kControlSlot : val_port,
dst_node, dst_input);
}
return absl::OkStatus();
}
Status FunctionalizeCond::DetermineStates(std::vector<Node*> rev_topo_order) {
for (auto it = rev_topo_order.rbegin(); it != rev_topo_order.rend(); ++it) {
Node* dst = *it;
TF_RETURN_IF_ERROR(DetermineCondState(dst));
TF_RETURN_IF_ERROR(DetermineAncestorState(dst));
if (IsSwitch(dst)) TF_RETURN_IF_ERROR(RemoveRedundantSwitch(dst));
if (IsMerge(dst)) TF_RETURN_IF_ERROR(RemoveRedundantMerge(dst));
VLOG(5) << dst->name() << " :: " << state_map_.CondStateToString(dst)
<< " @ " << state_map_.AncestorStateToString(dst);
if (VLOG_IS_ON(10)) DumpGraphWithCondState("it");
}
return absl::OkStatus();
}
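// The ancestor state of a node accumulates, over all inputs, the Merge nodes,
// switch predicates, and predicate-less Switch outputs it depends on; it is
// later used as part of the key when clustering Merge nodes.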
Status FunctionalizeCond::DetermineAncestorState(Node* dst) {
StateMap::AncestorId id = nullptr;
StateMap::AncestorState state;
auto insert = [&](StateMap::AncestorId id, Node* src) {
auto other_id = state_map_.LookupAncestorId(src);
if (other_id != id && other_id != nullptr) {
state.insert(other_id->begin(), other_id->end());
}
if (IsMerge(src)) {
state.insert({{src, 0}, AncestorNode::AncestorNodeType::kMerge});
} else if (IsSwitch(src)) {
OutputTensor pred;
if (GetSwitchPredicate(*src, &pred).ok()) {
state.insert({pred, AncestorNode::AncestorNodeType::kPred});
} else {
state.insert({{src, 0}, AncestorNode::AncestorNodeType::kSwitch});
}
}
return state_map_.GetAncestorId(state);
};
for (auto e : dst->in_edges()) {
Node* src = e->src();
id = insert(id, src);
}
state_map_.ResetAncestorId(dst, id);
return absl::OkStatus();
}
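// Removes the functionalized Switch and Merge nodes (where permitted by the
// node filter), everything transitively reachable from their data outputs,
// and any node whose CondState is dead.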
void FunctionalizeCond::DeleteReachableAndDeadNodes(
const std::vector<Node*>& merge_order) {
std::deque<int> delete_nodes;
std::vector<bool> deleted(graph_->num_node_ids(), false);
deleted[graph_->kSourceId] = true;
deleted[graph_->kSinkId] = true;
for (int s_id : switch_ids_) {
Node* s = graph_->FindNodeId(s_id);
if (s == nullptr) continue;
for (const Edge* e : s->out_edges()) {
if (!e->IsControlEdge()) delete_nodes.push_back(e->dst()->id());
}
if (!node_filter_ || node_filter_(s)) {
VLOG(2) << "Removing obsolete switch node " << s->name();
deleted[s_id] = true;
graph_->RemoveNode(s);
}
}
for (Node* m : merge_order) {
for (const Edge* e : m->out_edges()) {
if (!e->IsControlEdge()) delete_nodes.push_back(e->dst()->id());
}
if (!node_filter_ || node_filter_(m)) {
VLOG(2) << "Removing obsolete merge node " << m->name();
deleted[m->id()] = true;
graph_->RemoveNode(m);
}
}
for (Node* n : graph_->nodes()) {
if (state_map_.IsDead(state_map_.LookupCondId(n))) {
delete_nodes.push_back(n->id());
}
}
while (!delete_nodes.empty()) {
int d_id = delete_nodes.front();
delete_nodes.pop_front();
if (deleted[d_id]) continue;
Node* d = graph_->FindNodeId(d_id);
if (d == nullptr) continue;
for (const Edge* e : d->out_edges()) {
delete_nodes.push_back(e->dst()->id());
}
VLOG(2) << "Removing obsolete node " << d->name();
deleted[d_id] = true;
graph_->RemoveNode(d);
}
}
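// Sorts Merge nodes from innermost to outermost, using the size of a node's
// CondState as its nesting depth, so that inner conditionals are extracted
// before the conditionals that enclose them.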
void FunctionalizeCond::SortMergeNodes(std::vector<Node*>* merge_order) {
using sort_pair = std::pair<int, Node*>;
std::vector<sort_pair> inner_to_outer_merge_order;
inner_to_outer_merge_order.reserve(merge_order->size());
for (auto it = merge_order->rbegin(); it != merge_order->rend(); ++it) {
Node* merge = *it;
StateMap::CondId id = state_map_.LookupCondId(merge);
int depth = id != nullptr ? id->size() : 0;
inner_to_outer_merge_order.emplace_back(depth, merge);
}
std::stable_sort(
inner_to_outer_merge_order.begin(), inner_to_outer_merge_order.end(),
[](sort_pair lhs, sort_pair rhs) { return lhs.first > rhs.first; });
merge_order->clear();
for (sort_pair t : inner_to_outer_merge_order) {
merge_order->push_back(t.second);
}
}
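// Top-level pass: gathers Switch and Merge nodes, determines cond/ancestor
// states, groups Merge nodes into clusters that share state, ancestry, and
// predicate, converts each cluster into an If node, then deletes dead and
// obsolete nodes.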
Status FunctionalizeCond::FunctionalizeInternal() {
std::vector<Node*> rev_topo_order;
std::vector<Node*> merge_order;
DFS(*graph_, nullptr, [&](Node* n) {
if (!node_filter_ || node_filter_(n)) {
if (IsSwitch(n)) {
AddSwitchId(n->id());
}
if (IsMerge(n)) {
merge_order.push_back(n);
}
}
if (n->IsOp()) {
rev_topo_order.push_back(n);
}
});
if (merge_order.empty()) {
DeleteReachableAndDeadNodes(merge_order);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(DetermineStates(std::move(rev_topo_order)));
if (VLOG_IS_ON(4)) DumpGraphWithCondState("id");
ShapeRefiner shape_refiner{graph_->versions().producer(),
graph_->op_registry()};
std::vector<Node*> nodes;
GetReversePostOrder(*graph_, &nodes, NodeComparatorID());
for (auto node : nodes) {
if (!shape_refiner.AddNode(node).ok()) {
LOG(WARNING) << "Couldn't deduce shape for " << node->name();
}
}
SortMergeNodes(&merge_order);
std::deque<std::vector<Node*>> merge_clusters;
std::map<ClusterTuple, int, ClusterTupleLessThan> merge_cluster_index;
for (Node* merge : merge_order) {
auto cond_id = state_map_.LookupCondId(merge);
if (state_map_.IsDead(cond_id)) continue;
auto predicate = merge_to_predicate_.find(merge);
if (predicate == merge_to_predicate_.end()) {
return errors::Internal("Cannot find predicate for Merge node ",
merge->name());
}
ClusterTuple key = std::make_tuple(
cond_id, state_map_.LookupAncestorId(merge), predicate->second);
auto idx = merge_cluster_index.find(key);
if (idx == merge_cluster_index.end()) {
merge_cluster_index[key] = merge_clusters.size();
merge_clusters.push_back({merge});
} else {
merge_clusters[idx->second].emplace_back(merge);
}
}
for (const auto& cluster : merge_clusters) {
Conditional cond(merge_to_predicate_.at(cluster.front()), this, &state_map_,
shape_refiner);
for (Node* merge : cluster) TF_RETURN_IF_ERROR(cond.AddMerge(merge));
TF_RETURN_IF_ERROR(
cond.BuildAndReplace(graph_, library_, &merge_to_replacement_));
if (VLOG_IS_ON(4)) DumpGraphWithCondState("after_extract");
}
DeleteReachableAndDeadNodes(merge_order);
return absl::OkStatus();
}
void FunctionalizeCond::DumpGraphWithCondState(const string& name) {
const char* const kCondGroupDebugAttr = "_XlaFunctionalizeCondGroup";
for (Node* n : graph_->nodes()) {
n->ClearAttr(kCondGroupDebugAttr);
n->AddAttr(kCondGroupDebugAttr,
absl::StrCat(state_map_.CondStateToString(n), "_",
state_map_.AncestorStateToString(n)));
}
LOG(INFO) << "FunctionalizeControlFlow (" << name << "): "
<< DumpGraphToFile(absl::StrCat("functionalize_cond_", name),
*graph_, library_);
}
void FunctionalizeCond::AddSwitchId(int switch_id) {
switch_ids_.push_back(switch_id);
}
Status FunctionalizeCond::Functionalize(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter) {
VLOG(1) << "FunctionalizeCond::Functionalize";
FunctionalizeCond fc(graph, library, node_filter);
return fc.FunctionalizeInternal();
}
}
Status FunctionalizeCond(Graph* graph, FunctionLibraryDefinition* library,
const NodeFilter& node_filter) {
return functionalize_cond::FunctionalizeCond::Functionalize(graph, library,
node_filter);
}
} | #include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace functionalize_cond {
class FunctionalizeCondTest : public ::testing::Test {
protected:
FunctionalizeCondTest() {
graph_.reset(new Graph(OpRegistry::Global()));
flib_def_.reset(
new FunctionLibraryDefinition(OpRegistry::Global(), fdef_lib_));
fc_.reset(new functionalize_cond::FunctionalizeCond(
graph_.get(), flib_def_.get(), NodeFilter{}));
}
  StateMap::CondId GetUniqueId(const StateMap::CondState& state) {
return fc_->state_map_.GetCondId(state);
}
  string GetString(const StateMap::CondId id) {
return fc_->state_map_.CondStateToString(id);
}
absl::StatusOr<StateMap::CondId> JoinCondStatesNonMerge(
StateMap::CondId src, StateMap::CondId dst) {
return fc_->JoinCondStatesNonMerge(src, dst);
}
absl::StatusOr<StateMap::CondId> JoinCondStatesMerge(Node* n,
StateMap::CondId src,
StateMap::CondId dst) {
return fc_->JoinCondStatesMerge(n, src, dst);
}
FunctionDefLibrary fdef_lib_;
std::unique_ptr<functionalize_cond::FunctionalizeCond> fc_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<Graph> graph_;
};
namespace {
TEST_F(FunctionalizeCondTest, JoinCondStates) {
Tensor pred_tensor(DT_BOOL, TensorShape());
pred_tensor.flat<bool>().setZero();
Node* pred = test::graph::Constant(graph_.get(), pred_tensor, "pred");
Tensor val_tensor(DT_INT32, TensorShape());
val_tensor.flat<int>().setZero();
Node* val = test::graph::Constant(graph_.get(), val_tensor, "val");
Node* m = test::graph::Merge(graph_.get(), val, val);
StateMap::CondId then_branch;
{
StateMap::CondState ss;
ss.insert(std::make_pair(OutputTensor(pred, 0), BranchType::kThenBranch));
then_branch = GetUniqueId(ss);
}
StateMap::CondId else_branch;
{
StateMap::CondState ss;
ss.insert(std::make_pair(OutputTensor(pred, 0), BranchType::kElseBranch));
else_branch = GetUniqueId(ss);
}
Status status = JoinCondStatesNonMerge(then_branch, else_branch).status();
EXPECT_TRUE(errors::IsInvalidArgument(status));
auto joined_or = JoinCondStatesMerge(m, then_branch, else_branch);
TF_EXPECT_OK(joined_or.status());
StateMap::CondId joined = joined_or.value();
auto t = JoinCondStatesNonMerge(then_branch, joined);
TF_EXPECT_OK(t.status());
}
TEST_F(FunctionalizeCondTest, JoinCondStatesMergeWithInputNotInCondContext) {
Tensor val_tensor(DT_INT32, TensorShape());
val_tensor.flat<int>().setZero();
Node* val = test::graph::Constant(graph_.get(), val_tensor, "val");
Node* m = test::graph::Merge(graph_.get(), val, val);
StateMap::CondState cond_state;
auto joined_or = JoinCondStatesMerge(m, nullptr, &cond_state);
EXPECT_FALSE(joined_or.ok());
}
TEST(FunctionalizeCond, DuplicateConstNodes) {
Scope root = Scope::NewRootScope().ExitOnError();
auto const_op = ops::Const(root.WithOpName("const"), 1);
auto arg_0_op = ops::_Arg(root.WithOpName("arg_0"), DT_BOOL, 0);
auto arg_1_op = ops::_Arg(root.WithOpName("arg_1"), DT_INT32, 1);
auto switch_op = ops::Switch(root.WithOpName("switch"), arg_1_op, arg_0_op);
auto identity_n_false_op =
ops::IdentityN(root.WithOpName("identity_n_0"),
{switch_op.output_false, const_op, const_op});
auto identity_n_true_op =
ops::IdentityN(root.WithOpName("identity_n_1"),
{switch_op.output_true, const_op, const_op});
auto merge_op = ops::Merge(
root.WithOpName("merge"),
{identity_n_false_op.output.front(), identity_n_true_op.output.front()});
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
Graph graph(OpRegistry::Global());
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, &graph));
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
auto status = tensorflow::FunctionalizeCond(&graph, &flib_def);
TF_ASSERT_OK(status);
FunctionDefLibrary flib_def_proto = flib_def.ToProto();
for (const auto& fdef : flib_def_proto.function()) {
absl::flat_hash_set<absl::string_view> node_names;
for (const auto& node : fdef.node_def()) {
EXPECT_TRUE(node_names.insert(node.name()).second)
<< node.op() << " with duplicate node name '" << node.name()
<< "' found.";
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/functionalize_cond.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/functionalize_cond_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec664ca7-65e1-4a7d-acf8-ac959dafd8ea | cpp | tensorflow/tensorflow | sharding_util | tensorflow/compiler/tf2xla/sharding_util.cc | tensorflow/compiler/tf2xla/sharding_util_test.cc | #include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "absl/strings/match.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
const char kDeviceSuffixReplicatedCore[] = "REPLICATED_CORE";
const char kShardingAttribute[] = "_XlaSharding";
const char kShardingOpAttribute[] = "sharding";
}
namespace {
xla::OpMetadata CreateOpMetadata(const std::string& op_type,
const std::string& op_name) {
xla::OpMetadata metadata;
metadata.set_op_type(op_type);
metadata.set_op_name(op_name);
return metadata;
}
void AssignOpMetadataToSharding(xla::OpSharding& sharding,
const string& op_type, const string& op_name) {
auto metadata = CreateOpMetadata(op_type, op_name);
if (sharding.type() == xla::OpSharding::TUPLE) {
for (auto& sharding_element : *sharding.mutable_tuple_shardings()) {
*sharding_element.add_metadata() = metadata;
}
} else {
*sharding.add_metadata() = metadata;
}
}
Status CoreOutOfRangeError(int core, int num_cores_per_replica) {
return errors::InvalidArgument(
"Invalid replicated core id: ", core,
"; num_cores_per_replica=", num_cores_per_replica);
}
}
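// Derives sharding from a device assignment of the form
// "/device:<TYPE>_REPLICATED_CORE:<core>". An explicit sharding, when
// provided, always takes precedence over the device-derived one.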
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const string& device_name, int num_cores_per_replica,
std::optional<xla::OpSharding> explicit_sharding,
std::optional<xla::OpMetadata> metadata) {
if (device_name.empty()) {
return explicit_sharding;
}
DeviceNameUtils::ParsedName parsed_device;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_device)) {
return errors::InvalidArgument("Malformed assigned device '", device_name,
"'");
}
if (explicit_sharding.has_value()) {
return explicit_sharding;
} else if (!parsed_device.has_type || !parsed_device.has_id ||
!absl::StrContains(parsed_device.type,
kDeviceSuffixReplicatedCore)) {
return std::optional<xla::OpSharding>();
} else {
const int core = parsed_device.id;
if (core < 0 || core >= num_cores_per_replica) {
return CoreOutOfRangeError(core, num_cores_per_replica);
}
auto sharding = xla::sharding_builder::AssignDevice(core);
if (metadata.has_value()) {
*sharding.add_metadata() = metadata.value();
}
return std::optional<xla::OpSharding>(sharding);
}
}
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const NodeDef& node_def, int num_cores_per_replica, bool add_metadata) {
const string& device_name = node_def.device();
TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding,
GetShardingFromNodeDef(node_def, add_metadata));
return ParseShardingFromDevice(
device_name, num_cores_per_replica, sharding,
add_metadata ? std::optional<xla::OpMetadata>(
CreateOpMetadata(node_def.op(), node_def.name()))
: std::nullopt);
}
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice(
const Node& node, int num_cores_per_replica, bool add_metadata) {
string device_name = node.assigned_device_name();
if (device_name.empty()) {
device_name = node.requested_device();
}
TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding,
GetShardingFromNodeDef(node.def(), add_metadata));
return ParseShardingFromDevice(
device_name, num_cores_per_replica, sharding,
add_metadata ? std::optional<xla::OpMetadata>(
CreateOpMetadata(node.type_string(), node.name()))
: std::nullopt);
}
absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromEdgeSource(
const Edge& edge, int num_cores_per_replica, bool add_metadata) {
if (edge.src() == nullptr) {
return tensorflow::errors::InvalidArgument(
"Null src for ParseShardingFromEdgeSource edge=", edge.DebugString());
}
TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding,
ParseShardingFromDevice(
*edge.src(), num_cores_per_replica, add_metadata));
if (sharding.has_value() &&
sharding.value().type() == xla::OpSharding::TUPLE) {
if (edge.src_output() < 0 ||
edge.src_output() >= sharding.value().tuple_shardings_size()) {
return tensorflow::errors::InvalidArgument(
"Tuple index out of bound: edge=", edge.DebugString(),
" sharding=", sharding->DebugString());
}
std::optional<xla::OpSharding> subsharding =
sharding.value().tuple_shardings(edge.src_output());
return subsharding;
}
return sharding;
}
void SetShardingDeviceAssignmentFromNode(const Node& src, Node* dst) {
string device_name = src.assigned_device_name();
if (device_name.empty()) {
device_name = src.requested_device();
}
dst->set_assigned_device_name(device_name);
if (const AttrValue* attr = src.attrs().Find(kShardingAttribute)) {
dst->AddAttr(kShardingAttribute, *attr);
}
}
namespace {
absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDefInternal(
const NodeDef& node_def, bool add_metadata, const char* attribute) {
if (!HasNodeAttr(node_def, attribute)) {
return std::optional<xla::OpSharding>();
}
string value;
xla::OpSharding sharding;
TF_RETURN_IF_ERROR(GetNodeAttr(node_def, attribute, &value));
if (tensorflow::DecodeShardingAttribute(value, sharding).failed()) {
return xla::InvalidArgument(
"Experimental %s attribute was not a valid encoded xla::OpSharding "
"proto.",
attribute);
}
if (add_metadata) {
AssignOpMetadataToSharding(sharding, node_def.op(), node_def.name());
}
return std::optional<xla::OpSharding>(sharding);
}
}
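// Reads sharding from the "sharding" attribute for XlaSharding ops, falling
// back to the "_XlaSharding" attribute for all other nodes.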
absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDef(
const NodeDef& node_def, bool add_metadata) {
if (node_def.op() == "XlaSharding") {
TF_ASSIGN_OR_RETURN(auto sharding,
GetShardingFromNodeDefInternal(node_def, add_metadata,
kShardingOpAttribute));
if (sharding.has_value()) {
return sharding;
}
}
return GetShardingFromNodeDefInternal(node_def, add_metadata,
kShardingAttribute);
}
} | #include "tensorflow/compiler/tf2xla/sharding_util.h"
#include <functional>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CoreUtilTest, ParseShardingFromDevice) {
Graph graph(OpRegistry::Global());
auto core_from_sharding =
[](std::optional<xla::OpSharding> sharding) -> int64 {
if (sharding.has_value() &&
sharding.value().type() == xla::OpSharding::MAXIMAL) {
return sharding.value().tile_assignment_devices(0);
} else {
return -1;
}
};
auto parse_status = ParseShardingFromDevice("", 1);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(-1, core_from_sharding(parse_status.value()));
parse_status = ParseShardingFromDevice("", 100);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(-1, core_from_sharding(parse_status.value()));
parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:-1", 100);
EXPECT_FALSE(parse_status.ok());
parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:55", 100);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(55, core_from_sharding(parse_status.value()));
parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:100", 100);
EXPECT_FALSE(parse_status.ok());
parse_status = ParseShardingFromDevice("/cpu:0", 100);
TF_EXPECT_OK(parse_status.status());
EXPECT_EQ(-1, core_from_sharding(parse_status.value()));
}
class ShardingWithMetadataTest
: public ::testing::TestWithParam<xla::OpSharding> {};
TEST_P(ShardingWithMetadataTest, GetShardingFromNode) {
NodeDef node_def;
{
node_def.set_op("_Arg");
node_def.set_name("arg");
AttrValue xla_sharding;
xla_sharding.set_s("");
AttrValue index;
index.set_i(0);
AttrValue type;
type.set_type(DataType::DT_FLOAT);
node_def.mutable_attr()->insert(
{{"_XlaSharding", xla_sharding}, {"index", index}, {"T", type}});
}
auto check_metadata = [](const xla::OpSharding& sharding) {
ASSERT_EQ(sharding.metadata_size(), 1);
const auto& metadata = sharding.metadata(0);
EXPECT_EQ(metadata.op_type(), "_Arg");
EXPECT_EQ(metadata.op_name(), "arg");
};
auto test_sharding_metadata =
[&check_metadata](
const std::function<absl::StatusOr<std::optional<xla::OpSharding>>()>&
fn) {
auto status_or_sharding = fn();
TF_ASSERT_OK(status_or_sharding.status());
ASSERT_TRUE(status_or_sharding.value().has_value());
auto& sharding = status_or_sharding.value();
ASSERT_TRUE(sharding.has_value());
if (sharding->type() == xla::OpSharding::TUPLE) {
EXPECT_TRUE(sharding->metadata().empty());
for (const auto& sharding_element : sharding->tuple_shardings()) {
check_metadata(sharding_element);
}
} else {
check_metadata(sharding.value());
}
};
{
test_sharding_metadata([&node_def]() {
return GetShardingFromNodeDef(node_def, true);
});
}
{
test_sharding_metadata([&node_def]() {
return ParseShardingFromDevice(node_def, 1,
true);
});
}
{
Graph graph(OpRegistry::Global());
Status status;
Node* node = graph.AddNode(node_def, &status);
TF_ASSERT_OK(status);
test_sharding_metadata([node]() {
return ParseShardingFromDevice(*node, 1,
true);
});
}
}
xla::OpSharding CreateTupleSharding() {
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::TUPLE);
sharding.add_tuple_shardings()->set_type(xla::OpSharding::REPLICATED);
sharding.add_tuple_shardings()->set_type(xla::OpSharding::REPLICATED);
return sharding;
}
INSTANTIATE_TEST_SUITE_P(GetShardingFromNode, ShardingWithMetadataTest,
::testing::Values(xla::sharding_builder::Replicate(),
CreateTupleSharding()));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/sharding_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/sharding_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3f4d455-cc3d-487e-b16e-d692bc492cec | cpp | tensorflow/tensorflow | xla_jit_compiled_cpu_function | tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.cc | tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function_test.cc | #include "tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/tf2xla.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "xla/client/client_library.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/local_client.h"
#include "xla/cpu_function_runtime.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/service/cpu/buffer_info_util.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kHostPlatform[] = "Host";
absl::StatusOr<size_t> ComputeResultIndex(
const xla::BufferAssignment& buffer_assignment) {
TF_ASSIGN_OR_RETURN(const xla::BufferAllocation::Slice result_slice,
buffer_assignment.GetUniqueTopLevelOutputSlice());
return result_slice.index();
}
int CountResults(
absl::Span<const xla::cpu_function_runtime::BufferInfo> buffer_infos) {
int num_results = 0;
for (const auto& info : buffer_infos) {
if (info.is_result_parameter()) {
++num_results;
}
}
return num_results;
}
template <typename T>
void CollectNames(const T& entries, std::vector<string>* nonempty_names,
std::vector<const char*>* name_ptrs) {
for (const auto& entry : entries) {
const string& name = entry.name();
if (!name.empty()) {
nonempty_names->push_back(name);
}
}
name_ptrs->reserve(entries.size() + 1);
size_t nonempty_index = 0;
for (const auto& entry : entries) {
const string& name = entry.name();
if (!name.empty()) {
name_ptrs->push_back(nonempty_names->at(nonempty_index).c_str());
++nonempty_index;
} else {
name_ptrs->push_back("");
}
}
name_ptrs->push_back(nullptr);
}
}
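// JIT-compiles `graph_def` into an XLA CPU executable (with the thunk runtime
// explicitly disabled) and packages the raw compute function, buffer infos,
// result index, and feed/variable/fetch names into the static data consumed
// by XlaCompiledCpuFunction.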
absl::StatusOr<std::unique_ptr<XlaJitCompiledCpuFunction>>
XlaJitCompiledCpuFunction::Compile(
const GraphDef& graph_def, const tf2xla::Config& config,
const xla::ExecutableBuildOptions& build_options) {
TF_ASSIGN_OR_RETURN(se::Platform * platform,
xla::PlatformUtil::GetPlatform(kHostPlatform));
TF_ASSIGN_OR_RETURN(xla::LocalClient * client,
xla::ClientLibrary::GetOrCreateLocalClient(platform));
xla::XlaComputation computation;
TF_RETURN_IF_ERROR(tensorflow::ConvertGraphDefToXla(graph_def, config, client,
&computation));
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::ProgramShape> program_shape,
client->GetComputationShape(computation));
if (program_shape->result().element_type() != xla::TUPLE) {
return errors::Internal(
"XlaJitCompiledCpuFunction requires the XLA result to be a tuple");
}
program_shape->clear_parameter_names();
std::vector<const xla::Shape*> arg_shapes;
arg_shapes.reserve(program_shape->parameters_size());
for (int i = 0; i < program_shape->parameters_size(); ++i) {
arg_shapes.push_back(&program_shape->parameters(i));
}
xla::ExecutableBuildOptions build_options_copy = build_options;
build_options_copy.mutable_debug_options()->set_xla_cpu_use_thunk_runtime(
false);
TF_ASSIGN_OR_RETURN(auto executables, client->Compile(computation, arg_shapes,
build_options_copy));
TF_RET_CHECK(executables.size() == 1);
std::unique_ptr<xla::LocalExecutable> executable = std::move(executables[0]);
const xla::cpu::CpuExecutable* cpu_executable =
static_cast<xla::cpu::CpuExecutable*>(executable->executable());
XlaCompiledCpuFunction::RawFunction raw_function =
cpu_executable->compute_function();
const xla::BufferAssignment& buffer_assignment =
cpu_executable->buffer_assignment();
std::vector<xla::cpu_function_runtime::BufferInfo> buffer_infos =
xla::cpu::CreateBufferInfosFromBufferAssignment(cpu_executable->module(),
buffer_assignment);
std::vector<int32> arg_index_table =
xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
TF_ASSIGN_OR_RETURN(size_t result_index,
ComputeResultIndex(buffer_assignment));
const int num_results = CountResults(buffer_infos);
std::unique_ptr<XlaJitCompiledCpuFunction> jit_unique_ptr(
new XlaJitCompiledCpuFunction);
XlaJitCompiledCpuFunction* jit = jit_unique_ptr.get();
jit->executable_ = std::move(executable);
jit->buffer_infos_ = std::move(buffer_infos);
jit->arg_index_table_ = std::move(arg_index_table);
jit->program_shape_ =
std::make_unique<xla::ProgramShapeProto>(program_shape->ToProto());
XlaCompiledCpuFunction::set_static_data_raw_function(&jit->static_data_,
raw_function);
XlaCompiledCpuFunction::set_static_data_buffer_infos(
&jit->static_data_, jit->buffer_infos_.data());
XlaCompiledCpuFunction::set_static_data_num_buffers(
&jit->static_data_, jit->buffer_infos_.size());
XlaCompiledCpuFunction::set_static_data_arg_index_table(
&jit->static_data_, jit->arg_index_table_.data());
XlaCompiledCpuFunction::set_static_data_num_args(
&jit->static_data_, jit->arg_index_table_.size());
XlaCompiledCpuFunction::set_static_data_num_variables(&jit->static_data_,
config.variable_size());
XlaCompiledCpuFunction::set_static_data_num_results(&jit->static_data_,
num_results);
XlaCompiledCpuFunction::set_static_data_result_index(&jit->static_data_,
result_index);
CollectNames(config.feed(), &jit->nonempty_arg_names_, &jit->arg_names_);
auto variable_copy = config.variable();
for (auto& var : variable_copy) {
if (var.name().empty()) {
var.set_name(var.node_name());
}
}
CollectNames(variable_copy, &jit->nonempty_variable_names_,
&jit->variable_names_);
CollectNames(config.fetch(), &jit->nonempty_result_names_,
&jit->result_names_);
XlaCompiledCpuFunction::set_static_data_arg_names(&jit->static_data_,
jit->arg_names_.data());
XlaCompiledCpuFunction::set_static_data_variable_names(
&jit->static_data_, jit->variable_names_.data());
XlaCompiledCpuFunction::set_static_data_result_names(
&jit->static_data_, jit->result_names_.data());
XlaCompiledCpuFunction::set_static_data_program_shape(
&jit->static_data_, jit->program_shape_.get());
if (cpu_executable->hlo_profiling_enabled()) {
XlaCompiledCpuFunction::set_static_data_hlo_profile_printer_data(
&jit->static_data_, &cpu_executable->hlo_profile_printer_data());
XlaCompiledCpuFunction::set_static_data_profile_counters_size(
&jit->static_data_,
cpu_executable->hlo_profile_printer_data().profile_counters_size());
}
return std::move(jit_unique_ptr);
}
} | #include "tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.h"
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/local_client.h"
#include "xla/service/compiler.h"
#include "xla/service/platform_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
PLATFORM_DEFINE_ID(kFakePlatformId);
AttrValue TypeAttrValue(DataType type) {
AttrValue attr_value;
SetAttrValue(type, &attr_value);
return attr_value;
}
GraphDef SumGraph() {
GraphDef graph_def;
NodeDef* x = graph_def.add_node();
x->set_name("x");
x->set_op("Placeholder");
(*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* y = graph_def.add_node();
y->set_name("y");
y->set_op("Placeholder");
(*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* sum = graph_def.add_node();
sum->set_name("sum");
sum->set_op("Add");
sum->add_input("x");
sum->add_input("y");
(*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32);
return graph_def;
}
tf2xla::Config SumConfig() {
tf2xla::Config config;
tf2xla::Feed* x = config.add_feed();
x->mutable_id()->set_node_name("x");
x->set_name("x_name");
tf2xla::Feed* y = config.add_feed();
y->mutable_id()->set_node_name("y");
y->set_name("y_name");
tf2xla::Fetch* sum = config.add_fetch();
sum->mutable_id()->set_node_name("sum");
sum->set_name("sum_name");
return config;
}
GraphDef SumGraphVariable() {
constexpr char text_proto[] = R"pb(
node {
name: "x"
op: "VarHandleOp"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "shared_name"
value { s: "myvar" }
}
attr {
key: "shape"
value { shape { dim { size: 1 } } }
}
}
node {
name: "read"
op: "ReadVariableOp"
input: "x"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
node {
name: "y"
op: "Placeholder"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
node {
name: "sum"
op: "Add"
input: "read"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node {
name: "assign"
op: "AssignVariableOp"
input: "x"
input: "sum"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
# We use this identity op to make sure assign doesn't get pruned away.
node {
name: "out"
op: "Identity"
input: "y"
input: "^assign"
attr {
key: "T"
value { type: DT_INT32 }
}
})pb";
GraphDef graph;
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &graph));
return graph;
}
tf2xla::Config SumConfigVariable() {
constexpr char text_proto[] = R"pb(feed { id { node_name: "y" } }
variable {
node_name: "myvar"
shape { dim { size: 1 } }
type: DT_INT32
}
fetch { id { node_name: "out" } })pb";
tf2xla::Config config;
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &config));
return config;
}
TEST(XlaJitCompiledCpuFunction, CheckThunkDisabled) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
ASSERT_TRUE(jit->LocalExecutable().build_options().has_debug_options());
ASSERT_FALSE(jit->LocalExecutable()
.build_options()
.debug_options()
.xla_cpu_use_thunk_runtime());
}
TEST(XlaJitCompiledCpuFunction, Sum) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
XlaCompiledCpuFunction function(jit->StaticData());
ASSERT_EQ(function.num_args(), 2);
ASSERT_EQ(function.num_results(), 1);
*static_cast<int32*>(function.arg_data(0)) = 10;
*static_cast<int32*>(function.arg_data(1)) = 32;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 42);
*static_cast<int32*>(function.arg_data(0)) = 100;
*static_cast<int32*>(function.arg_data(1)) = 320;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 420);
EXPECT_TRUE(function.HasNameIndices());
EXPECT_EQ(function.LookupArgIndex("x_name"), 0);
EXPECT_EQ(function.LookupArgIndex("y_name"), 1);
EXPECT_EQ(function.LookupArgIndex(""), -1);
EXPECT_EQ(function.LookupArgIndex("x"), -1);
EXPECT_EQ(function.LookupArgIndex("y"), -1);
EXPECT_EQ(function.LookupArgIndex("sum"), -1);
EXPECT_EQ(function.LookupArgIndex("sum_name"), -1);
EXPECT_EQ(function.LookupResultIndex("sum_name"), 0);
EXPECT_EQ(function.LookupResultIndex(""), -1);
EXPECT_EQ(function.LookupResultIndex("x"), -1);
EXPECT_EQ(function.LookupResultIndex("y"), -1);
EXPECT_EQ(function.LookupResultIndex("sum"), -1);
EXPECT_EQ(function.LookupResultIndex("x_name"), -1);
EXPECT_EQ(function.LookupResultIndex("y_name"), -1);
EXPECT_EQ(0, function.num_variables());
EXPECT_EQ(function.LookupVariableIndex("x"), -1);
for (int i = 0; i < function.num_args(); ++i) {
const char* name = function.GetArgName(i);
ASSERT_NE(name, nullptr);
const int roundtrip_i = function.LookupArgIndex(name);
EXPECT_EQ(roundtrip_i, i) << " name= " << name;
}
for (int i = 0; i < function.num_results(); ++i) {
const char* name = function.GetResultName(i);
ASSERT_NE(name, nullptr);
const int roundtrip_i = function.LookupResultIndex(name);
EXPECT_EQ(roundtrip_i, i) << " name= " << name;
}
EXPECT_EQ(function.GetArgName(-1), nullptr);
EXPECT_EQ(function.GetArgName(function.num_args()), nullptr);
EXPECT_EQ(function.GetResultName(-1), nullptr);
EXPECT_EQ(function.GetResultName(function.num_results()), nullptr);
EXPECT_EQ(function.GetVariableName(0), nullptr);
using xla::ShapeUtil;
const xla::Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
ASSERT_TRUE(function.ProgramShape() != nullptr);
const xla::ProgramShape program_shape(*function.ProgramShape());
ASSERT_EQ(program_shape.parameters_size(), 2);
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(0), s32));
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(1), s32));
const xla::Shape& result = program_shape.result();
ASSERT_EQ(result.element_type(), xla::TUPLE);
ASSERT_EQ(ShapeUtil::TupleElementCount(result), 1);
const xla::Shape& result0 = ShapeUtil::GetTupleElementShape(result, 0);
EXPECT_TRUE(ShapeUtil::Compatible(result0, s32));
}
TEST(XlaJitCompiledCpuFunction, SumVariable) {
GraphDef graph_def = SumGraphVariable();
tf2xla::Config config = SumConfigVariable();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
XlaCompiledCpuFunction function(jit->StaticData());
ASSERT_EQ(function.num_args(), 2);
ASSERT_EQ(function.num_results(), 2);
*static_cast<int32*>(function.arg_data(0)) = 10;
*static_cast<int32*>(function.arg_data(1)) = 32;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 10);
EXPECT_EQ(*static_cast<int32*>(function.result_data(1)), 42);
*static_cast<int32*>(function.arg_data(0)) = 100;
*static_cast<int32*>(function.arg_data(1)) = 320;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 100);
EXPECT_EQ(*static_cast<int32*>(function.result_data(1)), 420);
EXPECT_TRUE(function.HasNameIndices());
EXPECT_EQ(2, function.num_args());
EXPECT_EQ(1, function.num_variables());
EXPECT_EQ(function.LookupVariableIndex("myvar"), 1);
const char* name = function.GetVariableName(0);
EXPECT_EQ(std::string(name), "myvar");
EXPECT_EQ(function.GetVariableName(1), nullptr);
EXPECT_EQ(function.GetVariableName(-1), nullptr);
using xla::ShapeUtil;
const xla::Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
const xla::Shape s32_1 = ShapeUtil::MakeShape(xla::S32, {1});
ASSERT_TRUE(function.ProgramShape() != nullptr);
const xla::ProgramShape program_shape(*function.ProgramShape());
ASSERT_EQ(program_shape.parameters_size(), 2);
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(0), s32));
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(1), s32_1));
const xla::Shape& result = program_shape.result();
ASSERT_EQ(result.element_type(), xla::TUPLE);
ASSERT_EQ(ShapeUtil::TupleElementCount(result), 2);
const xla::Shape& result0 = ShapeUtil::GetTupleElementShape(result, 0);
EXPECT_TRUE(ShapeUtil::Compatible(result0, s32));
}
TEST(XlaJitCompiledCpuFunction, CanCompileWithAdditionalPlatform) {
class FakePlatform : public se::Platform {
public:
FakePlatform() : name_("FakePlatform") {}
~FakePlatform() override {}
se::Platform::Id id() const override { return kFakePlatformId; }
int VisibleDeviceCount() const override { return 0; }
const string& Name() const override { return name_; }
absl::StatusOr<std::unique_ptr<se::DeviceDescription>> DescriptionForDevice(
int ordinal) const override {
return std::unique_ptr<se::DeviceDescription>(nullptr);
}
absl::StatusOr<se::StreamExecutor*> ExecutorForDevice(
int ordinal) override {
return nullptr;
}
private:
string name_;
};
TF_EXPECT_OK(
se::PlatformManager::RegisterPlatform(std::make_unique<FakePlatform>()));
xla::Compiler::RegisterCompilerFactory(kFakePlatformId, []() {
return std::unique_ptr<xla::Compiler>(nullptr);
});
EXPECT_THAT(xla::PlatformUtil::GetDefaultPlatform().status().message(),
HasSubstr("FakePlatform"));
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef04366f-e460-4ab7-b5bb-64cc8e582f89 | cpp | tensorflow/tensorflow | resource_util | tensorflow/compiler/tf2xla/resource_util.cc | tensorflow/compiler/tf2xla/resource_util_test.cc | #include "tensorflow/compiler/tf2xla/resource_util.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "xla/status_macros.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using tsl::StatusOr;
const char kIdentityNOp[] = "IdentityN";
const char kIfOp[] = "If";
const char kWhileOp[] = "While";
const char kArgOp[] = "_Arg";
const char kRetvalOp[] = "_Retval";
const int kMaxCallDepth = 100;
Status AnalyzeResourceUsage(
const Graph* graph, const std::optional<std::string>& function_name,
const int call_depth, const absl::flat_hash_set<int>& resource_arg_indices,
FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path);
bool IsControlFlowV1Node(const Node* n) {
return (n->IsEnter() || n->IsExit() || n->IsSwitch() || n->IsMerge() ||
n->IsNextIteration());
}
absl::StatusOr<absl::InlinedVector<const Edge*, 1>> OutputEdgesByIndex(
const Node& n, int idx) {
absl::InlinedVector<const Edge*, 1> res;
if (idx >= n.num_outputs()) {
return errors::InvalidArgument("Invalid out_edge index: ", idx, ", Node ",
n.name(), " only has ", n.num_outputs(),
" outputs.");
}
for (const Edge* o : n.out_edges()) {
if (o->src_output() == idx) res.emplace_back(o);
}
return res;
}
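// Returns true iff `n` creates a Stack or TensorArray resource, i.e. it is a
// resource op of one of those kinds whose first output has type DT_RESOURCE.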
bool IsStackOrTensorArraySource(const Node& n) {
const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(n.type_string());
if (!op_info) return false;
if (op_info->resource_kind() != XlaResourceKind::kStack &&
op_info->resource_kind() != XlaResourceKind::kTensorArray)
return false;
return n.num_outputs() > 0 && n.output_type(0) == DataType::DT_RESOURCE;
}
void PropagateFromStackOrTensorArraySourceOp(
const Node& n, const std::optional<std::string>& function_name,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
ResourceUsageAnalysis::NodeInfo src_node_info(function_name, n.name(),
n.type_string());
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
if (o->dst()->input_type(o->dst_input()) != DataType::DT_RESOURCE) {
continue;
}
(*user_to_source)[o] = src_node_info;
}
}
Status PropagateFromArgOp(
const Node& n, const std::optional<std::string>& function_name,
const absl::flat_hash_set<int>& resource_arg_indices,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
TF_RET_CHECK(n.type_string() == kArgOp);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", &index));
if (!resource_arg_indices.contains(index)) return absl::OkStatus();
TF_RET_CHECK(function_name.has_value())
<< "ResourceUsageAnalysis does not support analyzing _Arg nodes "
"carrying Stack/TensorArray resource in given graph unless they "
"are in function calls.";
const ResourceUsageAnalysis::NodeInfo src_node_info(function_name, n.name(),
n.type_string());
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
if (o->dst()->input_type(o->dst_input()) != DataType::DT_RESOURCE) {
continue;
}
(*user_to_source)[o] = src_node_info;
}
return absl::OkStatus();
}
Status UpdateResourceUsageFromFunctionBodyAnalysis(
const Node& call_node,
const std::optional<absl::string_view>& caller_function_name,
const FunctionBody& fbody,
const absl::flat_hash_map<
ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>&
called_function_source_to_path,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
caller_source_to_path) {
std::unordered_map<std::string, Node*> node_name_index =
fbody.graph->BuildNodeNameIndex();
for (const auto& it : called_function_source_to_path) {
ResourceUsageAnalysis::NodeInfo src_node_info = it.first;
if (src_node_info.op_ == kArgOp) {
const Node* arg_src = node_name_index[src_node_info.node_name_];
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(arg_src->attrs(), "index", &index));
const Edge* e;
TF_RETURN_IF_ERROR(call_node.input_edge(index, &e));
src_node_info = (*user_to_source)[e];
}
for (const auto& dst_node_info : it.second) {
if (dst_node_info.op_ == kRetvalOp) {
const Node* ret_user = node_name_index[dst_node_info.node_name_];
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(ret_user->attrs(), "index", &index));
absl::InlinedVector<const Edge*, 1> outs;
TF_ASSIGN_OR_RETURN(outs, OutputEdgesByIndex(call_node, index));
for (const Edge* o : outs) (*user_to_source)[o] = src_node_info;
} else {
(*caller_source_to_path)[src_node_info].emplace(dst_node_info);
}
}
}
return absl::OkStatus();
}
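// Instantiates the called function, recursively analyzes its body with the
// resource-carrying argument indices of this call site, and maps the
// resulting source-to-user paths back across the call boundary. Recursion is
// capped at kMaxCallDepth.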
Status PropagateThroughCallOp(
const Node& n, const std::optional<std::string>& function_name,
const int call_depth, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path) {
if (call_depth > kMaxCallDepth) {
return errors::InvalidArgument(
"Function call stack in given graph is too deep, last function ",
"name is: ", function_name.value());
}
absl::flat_hash_set<int> resource_arg_indices;
for (const Edge* e : n.in_edges()) {
if (user_to_source->contains(e)) {
resource_arg_indices.emplace(e->dst_input());
}
}
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(InstantiateFunctionCall(n.def(), lib_runtime, &handle));
auto release_handle_on_return = gtl::MakeCleanup(
[&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); });
const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle);
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
called_function_source_to_path;
TF_RETURN_IF_ERROR(AnalyzeResourceUsage(
fbody->graph, n.type_string(), call_depth + 1, resource_arg_indices,
lib_runtime, &called_function_source_to_path));
TF_RETURN_IF_ERROR(UpdateResourceUsageFromFunctionBodyAnalysis(
n, function_name, *fbody, called_function_source_to_path, user_to_source,
source_to_path));
return absl::OkStatus();
}
Status PropagateThroughIdentityOp(
const Node& n,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
TF_RET_CHECK(n.IsIdentity() || n.type_string() == kIdentityNOp);
if (n.IsIdentity()) {
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
const Edge* in;
TF_RETURN_IF_ERROR(n.input_edge(0, &in));
if (!user_to_source->contains(in)) continue;
user_to_source->emplace(std::make_pair(o, (*user_to_source)[in]));
}
} else {
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
const Edge* in;
TF_RETURN_IF_ERROR(n.input_edge(o->src_output(), &in));
if (!user_to_source->contains(in)) continue;
user_to_source->emplace(std::make_pair(o, (*user_to_source)[in]));
}
}
return absl::OkStatus();
}
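// Core analysis: walks the graph in reverse post order, propagating each
// Stack/TensorArray source along edges through _Arg nodes, function calls,
// and Identity/IdentityN nodes, then folds the per-edge map into
// `source_to_path`. Control flow v1 nodes and v2 ops (If/While) are rejected.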
Status AnalyzeResourceUsage(
const Graph* graph, const std::optional<std::string>& function_name,
const int call_depth, const absl::flat_hash_set<int>& resource_arg_indices,
FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path) {
source_to_path->clear();
std::vector<Node*> reverse_post_order;
GetReversePostOrder(*graph, &reverse_post_order, NodeComparatorName{});
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>
user_to_source;
for (const Node* n : reverse_post_order) {
if (IsControlFlowV1Node(n)) {
return errors::InvalidArgument(
"AnalyzeResourceUsage does not support control flow v1 node: ",
n->DebugString());
}
if (n->type_string() == kIfOp || n->type_string() == kWhileOp) {
return errors::InvalidArgument(
"AnalyzeResourceUsage does not yet support control flow v2 "
"node: ",
n->DebugString());
}
if (IsStackOrTensorArraySource(*n)) {
PropagateFromStackOrTensorArraySourceOp(*n, function_name,
&user_to_source);
continue;
}
if (n->IsArg()) {
TF_RETURN_IF_ERROR(PropagateFromArgOp(
*n, function_name, resource_arg_indices, &user_to_source));
continue;
}
if (IsFunctionCall(*lib_runtime->GetFunctionLibraryDefinition(), *n)) {
TF_RETURN_IF_ERROR(PropagateThroughCallOp(*n, function_name, call_depth,
lib_runtime, &user_to_source,
source_to_path));
continue;
}
if (n->IsIdentity() || n->type_string() == kIdentityNOp) {
TF_RETURN_IF_ERROR(PropagateThroughIdentityOp(*n, &user_to_source));
}
}
for (const auto& it : user_to_source) {
(*source_to_path)[it.second].emplace(function_name, it.first->dst()->name(),
it.first->dst()->type_string());
}
return absl::OkStatus();
}
}
Status ResourceUsageAnalysis::Analyze(
const Graph* graph, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<NodeInfo, absl::flat_hash_set<NodeInfo>>*
source_to_path) {
return AnalyzeResourceUsage(
graph, {}, 0,
absl::flat_hash_set<int>(), lib_runtime,
source_to_path);
}
} | #include "tensorflow/compiler/tf2xla/resource_util.h"
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
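// Parses a "function:node:op" triple into a NodeInfo; an empty function
// component denotes the top-level graph.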
ResourceUsageAnalysis::NodeInfo node_info_from_string(absl::string_view s) {
std::vector<std::string> tokens = absl::StrSplit(s, ':');
EXPECT_EQ(tokens.size(), 3);
ResourceUsageAnalysis::NodeInfo node_info;
if (tokens[0].empty()) {
node_info.function_name_ = std::nullopt;
} else {
node_info.function_name_ = std::move(tokens[0]);
}
node_info.node_name_ = std::move(tokens[1]);
node_info.op_ = std::move(tokens[2]);
return node_info;
}
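// Test helper: builds a Graph from `graphdef`, runs ResourceUsageAnalysis,
// and compares the computed source-to-path map against `expected`, whose
// keys and values use the "function:node:op" encoding above.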
void AnalyzeAndVerify(
const GraphDef& graphdef, FunctionLibraryDefinition* flib_def,
const absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>>&
expected) {
auto graph = std::make_unique<Graph>(flib_def);
TF_EXPECT_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), graphdef, graph.get()));
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
      /*device_mgr=*/nullptr, Env::Default(), /*config=*/nullptr,
      TF_GRAPH_DEF_VERSION,
flib_def, OptimizerOptions());
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
source_to_path;
TF_EXPECT_OK(ResourceUsageAnalysis::Analyze(graph.get(), lib_runtime,
&source_to_path));
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
expected_source_to_path;
  for (const auto& it : expected) {
auto src_node_info = node_info_from_string(it.first);
for (const std::string& user : it.second) {
expected_source_to_path[src_node_info].emplace(
node_info_from_string(user));
}
}
EXPECT_EQ(source_to_path, expected_source_to_path);
}
}
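// The tests below build small graphs of StackV2 sources and StackCloseV2
// users, optionally routed through Identity/IdentityN nodes or function
// calls, and verify the computed source-to-user mapping.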
TEST(ResourceOpAnalyzerTest, SingleResourceSingleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] =
absl::flat_hash_set<std::string>({":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceSingleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_identity_builder("resource_identity", "Identity",
op_reg);
resource_identity_builder.Input(stack_op);
Node* resource_identity = opts.FinalizeBuilder(&resource_identity_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_identity:Identity", ":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceMultipleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close1_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceMultipleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_identity_builder("resource_identity", "Identity",
op_reg);
resource_identity_builder.Input(stack_op);
Node* resource_identity = opts.FinalizeBuilder(&resource_identity_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close1_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_identity:Identity", ":stack_close0:StackCloseV2",
":stack_close1:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, MultipleResourceMultipleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op0_builder("stack_op0", "StackV2", op_reg);
stack_op0_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op0 = opts.FinalizeBuilder(&stack_op0_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close1_builder);
NodeBuilder stack_op1_builder("stack_op1", "StackV2", op_reg);
stack_op1_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op1 = opts.FinalizeBuilder(&stack_op1_builder);
NodeBuilder stack_close2_builder("stack_close2", "StackCloseV2", op_reg);
stack_close2_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close2_builder);
NodeBuilder stack_close3_builder("stack_close3", "StackCloseV2", op_reg);
stack_close3_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close3_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op0:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
expected[":stack_op1:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close2:StackCloseV2", ":stack_close3:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, MultipleResourceMultipleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op0_builder("stack_op0", "StackV2", op_reg);
stack_op0_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op0 = opts.FinalizeBuilder(&stack_op0_builder);
NodeBuilder stack_op1_builder("stack_op1", "StackV2", op_reg);
stack_op1_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op1 = opts.FinalizeBuilder(&stack_op1_builder);
NodeBuilder identity_n_builder("identity_n", "IdentityN", op_reg);
identity_n_builder.Input({stack_op0, stack_size_placeholder, stack_op1});
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close1_builder);
NodeBuilder stack_close2_builder("stack_close2", "StackCloseV2", op_reg);
stack_close2_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close2_builder);
NodeBuilder stack_close3_builder("stack_close3", "StackCloseV2", op_reg);
stack_close3_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close3_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op0:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
expected[":stack_op1:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close2:StackCloseV2", ":stack_close3:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourcePassThroughFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"pass_through_function",
{"in: resource"},
{"out: resource"},
{},
{{{"out"}, "Identity", {"in"}, {{"T", DataType::DT_RESOURCE}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder pass_through_fn_builder("pass_through_fn",
"pass_through_function", op_reg);
pass_through_fn_builder.Input(stack_op);
Node* pass_through_fn = opts.FinalizeBuilder(&pass_through_fn_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(pass_through_fn);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close:StackCloseV2", ":pass_through_fn:pass_through_function",
"pass_through_function:out:Identity"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourceUserInFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"resource_user_function",
{"in: resource"},
{},
{},
{{{"stack_close"},
"StackCloseV2",
{"in"},
{{"T", DataType::DT_RESOURCE}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_user_fn_builder("resource_user_function",
"resource_user_function", op_reg);
resource_user_fn_builder.Input(stack_op);
opts.FinalizeBuilder(&resource_user_fn_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_user_function:resource_user_function",
"resource_user_function:stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourceSourceInFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"resource_source_function",
{"in: int32"},
{"out: resource"},
{},
{{{"out"}, "StackV2", {"in"}, {{"elem_type", DataType::DT_FLOAT}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder resource_source_fn_builder("resource_source_function",
"resource_source_function", op_reg);
resource_source_fn_builder.Input(stack_size_placeholder);
Node* resource_source_function =
opts.FinalizeBuilder(&resource_source_fn_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(resource_source_function);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected["resource_source_function:out:StackV2"] =
absl::flat_hash_set<std::string>({":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
27d392a8-cabd-4861-a727-cdf786189ae2 | cpp | tensorflow/tensorflow | graph_compiler | tensorflow/compiler/tf2xla/graph_compiler.cc | tensorflow/compiler/tf2xla/graph_compiler_test.cc | #include "tensorflow/compiler/tf2xla/graph_compiler.h"
#include <deque>
#include <numeric>
#include <utility>
#include <vector>
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "xla/client/client_library.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
auto* graph_compiler_failed_compilation_op_count =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/graph_compilation_failed_op_count",
"Records an op that failed to compile",
"op_name");
namespace {
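// Converts the XlaExpressions feeding a function-call node into
// XlaCompiler::Arguments, resolving an input to a compile-time constant when
// backwards const analysis requires it and falling back to a parameter
// otherwise.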
Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph,
const std::vector<const XlaExpression*>& expressions,
const NameAttrList& func,
std::vector<XlaCompiler::Argument>* args) {
auto client = ctx->compiler()->client();
std::vector<bool> arg_must_be_compile_time_constant(expressions.size());
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*graph, &arg_must_be_compile_time_constant,
      /*compile_time_const_nodes=*/nullptr, ctx->function_library()));
args->resize(expressions.size());
for (int i = 0, end = args->size(); i < end; ++i) {
XlaCompiler::Argument& arg = (*args)[i];
arg.type = ctx->input_type(i);
arg.shape = ctx->InputShape(i);
switch (expressions[i]->kind()) {
case XlaExpression::Kind::kConstant:
arg.kind = XlaCompiler::Argument::kConstant;
arg.constant_value = *expressions[i]->constant_value();
break;
case XlaExpression::Kind::kXlaOp:
if (arg_must_be_compile_time_constant[i]) {
TF_ASSIGN_OR_RETURN(std::optional<Tensor> value,
expressions[i]->ResolveConstant(client));
if (value.has_value()) {
arg.kind = XlaCompiler::Argument::kConstant;
arg.constant_value = *value;
} else {
arg.kind = XlaCompiler::Argument::kParameter;
}
} else {
arg.kind = XlaCompiler::Argument::kParameter;
}
break;
case XlaExpression::Kind::kResource: {
XlaResource* resource = expressions[i]->resource();
XlaCompiler::PopulateArgumentFromResource(*resource, &arg);
break;
}
case XlaExpression::Kind::kTensorList: {
arg.kind = XlaCompiler::Argument::kTensorList;
const xla::XlaOp& tensor_list = expressions[i]->handle();
arg.shape = tensor_list.builder()->GetShape(tensor_list).value();
break;
}
case XlaExpression::Kind::kInvalid:
return errors::InvalidArgument("Invalid function argument");
}
}
return absl::OkStatus();
}
}
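// Compiles the graph node by node: validates acyclicity, orders nodes in
// reverse post order, instantiates each kernel, wires its inputs from
// previously produced outputs, and dispatches function calls to
// CompileFunctionalNode while all other ops run through the device's
// Compute().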
Status GraphCompiler::Compile() {
TF_RETURN_IF_ERROR(graph::ValidateGraphHasNoCycle(*graph_));
using NodeOutputs = std::vector<TensorValue>;
std::vector<NodeOutputs> output_registry(graph_->num_node_ids());
auto output_registry_cleanup = gtl::MakeCleanup([&output_registry] {
for (const NodeOutputs& outputs : output_registry) {
for (const TensorValue& value : outputs) {
CHECK(!value.is_ref());
delete value.tensor;
}
}
});
std::vector<Node*> topo_sorted_nodes;
GetReversePostOrder(*graph_, &topo_sorted_nodes,
NodeComparatorName());
OpKernelContext::Params params;
PartiallySetupParams(¶ms);
for (Node* n : topo_sorted_nodes) {
OpKernel* op_kernel_raw = nullptr;
Status s = flib_->CreateKernel(n->properties(), &op_kernel_raw);
std::unique_ptr<OpKernel> op_kernel(op_kernel_raw);
if (!s.ok()) {
s = AttachDef(s, *n);
LOG(ERROR) << "Executor failed to create kernel. " << s;
return s;
}
TF_RET_CHECK(!n->IsRecv() && !n->IsSend() && !n->IsSwitch())
<< "Not supported node: " << n->DebugString();
params.op_kernel = op_kernel.get();
absl::InlinedVector<AllocatorAttributes, 4> output_attr(n->num_outputs());
params.output_attr_array = output_attr.data();
tensor_inputs_.clear();
tensor_inputs_.resize(n->num_inputs());
for (auto* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
const Node* src = e->src();
const int output_registry_size = output_registry.size();
TF_RET_CHECK(src->id() < output_registry_size);
const NodeOutputs& src_outputs = output_registry[src->id()];
tensor_inputs_.at(e->dst_input()) = src_outputs.at(e->src_output());
}
params.inputs = tensor_inputs_;
OpKernelContext op_context(¶ms, n->num_outputs());
VLOG(3) << "Translating " << params.op_kernel->name();
if (IsFunctionCall(*flib_->GetFunctionLibraryDefinition(), *n)) {
TF_RETURN_IF_ERROR(CompileFunctionalNode(n, &op_context));
} else {
device_->Compute(CHECK_NOTNULL(params.op_kernel), &op_context);
Status s = op_context.status();
if (!s.ok()) {
graph_compiler_failed_compilation_op_count
->GetCell(params.op_kernel->def().op())
->IncrementBy(1);
return AttachDef(s, n->def());
}
}
NodeOutputs& outputs = output_registry[n->id()];
outputs.resize(n->num_outputs());
for (int o = 0; o < n->num_outputs(); ++o) {
outputs[o] = op_context.release_output(o);
if (outputs[o].tensor == nullptr) {
return errors::Internal("Missing xla_context ", o, "-th output from ",
FormatNodeForError(*n));
}
}
}
return absl::OkStatus();
}
namespace {
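// Extracts the callee name and attributes from a function-call node,
// handling PartitionedCall's 'f' attribute, functions found directly in the
// library, and the SymbolicGradient fallback.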
Status GetFunctionNameAndAttr(const FunctionLibraryRuntime& flib,
const Node& node, NameAttrList* func) {
if (node.IsPartitionedCall()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(
node.attrs().Find(FunctionLibraryDefinition::kFuncAttr, &attr_value));
if (!attr_value->has_func()) {
return errors::InvalidArgument(
"The attribute value for attribute 'f' in node ", node.DebugString(),
" does not have 'func' field set");
}
*func = attr_value->func();
return absl::OkStatus();
}
if (flib.GetFunctionLibraryDefinition()->Find(node.def().op())) {
func->set_name(node.type_string());
} else {
func->set_name(FunctionLibraryDefinition::kGradientOp);
}
*func->mutable_attr() = node.def().attr();
return absl::OkStatus();
}
}
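// Compiles a function-call node: builds arguments from the cached input
// expressions, compiles the callee into an XLA computation, emits xla::Call,
// and routes the tuple elements back out as constants, tensor lists, regular
// outputs, resource updates, and (when requested) a side-effect token.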
Status GraphCompiler::CompileFunctionalNode(Node* n,
OpKernelContext* op_context) {
TF_RET_CHECK(IsFunctionCall(*flib_->GetFunctionLibraryDefinition(), *n));
XlaOpKernelContext xla_op_context(op_context);
XlaContext& context = XlaContext::Get(op_context);
auto* b = context.builder();
XlaCompiler* compiler = xla_op_context.compiler();
NameAttrList func;
TF_RETURN_IF_ERROR(GetFunctionNameAndAttr(*flib_, *n, &func));
std::vector<const XlaExpression*> expressions;
for (auto tensor : tensor_inputs_) {
auto expression =
reinterpret_cast<const XlaExpression*>(tensor->tensor_data().data());
expressions.push_back(expression);
}
std::vector<XlaCompiler::Argument> arguments;
const FunctionBody* fbody;
TF_RETURN_IF_ERROR(compiler->FindFunctionBody(func, &fbody));
auto graph = compiler->GetGraph(fbody);
TF_RETURN_IF_ERROR(PrepareArguments(&xla_op_context, graph.get(), expressions,
func, &arguments));
bool add_token_input_output =
func.attr().find(kXlaTokenInputNodesAttrName) != func.attr().end();
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = false;
compile_options.add_token_input_output = add_token_input_output;
XlaCompiler::CompilationResult result;
TF_RETURN_IF_ERROR(
compiler->CompileFunction(compile_options, func, arguments, &result));
TF_RET_CHECK(arguments.size() == expressions.size());
std::vector<xla::XlaOp> handles;
for (int64_t i = 0, end = expressions.size(); i < end; ++i) {
if (arguments[i].kind == XlaCompiler::Argument::kConstant) {
continue;
}
if (arguments[i].kind == XlaCompiler::Argument::kResource) {
handles.push_back(expressions[i]->resource()->value());
} else {
handles.push_back(expressions[i]->handle());
}
}
if (add_token_input_output) {
std::vector<string> token_input_nodes;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(&func.attr()),
kXlaTokenInputNodesAttrName,
&token_input_nodes));
std::vector<xla::XlaOp> token_inputs;
for (const string& node_name : token_input_nodes) {
auto token_or = compiler->GetNodeToken(node_name);
TF_RETURN_IF_ERROR(token_or.status());
token_inputs.push_back(std::move(token_or).value());
}
xla::XlaOp token_input = xla::AfterAll(b, token_inputs);
handles.push_back(token_input);
}
auto output_handle = xla::Call(b, *result.computation, handles);
int computation_output = 0;
for (int64_t i = 0; i < n->num_outputs(); ++i) {
if (result.outputs[i].is_constant) {
xla_op_context.SetConstantOutput(i, result.outputs[i].constant_value);
} else {
if (result.outputs[i].is_tensor_list) {
xla_op_context.SetTensorListOutput(
i, xla::GetTupleElement(output_handle, computation_output));
} else {
xla_op_context.SetOutput(
i, xla::GetTupleElement(output_handle, computation_output));
}
++computation_output;
}
}
for (int64_t i = 0, end = result.resource_updates.size(); i < end; i++) {
if (result.resource_updates[i].modified) {
XlaResource* resource =
expressions[result.resource_updates[i].input_index]->resource();
xla::XlaOp updated_value =
xla::GetTupleElement(output_handle, i + n->num_outputs());
TF_RETURN_IF_ERROR(resource->SetValue(updated_value));
}
}
if (add_token_input_output) {
std::string node_name;
if (!GetNodeAttr(n->attrs(), kXlaOriginalOutsideCompilationNodeName,
&node_name)
.ok())
node_name = n->name();
TF_RETURN_IF_ERROR(compiler->SetNodeToken(
node_name, xla::GetTupleElement(output_handle, computation_output)));
}
return b->first_error();
}
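// Fills the OpKernelContext::Params fields shared by every node; per-node
// fields (kernel, inputs, output attributes) are set inside Compile().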
void GraphCompiler::PartiallySetupParams(OpKernelContext::Params* params) {
params->device = device_;
params->step_container = step_container_;
params->resource_manager = device_->resource_manager();
params->function_library = flib_;
}
} | #include "tensorflow/compiler/tf2xla/graph_compiler.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2xla/graph_compiler_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
constexpr char kOpCompilationFailureStreamz[] =
"/tensorflow/core/tf2xla/graph_compilation_failed_op_count";
class DummyOp : public XlaOpKernel {
public:
explicit DummyOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {}
};
REGISTER_KERNEL_BUILDER(Name("NoOp").Device(DEVICE_DEFAULT), DummyOp);
REGISTER_KERNEL_BUILDER(Name("NoOp").Device("XLA_TPU_JIT"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("NoOp").Device("XLA_CPU_JIT"), DummyOp);
class MockAlwaysFailsOp : public XlaOpKernel {
public:
explicit MockAlwaysFailsOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ctx->CtxFailure(__FILE__, __LINE__, errors::InvalidArgument("MockBroken"));
}
};
REGISTER_OP("MockAlwaysFails")
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
A test-only Op that always fails to compile.
)doc");
REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device(DEVICE_DEFAULT),
MockAlwaysFailsOp);
REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device("XLA_CPU_JIT"),
MockAlwaysFailsOp);
REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device("XLA_TPU_JIT"),
MockAlwaysFailsOp);
REGISTER_XLA_OP(Name("MockAlwaysFails").CompilationOnly(), MockAlwaysFailsOp);
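// Fixture that stands up an XlaCompilationDevice plus the XlaContext and
// function-library plumbing needed to drive GraphCompiler::Compile over an
// in-memory graph.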
class GraphCompilerTest : public ::testing::Test {
public:
void SetUp() override {
device_ = new tensorflow::XlaCompilationDevice(
tensorflow::SessionOptions(), tensorflow::DeviceType("XLA_TPU_JIT"));
device_mgr_ = std::make_unique<StaticDeviceMgr>(absl::WrapUnique(device_));
}
Status RunGraphCompiler(Graph& graph) {
ProcessFunctionLibraryRuntime runtime(
        device_mgr_.get(), Env::Default(), /*config=*/nullptr,
        TF_GRAPH_DEF_VERSION,
&graph.flib_def(), OptimizerOptions());
xla::XlaBuilder builder("test_builder");
XlaCompiler::Options options;
options.device_type = "XLA_TPU_JIT";
XlaCompiler xla_compiler(options);
XlaContext* xla_context = new XlaContext(&xla_compiler, &builder, &graph);
core::ScopedUnref context_unref(xla_context);
xla_context->Ref();
auto step_container =
std::make_unique<ScopedStepContainer>(0, [this](const string& name) {
Status status = this->device_->resource_manager()->Cleanup(name);
});
auto container_status = step_container->Create(
device_->resource_manager(), XlaContext::kXlaContextResourceName,
xla_context);
GraphCompiler graph_compiler(
device_, &graph, runtime.GetFLR(device_->name()), step_container.get());
return graph_compiler.Compile();
}
protected:
XlaCompilationDevice* device_;
std::unique_ptr<StaticDeviceMgr> device_mgr_;
};
TEST_F(GraphCompilerTest, CompilesGraph) {
Graph graph(OpRegistry::Global());
EXPECT_TRUE(RunGraphCompiler(graph).ok());
}
TEST_F(GraphCompilerTest, RecordsStreamzFailedCompilationNode) {
Graph graph(OpRegistry::Global());
Node* mock_fail;
ASSERT_TRUE(NodeBuilder("mock_fail", "MockAlwaysFails")
.Finalize(&graph, &mock_fail)
.ok());
graph.AddControlEdge(graph.source_node(), mock_fail);
graph.AddControlEdge(mock_fail, graph.sink_node());
CellReader<int64_t> op_reader(kOpCompilationFailureStreamz);
EXPECT_FALSE(RunGraphCompiler(graph).ok());
EXPECT_EQ(op_reader.Delta("MockAlwaysFails"), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/graph_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/graph_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
64411ee4-506a-45b4-acc7-4283142cc2d5 | cpp | tensorflow/tensorflow | xla_expression | tensorflow/compiler/tf2xla/xla_expression.cc | tensorflow/compiler/tf2xla/xla_expression_test.cc | #include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "xla/hlo/builder/value_inference.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
XlaExpression::XlaExpression() = default;
XlaExpression XlaExpression::Invalid() {
XlaExpression e;
e.kind_ = Kind::kInvalid;
return e;
}
XlaExpression XlaExpression::Constant(Tensor value) {
XlaExpression e;
e.kind_ = Kind::kConstant;
e.dtype_ = value.dtype();
e.constant_value_ = value;
return e;
}
XlaExpression XlaExpression::ConstantResource(Tensor value,
XlaResource* resource) {
XlaExpression e;
e.kind_ = Kind::kResource;
e.dtype_ = DT_RESOURCE;
e.resource_ = resource;
e.constant_value_ = value;
return e;
}
XlaExpression XlaExpression::XlaOp(xla::XlaOp value, DataType dtype) {
XlaExpression e;
e.kind_ = Kind::kXlaOp;
e.dtype_ = dtype;
e.handle_ = value;
return e;
}
XlaExpression XlaExpression::TensorList(xla::XlaOp tensor_list) {
XlaExpression e;
e.kind_ = Kind::kTensorList;
e.dtype_ = DT_VARIANT;
e.handle_ = tensor_list;
return e;
}
XlaExpression XlaExpression::Resource(XlaResource* resource) {
XlaExpression e;
e.kind_ = Kind::kResource;
e.dtype_ = DT_RESOURCE;
e.resource_ = resource;
return e;
}
string XlaExpression::HumanString() const {
switch (kind_) {
case Kind::kInvalid:
return "invalid";
case Kind::kConstant:
return "constant";
case Kind::kXlaOp:
return "xla_op";
case Kind::kResource:
return "resource";
case Kind::kTensorList:
return "tensor_list";
}
}
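// Materializes the expression as an xla::XlaOp on `builder`: constants are
// lowered through ConstantLiteral, existing ops and tensor lists are returned
// as-is after checking they belong to the same builder, and all other kinds
// are an error.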
xla::XlaOp XlaExpression::AsXlaOp(xla::XlaBuilder* builder) const {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<xla::XlaOp> {
switch (kind_) {
case Kind::kConstant: {
xla::BorrowingLiteral literal;
TF_RETURN_IF_ERROR(
HostTensorToBorrowingLiteral(*constant_value_, &literal));
return xla::ConstantLiteral(builder, literal);
}
case Kind::kTensorList:
TF_FALLTHROUGH_INTENDED;
case Kind::kXlaOp:
if (builder != handle_.builder()) {
return errors::InvalidArgument(
"Mismatched builders in XlaExpression::AsXlaOp");
}
return handle_;
default:
return errors::InvalidArgument("AsXlaOp called on XlaExpression: ",
HumanString());
}
});
}
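// Returns a DT_BOOL tensor marking which elements are dynamic: all-false for
// constants, and the result of xla::ValueInference::AnalyzeIsDynamic for XLA
// ops. Resources, tensor lists, and invalid expressions are rejected.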
absl::StatusOr<Tensor> XlaExpression::ResolveDynamism() const {
switch (kind()) {
case Kind::kConstant: {
Tensor constant_false(DT_BOOL, constant_value()->shape());
auto flat = constant_false.flat<bool>();
for (int64_t i = 0; i < flat.size(); ++i) flat(i) = false;
return constant_false;
}
case Kind::kXlaOp:
break;
case Kind::kTensorList:
TF_FALLTHROUGH_INTENDED;
case Kind::kResource:
TF_FALLTHROUGH_INTENDED;
case Kind::kInvalid:
return errors::InvalidArgument(
"ResolveDynamism called on unsupported XlaExpression: ",
HumanString());
}
  // GetShape() is invoked only for its error check; dynamism analysis does
  // not need the shape or a layout.
  TF_RETURN_IF_ERROR(GetShape().status());
xla::ValueInference value_inference(handle().builder());
TF_ASSIGN_OR_RETURN(xla::LiteralSlice literal,
value_inference.AnalyzeIsDynamic(handle()));
Tensor tensor(DT_BOOL);
TF_RETURN_IF_ERROR(LiteralToHostTensor(literal, DT_BOOL, &tensor));
return tensor;
}
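// Attempts to evaluate the expression at compile time. Constants and constant
// resources return their stored value. XLA ops go through ValueInference for
// the bound/value modes, otherwise through BuildConstantSubGraph plus
// client->ComputeConstant; std::nullopt means the value is not statically
// known.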
absl::StatusOr<std::optional<Tensor>> XlaExpression::ResolveConstant(
xla::Client* client, bool dynamic_dimension_is_minus_one,
xla::ValueInferenceMode mode) const {
switch (kind()) {
case Kind::kConstant:
case Kind::kResource:
return constant_value();
case Kind::kXlaOp:
break;
case Kind::kTensorList:
TF_FALLTHROUGH_INTENDED;
case Kind::kInvalid:
return errors::InvalidArgument(
"ResolveConstant called on XlaExpression: ", HumanString());
}
TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape());
std::vector<int64_t> layout_indices(shape.dims());
std::iota(layout_indices.rbegin(), layout_indices.rend(), 0);
xla::Layout layout = xla::LayoutUtil::MakeLayout(layout_indices);
if (mode == xla::ValueInferenceMode::kLowerBound ||
mode == xla::ValueInferenceMode::kUpperBound ||
mode == xla::ValueInferenceMode::kValue) {
xla::ValueInference value_inference(handle().builder());
TF_ASSIGN_OR_RETURN(xla::OptionalLiteral literal,
value_inference.AnalyzeConstant(handle(), mode));
if (!literal.GetValue().has_value()) {
return {std::nullopt};
}
Tensor tensor;
TF_RETURN_IF_ERROR(LiteralToHostTensor(
literal.GetValue().value().Relayout(layout), dtype(), &tensor));
return {tensor};
}
TF_ASSIGN_OR_RETURN(bool is_constant,
handle().builder()->IsConstant(handle()));
if (!is_constant) {
return {std::nullopt};
}
if (!client)
return errors::InvalidArgument("client is required to resolve constant");
TF_ASSIGN_OR_RETURN(xla::XlaComputation constant_graph,
handle().builder()->BuildConstantSubGraph(
handle(), dynamic_dimension_is_minus_one));
TF_ASSIGN_OR_RETURN(xla::Literal literal,
client->ComputeConstant(constant_graph, &layout));
Tensor tensor;
TF_RETURN_IF_ERROR(LiteralToHostTensor(literal, dtype(), &tensor));
return {tensor};
}
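// Shape accessors: GetShape returns a TensorShape (a scalar placeholder for
// resources without a constant value and for tensor lists), while GetXlaShape
// queries the builder directly for kXlaOp expressions.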
absl::StatusOr<TensorShape> XlaExpression::GetShape() const {
switch (kind_) {
case Kind::kConstant:
return constant_value()->shape();
case Kind::kResource:
if (constant_value()) {
return constant_value()->shape();
}
return TensorShape({});
case Kind::kXlaOp: {
TF_ASSIGN_OR_RETURN(xla::Shape xla_shape,
handle().builder()->GetShape(handle()));
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
return shape;
}
case Kind::kTensorList:
return TensorShape({});
case Kind::kInvalid:
return errors::InvalidArgument(
"GetShape() called on invalid XlaExpression");
}
}
absl::StatusOr<xla::Shape> XlaExpression::GetXlaShape() const {
if (kind_ == Kind::kXlaOp) {
return handle().builder()->GetShape(handle());
}
TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape());
return TensorShapeToXLAShape(dtype_, shape);
}
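// XlaExpressions are stored in the backing storage of placeholder Tensors on
// the XLA compilation device; these helpers reinterpret that storage,
// CHECK-failing if a read sees an invalid expression or a write targets an
// already-initialized one.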
const XlaExpression* XlaExpression::CastExpressionFromTensor(
const Tensor& tensor) {
const XlaExpression* expression =
reinterpret_cast<const XlaExpression*>(tensor.tensor_data().data());
CHECK(expression->kind() != XlaExpression::Kind::kInvalid)
<< expression->HumanString();
return expression;
}
void XlaExpression::AssignExpressionToTensor(const XlaExpression& value,
Tensor* tensor) {
const XlaExpression* expression =
reinterpret_cast<const XlaExpression*>(tensor->tensor_data().data());
CHECK(expression->kind() == XlaExpression::Kind::kInvalid)
<< expression->HumanString();
*const_cast<XlaExpression*>(expression) = value;
}
} | #include "tensorflow/compiler/tf2xla/xla_expression.h"
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2xla/xla_resource.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/literal_test_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class XlaExpressionTest : public ::testing::Test {
protected:
void SetUp() override {
client_ = xla::ClientLibrary::LocalClientOrDie();
builder_ = std::make_unique<xla::XlaBuilder>("acomputation");
constant_ = test::AsScalar<int32>(42);
op_ = xla::ConstantR0<int32>(builder_.get(), 7);
non_constant_op_ = xla::Parameter(
builder_.get(), 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
    resource_ = std::make_unique<XlaResource>(
        XlaResource::kVariable, /*arg_num=*/0, /*name=*/string("avariable"),
        /*type=*/DT_INT32, /*shape=*/TensorShape({17, 3}),
        /*initial_value=*/op_, /*max_array_size=*/-1,
        /*tensor_array_gradients=*/std::set<string>(),
        /*tensor_array_multiple_writes_aggregate=*/false);
}
xla::Client* client_;
std::unique_ptr<xla::XlaBuilder> builder_;
Tensor constant_;
xla::XlaOp op_;
xla::XlaOp non_constant_op_;
std::unique_ptr<XlaResource> resource_;
};
TEST_F(XlaExpressionTest, Kind) {
EXPECT_TRUE(XlaExpression::Kind::kInvalid == XlaExpression().kind());
EXPECT_TRUE(XlaExpression::Kind::kInvalid == XlaExpression::Invalid().kind());
EXPECT_TRUE(XlaExpression::Kind::kConstant ==
XlaExpression::Constant(constant_).kind());
EXPECT_TRUE(XlaExpression::Kind::kXlaOp ==
XlaExpression::XlaOp(op_, DT_INT32).kind());
EXPECT_TRUE(XlaExpression::Kind::kResource ==
XlaExpression::Resource(resource_.get()).kind());
}
TEST_F(XlaExpressionTest, HumanString) {
EXPECT_EQ("invalid", XlaExpression().HumanString());
EXPECT_EQ("invalid", XlaExpression::Invalid().HumanString());
EXPECT_EQ("constant", XlaExpression::Constant(constant_).HumanString());
EXPECT_EQ("xla_op", XlaExpression::XlaOp(op_, DT_INT32).HumanString());
EXPECT_EQ("resource", XlaExpression::Resource(resource_.get()).HumanString());
}
TEST_F(XlaExpressionTest, AsXlaOp) {
xla::XlaOp op_as_op =
XlaExpression::XlaOp(op_, DT_INT32).AsXlaOp(builder_.get());
EXPECT_TRUE(op_.IsIdenticalTo(op_as_op));
xla::XlaOp const_as_op =
XlaExpression::Constant(constant_).AsXlaOp(builder_.get());
TF_ASSERT_OK_AND_ASSIGN(xla::XlaComputation computation,
builder_->BuildConstantSubGraph(const_as_op));
TF_ASSERT_OK_AND_ASSIGN(xla::Literal value,
client_->ComputeConstant(computation));
EXPECT_TRUE(xla::LiteralTestUtil::Equal(xla::LiteralUtil::CreateR0<int32>(42),
value));
}
TEST_F(XlaExpressionTest, GetShape) {
EXPECT_FALSE(XlaExpression().GetShape().ok());
EXPECT_FALSE(XlaExpression::Invalid().GetShape().ok());
TF_ASSERT_OK_AND_ASSIGN(TensorShape resource_shape,
XlaExpression::Resource(resource_.get()).GetShape());
EXPECT_EQ(TensorShape({}), resource_shape);
TF_ASSERT_OK_AND_ASSIGN(TensorShape op_shape,
XlaExpression::XlaOp(op_, DT_INT32).GetShape());
EXPECT_EQ(TensorShape({}), op_shape);
TF_ASSERT_OK_AND_ASSIGN(TensorShape constant_shape,
XlaExpression::Constant(constant_).GetShape());
EXPECT_EQ(TensorShape({}), constant_shape);
}
TEST_F(XlaExpressionTest, ResolveConstant) {
EXPECT_FALSE(XlaExpression().ResolveConstant(client_).ok());
EXPECT_FALSE(XlaExpression::Invalid().ResolveConstant(client_).ok());
EXPECT_FALSE(XlaExpression::Resource(resource_.get())
.ResolveConstant(client_)
->has_value());
TF_ASSERT_OK_AND_ASSIGN(
std::optional<Tensor> op_constant,
XlaExpression::XlaOp(op_, DT_INT32).ResolveConstant(client_));
ASSERT_TRUE(op_constant.has_value());
test::ExpectTensorEqual<int32>(test::AsScalar<int32>(7), *op_constant);
TF_ASSERT_OK_AND_ASSIGN(std::optional<Tensor> op_nonconstant,
XlaExpression::XlaOp(non_constant_op_, DT_FLOAT)
.ResolveConstant(client_));
EXPECT_FALSE(op_nonconstant.has_value());
TF_ASSERT_OK_AND_ASSIGN(
std::optional<Tensor> constant_constant,
XlaExpression::Constant(constant_).ResolveConstant(client_));
ASSERT_TRUE(constant_constant.has_value());
test::ExpectTensorEqual<int32>(constant_, *constant_constant);
}
TEST_F(XlaExpressionTest, ResolveConstantOnResource) {
XlaExpression constant_resource =
XlaExpression::ConstantResource(constant_, resource_.get());
EXPECT_TRUE(constant_resource.ResolveConstant(client_).ok());
EXPECT_TRUE(resource_->SetZeroValue(builder_.get()).ok());
LOG(ERROR) << "Resource is overwritten: " << resource_->IsOverwritten();
absl::StatusOr<std::optional<Tensor>> resolved_constant =
constant_resource.ResolveConstant(client_);
EXPECT_TRUE(resolved_constant.ok());
EXPECT_FALSE(resolved_constant->has_value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_expression.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_expression_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9806f9ab-cc7d-4f52-854b-480e57fdf3cc | cpp | tensorflow/tensorflow | tf2xla_util | tensorflow/compiler/tf2xla/tf2xla_util.cc | tensorflow/compiler/tf2xla/tf2xla_util_test.cc | #include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include <functional>
#include <queue>
#include <random>
#include <set>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
Status ValidateTensorId(const tf2xla::TensorId& id) {
if (id.node_name().empty()) {
return errors::InvalidArgument("TensorId node_name must be non-empty");
}
if (id.output_index() < 0) {
return errors::InvalidArgument("TensorId output_index must be positive");
}
return absl::OkStatus();
}
Status CheckNameDuplicates(const string& kind, const string& name,
std::set<string>* names) {
if (!name.empty()) {
if (!names->insert(name).second) {
return errors::InvalidArgument("duplicate ", kind, " name: ", name);
}
}
return absl::OkStatus();
}
Status CheckFeedFetchNameConflicts(const string& kind,
const std::set<string>& names) {
for (const string& name : names) {
const string name_data(name + "_data");
if (names.find(name_data) != names.end()) {
return errors::InvalidArgument("conflicting ", kind, " name: ", name,
" and ", name_data);
}
}
return absl::OkStatus();
}
Status CopyAssociatedFunctions(Graph* g,
const FunctionLibraryDefinition* lookup_fld,
FunctionLibraryDefinition* fld) {
for (Node* n : g->op_nodes()) {
for (const auto& associated_function :
GetAssociatedFunctions(*n, lookup_fld)) {
switch (associated_function.type()) {
case AssociatedFunctionInfo::kFunctionCallNode: {
const FunctionDef* fdef =
lookup_fld->Find(associated_function.func_name());
if (!fdef) {
return errors::Internal(
"Cannot find function ", associated_function.func_name(),
" for function call node ", n->DebugString());
}
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*fdef));
break;
}
case AssociatedFunctionInfo::kSymbolicGradient:
case AssociatedFunctionInfo::kFunctionAttr:
break;
}
}
}
return absl::OkStatus();
}
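// Rewires `dst`'s input at `dst_input` to read from `with:with_output`,
// rebuilding the destination node via ReplaceNode and returning the
// replacement.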
absl::StatusOr<Node*> ReplaceEdge(Graph* g, Node* dst, int dst_input,
Node* with, int with_output) {
NodeDef replace_def = dst->def();
*replace_def.mutable_input(dst_input) = with->name();
TF_ASSIGN_OR_RETURN(Node * replace_node, ReplaceNode(g, dst, replace_def));
const Edge* usage_edge;
TF_RETURN_IF_ERROR(replace_node->input_edge(dst_input, &usage_edge));
g->RemoveEdge(usage_edge);
g->AddEdge(with, with_output, replace_node, dst_input);
return replace_node;
}
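// Rewrites every data edge leaving `src:src_output` to read from
// `replacement:0`, tracking node-id changes as downstream nodes are rebuilt.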
Status ReplaceSrcOutputUsageWithNode(Graph* g, Node* src, int src_output,
Node* replacement) {
VLOG(1) << "Replace usages of output " << src_output << " of node "
<< (VLOG_IS_ON(3) ? src->DebugString() : src->name()) << " with "
<< (VLOG_IS_ON(3) ? replacement->DebugString() : replacement->name());
struct OutEdgeInfo {
int dst_node_id, dst_input;
};
std::vector<OutEdgeInfo> usages;
for (const Edge* e : src->out_edges()) {
if (e->IsControlEdge() || e->src_output() != src_output) {
continue;
}
usages.push_back({e->dst()->id(), e->dst_input()});
}
for (int i = 0, end = usages.size(); i < end; i++) {
Node* usage_node = g->FindNodeId(usages[i].dst_node_id);
VLOG(2) << " Replace usage by " << usage_node->DebugString();
TF_ASSIGN_OR_RETURN(
Node * replace_node,
ReplaceEdge(g, usage_node, usages[i].dst_input, replacement, 0));
for (int j = i + 1, end = usages.size(); j < end; j++) {
if (usages[j].dst_node_id == usages[i].dst_node_id) {
usages[j].dst_node_id = replace_node->id();
}
}
}
return absl::OkStatus();
}
Status ReplaceArgUsageWithConstNode(
Graph* g,
const absl::flat_hash_map<int, const Node*>& const_input_index_to_node) {
absl::flat_hash_map<int, Node*> arg_nodes;
for (Node* n : g->op_nodes()) {
if (n->IsArg()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
arg_nodes[index] = n;
}
}
for (const auto& iter : const_input_index_to_node) {
int arg_index = iter.first;
VLOG(2) << "Replace usages of _Arg " << arg_index;
NodeDef const_def = iter.second->def();
const_def.set_name(g->NewName(const_def.name()));
TF_ASSIGN_OR_RETURN(Node * const_node, g->AddNode(const_def));
Node* arg_node = arg_nodes[arg_index];
TF_RETURN_IF_ERROR(
ReplaceSrcOutputUsageWithNode(g, arg_node, 0, const_node));
}
return absl::OkStatus();
}
Status ReplaceRetvalInputWithArg(
Graph* g,
const absl::flat_hash_map<int, const Node*>& const_input_index_to_node) {
absl::flat_hash_map<int, Node*> arg_nodes;
absl::flat_hash_map<int, Node*> ret_nodes;
for (Node* n : g->op_nodes()) {
if (n->IsRetval() || n->IsArg()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
if (n->IsRetval()) {
ret_nodes[index] = n;
} else {
arg_nodes[index] = n;
}
}
}
for (const auto& iter : const_input_index_to_node) {
int arg_index = iter.first;
VLOG(2) << "Bind _Retval " << arg_index << " to _Arg " << arg_index;
TF_RETURN_IF_ERROR(
ReplaceEdge(g, ret_nodes[arg_index], 0, arg_nodes[arg_index], 0)
.status());
}
return absl::OkStatus();
}
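// Instantiates the function named by `attr_name` on `n`, replaces _Arg usages
// with the given Const nodes (optionally rebinding matching _Retvals directly
// to their _Args), registers the rewritten body under a fresh unique name,
// and points the attribute at it.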
Status PropagateConstIntoFuncAttr(
Node* n, const string& attr_name,
const absl::flat_hash_map<int, const Node*>& const_input_index_to_node,
const FunctionLibraryDefinition* lookup_fld, FunctionLibraryDefinition* fld,
bool passthrough_arg_to_retval = false) {
VLOG(1) << "Propagate const into " << attr_name << " of node " << n->name();
NameAttrList func_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), attr_name, &func_attr));
const FunctionDef* fdef = lookup_fld->Find(func_attr.name());
if (!fdef) {
return errors::Internal("Cannot find function ", func_attr.name(),
" for node ", n->name());
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*fdef, AttrSlice(&func_attr.attr()), lookup_fld, &fbody));
Graph* func_graph = fbody->graph;
TF_RETURN_IF_ERROR(
ReplaceArgUsageWithConstNode(func_graph, const_input_index_to_node));
if (passthrough_arg_to_retval) {
TF_RETURN_IF_ERROR(
ReplaceRetvalInputWithArg(func_graph, const_input_index_to_node));
}
FunctionDef replace_fdef;
string new_func_name =
fld->UniqueFunctionName(absl::StrCat(func_attr.name(), "_const_"));
const StackTracesMap* stack_traces =
lookup_fld->GetStackTraces(func_attr.name());
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*func_graph, new_func_name, &replace_fdef));
if (stack_traces != nullptr) {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(replace_fdef, *stack_traces));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(replace_fdef, {}));
}
VLOG(1) << "replace func " << func_attr.name() << " with " << new_func_name;
func_attr.set_name(new_func_name);
n->ClearAttr(attr_name);
n->AddAttr(attr_name, func_attr);
TF_RETURN_IF_ERROR(CopyAssociatedFunctions(func_graph, lookup_fld, fld));
return absl::OkStatus();
}
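// For an If node, collects Const data inputs (shifted by one to skip the
// predicate) and propagates them into both the then and else branches.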
Status PropagateConstIntoIfNode(Graph* g, Node* if_node,
const FunctionLibraryDefinition* lookup_fld,
FunctionLibraryDefinition* fld) {
absl::flat_hash_map<int, const Node*> const_input_index_to_node;
for (int i = 1; i < if_node->num_inputs(); i++) {
const Node* input_node;
TF_RETURN_IF_ERROR(if_node->input_node(i, &input_node));
if (input_node->type_string() == "Const") {
const_input_index_to_node[i - 1] = input_node;
}
}
if (const_input_index_to_node.empty()) {
return absl::OkStatus();
}
for (const auto& attr_name :
std::vector<string>{"then_branch", "else_branch"}) {
TF_RETURN_IF_ERROR(PropagateConstIntoFuncAttr(
if_node, attr_name, const_input_index_to_node, lookup_fld, fld));
}
return absl::OkStatus();
}
using GraphCache = absl::flat_hash_map<string, std::unique_ptr<FunctionBody>>;
absl::StatusOr<FunctionBody*> FindOrInsert(
GraphCache* cache, const NameAttrList& body_attr,
const FunctionLibraryDefinition* lookup_fld,
const FunctionLibraryDefinition* fallback_fld) {
const string name = body_attr.name();
std::unique_ptr<FunctionBody>& value = (*cache)[name];
if (!value) {
const FunctionDef* body_func = lookup_fld->Find(name);
if (!body_func && fallback_fld != nullptr) {
body_func = fallback_fld->Find(name);
}
if (!body_func) {
return errors::Internal("Traverse: Cannot find body function ", name);
}
std::unique_ptr<FunctionBody> fbody;
Status s = FunctionDefToBodyHelper(*body_func, AttrSlice(&body_attr.attr()),
lookup_fld, &fbody);
if (!s.ok() && fallback_fld != nullptr) {
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*body_func, AttrSlice(&body_attr.attr()), fallback_fld, &fbody));
}
value = std::move(fbody);
}
return value.get();
}
absl::StatusOr<bool> IsLoopInvariant(
const FunctionBody* loop_body, int index,
const FunctionLibraryDefinition* lookup_fld,
const FunctionLibraryDefinition* fallback_fld, GraphCache* cache);
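// Walks backward from `src` across ops that pass their input through
// unmodified, stopping at While outputs that are not loop invariant, and
// returns the first edge whose producer may actually define the value.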
absl::StatusOr<const Edge*> TraverseUnmodifiedPathBackward(
const Edge* src, const FunctionLibraryDefinition* lookup_fld,
const FunctionLibraryDefinition* fallback_fld, GraphCache* cache) {
const Edge* e = src;
VLOG(2) << "Traverse: Begin at " << e->DebugString();
while (IsConstTraversableOpType(e->src())) {
VLOG(3) << e->DebugString();
if (e->src()->IsWhileNode()) {
NameAttrList body_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(e->src()->def(), "body", &body_attr));
TF_ASSIGN_OR_RETURN(
FunctionBody * fbody,
FindOrInsert(cache, body_attr, lookup_fld, fallback_fld));
TF_ASSIGN_OR_RETURN(bool is_loop_invariant,
IsLoopInvariant(fbody, e->src_output(), lookup_fld,
fallback_fld, cache));
if (!is_loop_invariant) {
VLOG(2) << "Non-loop-invariant: index " << e->src_output() << " of "
<< body_attr.name();
break;
}
}
TF_RETURN_IF_ERROR(e->src()->input_edge(e->src_output(), &e));
}
VLOG(2) << "Traverse: Finish at " << e->DebugString();
return e;
}
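// A loop state element is invariant iff the index-th _Retval of the body,
// traced backward through unmodified ops, reaches the index-th _Arg.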
absl::StatusOr<bool> IsLoopInvariant(
const FunctionBody* loop_body, int index,
const FunctionLibraryDefinition* lookup_fld,
const FunctionLibraryDefinition* fallback_fld, GraphCache* cache) {
const Edge* e;
TF_RETURN_IF_ERROR(loop_body->ret_nodes[index]->input_edge(0, &e));
TF_ASSIGN_OR_RETURN(
const Edge* reachable,
TraverseUnmodifiedPathBackward(e, lookup_fld, fallback_fld, cache));
if (reachable->src()->id() == loop_body->arg_nodes[index]->id()) {
VLOG(2) << "Index " << index << " is loop invariant.";
return true;
}
VLOG(2) << "Index " << index << " not loop invariant: "
<< "walk backward from " << e->src()->DebugString() << " to "
<< reachable->src()->DebugString() << " did not reach "
<< loop_body->arg_nodes[index]->DebugString();
return false;
}
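// For a While node: every input that resolves to a Const and is loop
// invariant is propagated into both cond and body (with the body's matching
// _Retval rebound to its _Arg), and downstream uses of that While output are
// rewired to read the Const directly.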
Status PropagateConstIntoAndAroundWhileNode(
Graph* g, Node* while_node, const FunctionLibraryDefinition* lookup_fld,
FunctionLibraryDefinition* fld) {
VLOG(1) << "Propagate const into " << while_node->name();
absl::flat_hash_map<int, const Node*> const_input_index_to_node;
absl::flat_hash_map<int, Node*> const_input_index_to_mutable_node;
NameAttrList body_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "body", &body_attr));
const string fn_name = body_attr.name();
const FunctionDef* body_func = lookup_fld->Find(fn_name);
if (!body_func) {
return errors::Internal("Propagate: Cannot find body function ", fn_name,
" for While node ", while_node->name());
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*body_func, AttrSlice(&body_attr.attr()), lookup_fld, &fbody));
GraphCache cache;
for (int i = 0; i < while_node->num_inputs(); i++) {
if (i >= body_func->signature().output_arg_size()) {
break;
}
const Edge* input_edge;
TF_RETURN_IF_ERROR(while_node->input_edge(i, &input_edge));
TF_ASSIGN_OR_RETURN(input_edge, TraverseUnmodifiedPathBackward(
input_edge, lookup_fld, fld, &cache));
if (!input_edge->src()->IsConstant()) {
VLOG(2) << "Input " << i << " is not Const; is "
<< input_edge->src()->type_string();
continue;
}
TF_ASSIGN_OR_RETURN(
bool is_loop_invariant,
IsLoopInvariant(fbody.get(), i, lookup_fld, fld, &cache));
if (!is_loop_invariant) {
VLOG(2) << "While state not loop-invariant; not propagating Const " << i;
continue;
}
VLOG(2) << "While state is loop-invariant; propagating Const " << i;
const_input_index_to_mutable_node[i] = input_edge->src();
const_input_index_to_node[i] = input_edge->src();
}
if (const_input_index_to_node.empty()) {
return absl::OkStatus();
}
for (const auto& attr_name : std::vector<string>{"cond", "body"}) {
TF_RETURN_IF_ERROR(PropagateConstIntoFuncAttr(
while_node, attr_name, const_input_index_to_node, lookup_fld, fld,
attr_name == "body"));
}
for (const auto& it : const_input_index_to_mutable_node) {
TF_RETURN_IF_ERROR(
ReplaceSrcOutputUsageWithNode(g, while_node, it.first, it.second));
}
return absl::OkStatus();
}
}
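// Public wrapper over the cached IsLoopInvariant above, with no fallback
// function library.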
absl::StatusOr<bool> IsLoopInvariant(
const FunctionBody* loop_body, int index,
const FunctionLibraryDefinition* lookup_fld) {
GraphCache cache;
return IsLoopInvariant(loop_body, index, lookup_fld,
nullptr, &cache);
}
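// Validates the feeds and fetches in `config`: tensor ids must be
// well-formed, feed shapes valid, names unique and non-conflicting, and at
// least one fetch must be present.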
Status ValidateConfig(const tf2xla::Config& config) {
std::set<string> names;
for (const tf2xla::Feed& feed : config.feed()) {
TF_RETURN_IF_ERROR(ValidateTensorId(feed.id()));
TF_RETURN_IF_ERROR(TensorShape::IsValidShape(feed.shape()));
TF_RETURN_IF_ERROR(CheckNameDuplicates("feed", feed.name(), &names));
}
TF_RETURN_IF_ERROR(CheckFeedFetchNameConflicts("feed", names));
names.clear();
for (const tf2xla::Fetch& fetch : config.fetch()) {
TF_RETURN_IF_ERROR(ValidateTensorId(fetch.id()));
TF_RETURN_IF_ERROR(CheckNameDuplicates("fetch", fetch.name(), &names));
}
TF_RETURN_IF_ERROR(CheckFeedFetchNameConflicts("fetch", names));
if (config.fetch().empty()) {
return errors::InvalidArgument("fetches must be specified");
}
return absl::OkStatus();
}
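// Adds a Placeholder node for each feed, inferring its dtype either from the
// config or from the fed node's output type, records the feed-to-placeholder
// remapping, and rewires inputs that referenced a feed tensor to read from
// the new placeholder.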
Status AddPlaceholdersForFeeds(
const tf2xla::Config& config, const OpRegistryInterface* op_registry,
std::unordered_map<string, string>* feed_remapping, GraphDef* graph_def) {
struct PlaceholderInfo {
const tf2xla::Feed* feed = nullptr;
string placeholder_name;
DataType data_type = DT_INVALID;
};
std::map<string, PlaceholderInfo> placeholder_info;
for (int i = 0; i < config.feed_size(); ++i) {
const tf2xla::Feed* feed = &config.feed(i);
const string name_port = TensorIdToString(feed->id());
PlaceholderInfo& info = placeholder_info[name_port];
info.feed = feed;
info.placeholder_name = absl::StrCat("aot_feed_", feed->id().output_index(),
"/", feed->id().node_name());
(*feed_remapping)[name_port] = info.placeholder_name;
}
std::unordered_map<string, const NodeDef*> name_to_node;
for (int i = 0; i < graph_def->node_size(); ++i) {
name_to_node[graph_def->node(i).name()] = &graph_def->node(i);
}
for (auto it = placeholder_info.begin(); it != placeholder_info.end(); ++it) {
PlaceholderInfo& info = it->second;
const tf2xla::TensorId& feed_id = info.feed->id();
auto node_it = name_to_node.find(feed_id.node_name());
if (node_it == name_to_node.end()) {
return errors::NotFound("Can't find feed node: ",
TensorIdToString(feed_id));
}
const NodeDef* existing = node_it->second;
if (info.feed->type() != DT_INVALID) {
info.data_type = info.feed->type();
} else {
GraphDef gd;
*gd.mutable_versions() = graph_def->versions();
*gd.add_node() = *existing;
MergeDebugInfo(NodeDebugInfo(*existing), gd.mutable_node(0));
TF_RETURN_IF_ERROR(
          AddDefaultAttrsToGraphDef(&gd, *op_registry, /*node_offset=*/0));
Graph g(op_registry);
g.set_versions(graph_def->versions());
TF_ASSIGN_OR_RETURN(Node * feed_node, g.AddNode(gd.node(0)));
if (info.feed->id().output_index() < feed_node->num_outputs()) {
info.data_type =
BaseType(feed_node->output_type(info.feed->id().output_index()));
} else {
return errors::InvalidArgument(
"Invalid output_index ", info.feed->id().output_index(),
" for feed node ", info.feed->id().node_name());
}
}
}
for (auto it = placeholder_info.begin(); it != placeholder_info.end(); ++it) {
const PlaceholderInfo& info = it->second;
NodeDef* d = graph_def->add_node();
d->set_name(info.placeholder_name);
d->set_op("Placeholder");
auto& attr_map = *d->mutable_attr();
attr_map["dtype"].set_type(info.data_type);
*attr_map["shape"].mutable_shape() = info.feed->shape();
}
for (int i = 0; i < graph_def->node_size(); ++i) {
NodeDef* node_def = graph_def->mutable_node(i);
for (int j = 0; j < node_def->input_size(); ++j) {
auto id = ParseTensorName(node_def->input(j));
auto it = placeholder_info.find(id.ToString());
if (it != placeholder_info.end()) {
node_def->set_input(j, it->second.placeholder_name);
}
}
}
return absl::OkStatus();
}
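// Copies into `out` only the nodes of `in` that are reachable backwards from
// the fetches, stopping the traversal at feed tensors.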
Status PruneGraphDefInto(const tf2xla::Config& config, const GraphDef& in,
GraphDef* out) {
*out = in;
out->clear_node();
std::set<std::pair<string, int>> feed_tensors;
for (const tf2xla::Feed& feed : config.feed()) {
feed_tensors.insert(
std::make_pair(feed.id().node_name(), feed.id().output_index()));
}
std::unordered_map<string, std::pair<bool, const NodeDef*>> node_by_name;
for (const NodeDef& node : in.node()) {
node_by_name[node.name()] = std::pair<bool, const NodeDef*>(false, &node);
}
std::queue<string> name_queue;
for (int i = 0; i < config.fetch_size(); ++i) {
name_queue.push(config.fetch(i).id().node_name());
}
while (!name_queue.empty()) {
const string name = name_queue.front();
name_queue.pop();
auto find_it = node_by_name.find(name);
if (find_it == node_by_name.end()) {
return errors::InvalidArgument("While pruning graph, node ", name,
" needed but not found in the graph.");
}
auto& map_entry = find_it->second;
if (map_entry.first) {
continue;
}
map_entry.first = true;
for (const string& in_edge : map_entry.second->input()) {
auto id = ParseTensorName(in_edge);
const string node_name = string(id.first);
if (feed_tensors.find(std::make_pair(node_name, id.second)) ==
feed_tensors.end()) {
name_queue.push(node_name);
      } else {
        // The input is a feed tensor; do not traverse past it.
      }
}
}
out->mutable_node()->Reserve(in.node_size());
for (const NodeDef& node : in.node()) {
if (node_by_name[node.name()].first) {
*out->add_node() = node;
}
}
return absl::OkStatus();
}
string TensorIdToString(const tf2xla::TensorId& id) {
return absl::StrCat(id.node_name(), ":", id.output_index());
}
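// Copies the device assignment of the neighbor (across out- or in-edges,
// per `out_edges`) that carries the lowest-numbered MAXIMAL core sharding,
// if any such neighbor exists.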
Status SetNodeShardingFromNeighbors(Node* n, bool out_edges) {
int core = -1;
const Node* matching_node = nullptr;
for (const Edge* edge : (out_edges ? n->out_edges() : n->in_edges())) {
if (edge->IsControlEdge()) continue;
const Node* possible_match = out_edges ? edge->dst() : edge->src();
TF_ASSIGN_OR_RETURN(
std::optional<xla::OpSharding> sharding,
ParseShardingFromDevice(
*possible_match,
            /*num_cores_per_replica=*/std::numeric_limits<int32>::max(),
            /*add_metadata=*/false));
if (sharding && sharding->type() == xla::OpSharding::MAXIMAL) {
const int core_annotation = sharding.value().tile_assignment_devices(0);
if (core == -1 || core > core_annotation) {
core = core_annotation;
matching_node = possible_match;
}
}
}
if (matching_node != nullptr) {
n->set_assigned_device_name(matching_node->assigned_device_name());
n->set_requested_device(matching_node->requested_device());
}
return absl::OkStatus();
}
void AddDtypeToKernelDefConstraint(absl::string_view name, DataType dtype,
KernelDef* kdef) {
for (KernelDef::AttrConstraint& constraint : *kdef->mutable_constraint()) {
if (constraint.name() == name) {
constraint.mutable_allowed_values()->mutable_list()->add_type(dtype);
}
}
}
namespace {
uint32 InitialRandomSeed() {
  std::random_device rd;
  // OR with 1 so the initial seed is always odd.
  return rd() | 1;
}
}
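// Returns a fresh odd seed: the counter starts odd and is advanced by 2, so
// every value handed out stays odd.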
uint32 GetXLARandomSeed() {
static std::atomic<uint32> counter(InitialRandomSeed());
uint32 seed = counter.fetch_add(2);
std::srand(seed);
return std::rand() | 1;
}
bool HasAssociatedFunction(const NodeDef& node_def,
const FunctionLibraryDefinition* fld) {
if (fld->Contains(node_def.op())) {
return true;
}
if (node_def.op() == FunctionLibraryDefinition::kGradientOp) {
return true;
}
if (node_def.op() == "XlaHostCompute") {
return false;
}
for (const auto& iter : node_def.attr()) {
if (iter.second.has_func()) {
return true;
}
}
return false;
}
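// Collects the functions associated with `node`: a direct call when the op
// itself names a library function, a SymbolicGradient, or any
// function-valued attributes (XlaHostCompute is deliberately skipped).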
std::vector<AssociatedFunctionInfo> GetAssociatedFunctions(
const Node& node, const FunctionLibraryDefinition* fld) {
std::vector<AssociatedFunctionInfo> results;
const string& op = node.type_string();
if (fld->Contains(op)) {
AttrValueMap attrs(node.attrs().begin(), node.attrs().end());
results.emplace_back(AssociatedFunctionInfo::FunctionCall(op, attrs));
} else if (node.type_string() == FunctionLibraryDefinition::kGradientOp) {
AttrValueMap attrs(node.attrs().begin(), node.attrs().end());
results.emplace_back(AssociatedFunctionInfo::SymbolicGradient(op, attrs));
  } else if (node.type_string() == "XlaHostCompute") {
    // XlaHostCompute's function attrs are deliberately ignored; see
    // HasAssociatedFunction above.
} else {
for (auto& iter : node.attrs()) {
if (iter.second.has_func()) {
VLOG(2) << "Found function attr for node " << node.name() << ": "
<< iter.first << " = " << iter.second.func().name();
results.emplace_back(AssociatedFunctionInfo::FunctionAttr(
iter.second.func().name(), iter.second.func().attr(), iter.first));
}
}
}
return results;
}
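// Re-points `node` at `rewritten_function_name` according to the association
// kind: rebuilds the node for direct function calls, updates the gradient
// mapping for SymbolicGradient, or rewrites the named function attribute.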
Status RewriteAssociatedFunction(
Graph* graph, Node* node, FunctionLibraryDefinition* fld,
const AssociatedFunctionInfo& associated_function,
const string& rewritten_function_name) {
switch (associated_function.type()) {
case AssociatedFunctionInfo::kFunctionCallNode: {
NodeDebugInfo debug_info(*node);
NodeDefBuilder builder(node->name(), rewritten_function_name, fld,
&debug_info);
for (const auto& attr : node->attrs()) {
builder.Attr(attr.first, attr.second);
}
for (int i = 0; i < node->num_inputs(); i++) {
Node* input_node;
TF_RETURN_IF_ERROR(node->input_node(i, &input_node));
builder.Input(input_node->name(), i, node->input_type(i));
}
builder.Device(node->assigned_device_name().empty()
? node->requested_device()
: node->assigned_device_name());
NodeDef node_def;
TF_RETURN_IF_ERROR(builder.Finalize(&node_def));
TF_ASSIGN_OR_RETURN(Node * new_node, graph->AddNode(node_def));
for (auto edge : node->in_edges()) {
graph->AddEdge(edge->src(), edge->src_output(), new_node,
edge->dst_input());
}
for (auto edge : node->out_edges()) {
graph->AddEdge(new_node, edge->src_output(), edge->dst(),
edge->dst_input());
}
graph->RemoveNode(node);
break;
}
case AssociatedFunctionInfo::kSymbolicGradient: {
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(
node->attrs(), FunctionLibraryDefinition::kFuncAttr, &func));
GradientDef gradient_def;
gradient_def.set_function_name(func.name());
gradient_def.set_gradient_func(rewritten_function_name);
string original_grad_func = fld->FindGradient(func.name());
if (original_grad_func.empty()) {
TF_RETURN_IF_ERROR(fld->AddGradientDef(gradient_def));
} else if (original_grad_func != rewritten_function_name) {
TF_RETURN_IF_ERROR(fld->ReplaceGradient(gradient_def));
}
break;
}
case AssociatedFunctionInfo::kFunctionAttr: {
NameAttrList func;
TF_RETURN_IF_ERROR(
GetNodeAttr(node->attrs(), associated_function.attr_name(), &func));
if (node->type_string() == "TPUPartitionedCall") {
node->AddAttr("_orig_f", func.name());
}
node->ClearAttr(associated_function.attr_name());
func.set_name(rewritten_function_name);
node->AddAttr(associated_function.attr_name(), func);
break;
}
}
return absl::OkStatus();
}
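// Returns a cached handle for (func_name, attrs), instantiating and caching
// a new one on first use.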
Status CachedFunctionHandles::GetOrInstantiate(
const string& func_name, AttrSlice attrs,
FunctionLibraryRuntime::Handle* handle) {
string canonicalized_name = Canonicalize(func_name, attrs);
auto iter = handles_.find(canonicalized_name);
if (iter != handles_.end()) {
*handle = iter->second;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(flr_->Instantiate(func_name, attrs, handle));
handles_[canonicalized_name] = *handle;
return absl::OkStatus();
}
Status CachedFunctionHandles::ReleaseAllHandles() {
Status result;
for (const auto& iter : handles_) {
result.Update(flr_->ReleaseHandle(iter.second));
}
handles_.clear();
return result;
}
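// Replaces `n` with a new node built from `node_def`, preserving all
// incoming and outgoing edges.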
absl::StatusOr<Node*> ReplaceNode(Graph* g, Node* n, const NodeDef& node_def) {
TF_ASSIGN_OR_RETURN(Node * new_node, g->AddNode(node_def));
std::vector<OutEdgeInfo> out_edge_info;
std::vector<const Edge*> out_edges;
for (const Edge* edge : n->out_edges()) {
out_edges.push_back(edge);
out_edge_info.push_back(
{edge->dst(), edge->src_output(), edge->dst_input()});
}
for (const Edge* edge : out_edges) {
g->RemoveEdge(edge);
}
for (const Edge* in_edge : n->in_edges()) {
g->AddEdge(in_edge->src(), in_edge->src_output(), new_node,
in_edge->dst_input());
}
for (const OutEdgeInfo& out_edge : out_edge_info) {
g->AddEdge(new_node, out_edge.src_output, out_edge.dst, out_edge.dst_input);
}
g->RemoveNode(n);
return new_node;
}
absl::StatusOr<Node*> BuildIdentityNode(
Graph* graph, const string& node_name, DataType dtype, const Node* input,
std::optional<string> requested_device) {
NodeDef ndef;
ndef.set_name(node_name);
ndef.set_op("Identity");
if (input) {
ndef.add_input(input->name());
}
if (requested_device) {
ndef.set_device(*requested_device);
}
AddNodeAttr("T", dtype, &ndef);
TF_ASSIGN_OR_RETURN(Node * id_node, graph->AddNode(ndef));
return id_node;
}
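// Repeatedly scans the graph, propagating Consts into If and While nodes.
// A While rewrite mutates the graph, so the scan restarts after each one
// until a full pass processes no further While node.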
Status PropagateConstIntoFunctionalNodes(
Graph* g, const FunctionLibraryDefinition* lookup_fld,
FunctionLibraryDefinition* fld) {
absl::flat_hash_set<int> done_node_ids;
bool should_continue = true;
while (should_continue) {
should_continue = false;
for (Node* n : g->op_nodes()) {
if (!done_node_ids.contains(n->id())) {
if (n->IsIfNode()) {
VLOG(1) << "PropagateConstIntoIfNode: " << n->name();
TF_RETURN_IF_ERROR(PropagateConstIntoIfNode(g, n, lookup_fld, fld));
done_node_ids.emplace(n->id());
VLOG(1) << "Done PropagateConstIntoIfNode: " << n->name();
} else if (n->IsWhileNode()) {
VLOG(1) << "PropagateConstIntoWhileNode: " << n->name();
TF_RETURN_IF_ERROR(
PropagateConstIntoAndAroundWhileNode(g, n, lookup_fld, fld));
done_node_ids.emplace(n->id());
should_continue = true;
VLOG(1) << "Done PropagateConstIntoWhileNode: " << n->name();
break;
}
}
}
}
return absl::OkStatus();
}
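// Drops from `fld` every function that is not reachable from the graph.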
Status PruneUnreachableFunctionsFromGraph(const Graph& g,
FunctionLibraryDefinition* fld) {
GraphDef graph_def;
g.ToGraphDef(&graph_def);
FunctionLibraryDefinition reachable_functions =
fld->ReachableDefinitions(graph_def);
for (const string& func_name : fld->ListFunctionNames()) {
if (!reachable_functions.Find(func_name)) {
TF_RETURN_IF_ERROR(fld->RemoveFunction(func_name));
}
}
return absl::OkStatus();
}
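// Matches the pattern EmptyTensorList -> forward While (whose body pushes a
// Const element) -> backward While (whose body pops elements), and rewrites
// the backward body so consumers of the popped element read a local copy of
// the Const instead.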
Status RewriteTensorListWithConstElement(Graph* g,
FunctionLibraryDefinition* fld) {
for (Node* n : g->nodes()) {
if (n->type_string() != "EmptyTensorList") {
continue;
}
std::vector<const Edge*> fwd_while_edges;
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge() && e->dst()->IsWhileNode()) {
fwd_while_edges.push_back(e);
}
}
if (fwd_while_edges.size() != 1) {
continue;
}
Node* fwd_while = fwd_while_edges[0]->dst();
int fwd_while_dst_input = fwd_while_edges[0]->dst_input();
std::vector<const Edge*> bwd_while_edges;
for (const Edge* e : fwd_while->out_edges()) {
if (e->src_output() == fwd_while_dst_input && e->dst()->IsWhileNode()) {
bwd_while_edges.push_back(e);
}
}
if (bwd_while_edges.size() != 1) {
continue;
}
Node* bwd_while = bwd_while_edges[0]->dst();
int bwd_while_dst_input = bwd_while_edges[0]->dst_input();
NameAttrList fwd_body_attr;
TF_CHECK_OK(GetNodeAttr(fwd_while->def(), "body", &fwd_body_attr));
const FunctionDef* fwd_body = fld->Find(fwd_body_attr.name());
if (!fwd_body) {
return errors::InvalidArgument("Cannot find function ",
fwd_body_attr.name(), " for While node ",
fwd_while->DebugString());
}
std::unique_ptr<FunctionBody> fwd_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(
*fwd_body, AttrSlice(&fwd_body_attr.attr()), fld, &fwd_fbody));
Node* fwd_arg = fwd_fbody->arg_nodes[fwd_while_dst_input];
std::vector<Node*> tl_push_nodes;
for (const Edge* out_edge : fwd_arg->out_edges()) {
if (out_edge->dst()->type_string() == "TensorListPushBack") {
tl_push_nodes.push_back(out_edge->dst());
}
}
if (tl_push_nodes.size() != 1) {
continue;
}
Node* input_node;
TF_CHECK_OK(tl_push_nodes[0]->input_node(1, &input_node));
if (input_node->type_string() != "Const") {
continue;
}
NodeDef const_input_nodedef = input_node->def();
NameAttrList bwd_body_attr;
TF_CHECK_OK(GetNodeAttr(bwd_while->def(), "body", &bwd_body_attr));
const FunctionDef* bwd_body = fld->Find(bwd_body_attr.name());
if (!bwd_body) {
return errors::InvalidArgument("Cannot find function ",
bwd_body_attr.name(), " for While node ",
bwd_while->DebugString());
}
std::unique_ptr<FunctionBody> bwd_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(
*bwd_body, AttrSlice(&bwd_body_attr.attr()), fld, &bwd_fbody));
Node* bwd_arg = bwd_fbody->arg_nodes[bwd_while_dst_input];
std::vector<Node*> tl_pop_nodes;
for (const Edge* out_edge : bwd_arg->out_edges()) {
if (out_edge->dst()->type_string() == "TensorListPopBack") {
tl_pop_nodes.push_back(out_edge->dst());
}
}
if (tl_pop_nodes.size() != 1) {
continue;
}
std::vector<const Edge*> edges_to_replace;
for (const Edge* e : tl_pop_nodes[0]->out_edges()) {
if (e->src_output() == 1) {
edges_to_replace.push_back(e);
}
}
if (edges_to_replace.empty()) {
continue;
}
const_input_nodedef.set_name(
bwd_fbody->graph->NewName(const_input_nodedef.name()));
TF_ASSIGN_OR_RETURN(Node * const_node,
bwd_fbody->graph->AddNode(const_input_nodedef));
for (const Edge* e : edges_to_replace) {
Node* dst = e->dst();
int dst_input = e->dst_input();
bwd_fbody->graph->RemoveEdge(e);
bwd_fbody->graph->AddEdge(const_node, 0, dst, dst_input);
}
FunctionDef new_fdef;
string new_name = fld->UniqueFunctionName(
absl::StrCat(bwd_body_attr.name(), "_tl_rewrite_"));
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*bwd_fbody->graph, new_name, &new_fdef));
TF_RETURN_IF_ERROR(fld->AddFunctionDef(new_fdef));
bwd_body_attr.set_name(new_name);
bwd_while->ClearAttr("body");
bwd_while->AddAttr("body", bwd_body_attr);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
void ExpectErrorContains(const Status& status, absl::string_view str) {
EXPECT_NE(absl::OkStatus(), status);
EXPECT_TRUE(absl::StrContains(status.message(), str))
<< "expected error: " << status.message() << " to contain: " << str;
}
TEST(ValidateConfig, Good) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
feed->mutable_id()->set_output_index(123);
feed->set_name("foo_debug");
feed = config.add_feed();
feed->mutable_id()->set_node_name("bar");
feed->mutable_id()->set_output_index(0);
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("baz");
fetch->mutable_id()->set_output_index(456);
fetch->set_name("baz_debug");
fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("banana");
fetch->mutable_id()->set_output_index(0);
TF_EXPECT_OK(ValidateConfig(config));
}
TEST(ValidateConfig, BadEmpty) {
tf2xla::Config config;
ExpectErrorContains(ValidateConfig(config), "fetches must be specified");
}
TEST(ValidateConfig, BadNoFetch) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
ExpectErrorContains(ValidateConfig(config), "fetches must be specified");
}
TEST(ValidateConfig, BadFeedNodeName) {
tf2xla::Config config;
config.add_feed();
ExpectErrorContains(ValidateConfig(config), "node_name must be non-empty");
}
TEST(ValidateConfig, BadFeedOutputIndex) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
feed->mutable_id()->set_output_index(-1);
ExpectErrorContains(ValidateConfig(config), "output_index must be positive");
}
TEST(ValidateConfig, BadFetchNodeName) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
config.add_fetch();
ExpectErrorContains(ValidateConfig(config), "node_name must be non-empty");
}
TEST(ValidateConfig, BadFetchOutputIndex) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("bar");
fetch->mutable_id()->set_output_index(-1);
ExpectErrorContains(ValidateConfig(config), "output_index must be positive");
}
TEST(ValidateConfig, DuplicateFeedName) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
feed->set_name("dup");
feed = config.add_feed();
feed->mutable_id()->set_node_name("bar");
feed->set_name("dup");
ExpectErrorContains(ValidateConfig(config), "duplicate feed name");
}
TEST(ValidateConfig, DuplicateFetchName) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("bar");
fetch->set_name("dup");
fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("baz");
fetch->set_name("dup");
ExpectErrorContains(ValidateConfig(config), "duplicate fetch name");
}
TEST(ValidateConfig, ConflictingFeedName) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
feed->set_name("conflict");
feed = config.add_feed();
feed->mutable_id()->set_node_name("bar");
feed->set_name("conflict_data");
ExpectErrorContains(ValidateConfig(config), "conflicting feed name");
}
TEST(ValidateConfig, ConflictingFetchName) {
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("foo");
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("bar");
fetch->set_name("conflict");
fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("baz");
fetch->set_name("conflict_data");
ExpectErrorContains(ValidateConfig(config), "conflicting fetch name");
}
static tf2xla::Config FetchesConfig(std::vector<string> fetches) {
tf2xla::Config config;
for (const auto& fetch_node_name : fetches) {
auto* fetch = config.add_fetch();
fetch->set_name(absl::StrCat("fetch_", fetch_node_name));
fetch->mutable_id()->set_node_name(fetch_node_name);
}
return config;
}
TEST(PruneGraphDefInto, Basic) {
GraphDef def;
auto* n = def.add_node();
n->set_name("a");
n->add_input("b:0");
n->add_input("^c");
GraphDef copy;
ExpectErrorContains(PruneGraphDefInto(FetchesConfig({"missing"}), def, ©),
"node missing needed");
ExpectErrorContains(PruneGraphDefInto(FetchesConfig({"a"}), def, ©),
"node b needed");
n = def.add_node();
n->set_name("b");
ExpectErrorContains(PruneGraphDefInto(FetchesConfig({"a"}), def, ©),
"node c needed");
n->add_input("d:1");
n = def.add_node();
n->set_name("c");
n->add_input("d:1");
n = def.add_node();
n->set_name("d");
TF_EXPECT_OK(PruneGraphDefInto(FetchesConfig({"a"}), def, ©));
EXPECT_EQ(def.DebugString(), copy.DebugString());
GraphDef pruned_a = copy;
n = def.add_node();
n->set_name("e");
n->add_input("^d");
n->add_input("b:2");
copy.Clear();
TF_EXPECT_OK(PruneGraphDefInto(FetchesConfig({"a"}), def, ©));
EXPECT_EQ(pruned_a.DebugString(), copy.DebugString());
copy.Clear();
TF_EXPECT_OK(PruneGraphDefInto(FetchesConfig({"a", "e"}), def, ©));
EXPECT_EQ(def.DebugString(), copy.DebugString());
}
TEST(SetNodeShardingFromNeighbors, Basic) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
Node* a_node = nullptr;
Node* b_node = nullptr;
Node* c_node = nullptr;
for (Node* n : graph->nodes()) {
if (n->name() == "A") a_node = n;
if (n->name() == "B") b_node = n;
if (n->name() == "C") c_node = n;
}
const int num_cores_per_replica = 4;
a_node->set_assigned_device_name("foo");
EXPECT_FALSE(SetNodeShardingFromNeighbors(c_node, false).ok());
a_node->set_assigned_device_name("/device:TPU_REPLICATED_CORE:2");
TF_ASSERT_OK(SetNodeShardingFromNeighbors(c_node, false));
auto parse_status = ParseShardingFromDevice(*c_node, num_cores_per_replica,
                                              /*add_metadata=*/false);
TF_ASSERT_OK(parse_status.status());
ASSERT_TRUE(parse_status.value().has_value());
EXPECT_EQ(2, parse_status.value().value().tile_assignment_devices(0));
b_node->set_assigned_device_name("/device:TPU_REPLICATED_CORE:1");
TF_ASSERT_OK(SetNodeShardingFromNeighbors(c_node, false));
parse_status = ParseShardingFromDevice(*c_node, num_cores_per_replica,
                                         /*add_metadata=*/false);
TF_ASSERT_OK(parse_status.status());
ASSERT_TRUE(parse_status.value().has_value());
EXPECT_EQ(1, parse_status.value().value().tile_assignment_devices(0));
TF_ASSERT_OK(SetNodeShardingFromNeighbors(a_node, true));
parse_status = ParseShardingFromDevice(*a_node, num_cores_per_replica,
                                         /*add_metadata=*/false);
TF_ASSERT_OK(parse_status.status());
ASSERT_TRUE(parse_status.value().has_value());
EXPECT_EQ(1, parse_status.value().value().tile_assignment_devices(0));
}
REGISTER_OP("One")
.Output("y: T")
.Attr("T: {float, double, int32, int64}")
.Doc(R"doc(
Returns a tensor with a single element (1) of type T.
y: A scalar in type T.
)doc");
TEST(CachedFunctionHandles, Basic) {
FunctionDef func = FunctionDefHelper::Define(
"TestFunc",
{},
{"y:T"},
{"T:{float, double, int32, int64}"},
{
{{"y"}, "One", {}, {{"T", "$T"}}},
});
FunctionDefLibrary proto;
*proto.add_function() = func;
FunctionLibraryDefinition fld(OpRegistry::Global(), proto);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
nullptr, Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, &fld, OptimizerOptions()));
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
CachedFunctionHandles cached_function_handles(flr);
FunctionLibraryRuntime::Handle first_handle;
AttrValue attr;
attr.set_type(DT_FLOAT);
AttrValueMap attrs;
attrs["T"] = attr;
TF_ASSERT_OK(cached_function_handles.GetOrInstantiate(
"TestFunc", AttrSlice(&attrs), &first_handle));
const FunctionBody* body = flr->GetFunctionBody(first_handle);
EXPECT_NE(body, nullptr);
FunctionLibraryRuntime::Handle second_handle;
TF_ASSERT_OK(cached_function_handles.GetOrInstantiate(
"TestFunc", AttrSlice(&attrs), &second_handle));
EXPECT_EQ(first_handle, second_handle);
attr.set_type(DT_INT32);
attrs["T"] = attr;
FunctionLibraryRuntime::Handle third_handle;
TF_ASSERT_OK(cached_function_handles.GetOrInstantiate(
"TestFunc", AttrSlice(&attrs), &third_handle));
EXPECT_NE(first_handle, third_handle);
TF_EXPECT_OK(cached_function_handles.ReleaseAllHandles());
}
TEST(PropagateConstIntoFunctionalNodes, WhileLoopWithResourceInput) {
FunctionLibraryDefinition fld(OpRegistry::Global(), FunctionDefLibrary());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto pred = ops::_Arg(scope.WithOpName("pred"), DT_BOOL, 0);
auto input = ops::_Arg(scope.WithOpName("input"), DT_RESOURCE, 1);
auto ret = ops::_Retval(scope.WithOpName("ret"), pred, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
FunctionDef cond_fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "cond", &cond_fdef));
TF_ASSERT_OK(fld.AddFunctionDef(cond_fdef));
FunctionDef body_fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "body", &body_fdef));
TF_ASSERT_OK(fld.AddFunctionDef(body_fdef));
}
Scope scope = Scope::NewRootScope().ExitOnError();
auto pred = ops::Const(scope.WithOpName("pred"), false, TensorShape({}));
auto input = ops::Const(scope.WithOpName("input"), 0, TensorShape({}));
NameAttrList cond_fn, body_fn;
cond_fn.set_name("cond");
body_fn.set_name("body");
auto while_op =
ops::While(scope.WithOpName("while"),
std::initializer_list<Input>{pred, input}, cond_fn, body_fn);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
TF_EXPECT_OK(PropagateConstIntoFunctionalNodes(&graph, &fld, &fld));
}
TEST(PropagateConstIntoFunctionalNodes, CopiedConstNodeHasUniqueName) {
FunctionLibraryDefinition fld(OpRegistry::Global(), FunctionDefLibrary());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto pred = ops::_Arg(scope.WithOpName("arg0"), DT_BOOL, 0);
auto input = ops::_Arg(scope.WithOpName("arg1"), DT_BOOL, 1);
auto duplicate_name = ops::NoOp(scope.WithOpName("duplicate_name"));
auto ret = ops::_Retval(scope.WithOpName("ret"), pred, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
FunctionDef cond_fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "cond", &cond_fdef));
TF_ASSERT_OK(fld.AddFunctionDef(cond_fdef));
FunctionDef body_fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "body", &body_fdef));
TF_ASSERT_OK(fld.AddFunctionDef(body_fdef));
}
Scope scope = Scope::NewRootScope().ExitOnError();
auto pred =
ops::Const(scope.WithOpName("duplicate_name"), false, TensorShape({}));
auto input = ops::Const(scope.WithOpName("input"), false, TensorShape({}));
NameAttrList cond_fn, body_fn;
cond_fn.set_name("cond");
body_fn.set_name("body");
auto while_op =
ops::While(scope.WithOpName("while"),
std::initializer_list<Input>{pred, input}, cond_fn, body_fn);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
TF_EXPECT_OK(PropagateConstIntoFunctionalNodes(&graph, &fld, &fld));
auto node_name_index = graph.BuildNodeNameIndex();
Node* while_node = node_name_index["while"];
ASSERT_NE(while_node, nullptr);
TF_ASSERT_OK(GetNodeAttr(while_node->def(), "body", &body_fn));
const FunctionDef* rewritten_body_fn = fld.Find(body_fn.name());
ASSERT_NE(rewritten_body_fn, nullptr);
std::unordered_map<string, NodeDef> nodes;
for (const NodeDef& node_def : rewritten_body_fn->node_def()) {
nodes[node_def.name()] = node_def;
}
auto noop_def = nodes.find("duplicate_name");
ASSERT_NE(noop_def, nodes.end());
EXPECT_EQ(noop_def->second.op(), "NoOp");
auto const_def = nodes.find("duplicate_name/_0");
ASSERT_NE(const_def, nodes.end());
EXPECT_EQ(const_def->second.op(), "Const");
}
TEST(PropagateConstIntoFunctionalNodes, RewriteTensorListWithConstMember) {
FunctionLibraryDefinition fld(OpRegistry::Global(), FunctionDefLibrary());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto input = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0);
auto result =
ops::Const(scope.WithOpName("result"), false, TensorShape({}));
auto ret = ops::_Retval(scope.WithOpName("ret"), result, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "cond", &fdef));
TF_ASSERT_OK(fld.AddFunctionDef(fdef));
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto input = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0);
auto element = ops::Const(scope.WithOpName("element"), 0, TensorShape({}));
auto push =
ops::TensorListPushBack(scope.WithOpName("push"), input, element);
auto ret = ops::_Retval(scope.WithOpName("ret"), push.output_handle, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "fwd_body", &fdef));
TF_ASSERT_OK(fld.AddFunctionDef(fdef));
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto input = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0);
auto shape = ops::Const(scope.WithOpName("element"), -1, TensorShape({}));
auto pop =
ops::TensorListPopBack(scope.WithOpName("pop"), input, shape, DT_INT32);
auto identity = ops::Identity(scope.WithOpName("identity"), pop.tensor);
auto ret = ops::_Retval(scope.WithOpName("ret"), pop.output_handle, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "bwd_body", &fdef));
TF_ASSERT_OK(fld.AddFunctionDef(fdef));
}
Scope scope = Scope::NewRootScope().ExitOnError();
auto shape = ops::Const(scope.WithOpName("element"), -1, TensorShape({}));
auto max_num_elements =
ops::Const(scope.WithOpName("max_num_elements"), 10, TensorShape({}));
auto tl = ops::EmptyTensorList(scope.WithOpName("tl"), shape,
max_num_elements, DT_INT32);
NameAttrList cond_fn, fwd_body_fn, bwd_body_fn;
cond_fn.set_name("cond");
fwd_body_fn.set_name("fwd_body");
bwd_body_fn.set_name("bwd_body");
auto fwd_while_op =
ops::While(scope.WithOpName("fwd_while"),
std::initializer_list<Input>{tl}, cond_fn, fwd_body_fn);
auto bwd_while_op =
ops::While(scope.WithOpName("bwd_while"),
std::initializer_list<Input>{fwd_while_op.output[0]}, cond_fn,
bwd_body_fn);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
TF_EXPECT_OK(RewriteTensorListWithConstElement(&graph, &fld));
const FunctionDef* bwd_body = fld.Find("bwd_body_tl_rewrite_0");
ASSERT_NE(bwd_body, nullptr);
std::unique_ptr<FunctionBody> bwd_fbody;
TF_CHECK_OK(
FunctionDefToBodyHelper(*bwd_body, AttrSlice(), &fld, &bwd_fbody));
auto node_name_index = bwd_fbody->graph->BuildNodeNameIndex();
const Node* identity = node_name_index.at("identity");
ASSERT_NE(identity, nullptr);
const Node* input;
TF_ASSERT_OK(identity->input_node(0, &input));
EXPECT_EQ(input->type_string(), "Const");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
36995878-42b9-45db-8e37-0f90cd01d40f | cpp | tensorflow/tensorflow | tf2xla_opset | tensorflow/compiler/tf2xla/tf2xla_opset.cc | tensorflow/compiler/tf2xla/tf2xla_opset_test.cc | #include "tensorflow/compiler/tf2xla/tf2xla_opset.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
namespace tensorflow {
const int SUPPORTED_DEVICES_NUM = 2;
static const char* const SUPPORTED_DEVICES[SUPPORTED_DEVICES_NUM] = {
DEVICE_GPU_XLA_JIT, DEVICE_CPU_XLA_JIT};
bool IsSupportedBackend(absl::string_view device_name) {
for (int i = 0; i < SUPPORTED_DEVICES_NUM; i++) {
if (SUPPORTED_DEVICES[i] == device_name) return true;
}
return false;
}
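// Registers the XLA GPU and CPU JIT backends if needed and verifies that
// `device_name` names a supported, registered backend.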
absl::Status RegisterBackends(absl::string_view device_name) {
if (!IsSupportedBackend(device_name)) {
return absl::InvalidArgumentError(
absl::StrCat(device_name, " is not supported. Supported devices are ",
absl::StrJoin(SUPPORTED_DEVICES, ", ")));
}
auto op_filter = [](KernelDef* kdef) {
if (kdef->op() == "Const") {
AddDtypeToKernelDefConstraint("dtype", DT_STRING, kdef);
}
if (kdef->op() == "Assert") {
AddDtypeToKernelDefConstraint("T", DT_STRING, kdef);
}
return true;
};
if (!XlaOpRegistry::IsBackendRegistered(DEVICE_GPU_XLA_JIT)) {
static auto gpu_backend =
XlaBackendRegistrar(DEVICE_GPU_XLA_JIT, kGpuAllTypes, op_filter);
}
if (!XlaOpRegistry::IsBackendRegistered(DEVICE_CPU_XLA_JIT)) {
static auto cpu_backend =
XlaBackendRegistrar(DEVICE_CPU_XLA_JIT, kCpuAllTypes, op_filter);
}
if (!XlaOpRegistry::IsBackendRegistered(std::string(device_name))) {
return absl::InternalError(
absl::StrCat(device_name, " is not registered."));
}
return absl::OkStatus();
}
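// Returns the sorted names of all ops with XLA kernels registered for
// `device_name`.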
absl::StatusOr<std::vector<std::string>> GetRegisteredXlaOpsForDevice(
absl::string_view device_name) {
auto status = RegisterBackends(device_name);
if (!status.ok()) return status;
std::vector<const KernelDef*> kernel_defs =
XlaOpRegistry::DeviceKernels(std::string(device_name), true);
std::vector<std::string> op_names;
op_names.reserve(kernel_defs.size());
for (const auto& kernel_def : kernel_defs) {
op_names.push_back(kernel_def->op());
}
std::sort(op_names.begin(), op_names.end());
return op_names;
}
} | #include "tensorflow/compiler/tf2xla/tf2xla_opset.h"
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(GetXlaOpsForDeviceTest, InvalidDeviceToRegister) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("Invalid_Device");
EXPECT_FALSE(result.ok());
}
TEST(GetXlaOpsForDeviceTest, GetGpuNames) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("XLA_GPU_JIT");
EXPECT_GT(result.value().size(), 0);
auto matmul =
std::find(result.value().begin(), result.value().end(), "MatMul");
auto max = std::find(result.value().begin(), result.value().end(), "Max");
auto min = std::find(result.value().begin(), result.value().end(), "Min");
EXPECT_TRUE((matmul != result.value().end()));
EXPECT_TRUE((max != result.value().end()));
EXPECT_TRUE((min != result.value().end()));
EXPECT_LT(matmul, max);
EXPECT_LT(max, min);
}
TEST(GetXlaOpsForDeviceTest, GetCpuNames) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("XLA_CPU_JIT");
EXPECT_GT(result.value().size(), 0);
auto matmul =
std::find(result.value().begin(), result.value().end(), "MatMul");
auto max = std::find(result.value().begin(), result.value().end(), "Max");
auto min = std::find(result.value().begin(), result.value().end(), "Min");
EXPECT_TRUE((matmul != result.value().end()));
EXPECT_TRUE((max != result.value().end()));
EXPECT_TRUE((min != result.value().end()));
EXPECT_LT(matmul, max);
EXPECT_LT(max, min);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_opset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_opset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95e82690-60b8-4cb9-881d-1c29fd32eca1 | cpp | tensorflow/tensorflow | resource_operation_table | tensorflow/compiler/tf2xla/resource_operation_table.cc | tensorflow/compiler/tf2xla/resource_operation_table_test.cc | #include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
namespace tensorflow {
absl::string_view XlaResourceOpInfo::XlaResourceOpKindToString(
XlaResourceOpKind op_kind) {
switch (op_kind) {
case XlaResourceOpKind::kRead:
return "Read";
case XlaResourceOpKind::kWrite:
return "Write";
case XlaResourceOpKind::kReadWrite:
return "Modify";
}
}
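// Builds the static table mapping each resource op name to its access kind
// (read / write / read-write) and the kind of resource it touches.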
static absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>*
CreateResourceOpInfoMap() {
auto* result = new absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>;
auto add = [&](absl::string_view op, XlaResourceOpKind op_kind,
XlaResourceKind resource_kind) {
auto insert_result =
result->insert({op, XlaResourceOpInfo(op_kind, resource_kind)});
CHECK(insert_result.second);
};
auto kRead = XlaResourceOpKind::kRead;
auto kWrite = XlaResourceOpKind::kWrite;
auto kReadWrite = XlaResourceOpKind::kReadWrite;
auto kVariable = XlaResourceKind::kVariable;
auto kStack = XlaResourceKind::kStack;
auto kTensorArray = XlaResourceKind::kTensorArray;
add("AssignAddVariableOp" , kReadWrite, kVariable);
add("AssignSubVariableOp" , kReadWrite, kVariable);
add("AssignVariableOp" , kWrite, kVariable);
add("AssignVariableXlaConcatND" , kWrite, kVariable);
add("CollectiveReduceV2" , kRead, kVariable);
add("ReadVariableOp" , kRead, kVariable);
add("ReadVariableXlaSplitND" , kRead, kVariable);
add("ResourceApplyAdaMax" , kReadWrite, kVariable);
add("ResourceApplyAdadelta" , kReadWrite, kVariable);
add("ResourceApplyAdagrad" , kReadWrite, kVariable);
add("ResourceApplyAdagradV2" , kReadWrite, kVariable),
add("ResourceApplyAdagradDA" , kReadWrite, kVariable);
add("ResourceApplyAdam" , kReadWrite, kVariable);
add("ResourceApplyAddSign" , kReadWrite, kVariable);
add("ResourceApplyCenteredRMSProp" , kReadWrite, kVariable);
add("ResourceApplyFtrl" , kReadWrite, kVariable);
add("ResourceApplyFtrlV2" , kReadWrite, kVariable);
add("ResourceApplyGradientDescent" , kReadWrite, kVariable);
add("ResourceApplyMomentum" , kReadWrite, kVariable);
add("ResourceApplyKerasMomentum" , kReadWrite, kVariable);
add("ResourceApplyPowerSign" , kReadWrite, kVariable);
add("ResourceApplyProximalAdagrad" , kReadWrite, kVariable);
add("ResourceApplyProximalGradientDescent" , kReadWrite, kVariable);
add("ResourceApplyRMSProp" , kReadWrite, kVariable);
add("ResourceGather" , kRead, kVariable);
add("ResourceScatterAdd" , kReadWrite, kVariable);
add("ResourceScatterDiv" , kReadWrite, kVariable);
add("ResourceScatterMax" , kReadWrite, kVariable);
add("ResourceScatterMin" , kReadWrite, kVariable);
add("ResourceScatterMul" , kReadWrite, kVariable);
add("ResourceScatterNdAdd" , kReadWrite, kVariable);
add("ResourceScatterNdSub" , kReadWrite, kVariable);
add("ResourceScatterNdUpdate" , kReadWrite, kVariable);
add("ResourceScatterSub" , kReadWrite, kVariable);
add("ResourceScatterUpdate" , kReadWrite, kVariable);
add("ResourceStridedSliceAssign" , kReadWrite, kVariable);
add("RngReadAndSkip" , kReadWrite, kVariable);
add("RngSkip" , kReadWrite, kVariable);
add("StatefulStandardNormalV2" , kReadWrite, kVariable);
add("StatefulTruncatedNormal" , kReadWrite, kVariable);
add("StatefulUniform" , kReadWrite, kVariable);
add("StatefulUniformFullInt" , kReadWrite, kVariable);
add("StatefulUniformInt" , kReadWrite, kVariable);
add("VarIsInitializedOp" , kRead, kVariable);
add("VariableShape" , kRead, kVariable);
add("StackV2" , kWrite, kStack);
add("StackCloseV2" , kRead, kStack);
add("StackPopV2" , kReadWrite, kStack);
add("StackPushV2" , kReadWrite, kStack);
add("TensorArrayV3" , kWrite, kTensorArray);
add("TensorArrayConcatV3" , kRead, kTensorArray);
add("TensorArrayGatherV3" , kRead, kTensorArray);
add("TensorArrayScatterV3" , kWrite, kTensorArray);
add("TensorArrayGradV3" , kRead, kTensorArray);
add("TensorArrayCloseV3" , kRead, kTensorArray);
add("TensorArrayReadV3" , kRead, kTensorArray);
add("TensorArraySizeV3" , kRead, kTensorArray);
add("TensorArraySplitV3" , kWrite, kTensorArray);
add("TensorArrayWriteV3" , kWrite, kTensorArray);
return result;
}
static const absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>&
GetStaticResourceOpInfoMap() {
static absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>*
op_info_map = CreateResourceOpInfoMap();
return *op_info_map;
}
const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op) {
const absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>& op_infos =
GetStaticResourceOpInfoMap();
auto it = op_infos.find(op);
return it == op_infos.end() ? nullptr : &it->second;
}
namespace resource_op_table_internal {
std::vector<absl::string_view> GetKnownResourceOps() {
std::vector<absl::string_view> result;
for (const auto& p : GetStaticResourceOpInfoMap()) {
result.push_back(p.first);
}
absl::c_sort(result);
return result;
}
}
} | #include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
bool IsResourceArgDef(const OpDef::ArgDef& arg_def) {
return arg_def.type() == DT_RESOURCE;
}
bool HasResourceInputOrOutput(const OpDef& op_def) {
return absl::c_any_of(op_def.input_arg(), IsResourceArgDef) ||
absl::c_any_of(op_def.output_arg(), IsResourceArgDef);
}
TEST(ResourceOperationTableTest, HaveAllResourceOps) {
absl::flat_hash_map<string, bool> known_resource_ops;
for (absl::string_view known_resource_op :
resource_op_table_internal::GetKnownResourceOps()) {
ASSERT_TRUE(
known_resource_ops.insert({string(known_resource_op), false}).second);
}
std::vector<string> xla_op_names = XlaOpRegistry::GetAllRegisteredOps();
for (const string& xla_op_name : xla_op_names) {
const OpDef* op_def;
TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef(xla_op_name, &op_def));
if (HasResourceInputOrOutput(*op_def)) {
EXPECT_EQ(known_resource_ops.count(xla_op_name), 1)
<< "Unknown resource op " << xla_op_name;
known_resource_ops[xla_op_name] = true;
}
}
std::vector<string> unnecessary_resource_ops;
for (const auto& pair : known_resource_ops) {
if (!pair.second) {
unnecessary_resource_ops.push_back(pair.first);
}
}
EXPECT_TRUE(unnecessary_resource_ops.empty())
<< "Stale resource ops:\n"
<< absl::StrJoin(unnecessary_resource_ops, "\n");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_operation_table.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_operation_table_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e6003ff8-dfe3-42fc-a3ac-46631b67e34d | cpp | tensorflow/tensorflow | tf2xla | tensorflow/compiler/tf2xla/tf2xla.cc | tensorflow/compiler/tf2xla/tf2xla_test.cc | #include "tensorflow/compiler/tf2xla/tf2xla.h"
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/graph_compiler_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_computation.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
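// Compiles `graph` into an XLA computation on the XLA CPU JIT device. Fails
// if any fetch compiles away to a constant, or if a variable's readonly flag
// disagrees with whether the computation actually updates it.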
Status ConvertGraphToXla(std::unique_ptr<Graph> graph,
const tf2xla::Config& config, xla::Client* client,
xla::XlaComputation* computation) {
XlaOpRegistry::RegisterCompilationKernels();
for (Node* node : graph->nodes()) {
node->set_assigned_device_name(
absl::StrCat("/device:", DEVICE_CPU_XLA_JIT));
}
std::vector<XlaCompiler::Argument> xla_args;
TF_RETURN_IF_ERROR(CreateXlaArgs(*graph, &xla_args));
PopulateXlaArgs(config, &xla_args);
XlaCompiler::Options compiler_options;
compiler_options.client = client;
compiler_options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
compiler_options.flib_def = &graph->flib_def();
compiler_options.graph_def_version = graph->versions().producer();
compiler_options.allow_cpu_custom_calls = true;
XlaCompiler compiler(compiler_options);
XlaCompiler::CompilationResult result;
XlaCompiler::CompileOptions options;
options.alias_resource_update = true;
TF_RETURN_IF_ERROR(compiler.CompileGraph(
options, "tfcompile", std::move(graph), xla_args, &result));
*computation = std::move(*result.computation);
int num_const_results = 0;
for (int i = 0, end = result.outputs.size(); i < end; ++i) {
if (result.outputs[i].is_constant) {
++num_const_results;
LOG(ERROR) << "ConstRetVal index:" << i
<< " value:" << result.outputs[i].constant_value.DebugString();
}
}
if (num_const_results > 0) {
return errors::Unimplemented(
"Conversion from TensorFlow graph to XLA resulted in ",
num_const_results,
" constant results. The configuration of "
"the output args (i.e. fetch ids) is probably wrong.");
}
{
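    // Check that each variable's readonly flag matches whether the compiled
    // computation updates it.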
std::vector<bool> updated_inputs(xla_args.size());
for (const XlaCompiler::ResourceUpdate& update : result.resource_updates) {
updated_inputs[update.input_index] = true;
}
int64_t input_index = xla_args.size() - config.variable_size();
for (const tf2xla::Variable& variable : config.variable()) {
if (variable.readonly() == updated_inputs[input_index]) {
return errors::InvalidArgument(
"Variable \"", variable.node_name(), "\" is marked as ",
variable.readonly() ? "" : "not ", "readonly, but is ",
updated_inputs[input_index] ? "" : "not ",
"modified by the computation.");
}
++input_index;
}
}
return absl::OkStatus();
}
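// Rewrites every VarHandleOp (in the main graph and in all library
// functions) to the AOT-only variant, rejecting handles that restrict
// allowed devices.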
Status ConvertVarHandlesToAotVarHandles(GraphDef* graph_def) {
auto update_var_handle_op_node = [](NodeDef& node) -> Status {
if (node.op() == "VarHandleOp") {
node.set_op(tfcompile::kXlaAotOnlyVarHandleOp);
const auto& it = node.attr().find("allowed_devices");
if (it != node.attr().end()) {
if (!it->second.list().s().empty()) {
return errors::InvalidArgument(
"VarHandleOp with non-empty allowed devices is not supported.");
}
node.mutable_attr()->erase("allowed_devices");
}
}
return absl::OkStatus();
};
for (auto& node : *graph_def->mutable_node()) {
TF_RETURN_IF_ERROR(update_var_handle_op_node(node));
}
for (auto& fn : *graph_def->mutable_library()->mutable_function()) {
for (auto& node : *fn.mutable_node_def()) {
TF_RETURN_IF_ERROR(update_var_handle_op_node(node));
}
}
return absl::OkStatus();
}
}
Status ConvertGraphDefToXla(GraphDef graph_def, const tf2xla::Config& config,
xla::Client* client,
xla::XlaComputation* computation) {
std::unique_ptr<Graph> graph;
TF_RETURN_IF_ERROR(ConvertVarHandlesToAotVarHandles(&graph_def));
TF_RETURN_IF_ERROR(InitGraph(graph_def, config, &graph));
TF_RETURN_IF_ERROR(
ConvertGraphToXla(std::move(graph), config, client, computation));
return absl::OkStatus();
}
} | #include "tensorflow/compiler/tf2xla/tf2xla.h"
#include <vector>
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
class ConvertGraphDefToXlaWithTF32Disabled : public ::testing::Test {
public:
ConvertGraphDefToXlaWithTF32Disabled() {
tsl::enable_tensor_float_32_execution(false);
}
~ConvertGraphDefToXlaWithTF32Disabled() override {
tsl::enable_tensor_float_32_execution(true);
}
};
AttrValue TypeAttrValue(DataType type) {
AttrValue attr_value;
SetAttrValue(type, &attr_value);
return attr_value;
}
AttrValue StringAttrValue(StringPiece str) {
AttrValue attr_value;
SetAttrValue(str, &attr_value);
return attr_value;
}
AttrValue IntAttrValue(int i) {
AttrValue attr_value;
SetAttrValue(i, &attr_value);
return attr_value;
}
AttrValue IntVectorAttrValue(const std::vector<int>& ints) {
AttrValue attr_value;
SetAttrValue(ints, &attr_value);
return attr_value;
}
TensorShapeProto TensorShape(const std::vector<int>& dims) {
TensorShapeProto shape;
for (int i = 0; i < dims.size(); ++i) {
shape.add_dim();
shape.mutable_dim(i)->set_size(dims[i]);
}
return shape;
}
GraphDef SumGraph() {
GraphDef graph_def;
NodeDef* x = graph_def.add_node();
x->set_name("x");
x->set_op("Placeholder");
(*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* y = graph_def.add_node();
y->set_name("y");
y->set_op("Placeholder");
(*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* sum = graph_def.add_node();
sum->set_name("sum");
sum->set_op("Add");
sum->add_input("x");
sum->add_input("y");
(*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32);
return graph_def;
}
tf2xla::Config SumConfig() {
tf2xla::Config config;
config.add_feed()->mutable_id()->set_node_name("x");
config.add_feed()->mutable_id()->set_node_name("y");
config.add_fetch()->mutable_id()->set_node_name("sum");
return config;
}
TEST(ConvertGraphDefToXla, Sum) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
auto x_global_or = client->TransferToServer(x_literal);
auto y_global_or = client->TransferToServer(y_literal);
TF_EXPECT_OK(x_global_or.status());
TF_EXPECT_OK(y_global_or.status());
std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value());
std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value());
auto result_or =
client->ExecuteAndTransfer(computation, {x_global.get(), y_global.get()});
TF_EXPECT_OK(result_or.status());
xla::Literal result = std::move(result_or.value());
EXPECT_EQ("(\ns32[] 42\n)", result.ToString());
config.mutable_feed(0)->mutable_id()->set_output_index(
123);
EXPECT_TRUE(errors::IsInvalidArgument(
ConvertGraphDefToXla(graph_def, config, client, &computation)));
}
GraphDef EinsumGraph() {
GraphDef graph_def;
NodeDef* x = graph_def.add_node();
x->set_name("x");
x->set_op("Placeholder");
(*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
NodeDef* y = graph_def.add_node();
y->set_name("y");
y->set_op("Placeholder");
(*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
NodeDef* einsum = graph_def.add_node();
einsum->set_name("einsum");
einsum->set_op("Einsum");
einsum->add_input("x");
einsum->add_input("y");
(*einsum->mutable_attr())["equation"] = StringAttrValue("ij,jk->ik");
(*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT);
(*einsum->mutable_attr())["N"] = IntAttrValue(2);
return graph_def;
}
tf2xla::Config EinsumConfig() {
tf2xla::Config config;
tf2xla::Feed* x_feed = config.add_feed();
x_feed->mutable_id()->set_node_name("x");
*x_feed->mutable_shape() = TensorShape({2, 2});
tf2xla::Feed* y_feed = config.add_feed();
y_feed->mutable_id()->set_node_name("y");
*y_feed->mutable_shape() = TensorShape({2, 2});
config.add_fetch()->mutable_id()->set_node_name("einsum");
return config;
}
TEST(ConvertGraphDefToXla, EinsumIsConvertedToDotWithDefaultPrecision) {
GraphDef graph_def = EinsumGraph();
tf2xla::Config config = EinsumConfig();
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
int num_dots = 0;
const xla::HloModuleProto& module_proto = computation.proto();
for (const xla::HloComputationProto& computation_proto :
module_proto.computations()) {
for (const xla::HloInstructionProto& instruction_proto :
computation_proto.instructions()) {
if (instruction_proto.opcode() == "dot") {
num_dots++;
ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
2);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
xla::PrecisionConfig::DEFAULT);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
xla::PrecisionConfig::DEFAULT);
}
}
}
EXPECT_EQ(num_dots, 1);
}
TEST_F(ConvertGraphDefToXlaWithTF32Disabled,
EinsumIsConvertedToDotWithHighestPrecision) {
GraphDef graph_def = EinsumGraph();
tf2xla::Config config = EinsumConfig();
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
int num_dots = 0;
const xla::HloModuleProto& module_proto = computation.proto();
for (const xla::HloComputationProto& computation_proto :
module_proto.computations()) {
for (const xla::HloInstructionProto& instruction_proto :
computation_proto.instructions()) {
if (instruction_proto.opcode() == "dot") {
num_dots++;
ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
2);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
xla::PrecisionConfig::HIGHEST);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
xla::PrecisionConfig::HIGHEST);
}
}
}
EXPECT_EQ(num_dots, 1);
}
GraphDef Conv2DGraph() {
GraphDef graph_def;
NodeDef* x = graph_def.add_node();
x->set_name("x");
x->set_op("Placeholder");
(*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
NodeDef* y = graph_def.add_node();
y->set_name("y");
y->set_op("Placeholder");
(*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
NodeDef* einsum = graph_def.add_node();
einsum->set_name("conv2d");
einsum->set_op("Conv2D");
einsum->add_input("x");
einsum->add_input("y");
(*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT);
(*einsum->mutable_attr())["padding"] = StringAttrValue("VALID");
(*einsum->mutable_attr())["strides"] = IntVectorAttrValue({1, 1, 1, 1});
return graph_def;
}
tf2xla::Config Conv2DConfig() {
tf2xla::Config config;
tf2xla::Feed* x_feed = config.add_feed();
x_feed->mutable_id()->set_node_name("x");
*x_feed->mutable_shape() = TensorShape({1, 1, 2, 2});
tf2xla::Feed* y_feed = config.add_feed();
y_feed->mutable_id()->set_node_name("y");
*y_feed->mutable_shape() = TensorShape({1, 1, 2, 2});
config.add_fetch()->mutable_id()->set_node_name("conv2d");
return config;
}
TEST(ConvertGraphDefToXla, Conv2DIsConvertedToConvolutionWithDefaultPrecision) {
GraphDef graph_def = Conv2DGraph();
tf2xla::Config config = Conv2DConfig();
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
int num_convolutions = 0;
const xla::HloModuleProto& module_proto = computation.proto();
for (const xla::HloComputationProto& computation_proto :
module_proto.computations()) {
for (const xla::HloInstructionProto& instruction_proto :
computation_proto.instructions()) {
if (instruction_proto.opcode() == "convolution") {
num_convolutions++;
ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
2);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
xla::PrecisionConfig::DEFAULT);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
xla::PrecisionConfig::DEFAULT);
}
}
}
EXPECT_EQ(num_convolutions, 1);
}
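// With TF32 disabled, the convolution must request HIGHEST operand precision.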
TEST_F(ConvertGraphDefToXlaWithTF32Disabled,
Conv2DIsConvertedToConvolutionWithHighestPrecision) {
GraphDef graph_def = Conv2DGraph();
tf2xla::Config config = Conv2DConfig();
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
int num_convolutions = 0;
const xla::HloModuleProto& module_proto = computation.proto();
for (const xla::HloComputationProto& computation_proto :
module_proto.computations()) {
for (const xla::HloInstructionProto& instruction_proto :
computation_proto.instructions()) {
if (instruction_proto.opcode() == "convolution") {
num_convolutions++;
ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
2);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
xla::PrecisionConfig::HIGHEST);
EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
xla::PrecisionConfig::HIGHEST);
}
}
}
EXPECT_EQ(num_convolutions, 1);
}
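// A feed that nothing consumes still becomes a computation parameter, so the
// executable must accept (and ignore) data passed for it.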
TEST(ConvertGraphDefToXla, SumWithUnusedArgument) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
NodeDef* unused = graph_def.add_node();
unused->set_name("unused");
unused->set_op("Placeholder");
(*unused->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
config.add_feed()->mutable_id()->set_node_name("unused");
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
auto x_global_or = client->TransferToServer(x_literal);
auto y_global_or = client->TransferToServer(y_literal);
auto unused_global_or = client->TransferToServer(y_literal);
TF_EXPECT_OK(x_global_or.status());
TF_EXPECT_OK(y_global_or.status());
TF_EXPECT_OK(unused_global_or.status());
std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value());
std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value());
std::unique_ptr<xla::GlobalData> unused_global =
std::move(unused_global_or.value());
auto result_or = client->ExecuteAndTransfer(
computation, {x_global.get(), y_global.get(), unused_global.get()});
TF_EXPECT_OK(result_or.status());
xla::Literal result = std::move(result_or.value());
EXPECT_EQ("(\ns32[] 42\n)", result.ToString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c30fe9e6-8cac-4bf9-8a3a-a1ee162dddce | cpp | tensorflow/tensorflow | functionalize_control_flow | tensorflow/compiler/tf2xla/functionalize_control_flow.cc | tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc | #include "tensorflow/compiler/tf2xla/functionalize_control_flow.h"
#include <algorithm>
#include <deque>
#include <stack>
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/functionalize_while.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "xla/union_find.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
using FuncMap = std::map<string, std::optional<string>>;
using FuncMapIter = FuncMap::const_iterator;
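// Helpers over the functionalization cache: the map value holds the rewritten
// function's name, or nullopt if the function needed no rewrite.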
bool FunctionHasBeenProcessed(FuncMapIter func_iter, const FuncMap* func_map) {
return func_iter != func_map->end();
}
bool FunctionHasBeenModified(FuncMapIter func_iter) {
return func_iter->second.has_value();
}
string GetNewFunctionName(
const string& func_name, Node* n,
AssociatedFunctionInfo::AssociatedFunctionType func_type,
FunctionLibraryDefinition* fld) {
return (
func_type ==
AssociatedFunctionInfo::AssociatedFunctionType::kSymbolicGradient
? fld->UniqueFunctionName(absl::StrCat(n->name(), "_f15n_"))
: fld->UniqueFunctionName(absl::StrCat(func_name, "_f15n_")));
}
const string& GetMappedFunctionName(FuncMapIter func_iter) {
DCHECK(func_iter->second.has_value());
return func_iter->second.value();
}
void UpdateFunctionMap(FuncMap* func_map, const string& canonicalized_name,
const string& new_func_name, bool function_modified) {
(*func_map)[canonicalized_name] =
function_modified ? absl::make_optional(new_func_name) : std::nullopt;
}
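// Copies the rewritten FunctionDef into the graph's own function library,
// unless something with that name is already registered there.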
Status AddFunctionDefToGraphLibrary(
const string& func_name, const AssociatedFunctionInfo& associated_function,
Graph* graph, FunctionLibraryDefinition* fld) {
const OpRegistrationData* op_reg_data;
if (graph->flib_def().LookUp(func_name, &op_reg_data).ok())
return absl::OkStatus();
const FunctionDef* new_fdef = fld->Find(func_name);
DCHECK(new_fdef != nullptr);
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = *new_fdef;
return graph->AddFunctionLibrary(fdef_lib);
}
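// Functionalizes control flow inside the body of `func_name`. Forward-declared
// because it is mutually recursive with the helper below.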
Status FunctionalizeControlFlowForFunction(
const string& func_name, const string& new_func_name,
const protobuf::Map<string, tensorflow::AttrValue>& attrs,
FunctionLibraryDefinition* fld, FunctionLibraryRuntime* flr,
FuncMap* func_map, bool* function_modified,
const NodeFilter& node_filter = {});
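// Walks every node carrying associated functions, functionalizes each function
// at most once (memoized in `func_map`), and rewrites the node to point at the
// new function whenever one was produced.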
Status FunctionalizeControlFlowForNodeAssociatedFunctions(
FuncMap* func_map, Graph* graph, FunctionLibraryDefinition* fld,
FunctionLibraryRuntime* flr, bool* any_function_modified,
const NodeFilter& node_filter) {
std::vector<std::pair<Node*, std::vector<AssociatedFunctionInfo>>>
nodes_to_associated_functions;
for (auto* n : graph->nodes()) {
auto associated_functions = GetAssociatedFunctions(*n, fld);
if (!associated_functions.empty()) {
nodes_to_associated_functions.push_back({n, associated_functions});
}
}
for (const auto& pair : nodes_to_associated_functions) {
Node* n = pair.first;
auto associated_functions = pair.second;
for (auto& associated_function : associated_functions) {
DCHECK(associated_function.type() !=
AssociatedFunctionInfo::kFunctionCallNode ||
associated_functions.size() == 1);
string func_name = associated_function.func_name();
string canonicalized_name =
Canonicalize(func_name, AttrSlice(&associated_function.attrs()));
auto func_iter = func_map->find(canonicalized_name);
string new_func_name;
if (FunctionHasBeenProcessed(func_iter, func_map)) {
if (FunctionHasBeenModified(func_iter)) {
*any_function_modified = true;
new_func_name = GetMappedFunctionName(func_iter);
TF_RETURN_IF_ERROR(RewriteAssociatedFunction(
graph, n, fld, associated_function, new_func_name));
}
continue;
}
bool function_modified = false;
new_func_name =
GetNewFunctionName(func_name, n, associated_function.type(), fld);
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForFunction(
func_name, new_func_name, associated_function.attrs(), fld, flr,
func_map, &function_modified, node_filter));
UpdateFunctionMap(func_map, canonicalized_name, new_func_name,
function_modified);
if (function_modified) {
*any_function_modified = true;
TF_RETURN_IF_ERROR(AddFunctionDefToGraphLibrary(
new_func_name, associated_function, graph, fld));
TF_RETURN_IF_ERROR(RewriteAssociatedFunction(
graph, n, fld, associated_function, new_func_name));
}
}
}
return absl::OkStatus();
}
Status FunctionalizeControlFlowForFunction(
const string& func_name, const string& new_func_name,
const protobuf::Map<string, tensorflow::AttrValue>& attrs,
FunctionLibraryDefinition* fld, FunctionLibraryRuntime* flr,
FuncMap* func_map, bool* function_modified, const NodeFilter& node_filter) {
*function_modified = false;
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(flr->Instantiate(func_name, AttrSlice(&attrs), &handle));
Status ret_status = absl::OkStatus();
auto cleanup_handle = gtl::MakeCleanup([&]() {
auto s = flr->ReleaseHandle(handle);
if (!s.ok()) {
ret_status.Update(s);
}
});
const FunctionBody* body = flr->GetFunctionBody(handle);
Graph* g = body->graph;
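  // v1 control flow is marked by Switch/Merge nodes; only rewrite the body if
  // at least one survives the node filter.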
bool has_switch_or_merge = false;
for (Node* n : body->graph->nodes()) {
if (node_filter && !node_filter(n)) continue;
if (n->type_string() == "Switch" || n->type_string() == "Merge") {
has_switch_or_merge = true;
break;
}
}
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForNodeAssociatedFunctions(
func_map, g, fld, flr, function_modified, node_filter));
if (has_switch_or_merge) {
*function_modified = true;
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("functionalize_control_flow_before_fdef_", func_name),
*g, fld);
}
TF_RETURN_IF_ERROR(FunctionalizeControlFlow(g, fld, node_filter));
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("functionalize_control_flow_after_fdef_", func_name), *g,
fld);
}
}
if (*function_modified) {
FunctionDef functionalized_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, new_func_name, &functionalized_fdef));
if (func_name == new_func_name) {
VLOG(2) << "Replacing function " << func_name;
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(new_func_name, functionalized_fdef));
} else {
VLOG(2) << "Adding function " << new_func_name;
TF_RETURN_IF_ERROR(fld->AddFunctionDef(functionalized_fdef));
}
}
return ret_status;
}
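// Rewrites v1 control flow (Switch/Merge/Enter/Exit/NextIteration) into
// functional While and If ops, optionally recursing into function bodies.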
Status FunctionalizeControlFlow(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter,
bool include_functions) {
VLOG(2) << "FunctionalizeControlFlow (initial): "
<< DumpGraphToFile("functionalize_initial", *graph, library);
if (include_functions) {
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, tensorflow::Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, library,
tensorflow::OptimizerOptions());
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
FuncMap func_map;
bool modified = false;
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForNodeAssociatedFunctions(
&func_map, graph, library, flr, &modified, node_filter));
}
TF_RETURN_IF_ERROR(FunctionalizeWhileLoop(graph, library, node_filter));
TF_RETURN_IF_ERROR(FunctionalizeCond(graph, library, node_filter));
VLOG(2) << "FunctionalizeControlFlow (final): "
<< DumpGraphToFile("functionalize_final", *graph, library);
return absl::OkStatus();
}
Status FunctionalizeControlFlowForGraphDef(GraphDef* graph_def,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter,
bool include_functions) {
FunctionDefLibrary function_lib = graph_def->library();
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph({}, *graph_def, &graph));
TF_RETURN_IF_ERROR(FunctionalizeControlFlow(&graph, library, node_filter,
include_functions));
graph.ToGraphDef(graph_def);
std::swap(*graph_def->mutable_library(), function_lib);
return absl::OkStatus();
}
Status FunctionalizeControlFlowForXlaPass::Run(
const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
if (VLOG_IS_ON(4)) {
DumpGraphToFile("functionalize_control_flow_before", *graph,
options.flib_def);
}
const auto* config = &options.session_options->config;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
nullptr, options.session_options->env, config,
TF_GRAPH_DEF_VERSION, options.flib_def,
config->graph_options().optimizer_options()));
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
static std::map<string, string>* kNodeTypeToFunctionAttrMapping =
new std::map<string, string>{
{"_TPUReplicate", "computation"},
{"XlaLaunch", "function"},
};
FuncMap func_map;
bool fld_modified = false;
for (Node* n : graph->nodes()) {
auto it = kNodeTypeToFunctionAttrMapping->find(n->type_string());
if (it == kNodeTypeToFunctionAttrMapping->end()) {
continue;
}
const string func_attr = it->second;
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), func_attr, &func));
VLOG(2) << "Graph has node " << n->type_string()
<< ". Corresponding function: " << func.name();
string new_func_name = options.flib_def->UniqueFunctionName(
absl::StrCat(func.name(), "_f15n_"));
bool modified;
TF_RETURN_IF_ERROR(FunctionalizeControlFlowForFunction(
func.name(), new_func_name, func.attr(), options.flib_def, flr,
&func_map, &modified));
if (modified) {
n->ClearAttr(func_attr);
func.set_name(new_func_name);
n->AddAttr(func_attr, func);
fld_modified = true;
}
}
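  // Note: the prune below is deliberately compiled out, which is why
  // `fld_modified` is computed above but currently unused; presumably it is
  // gated off pending an upstream fix.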
if (false) {
if (VLOG_IS_ON(4)) {
DumpGraphToFile("functionalize_control_flow_before_prune", *graph,
options.flib_def);
}
TF_RETURN_IF_ERROR(
PruneUnreachableFunctionsFromGraph(*graph, options.flib_def));
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile("functionalize_control_flow_after", *graph,
options.flib_def);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/tf2xla/functionalize_control_flow.h"
#include <string>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h"
#include "tensorflow/compiler/tf2xla/test_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
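// Returns the name and then/else branch functions of the first "If" node.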
Status FindIfThenAndElse(const GraphDef& graph, string* op_name,
NameAttrList* then_fn, NameAttrList* else_fn) {
for (const NodeDef& node : graph.node()) {
if (node.op() == "If") {
*op_name = node.name();
const NameAttrList* result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "then_branch", &result));
*then_fn = *result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "else_branch", &result));
*else_fn = *result;
return absl::OkStatus();
}
}
return errors::NotFound("No If node found in graph");
}
class ConditionalTestFixture
: public ::testing::TestWithParam<std::tuple<bool, bool>> {
protected:
void SetUp() override {
restrict_to_tpu_nodes_ = std::get<0>(GetParam());
wrap_condition_in_function_ = std::get<1>(GetParam());
}
void RunTest();
private:
void BuildCondGraph(Graph* cond_graph);
void CheckGraphDef(const GraphDef& graph_def,
const FunctionLibraryDefinition& library);
bool restrict_to_tpu_nodes_ = false;
bool wrap_condition_in_function_ = false;
};
TEST_P(ConditionalTestFixture, ConditionalTests) { RunTest(); }
INSTANTIATE_TEST_SUITE_P(
FunctionalizeControlFlow, ConditionalTestFixture,
::testing::Combine(::testing::Bool(), ::testing::Bool()),
[](const ::testing::TestParamInfo<ConditionalTestFixture::ParamType>&
info) {
bool restrict_to_tpu_nodes = std::get<0>(info.param);
bool wrap_cond_in_function = std::get<1>(info.param);
string name =
absl::StrCat(restrict_to_tpu_nodes ? "with_filter" : "without_filter",
wrap_cond_in_function ? "_in_function" : "_in_graph");
return name;
});
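// Builds a raw v1 conditional (Switch/Merge) graph and stamps every node with
// the attributes that functionalization is expected to propagate onto the
// generated If node.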
void ConditionalTestFixture::BuildCondGraph(Graph* cond_graph) {
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32);
auto less = ops::Less(scope.WithOpName("cond/Less"), y, x);
auto switch_1 = ops::Switch(scope.WithOpName("cond/Switch"), less, less);
auto identity_t =
ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_true);
auto seventeen = ops::Const<int32>(
scope.WithOpName("cond").WithControlDependencies(identity_t), 17);
auto switch_2 = ops::Switch(scope.WithOpName("cond/Switch"), y, less);
auto mul = ops::Multiply(scope.WithOpName("cond/Mul"), switch_2.output_true,
seventeen);
auto identity_f =
ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_false);
auto twenty_three = ops::Const<int32>(
scope.WithOpName("cond").WithControlDependencies(identity_f), 23);
auto switch_3 = ops::Switch(scope.WithOpName("cond/Switch"), x, less);
auto add = ops::Add(scope.WithOpName("cond/false/add"),
switch_3.output_false, twenty_three);
auto merge = ops::Merge(scope.WithOpName("cond/Merge"),
std::initializer_list<Input>{add, mul});
TF_EXPECT_OK(scope.ToGraph(cond_graph));
for (Node* n : cond_graph->nodes()) {
std::string dummy_value = "value";
for (absl::string_view attr_name : kAttrsToPropagate) {
n->AddAttr(std::string(attr_name), dummy_value);
}
}
}
}
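// Verifies that the functionalized graph contains an If node whose
// instantiated then/else branches match the expected multiply and add
// subgraphs, and that the stamped attributes were propagated.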
void ConditionalTestFixture::CheckGraphDef(
const GraphDef& graph_def, const FunctionLibraryDefinition& library) {
string op_name;
NameAttrList then_fn;
NameAttrList else_fn;
TF_EXPECT_OK(FindIfThenAndElse(graph_def, &op_name, &then_fn, &else_fn));
InstantiationResultForTest else_result;
TF_EXPECT_OK(
InstantiateFunctionForTest(else_fn.name(), library, &else_result));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32);
auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
auto less = ops::Less(scope.WithOpName("cond/Less"), y, x);
auto if_op =
ops::If(scope.WithOpName(op_name), less,
std::initializer_list<Input>{less, y, x}, {DT_INT32}, then_fn,
else_fn, ops::If::OutputShapes({PartialTensorShape()}));
auto id = ops::Identity(scope.WithOpName("cond/Merge"), if_op.output[0]);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg_0 = ops::_Arg(scope.WithOpName("arg0"), DT_BOOL, 0);
auto arg_1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg_2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto identity = ops::Identity(scope.WithOpName("cond/Identity"), arg_0);
auto cond = ops::Const(
scope.WithOpName("cond").WithControlDependencies(identity), 17);
auto mul = ops::Mul(scope.WithOpName("cond/Mul"), arg_1, cond);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), mul, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(InstantiateFunctionForTest(then_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
EXPECT_EQ((DataTypeVector{DT_BOOL, DT_INT32, DT_INT32}), result.arg_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg_0 = ops::_Arg(scope.WithOpName("arg0"), DT_BOOL, 0);
auto arg_1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg_2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto identity = ops::Identity(scope.WithOpName("cond/Identity_1"), arg_0);
auto cond_1 = ops::Const(
scope.WithOpName("cond_1").WithControlDependencies(identity), 23);
auto add = ops::Add(scope.WithOpName("cond/false/add"), arg_2, cond_1);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(InstantiateFunctionForTest(else_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
EXPECT_EQ((DataTypeVector{DT_BOOL, DT_INT32, DT_INT32}), result.arg_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
for (const NodeDef& node : graph_def.node()) {
if (node.op() == "If") {
for (absl::string_view attr_name : kAttrsToPropagate) {
std::string attr_val;
TF_EXPECT_OK(GetNodeAttr(node, attr_name, &attr_val));
EXPECT_EQ(attr_val, "value");
}
}
}
}
}
void ConditionalTestFixture::RunTest() {
Graph graph(OpRegistry::Global());
if (wrap_condition_in_function_) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
Graph cond_graph(OpRegistry::Global());
BuildCondGraph(&cond_graph);
FunctionDef cond_fdef;
TF_ASSERT_OK(GraphToFunctionDef(cond_graph, "cond_fn", &cond_fdef));
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = cond_fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
NodeDef cond_fn;
cond_fn.set_name("cond_node");
cond_fn.set_op("cond_fn");
*(cond_fn.add_input()) = "source";
Status status;
scope.graph()->AddNode(cond_fn, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(scope.ToGraph(&graph));
} else {
BuildCondGraph(&graph);
}
FunctionLibraryDefinition library(graph.flib_def());
NodeFilter node_filter =
restrict_to_tpu_nodes_
? [](const Node* n) { return n->attrs().Find("_tpu_replicate"); }
: NodeFilter{};
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(FunctionalizeControlFlowForGraphDef(
&optimized_graph_def, &library, node_filter,
wrap_condition_in_function_));
TF_ASSERT_OK(FunctionalizeControlFlow(
&graph, &library, node_filter,
wrap_condition_in_function_));
if (wrap_condition_in_function_) {
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, tensorflow::Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, &library,
tensorflow::OptimizerOptions());
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
FunctionLibraryRuntime::Handle handle;
string func_name;
for (Node* n : graph.nodes()) {
if (n->name() == "cond_node") {
func_name = n->type_string();
break;
}
}
TF_ASSERT_OK(flr->Instantiate(func_name, AttrSlice(), &handle));
const FunctionBody* body = flr->GetFunctionBody(handle);
GraphDef graph_def;
body->graph->ToGraphDef(&graph_def);
CheckGraphDef(graph_def, library);
} else {
CheckGraphDef(optimized_graph_def, library);
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
CheckGraphDef(converted_graph_def, library);
}
}
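// Returns the cond and body functions of the first "While" node in the graph.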
Status FindWhileCondAndBody(const GraphDef& graph, NameAttrList* cond,
NameAttrList* body) {
for (const NodeDef& node : graph.node()) {
if (node.op() == "While") {
const NameAttrList* result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "cond", &result));
*cond = *result;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "body", &result));
*body = *result;
return absl::OkStatus();
}
}
return errors::NotFound("No While node found in graph");
}
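// Functionalizes a single-variable loop, `while (i < 10) i += 1`, built from
// raw Enter/Merge/Switch/Exit/NextIteration nodes.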
TEST(FunctionalizeControlFlow, OneLoopVar) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), source, "aloop");
auto enter2 =
ops::internal::Enter(scope.WithOpName("while/Enter2"), source, "aloop");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_ =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_.output_false);
auto identity =
ops::Identity(scope.WithOpName("while/Identity"), switch_.output_true);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), add);
auto sink = ops::Identity(scope.WithOpName("sink"), exit);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
for (Node* n : graph.nodes()) {
if (n->name() == "while/Enter") {
graph.AddControlEdge(n, graph.sink_node());
}
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{source}, cond_fn, body_fn);
auto sink = ops::Identity(scope.WithOpName("sink"), while_op[0]);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(arg), 10);
auto less = ops::Less(scope.WithOpName("while/Less"), arg, ten);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(cond_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto identity = ops::Identity(scope.WithOpName("while/Identity"), arg);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
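// A FunctionDef ("increment_fn") that adds a constant to its int32 argument,
// marked _noinline so functionalization must treat calls to it as opaque.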
FunctionDef GetNoinlineFunctionDef() {
FunctionDef fdef = FunctionDefHelper::Create(
"increment_fn", {"x:int32"}, {"add:int32"}, {},
{
{{"add/y"}, "Const", {}, {{"dtype", DT_INT32}}},
{{"add_0"}, "Add", {"x", "add/y:output:0"}, {{"T", DT_INT32}}},
},
{{"add", "add_0:z:0"}});
(*fdef.mutable_attr())["_noinline"].set_b(true);
return fdef;
}
Status AddNoinlineFunctionToGraph(const string& node_name, Graph* graph) {
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = GetNoinlineFunctionDef();
TF_RETURN_IF_ERROR(graph->AddFunctionLibrary(fdef_lib));
NodeDef increment_fn;
increment_fn.set_name(node_name);
increment_fn.set_op("increment_fn");
*increment_fn.add_input() = "while/Identity";
*increment_fn.add_input() = "^while/Identity";
Status status;
graph->AddNode(increment_fn, &status);
return status;
}
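// Like OneLoopVar, but the loop body's update goes through a _noinline
// function call, which must survive into the generated body function.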
TEST(FunctionalizeControlFlow, NoinlineLoopBody) {
const string& noinline_node_name = "while/increment_fn";
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto enter = ops::internal::Enter(scope.WithOpName("while/Enter"), source,
"while/while_context");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_ =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_.output_false);
auto identity =
ops::Identity(scope.WithOpName("while/Identity"), switch_.output_true);
TF_ASSERT_OK(AddNoinlineFunctionToGraph(noinline_node_name, scope.graph()));
NodeDef next_iter;
next_iter.set_name("while/NextIteration");
next_iter.set_op("NextIteration");
*next_iter.add_input() = noinline_node_name;
(*next_iter.mutable_attr())["T"].set_type(DT_INT32);
Status status;
Node* n = scope.graph()->AddNode(next_iter, &status);
TF_ASSERT_OK(status);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(n, 0, merge.output.node(), 1);
TF_ASSERT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(graph.flib_def());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
*(optimized_graph_def.mutable_library()->add_function()) =
GetNoinlineFunctionDef();
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_ASSERT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{source}, cond_fn, body_fn);
GraphDef expected;
TF_ASSERT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
TF_ASSERT_OK(
AddNoinlineFunctionToGraph(noinline_node_name, scope.graph()));
auto identity = ops::Identity(scope.WithOpName("while/Identity"), arg);
NodeDef retval;
retval.set_name("retval0_RetVal");
retval.set_op(FunctionLibraryDefinition::kRetOp);
*retval.add_input() = noinline_node_name;
(*retval.mutable_attr())["T"].set_type(DT_INT32);
(*retval.mutable_attr())["index"].set_i(0);
Status status;
scope.graph()->AddNode(retval, &status);
TF_ASSERT_OK(status);
GraphDef expected;
TF_ASSERT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
expected.clear_library();
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
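// Stripping the GraphDef's function library should surface as NOT_FOUND
// rather than a crash.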
TEST(FunctionalizeControlFlow, MissingFunctionDefInLibrary) {
const string& noinline_node_name = "while/increment_fn";
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto identity = ops::Identity(scope.WithOpName("while/Identity"), source);
TF_ASSERT_OK(AddNoinlineFunctionToGraph(noinline_node_name, scope.graph()));
TF_ASSERT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(graph.flib_def());
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
graph_def.clear_library();
Status status = FunctionalizeControlFlowForGraphDef(&graph_def, &library);
EXPECT_EQ(tensorflow::error::NOT_FOUND, status.code());
}
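// A loop whose value never leaves through an Exit node should still
// functionalize into a While with matching cond and body.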
TEST(FunctionalizeControlFlow, OneLoopVarWithoutExit) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), source, "aloop");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_ =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto identity =
ops::Identity(scope.WithOpName("while/Identity"), switch_.output_true);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), add);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{source}, cond_fn, body_fn);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(arg), 10);
auto less = ops::Less(scope.WithOpName("while/Less"), arg, ten);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(cond_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto identity = ops::Identity(scope.WithOpName("while/Identity"), arg);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity, one);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ(DataTypeVector{DT_INT32}, result.arg_types);
EXPECT_EQ(DataTypeVector{DT_INT32}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
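// A loop carrying two variables: per iteration x += 1 and y += 2 (the "mul"
// node is actually an Add), with the condition reading only x.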
TEST(FunctionalizeControlFlow, TwoLoopVars) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto x = ops::Placeholder(scope.WithOpName("Placeholder/x"), DT_INT32);
auto y = ops::Placeholder(scope.WithOpName("Placeholder/y"), DT_INT32);
auto enter_x =
ops::internal::Enter(scope.WithOpName("while/Enter/x"), x, "aloop");
auto enter_y =
ops::internal::Enter(scope.WithOpName("while/Enter/y"), y, "aloop");
auto merge_x = ops::Merge(scope.WithOpName("while/Merge/x"),
std::initializer_list<Input>{enter_x, dummy});
auto merge_y = ops::Merge(scope.WithOpName("while/Merge/y"),
std::initializer_list<Input>{enter_y, dummy});
auto three = ops::Const<int32>(scope.WithOpName("while/cond/three")
.WithControlDependencies(merge_x.output),
3);
auto cond_add =
ops::Add(scope.WithOpName("while/cond/Add"), merge_x.output, three);
auto ten = ops::Const<int32>(scope.WithOpName("while/cond/ten")
.WithControlDependencies(merge_x.output),
10);
auto less = ops::Less(scope.WithOpName("while/cond/Less"), cond_add, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_x = ops::Switch(scope.WithOpName("while/Switch/x"),
merge_x.output, loop_cond);
auto switch_y = ops::Switch(scope.WithOpName("while/Switch/y"),
merge_y.output, loop_cond);
auto exit_x = ops::internal::Exit(scope.WithOpName("while/Exit/x"),
switch_x.output_false);
auto exit_y = ops::internal::Exit(scope.WithOpName("while/Exit/y"),
switch_y.output_false);
auto identity_x = ops::Identity(scope.WithOpName("while/Identity/x"),
switch_x.output_true);
auto identity_y = ops::Identity(scope.WithOpName("while/Identity/y"),
switch_y.output_true);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/one").WithControlDependencies(identity_x),
1);
auto two = ops::Const<int32>(
scope.WithOpName("while/mul/two").WithControlDependencies(identity_x),
2);
auto add = ops::Add(scope.WithOpName("while/add"), identity_x, one);
auto mul = ops::Add(scope.WithOpName("while/mul"), identity_y, two);
auto next_iteration_x =
ops::NextIteration(scope.WithOpName("while/NextIteration/x"), add);
auto next_iteration_y =
ops::NextIteration(scope.WithOpName("while/NextIteration/y"), mul);
auto sink_x = ops::Identity(scope.WithOpName("sink_x"), exit_x);
auto sink_y = ops::Identity(scope.WithOpName("sink_y"), exit_y);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration_x.node(), 0, merge_x.output.node(),
1);
scope.graph()->AddEdge(next_iteration_y.node(), 0, merge_y.output.node(),
1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef optimized_graph_def;
graph.ToGraphDef(&optimized_graph_def);
TF_ASSERT_OK(
FunctionalizeControlFlowForGraphDef(&optimized_graph_def, &library));
TF_ASSERT_OK(FunctionalizeControlFlow(&graph, &library));
GraphDef converted_graph_def;
graph.ToGraphDef(&converted_graph_def);
for (const GraphDef& graph_def : {optimized_graph_def, converted_graph_def}) {
NameAttrList cond_fn, body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &cond_fn, &body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x = ops::Placeholder(scope.WithOpName("Placeholder/x"), DT_INT32);
auto y = ops::Placeholder(scope.WithOpName("Placeholder/y"), DT_INT32);
auto while_op =
ops::While(scope.WithOpName("while/LoopCond"),
std::initializer_list<Input>{x, y}, cond_fn, body_fn);
auto sink_x = ops::Identity(scope.WithOpName("sink_x"), while_op[0]);
auto sink_y = ops::Identity(scope.WithOpName("sink_y"), while_op[1]);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto three = ops::Const<int32>(scope.WithOpName("while/cond/three")
.WithControlDependencies(arg0.output),
3);
auto cond_add =
ops::Add(scope.WithOpName("while/cond/Add"), arg0.output, three);
auto ten = ops::Const<int32>(scope.WithOpName("while/cond/ten")
.WithControlDependencies(arg0.output),
10);
auto less = ops::Less(scope.WithOpName("while/cond/Less"), cond_add, ten);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(cond_fn.name(), library, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32}), result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto identity_x =
ops::Identity(scope.WithOpName("while/Identity/x"), arg0);
auto identity_y =
ops::Identity(scope.WithOpName("while/Identity/y"), arg1);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/one").WithControlDependencies(identity_x),
1);
auto two = ops::Const<int32>(
scope.WithOpName("while/mul/two").WithControlDependencies(identity_x),
2);
auto add = ops::Add(scope.WithOpName("while/add"), identity_x, one);
auto mul = ops::Add(scope.WithOpName("while/mul"), identity_y, two);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), add, 0);
auto retval1 = ops::_Retval(scope.WithOpName("retval1_RetVal"), mul, 1);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(body_fn.name(), library, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32}), result.arg_types);
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32}), result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
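// Exercises a nested loop, including a resource-variable update in the inner
// body, under every combination of TPU marking and node-filter restriction.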
class ComplexTestFixture
: public ::testing::TestWithParam<std::tuple<bool, bool, bool>> {
protected:
void SetUp() override {
restrict_to_tpu_nodes_ = std::get<0>(GetParam());
mark_inner_loop_tpu_ = std::get<1>(GetParam());
mark_outer_loop_tpu_ = std::get<2>(GetParam());
}
void RunTest();
private:
void CheckOuterNodesFunctionalized(const GraphDef& graph_def,
const FunctionLibraryDefinition& library,
NameAttrList& inner_cond_fn,
NameAttrList& inner_body_fn);
void CheckInnerNodesFunctionalized(const GraphDef& graph_def,
const FunctionLibraryDefinition& library,
const NameAttrList& inner_cond_fn,
const NameAttrList& inner_body_fn);
bool restrict_to_tpu_nodes_ = false;
bool mark_inner_loop_tpu_ = false;
bool mark_outer_loop_tpu_ = false;
};
TEST_P(ComplexTestFixture, ComplexTests) { RunTest(); }
INSTANTIATE_TEST_SUITE_P(
FunctionalizeControlFlow, ComplexTestFixture,
::testing::Combine(::testing::Bool(), ::testing::Bool(), ::testing::Bool()),
[](const ::testing::TestParamInfo<ComplexTestFixture::ParamType>& info) {
bool restrict_to_tpu_nodes = std::get<0>(info.param);
bool mark_inner_loop_tpu = std::get<1>(info.param);
bool mark_outer_loop_tpu = std::get<2>(info.param);
string node_string;
if (mark_inner_loop_tpu && mark_outer_loop_tpu)
node_string = "both_loops_tpu";
else if (!mark_inner_loop_tpu && !mark_outer_loop_tpu)
node_string = "no_loop_tpu";
else
node_string = mark_inner_loop_tpu ? "inner_loop_tpu" : "outer_loop_tpu";
string name = absl::StrCat(
restrict_to_tpu_nodes ? "restricted_" : "unrestricted_", node_string);
return name;
});
void ComplexTestFixture::RunTest() {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32);
auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
auto three = ops::Const<int32>(scope.WithOpName("three"), 3);
auto y = ops::Add(scope.WithOpName("y"), x, three);
auto var = ops::VarHandleOp(scope.WithOpName("Variable"), DT_INT32,
TensorShape({}));
auto zero = ops::Const<int32>(scope.WithOpName("outer/Const"), 0);
auto enter_i =
ops::internal::Enter(scope.WithOpName("outer/Enter_i"), zero, "outer");
auto merge_i = ops::Merge(scope.WithOpName("outer/Merge_i"),
std::initializer_list<Input>{enter_i, dummy});
auto ten = ops::Const<int32>(scope.WithOpName("outer/Less/y")
.WithControlDependencies(merge_i.output),
10);
auto less_i =
ops::Less(scope.WithOpName("outer/Less_i"), merge_i.output, ten);
auto outer_loop_cond =
ops::LoopCond(scope.WithOpName("outer/LoopCond"), less_i);
auto switch_i = ops::Switch(scope.WithOpName("outer/Switch"),
merge_i.output, outer_loop_cond);
auto exit_i = ops::internal::Exit(scope.WithOpName("outer/Exit"),
switch_i.output_false);
auto identity_i =
ops::Identity(scope.WithOpName("outer/Identity"), switch_i.output_true);
auto enter_x_outer =
ops::internal::Enter(scope.WithOpName("outer/Enter_x"), x, "outer",
ops::internal::Enter::Attrs().IsConstant(true));
auto enter_k_outer =
ops::internal::Enter(scope.WithOpName("outer/Enter_k"), y, "outer",
ops::internal::Enter::Attrs().IsConstant(true));
auto enter_var_outer =
ops::internal::Enter(scope.WithOpName("outer/Enter_var"), var, "outer",
ops::internal::Enter::Attrs().IsConstant(true));
auto one_j = ops::Const<int32>(
scope.WithOpName("outer/j").WithControlDependencies(identity_i), 1);
auto enter_j = ops::internal::Enter(scope.WithOpName("outer/inner/Enter_j"),
one_j, "inner");
auto enter_k =
ops::internal::Enter(scope.WithOpName("outer/inner/Enter_k")
.WithControlDependencies(identity_i),
enter_k_outer, "inner");
auto enter_x = ops::internal::Enter(
scope.WithOpName("outer/inner/Enter_x"), enter_x_outer, "inner",
ops::internal::Enter::Attrs().IsConstant(true));
auto enter_var = ops::internal::Enter(
scope.WithOpName("outer/inner/Enter_var"), enter_var_outer, "inner",
ops::internal::Enter::Attrs().IsConstant(true));
auto merge_j = ops::Merge(scope.WithOpName("outer/inner/Merge_j"),
std::initializer_list<Input>{enter_j, dummy});
auto merge_k = ops::Merge(scope.WithOpName("outer/inner/Merge_k"),
std::initializer_list<Input>{enter_k, dummy});
auto five = ops::Const<int32>(scope.WithOpName("outer/inner/Five")
.WithControlDependencies(merge_j.output),
5);
auto less_j =
ops::Less(scope.WithOpName("outer/inner/Less_j"), merge_j.output, five);
auto loop_cond =
ops::LoopCond(scope.WithOpName("outer/inner/LoopCond"), less_j);
auto switch_j = ops::Switch(scope.WithOpName("outer/inner/Switch_j"),
merge_j.output, loop_cond);
auto switch_k = ops::Switch(scope.WithOpName("outer/inner/Switch_k"),
merge_k.output, loop_cond);
auto exit_j = ops::internal::Exit(scope.WithOpName("outer/inner/Exit_j"),
switch_j.output_false);
auto exit_k = ops::internal::Exit(scope.WithOpName("outer/inner/Exit_k"),
switch_k.output_false);
auto identity_j = ops::Identity(scope.WithOpName("outer/inner/Identity_j"),
switch_j.output_true);
auto identity_k = ops::Identity(scope.WithOpName("outer/inner/Identity_k"),
switch_k.output_true);
auto mul_jk =
ops::Mul(scope.WithOpName("outer/inner/mul"), identity_j, identity_k);
auto add_jkx =
ops::Add(scope.WithOpName("outer/inner/add"), mul_jk, enter_x);
auto assign = ops::AssignAddVariableOp(
scope.WithOpName("outer/inner/assign_add"), enter_var, add_jkx);
auto one = ops::Const<int32>(
scope.WithOpName("outer/inner/One")
.WithControlDependencies(
absl::Span<const Operation>{assign.operation}),
1);
auto add_j =
ops::Add(scope.WithOpName("outer/inner/add_j"), identity_j, one);
auto next_iteration_j = ops::NextIteration(
scope.WithOpName("outer/inner/NextIteration_j"), add_j);
auto next_iteration_k = ops::NextIteration(
scope.WithOpName("outer/inner/NextIteration_k"), identity_k);
auto one_outer = ops::Const<int32>(
scope.WithOpName("outer/add/y").WithControlDependencies(identity_i), 1);
auto add_i =
ops::Add(scope.WithOpName("outer/add")
.WithControlDependencies(absl::Span<const Operation>{
exit_j.output.op(), exit_k.output.op()}),
identity_i, one_outer);
auto next_iteration_i =
ops::NextIteration(scope.WithOpName("outer/NextIteration"), add_i);
auto sink = ops::Identity(scope.WithOpName("sink"), exit_i);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration_i.node(), 0, merge_i.output.node(),
1);
scope.graph()->AddEdge(next_iteration_j.node(), 0, merge_j.output.node(),
1);
scope.graph()->AddEdge(next_iteration_k.node(), 0, merge_k.output.node(),
1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
for (Node* n : graph.nodes()) {
string name = n->name();
bool is_inner_node = name.find("outer/inner/") != string::npos;
bool is_outer_node = !is_inner_node && name.find("outer/") != string::npos;
if ((is_inner_node && mark_inner_loop_tpu_) ||
(is_outer_node && mark_outer_loop_tpu_)) {
n->AddAttr("_tpu_replicate", "cluster");
}
}
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
GraphDef orig_graph_def, optimized_graph_def;
graph.ToGraphDef(&orig_graph_def);
optimized_graph_def = orig_graph_def;
NodeFilter node_filter =
restrict_to_tpu_nodes_
? [](const Node* n) { return n->attrs().Find("_tpu_replicate"); }
: NodeFilter{};
Status status1 = FunctionalizeControlFlowForGraphDef(&optimized_graph_def,
&library, node_filter);
Status status2 = FunctionalizeControlFlow(&graph, &library, node_filter);
ASSERT_EQ(status1, status2);
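  // Functionalizing only the outer loop while the inner one is filtered out is
  // unsupported and should surface as an internal error.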
if (restrict_to_tpu_nodes_ && mark_outer_loop_tpu_ && !mark_inner_loop_tpu_) {
    ASSERT_TRUE(errors::IsInternal(status1));
return;
} else {
TF_ASSERT_OK(status1);
}
GraphDef optimized_converted_graph_def;
graph.ToGraphDef(&optimized_converted_graph_def);
for (const GraphDef& graph_def :
{optimized_graph_def, optimized_converted_graph_def}) {
NameAttrList inner_cond_fn, inner_body_fn;
if (!restrict_to_tpu_nodes_ ||
(restrict_to_tpu_nodes_ && mark_outer_loop_tpu_ &&
mark_inner_loop_tpu_)) {
CheckOuterNodesFunctionalized(graph_def, library, inner_cond_fn,
inner_body_fn);
CheckInnerNodesFunctionalized(graph_def, library, inner_cond_fn,
inner_body_fn);
} else {
if (!mark_outer_loop_tpu_ && !mark_inner_loop_tpu_) {
TF_EXPECT_GRAPH_EQ(orig_graph_def, graph_def);
} else if (!mark_outer_loop_tpu_ && mark_inner_loop_tpu_) {
TF_EXPECT_OK(
FindWhileCondAndBody(graph_def, &inner_cond_fn, &inner_body_fn));
CheckInnerNodesFunctionalized(graph_def, library, inner_cond_fn,
inner_body_fn);
}
}
}
}
void ComplexTestFixture::CheckOuterNodesFunctionalized(
const GraphDef& graph_def, const FunctionLibraryDefinition& library,
NameAttrList& inner_cond_fn, NameAttrList& inner_body_fn) {
NameAttrList outer_cond_fn, outer_body_fn;
TF_EXPECT_OK(FindWhileCondAndBody(graph_def, &outer_cond_fn, &outer_body_fn));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
auto three = ops::Const<int32>(scope.WithOpName("three"), 3);
auto y = ops::Add(scope.WithOpName("y"), x, three);
auto var = ops::VarHandleOp(scope.WithOpName("Variable"), DT_INT32,
TensorShape({}));
auto zero = ops::Const<int32>(scope.WithOpName("outer/Const"), 0);
auto while_op = ops::While(scope.WithOpName("outer/LoopCond"),
std::initializer_list<Input>{zero, y, x, var},
outer_cond_fn, outer_body_fn);
auto sink = ops::Identity(scope.WithOpName("sink"), while_op[0]);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(scope.WithOpName("arg3"), DT_RESOURCE, 3);
auto ten = ops::Const<int32>(
scope.WithOpName("outer/Less/y").WithControlDependencies(arg0.output),
10);
auto less = ops::Less(scope.WithOpName("outer/Less_i"), arg0, ten);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(outer_cond_fn.name(), library, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}),
result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(outer_body_fn.name(), library, &result));
TF_EXPECT_OK(
FindWhileCondAndBody(result.gdef, &inner_cond_fn, &inner_body_fn));
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(scope.WithOpName("arg3"), DT_RESOURCE, 3);
auto identity_i = ops::Identity(scope.WithOpName("outer/Identity"), arg0);
auto one_j = ops::Const<int32>(
scope.WithOpName("outer/j").WithControlDependencies(identity_i), 1);
auto while_op =
ops::While(scope.WithOpName("outer/inner/LoopCond"),
std::initializer_list<Input>{one_j, arg1, arg2, arg3},
inner_cond_fn, inner_body_fn);
auto one_outer = ops::Const<int32>(
scope.WithOpName("outer/add/y").WithControlDependencies(identity_i), 1);
auto add_i =
ops::Add(scope.WithOpName("outer/add")
.WithControlDependencies(absl::Span<const Operation>{
while_op[0].op(), while_op[1].op()}),
identity_i, one_outer);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), add_i, 0);
auto retval1 = ops::_Retval(scope.WithOpName("retval1_RetVal"), arg1, 1);
auto retval2 = ops::_Retval(scope.WithOpName("retval2_RetVal"), arg2, 2);
auto retval3 = ops::_Retval(scope.WithOpName("retval3_RetVal"), arg3, 3);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}),
result.arg_types);
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}),
result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
void ComplexTestFixture::CheckInnerNodesFunctionalized(
const GraphDef& graph_def, const FunctionLibraryDefinition& library,
const NameAttrList& inner_cond_fn, const NameAttrList& inner_body_fn) {
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(scope.WithOpName("arg3"), DT_RESOURCE, 3);
auto five = ops::Const<int32>(
scope.WithOpName("outer/inner/Five").WithControlDependencies(arg0), 5);
auto less_j = ops::Less(scope.WithOpName("outer/inner/Less_j"), arg0, five);
auto retval = ops::_Retval(scope.WithOpName("retval0_RetVal"), less_j, 0);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(inner_cond_fn.name(), library, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}),
result.arg_types);
EXPECT_EQ(DataTypeVector{DT_BOOL}, result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(scope.WithOpName("arg3"), DT_RESOURCE, 3);
auto identity_j =
ops::Identity(scope.WithOpName("outer/inner/Identity_j"), arg0);
auto identity_k =
ops::Identity(scope.WithOpName("outer/inner/Identity_k"), arg1);
auto mul_jk =
ops::Mul(scope.WithOpName("outer/inner/mul"), identity_j, identity_k);
auto add_jkx = ops::Add(scope.WithOpName("outer/inner/add"), mul_jk, arg2);
auto assign = ops::AssignAddVariableOp(
scope.WithOpName("outer/inner/assign_add"), arg3, add_jkx);
auto one = ops::Const<int32>(
scope.WithOpName("outer/inner/One")
.WithControlDependencies(
absl::Span<const Operation>{assign.operation}),
1);
auto add_j =
ops::Add(scope.WithOpName("outer/inner/add_j"), identity_j, one);
auto retval0 = ops::_Retval(scope.WithOpName("retval0_RetVal"), add_j, 0);
auto retval1 =
ops::_Retval(scope.WithOpName("retval1_RetVal"), identity_k, 1);
auto retval2 = ops::_Retval(scope.WithOpName("retval2_RetVal"), arg2, 2);
auto retval3 = ops::_Retval(scope.WithOpName("retval3_RetVal"), arg3, 3);
GraphDef expected;
TF_EXPECT_OK(scope.ToGraphDef(&expected));
InstantiationResultForTest result;
TF_EXPECT_OK(
InstantiateFunctionForTest(inner_body_fn.name(), library, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}),
result.arg_types);
EXPECT_EQ((DataTypeVector{DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}),
result.ret_types);
TF_EXPECT_GRAPH_EQ(expected, result.gdef);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/functionalize_control_flow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc2fa2bc-1063-4038-901b-fb4eeea6064d | cpp | tensorflow/tensorflow | xla_op_registry | tensorflow/compiler/tf2xla/xla_op_registry.cc | tensorflow/compiler/tf2xla/xla_op_registry_test.cc | #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include <functional>
#include <memory>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/util.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/next_pluggable_device_factory.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
const char* const DEVICE_CPU_XLA_JIT = "XLA_CPU_JIT";
const char* const DEVICE_GPU_XLA_JIT = "XLA_GPU_JIT";
const char* const DEVICE_XLA_CPU = "XLA_CPU";
const char* const DEVICE_XLA_GPU = "XLA_GPU";
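// Returns OK iff an "XlaLaunch" kernel is registered for `device_type`; this
// is how GetCompilationDevice below detects that a device can host
// JIT-compiled clusters.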
static Status LaunchOpHasKernelForDevice(const DeviceType& device_type) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef("XlaLaunch", &op_def));
NodeDef node_def;
node_def.set_name("_XlaLaunch-op");
node_def.set_op("XlaLaunch");
string kernel_class_name;
TF_RETURN_IF_ERROR(FindKernelDef(device_type, node_def, nullptr,
&kernel_class_name));
VLOG(1) << "LaunchOpHasKernelForDevice"
<< " kernel_class_name: " << kernel_class_name;
return absl::OkStatus();
}
XlaOpRegistry::XlaOpRegistry() = default;
XlaOpRegistry::~XlaOpRegistry() = default;
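// Two registrations of the same op name and label are compatible iff they
// agree on all filtering settings and their device allowlists do not
// overlap; conflicts are logged and reported as incompatible.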
bool XlaOpRegistry::IsCompatible(const OpRegistration& x,
const OpRegistration& y) {
if (x.name != y.name) return true;
if (x.label != y.label) return true;
if (x.compilation_only != y.compilation_only) {
LOG(WARNING) << "Registrations of " << x.name
<< " have incompatible compilation_only settings.";
return false;
}
if (x.allow_resource_types != y.allow_resource_types) {
LOG(WARNING) << "Registrations of " << x.name
<< " have incompatible allow_resource_types settings.";
return false;
}
if (x.allow_variant_types != y.allow_variant_types) {
LOG(WARNING) << "Registrations of " << x.name
<< " have incompatible allow_variant_types settings.";
return false;
}
if (x.allow_string_type != y.allow_string_type) {
LOG(WARNING) << "Registrations of " << x.name
<< " have incompatible allow_string_type settings.";
return false;
}
if (!x.has_device_allowlist && !y.has_device_allowlist) {
LOG(WARNING) << "Duplicate registrations of " << x.name
<< " with no device allowlists.";
return false;
}
if (x.has_device_allowlist && y.has_device_allowlist) {
for (const auto& device : x.device_allowlist) {
if (y.device_allowlist.count(device) != 0) {
LOG(WARNING) << "Multiple registrations of " << x.name << " on device "
<< device;
return false;
}
}
}
if (x.compile_time_constant_inputs != y.compile_time_constant_inputs) {
LOG(WARNING) << "Registrations of " << x.name
<< " have incompatible compile time constant inputs.";
return false;
}
if (x.is_metadata_op != y.is_metadata_op) {
LOG(WARNING) << "Registrations of " << x.name
<< " have incompatible values for is_metadata_op.";
return false;
}
return true;
}
void XlaOpRegistry::RegisterCompilationDevice(
const string& device_name, const DeviceRegistration& registration) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
auto result =
registry.compilation_devices_.emplace(device_name, registration);
CHECK(result.second || result.first->second.compilation_device_name ==
registration.compilation_device_name);
}
void XlaOpRegistry::RegisterBackend(
const string& compilation_device_name,
absl::Span<const DataType> supported_types, BackendOpFilter op_filter) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
auto result = registry.backends_.emplace(compilation_device_name, Backend());
CHECK(result.second) << "Duplicate XLA backend registration "
<< compilation_device_name;
result.first->second.supported_types.insert(supported_types.begin(),
supported_types.end());
result.first->second.op_filter = op_filter;
}
bool XlaOpRegistry::IsCompilationDevice(const string& device_name) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
return registry.backends_.find(device_name) != registry.backends_.end();
}
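// Looks up the compilation-device registration for `device_name`. The first
// call lazily registers CPU and GPU (when an XlaLaunch kernel exists for
// them); next-pluggable devices backed by a PjRt client are registered on
// the fly.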
bool XlaOpRegistry::GetCompilationDevice(
const string& device_name, const DeviceRegistration** registration) {
XlaOpRegistry& registry = Instance();
  static void* registration_init = [&registry]() {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
bool cpu_global_jit = flags->tf_xla_cpu_global_jit;
VLOG(2) << "tf_xla_cpu_global_jit = " << cpu_global_jit;
mutex_lock lock(registry.mutex_);
if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_CPU)).ok()) {
DeviceRegistration& registration =
registry.compilation_devices_[DEVICE_CPU];
registration.compilation_device_name = DEVICE_CPU_XLA_JIT;
registration.autoclustering_policy =
cpu_global_jit
? XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally
: XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested;
}
if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_GPU)).ok()) {
DeviceRegistration& registration =
registry.compilation_devices_[DEVICE_GPU];
registration.compilation_device_name = DEVICE_GPU_XLA_JIT;
registration.autoclustering_policy =
XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally;
}
return nullptr;
}();
(void)registration_init;
if (DeviceFactory::IsPluggableDevice(device_name) &&
GetPjRtClient(DeviceType(device_name)).ok()) {
mutex_lock lock(registry.mutex_);
NextPluggableDeviceFactory* device_factory =
static_cast<NextPluggableDeviceFactory*>(
DeviceFactory::GetFactory(device_name));
if (device_factory != nullptr &&
DeviceType(device_factory->compilation_device_name()) ==
DeviceType(DEVICE_GPU_XLA_JIT) &&
registry.compilation_devices_.find(device_name) ==
registry.compilation_devices_.end()) {
DeviceRegistration& registration =
registry.compilation_devices_[device_name];
registration.compilation_device_name = DEVICE_GPU_XLA_JIT;
registration.autoclustering_policy =
XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally;
}
}
mutex_lock lock(registry.mutex_);
auto it = registry.compilation_devices_.find(device_name);
if (it == registry.compilation_devices_.end()) return false;
*registration = &it->second;
return true;
}
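// Lazily instantiates KernelDefs for every (op, backend) pair on first use.
// Registrations with a device allowlist take precedence on their listed
// backends. Each type attribute's allowed values are the intersection of the
// OpDef's allowed types, the registration's constraints, and the backend's
// supported types; if that intersection is empty the op is skipped on that
// backend.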
void XlaOpRegistry::RegisterCompilationKernels() {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
if (registry.jit_kernels_registered_) return;
registry.jit_kernels_registered_ = true;
OpRegistryInterface* op_registry = OpRegistry::Global();
for (auto& ops : registry.ops_) {
const string& op_name = ops.first;
std::vector<std::unique_ptr<OpRegistration>>& op_registrations = ops.second;
std::partition(op_registrations.begin(), op_registrations.end(),
[](const std::unique_ptr<OpRegistration>& op_reg) {
return op_reg->has_device_allowlist;
});
std::unordered_set<string> allowlisted_backend;
for (auto& op_registration : op_registrations) {
if (op_registration->has_device_allowlist) {
allowlisted_backend.insert(op_registration->device_allowlist.begin(),
op_registration->device_allowlist.end());
}
}
for (auto& op_registration : op_registrations) {
const OpDef* op_def;
Status lookup_status = op_registry->LookUpOpDef(op_name, &op_def);
if (!lookup_status.ok()) {
LOG(ERROR) << lookup_status.message();
XLA_LOG_LINES(
ERROR,
"Ops registered: \n" +
dynamic_cast<OpRegistry*>(op_registry)->DebugString(true));
}
TF_CHECK_OK(lookup_status);
std::unordered_set<string> type_attrs;
for (const OpDef::AttrDef& attr_def : op_def->attr()) {
if (attr_def.type() == "type" || attr_def.type() == "list(type)") {
type_attrs.insert(attr_def.name());
}
}
for (const auto& constraint : op_registration->type_constraints) {
if (type_attrs.find(constraint.first) == type_attrs.end()) {
LOG(FATAL) << "Unknown type attribute " << constraint.first
<< " in XLA op registration for " << op_name;
}
}
for (auto& backend : registry.backends_) {
if (op_registration->has_device_allowlist &&
op_registration->device_allowlist.find(backend.first) ==
op_registration->device_allowlist.end()) {
continue;
}
if (!op_registration->has_device_allowlist &&
allowlisted_backend.find(backend.first) !=
allowlisted_backend.end()) {
continue;
}
std::unique_ptr<KernelDef> kdef(new KernelDef);
kdef->set_op(op_registration->name);
kdef->set_device_type(backend.first);
kdef->set_label(op_registration->label);
bool unsatisfiable_type_constraint = false;
for (const string& type_attr : type_attrs) {
KernelDef::AttrConstraint* attr_constraint = kdef->add_constraint();
attr_constraint->set_name(type_attr);
auto* allowed_values =
attr_constraint->mutable_allowed_values()->mutable_list();
const OpDef::AttrDef& op_def_attr = *FindAttr(type_attr, *op_def);
const auto* op_def_allowed_types =
op_def_attr.has_allowed_values()
? &op_def_attr.allowed_values().list().type()
: nullptr;
auto constraint_it =
op_registration->type_constraints.find(type_attr);
const std::set<DataType>* type_constraints =
constraint_it != op_registration->type_constraints.end()
? &constraint_it->second
: nullptr;
for (DataType dtype : backend.second.supported_types) {
if (op_def_allowed_types != nullptr &&
std::find(op_def_allowed_types->begin(),
op_def_allowed_types->end(),
dtype) == op_def_allowed_types->end()) {
continue;
}
if (type_constraints != nullptr &&
type_constraints->find(dtype) == type_constraints->end()) {
continue;
}
allowed_values->add_type(dtype);
}
if (op_registration->allow_resource_types) {
allowed_values->add_type(DT_RESOURCE);
}
if (op_registration->allow_variant_types) {
allowed_values->add_type(DT_VARIANT);
}
if (op_registration->allow_string_type) {
allowed_values->add_type(DT_STRING);
}
if (allowed_values->type().empty()) {
unsatisfiable_type_constraint = true;
break;
}
}
if (unsatisfiable_type_constraint) continue;
if (backend.second.op_filter != nullptr &&
!backend.second.op_filter(kdef.get())) {
continue;
}
VLOG(2) << "XLA op registration: device: " << backend.first
<< " op: " << op_name;
registry.kernel_registrars_.emplace_back(
new kernel_factory::OpKernelRegistrar(
new KernelDef(*kdef), "XlaJitOp", op_registration->factory));
backend.second.kernel_defs.push_back(std::move(kdef));
}
}
}
}
std::vector<const KernelDef*> XlaOpRegistry::DeviceKernels(
const string& compilation_device_name,
bool include_compilation_only_kernels) {
RegisterCompilationKernels();
std::vector<const KernelDef*> kernels;
XlaOpRegistry& registry = Instance();
std::string registered_backends =
absl::StrJoin(registry.BackendNames(), ", ");
mutex_lock lock(registry.mutex_);
auto it = registry.backends_.find(compilation_device_name);
CHECK(it != registry.backends_.end())
<< "Unknown backend " << compilation_device_name
<< "; Known backends are: " << registered_backends;
for (const std::unique_ptr<KernelDef>& k : it->second.kernel_defs) {
auto op_iter = registry.ops_.find(k->op());
CHECK(op_iter != registry.ops_.end() && !op_iter->second.empty());
if (include_compilation_only_kernels ||
!op_iter->second.front()->compilation_only) {
kernels.push_back(k.get());
}
}
return kernels;
}
std::vector<string> XlaOpRegistry::GetAllRegisteredOps() {
std::vector<string> ops;
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
ops.reserve(registry.ops_.size());
for (const auto& pair : registry.ops_) {
ops.push_back(pair.first);
}
std::sort(ops.begin(), ops.end());
return ops;
}
const std::unordered_set<std::string>*
XlaOpRegistry::CompileTimeConstantInputArgNames(const string& op) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
auto it = registry.ops_.find(op);
static auto empty_set = new std::unordered_set<std::string>;
if (it == registry.ops_.end() || it->second.empty()) {
return empty_set;
} else {
return &it->second.front()->compile_time_constant_inputs;
}
}
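// Returns the indices of inputs that must be compile-time constants, taken
// from the node's kXlaCompileTimeConstantInputsAttr if present and otherwise
// from the op's XLA registration; named inputs are expanded to index ranges.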
Status XlaOpRegistry::CompileTimeConstantInputs(
const NodeDef& node_def, const OpKernel* op_kernel, const OpDef* op_def,
std::vector<int>* result) {
result->clear();
DCHECK(op_def != nullptr || op_kernel != nullptr);
std::unordered_set<string> compile_time_constant_inputs_from_attr;
std::vector<string> compile_time_constant_inputs_vect_from_attr;
const std::unordered_set<string>* compile_time_constant_inputs;
if (TryGetNodeAttr(node_def, kXlaCompileTimeConstantInputsAttr,
&compile_time_constant_inputs_vect_from_attr)) {
absl::c_copy(compile_time_constant_inputs_vect_from_attr,
std::inserter(compile_time_constant_inputs_from_attr,
compile_time_constant_inputs_from_attr.end()));
compile_time_constant_inputs = &compile_time_constant_inputs_from_attr;
} else {
compile_time_constant_inputs =
CompileTimeConstantInputArgNames(node_def.op());
if (compile_time_constant_inputs->empty()) {
return absl::OkStatus();
}
}
VLOG(3) << "For operation "
<< (op_def != nullptr ? op_def->name() : op_kernel->name())
<< " required constants are: "
<< absl::StrJoin(*compile_time_constant_inputs, ", ");
for (const string& input : *compile_time_constant_inputs) {
if (op_def) {
NameRangeMap input_name_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(node_def, *op_def, &input_name_ranges, nullptr));
auto name_range = input_name_ranges.find(input);
if (name_range == input_name_ranges.end()) {
continue;
}
for (int i = name_range->second.first; i < name_range->second.second;
i++) {
result->push_back(i);
}
} else {
int start, stop;
TF_CHECK_OK(op_kernel->InputRange(input, &start, &stop));
for (int i = start; i < stop; ++i) {
result->push_back(i);
}
}
}
absl::c_sort(*result);
return absl::OkStatus();
}
bool XlaOpRegistry::IsMetadataOp(const string& op) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
auto it = registry.ops_.find(op);
if (it == registry.ops_.end() || it->second.empty()) {
return false;
}
return it->second.front()->is_metadata_op;
}
std::vector<string> XlaOpRegistry::BackendNames() {
std::vector<string> names;
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
names.reserve(registry.backends_.size());
for (const auto& backend_pair : registry.backends_) {
names.push_back(backend_pair.first);
}
return names;
}
bool XlaOpRegistry::IsBackendRegistered(const string& name) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
return registry.backends_.find(name) != registry.backends_.end();
}
XlaOpRegistry& XlaOpRegistry::Instance() {
static XlaOpRegistry* r = new XlaOpRegistry;
return *r;
}
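// Fluent builder behind the REGISTER_XLA_OP macro: each setter fills in one
// field of the pending OpRegistration and returns *this for chaining.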
XlaOpRegistrationBuilder::XlaOpRegistrationBuilder(absl::string_view name) {
registration_.reset(new XlaOpRegistry::OpRegistration);
registration_->name = string(name);
}
XlaOpRegistrationBuilder XlaOpRegistrationBuilder::Name(
absl::string_view name) {
XlaOpRegistrationBuilder registration(name);
return registration;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
absl::Span<const absl::string_view> devices) {
registration_->has_device_allowlist = true;
for (absl::string_view device : devices) {
registration_->device_allowlist.emplace(device);
}
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
absl::string_view device) {
registration_->has_device_allowlist = true;
registration_->device_allowlist.emplace(device);
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::CompilationOnly() {
registration_->compilation_only = true;
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowResourceTypes() {
registration_->allow_resource_types = true;
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowVariantTypes() {
registration_->allow_variant_types = true;
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowStringType() {
registration_->allow_string_type = true;
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
absl::string_view attr_name, DataType allowed) {
std::set<DataType>& types =
registration_->type_constraints[string(attr_name)];
types.insert(allowed);
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
absl::string_view attr_name, absl::Span<const DataType> allowed) {
std::set<DataType>& types =
registration_->type_constraints[string(attr_name)];
for (DataType t : allowed) {
types.insert(t);
}
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::CompileTimeConstantInput(
absl::string_view input_name) {
registration_->compile_time_constant_inputs.emplace(input_name);
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::IsMetadataOp() {
registration_->is_metadata_op = true;
return *this;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Label(std::string label) {
registration_->label = label;
return *this;
}
std::unique_ptr<XlaOpRegistry::OpRegistration> XlaOpRegistrationBuilder::Build(
XlaOpRegistry::Factory factory) {
registration_->factory = factory;
return std::move(registration_);
}
XlaOpRegistrar::XlaOpRegistrar(
std::unique_ptr<XlaOpRegistry::OpRegistration> registration) {
XlaOpRegistry& registry = XlaOpRegistry::Instance();
mutex_lock lock(registry.mutex_);
auto& existing_ops = registry.ops_[registration->name];
for (auto& existing : existing_ops) {
if (!XlaOpRegistry::IsCompatible(*existing, *registration)) {
LOG(FATAL)
<< "XLA op registration " << registration->name
<< " is incompatible with existing registration of the same name.";
}
}
existing_ops.emplace_back(std::move(registration));
}
XlaBackendRegistrar::XlaBackendRegistrar(
absl::string_view name, absl::Span<const DataType> types,
XlaOpRegistry::BackendOpFilter op_filter) {
XlaOpRegistry& registry = XlaOpRegistry::Instance();
registry.RegisterBackend(string(name), types, op_filter);
AddSymbolicExecutionDevice(name);
}
} | #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "absl/log/log.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class DummyCPUOp : public XlaOpKernel {
public:
explicit DummyCPUOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ctx->SetOutput(0, ctx->Input(0));
}
};
class DummyGenericOp : public XlaOpKernel {
public:
explicit DummyGenericOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ctx->SetOutput(0, ctx->Input(0));
}
};
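// One TF op with two XLA registrations: an int32 kernel allowlisted to the
// CPU JIT device and a float kernel with no device allowlist. The tests
// below check that the allowlisted kernel wins on XLA_CPU_JIT and the
// generic one is used everywhere else.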
REGISTER_OP("DummyDuplicateOp")
.Attr("T: {float, int32}")
.Input("input: int32")
.Output("output: int32")
.Doc(R"doc(
A dummy Op.
input: dummy input.
output: dummy output.
)doc");
REGISTER_XLA_OP(Name("DummyDuplicateOp")
.Device(DEVICE_CPU_XLA_JIT)
.TypeConstraint("T", DT_INT32),
DummyCPUOp);
REGISTER_XLA_OP(Name("DummyDuplicateOp").TypeConstraint("T", DT_FLOAT),
DummyGenericOp);
TEST(XlaOpRegistryTest, XlaOpRegistrationWithOverride) {
XlaOpRegistry::RegisterCompilationKernels();
auto registered_kernels = GetAllRegisteredKernels().kernel();
for (const auto& kernels : registered_kernels) {
if (kernels.op() == "DummyDuplicateOp") {
EXPECT_EQ(kernels.constraint_size(), 1);
EXPECT_EQ(kernels.constraint(0).name(), "T");
if (kernels.device_type() == "XLA_CPU_JIT") {
EXPECT_EQ(kernels.constraint(0).allowed_values().list().type(0),
DT_INT32);
} else {
EXPECT_EQ(kernels.constraint(0).allowed_values().list().type(0),
DT_FLOAT);
}
}
}
}
TEST(XlaOpRegistryTest, XlaOpRegistrationDeviceKernels) {
XlaOpRegistry::RegisterCompilationKernels();
auto registered_devices = XlaOpRegistry::BackendNames();
  for (const auto& registered_device : registered_devices) {
    auto kernels = XlaOpRegistry::DeviceKernels(registered_device, true);
    for (const auto& kernel : kernels) {
      if (kernel->op() == "DummyDuplicateOp") {
        if (registered_device == DEVICE_CPU_XLA_JIT) {
EXPECT_EQ(kernel->constraint(0).allowed_values().list().type(0),
DT_INT32);
} else {
EXPECT_EQ(kernel->constraint(0).allowed_values().list().type(0),
DT_FLOAT);
}
}
}
}
}
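// An op whose sole XLA type constraint (DT_STRING) intersects no backend's
// supported types, so RegisterCompilationKernels should drop it entirely.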
class DummyInfeasibleTypeConstraintOp : public XlaOpKernel {
public:
explicit DummyInfeasibleTypeConstraintOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
LOG(FATAL) << "unreachable";
}
};
REGISTER_OP("DummyInfeasibleTypeConstraintOp")
.Attr("T: {float, string}")
.Input("input: T")
.Output("output: T")
.Doc(R"doc(
A dummy Op.
input: dummy input.
output: dummy output.
)doc");
REGISTER_XLA_OP(
Name("DummyInfeasibleTypeConstraintOp").TypeConstraint("T", DT_STRING),
DummyInfeasibleTypeConstraintOp);
TEST(XlaOpRegistryTest, OpWithInfeasibleTypeConstraintIsNotRegistered) {
XlaOpRegistry::RegisterCompilationKernels();
auto registered_kernels = GetAllRegisteredKernels().kernel();
for (const auto& kernels : registered_kernels) {
EXPECT_NE(kernels.op(), "DummyInfeasibleTypeConstraintOp");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_op_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_op_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1600d3c-a287-4136-9fcf-cd1dff99ff27 | cpp | tensorflow/tensorflow | reduction_ops | tensorflow/compiler/tf2xla/kernels/reduction_ops.cc | tensorflow/core/kernels/reduction_ops_test.cc | #include "tensorflow/compiler/tf2xla/kernels/reduction_ops.h"
#include <cstdint>
#include <limits>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace {
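// XLA kernels for the TF reduction ops (Sum, Prod, Min, Max, Mean, All,
// Any). Each subclass provides the reduction's identity element
// (InitialValue) and its binary combiner (BuildReducer); Mean additionally
// overrides BuildFinalizer to divide by the reduced element count.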
class SumOp : public XlaReductionOp {
public:
explicit SumOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Add(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Sum").CompileTimeConstantInput("reduction_indices"),
SumOp);
class ProdOp : public XlaReductionOp {
public:
explicit ProdOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::One(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Mul(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Prod").CompileTimeConstantInput("reduction_indices"),
ProdOp);
class MinOp : public XlaReductionOp {
public:
explicit MinOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MaxValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Min(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Min").CompileTimeConstantInput("reduction_indices"),
MinOp);
class MaxOp : public XlaReductionOp {
public:
explicit MaxOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {
OP_REQUIRES_OK(ctx, PrimitiveTypeCheck(xla_reduction_type_));
}
static Status PrimitiveTypeCheck(xla::PrimitiveType xla_reduction_type) {
if (xla_reduction_type == xla::C64 || xla_reduction_type == xla::C128 ||
xla_reduction_type == xla::TUPLE ||
xla_reduction_type == xla::OPAQUE_TYPE) {
return errors::InvalidArgument(
"Unsupported PrimitiveType in MaxOp: '",
xla::PrimitiveType_Name(xla_reduction_type), "'");
} else {
return absl::OkStatus();
}
}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MinValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Max(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Max").CompileTimeConstantInput("reduction_indices"),
MaxOp);
class MeanOp : public XlaReductionOp {
public:
explicit MeanOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Add(scalar_lhs, scalar_rhs);
}
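  // Divides the reduced sum by the product of the reduced dimension sizes.
  // The divisor is accumulated as a product of dimension sizes; whenever
  // multiplying in the next size would overflow int32, the running product
  // is divided out of the result first and accumulation restarts.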
xla::XlaOp BuildFinalizer(
xla::XlaBuilder* builder, const xla::XlaOp& input,
const xla::XlaOp& reduce_output,
const std::vector<int64_t>& dimensions_to_reduce) override {
if (dimensions_to_reduce.empty()) {
return reduce_output;
}
xla::XlaOp result = reduce_output;
xla::Shape bounded_shape = builder->GetShape(input).value();
int64_t divisor_value = bounded_shape.dimensions(dimensions_to_reduce[0]);
auto divisor = xla::GetDimensionSize(input, dimensions_to_reduce[0]);
for (int i = 1; i < dimensions_to_reduce.size(); i++) {
int64_t size_value = bounded_shape.dimensions(dimensions_to_reduce[i]);
auto size = xla::GetDimensionSize(input, dimensions_to_reduce[i]);
if (size_value * divisor_value > std::numeric_limits<int32_t>::max()) {
result = result / xla::ConvertElementType(divisor, xla_reduction_type_);
divisor_value = size_value;
divisor = size;
} else {
divisor = xla::Mul(divisor, size);
divisor_value = size_value * divisor_value;
}
}
divisor = xla::ConvertElementType(divisor, xla_reduction_type_);
return XlaHelpers::ConvertElementType(result / divisor, input_type(0));
}
};
REGISTER_XLA_OP(Name("Mean").CompileTimeConstantInput("reduction_indices"),
MeanOp);
class AllOp : public XlaReductionOp {
public:
explicit AllOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::ConstantR0<bool>(builder, true);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::And(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("All").CompileTimeConstantInput("reduction_indices"),
AllOp);
class AnyOp : public XlaReductionOp {
public:
explicit AnyOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::ConstantR0<bool>(builder, false);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Or(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Any").CompileTimeConstantInput("reduction_indices"),
AnyOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
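// Graph builders for the reduction benchmarks: each reduces a randomly
// filled tensor along the axes held in a constant `axes` input, over 2-D
// [num_x, num_y] or 3-D [4, num_y, num_z] shapes.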
template <typename T>
static Graph* ToScalar(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<T>::value, TensorShape({num_x, num_y}));
data.flat<T>().setRandom();
Tensor axes(DT_INT32, TensorShape({2}));
axes.flat<int32>()(0) = 0;
axes.flat<int32>()(1) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ColReduce(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 0;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* RowReduce(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDYReduce(const string& reduce, int num_y, int num_z) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({4, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDXZReduce(const string& reduce, int num_y, int num_z) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({4, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({2}));
axes.flat<int32>()(0) = 0;
axes.flat<int32>()(1) = 2;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
template <typename T>
static void ReduceToScalar(::testing::benchmark::State& state,
const string& device, const string& reduce,
int num_x, int num_y) {
  test::Benchmark(device, ToScalar<T>(reduce, num_x, num_y), false)
      .Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(T));
}
static void DoRowReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
  test::Benchmark(device, RowReduce(reduce, num_x, num_y), false)
      .Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void DoColReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
  test::Benchmark(device, ColReduce(reduce, num_x, num_y), false)
      .Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DYReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
  test::Benchmark(device, ThreeDYReduce(reduce, num_x, num_y), false)
      .Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DXZReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
  test::Benchmark(device, ThreeDXZReduce(reduce, num_x, num_y), false)
      .Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
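// GPU benchmark entry points. Each reads its two shape parameters from the
// benchmark state; RangePair sweeps both dimensions multiplicatively between
// the given bounds.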
static void BM_Sum2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DToScalarGPUComplex(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<std::complex<float>>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPUComplex)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DToScalarGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<Eigen::half>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPUHalf)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DRowReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoRowReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DRowReduceGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DColumnReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoColReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DColumnReduceGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum3DYReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DYReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum3DYReduceGPU)->RangePair(64, 4096, 64, 4096);
static void BM_Sum3DXZReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DXZReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum3DXZReduceGPU)->RangePair(64, 4096, 64, 4096);
static void BM_Mean2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Mean", num_x, num_y);
}
BENCHMARK(BM_Mean2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_EuclideanNorm2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "EuclideanNorm", num_x, num_y);
}
BENCHMARK(BM_EuclideanNorm2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Max2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Max", num_x, num_y);
}
BENCHMARK(BM_Max2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Min2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Min", num_x, num_y);
}
BENCHMARK(BM_Min2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Min2DToScalarGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<Eigen::half>(state, "gpu", "Min", num_x, num_y);
}
BENCHMARK(BM_Min2DToScalarGPUHalf)->RangePair(2048, 8192, 2048, 8192);
static void BM_Bool2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<bool>(state, "gpu", "All", num_x, num_y);
}
BENCHMARK(BM_Bool2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Mean2DToScalarCPUBF16(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<bfloat16>(state, "cpu", "Mean", num_x, num_y);
}
BENCHMARK(BM_Mean2DToScalarCPUBF16)->RangePair(2048, 8192, 2048, 8192);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/reduction_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bdf5db5b-a0d4-4927-8d36-962b7e1520fa | cpp | tensorflow/tensorflow | while_op | tensorflow/compiler/tf2xla/kernels/while_op.cc | tensorflow/core/kernels/while_op_test.cc | #include "tensorflow/compiler/tf2xla/kernels/while_op.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2xla/kernels/if_while_utils.h"
#include "tensorflow/compiler/tf2xla/kernels/tensor_list_utils.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/tf2xla/xla_resource.h"
#include "xla/client/client.h"
#include "xla/hlo/builder/lib/tuple.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
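// Returns an error unless all DT_RESOURCE arguments of the loop body come
// after every non-resource argument; the kernel's input/output mapping
// below assumes this grouping.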
Status VerifyResourceArgsGroupedAtEnd(XlaOpKernelContext* ctx,
const NameAttrList& body_name_attr) {
const FunctionBody* body;
TF_RETURN_IF_ERROR(ctx->compiler()->FindFunctionBody(body_name_attr, &body));
bool has_seen_resource = false;
for (int i = 0; i < body->arg_types.size(); i++) {
DataType arg_type = body->arg_types[i];
if (has_seen_resource) {
if (arg_type != DT_RESOURCE) {
return errors::InvalidArgument(
"Expect input resources are grouped in the end of while body ",
body_name_attr.name(), ", but the ", i, "-th argument ",
body->arg_nodes[i]->name(), " is not a resource.");
}
} else {
if (arg_type == DT_RESOURCE) {
has_seen_resource = true;
}
}
}
return absl::OkStatus();
}
Status MakeXlaCompilerArgumentsFromInputs(
XlaOpKernelContext* ctx, std::vector<XlaCompiler::Argument>* args,
bool* has_uninitialized_vars, bool* has_tensor_arrays,
bool* has_uninitialized_tensor_lists) {
VLOG(2) << "Num inputs " << ctx->num_inputs();
args->resize(ctx->num_inputs());
*has_uninitialized_vars = false;
*has_tensor_arrays = false;
*has_uninitialized_tensor_lists = false;
for (int i = 0; i < ctx->num_inputs(); ++i) {
VLOG(2) << " Input " << i << " type: " << DataTypeString(ctx->input_type(i))
<< " shape: " << ctx->InputShape(i).DebugString();
XlaCompiler::Argument& arg = (*args)[i];
DataType type = ctx->input_type(i);
if (type == DT_RESOURCE) {
XlaResource* resource;
TF_RETURN_IF_ERROR(ctx->GetResourceInput(i, &resource));
XlaCompiler::PopulateArgumentFromResource(*resource, &arg);
if (arg.resource_kind == XlaResource::kTensorArray) {
*has_tensor_arrays = true;
}
if (!arg.initialized) {
*has_uninitialized_vars = true;
}
VLOG(2) << " resource " << resource->name()
<< " type: " << DataTypeString(arg.type)
<< " shape: " << arg.ShapeHumanString()
<< " initialized: " << arg.initialized;
} else {
arg.kind = XlaCompiler::Argument::kParameter;
arg.type = type;
TF_ASSIGN_OR_RETURN(arg.shape, ctx->builder()->GetShape(ctx->Input(i)));
if (IsTensorListInput(ctx, i)) {
TF_RETURN_IF_ERROR(
IsTensorListInitialized(ctx->Input(i), &arg.initialized));
if (!arg.initialized) {
*has_uninitialized_tensor_lists = true;
}
}
}
}
return absl::OkStatus();
}
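// Fills `loop_invariants` with, for each body return value, whether it is a
// pass-through of the corresponding argument (i.e. loop invariant).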
void GetLoopInvariants(XlaOpKernelContext* ctx,
const NameAttrList& body_name_attr,
std::vector<bool>* const loop_invariants) {
const FunctionBody* body;
OP_REQUIRES_OK(ctx, ctx->compiler()->FindFunctionBody(body_name_attr, &body));
const tensorflow::FunctionLibraryDefinition* fld =
ctx->compiler()->flib_runtime()->GetFunctionLibraryDefinition();
for (int i = 0; i < body->ret_nodes.size(); i++) {
absl::StatusOr<bool> is_loop_invariant = IsLoopInvariant(body, i, fld);
OP_REQUIRES_OK(ctx, is_loop_invariant.status());
(*loop_invariants)[i] = *is_loop_invariant;
VLOG(2) << "Arg " << i << " of " << body_name_attr.name() << " is "
<< ((*loop_invariants)[i] ? "" : "not ") << "loop invariant";
}
}
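// Converts loop-invariant, non-resource arguments that feed must-be-constant
// nodes in the condition or body into compile-time constants, recording the
// converted indices and their count.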
Status ConvertLoopInvariantsToConst(
XlaOpKernelContext* ctx, const NameAttrList& body_name_attr,
const NameAttrList& cond_name_attr,
std::vector<XlaCompiler::Argument>* args,
std::vector<bool>* compile_time_const_arg_indices,
int* num_compile_time_const_args, xla::Client* client) {
std::vector<bool> loop_invariants(ctx->num_inputs());
GetLoopInvariants(ctx, body_name_attr, &loop_invariants);
std::vector<bool> body_must_be_const_nodes;
const FunctionBody* body;
std::vector<bool> cond_must_be_const_nodes;
const FunctionBody* cond;
TF_RETURN_IF_ERROR(FindMustBeConstNodes(ctx, body_name_attr,
&body_must_be_const_nodes, &body));
TF_RETURN_IF_ERROR(FindMustBeConstNodes(ctx, cond_name_attr,
&cond_must_be_const_nodes, &cond));
auto should_convert_to_const = [&](int arg_idx) {
XlaCompiler::Argument& arg = (*args)[arg_idx];
return arg.kind != XlaCompiler::Argument::kResource &&
loop_invariants[arg_idx] &&
(body_must_be_const_nodes[body->arg_nodes[arg_idx]->id()] ||
cond_must_be_const_nodes[cond->arg_nodes[arg_idx]->id()]);
};
  absl::InlinedVector<int, 5> converted_constants =
      ConvertCompileTimeConstArgumentsToConst(ctx, args, 0,
                                              should_convert_to_const);
VLOG(2) << "Converted args to constants: {"
<< absl::StrJoin(converted_constants, ",") << "}";
for (int arg_idx : converted_constants) {
compile_time_const_arg_indices->at(arg_idx) = true;
(*num_compile_time_const_args)++;
}
return absl::OkStatus();
}
Status VerifyBodyInputAndOutputShapeMatch(
XlaOpKernelContext* ctx,
const std::vector<bool>& compile_time_const_arg_indices,
const XlaCompiler::CompilationResult& body, bool has_token_input_output) {
xla::Shape body_input_shape = body.xla_input_shapes[0];
xla::Shape body_output_shape;
body_output_shape.set_element_type(xla::TUPLE);
for (int i = 0; i < ctx->num_outputs(); i++) {
if (!compile_time_const_arg_indices[i]) {
*(body_output_shape.add_tuple_shapes()) =
body.xla_output_shape.tuple_shapes(i);
}
}
if (has_token_input_output) {
*(body_output_shape.add_tuple_shapes()) =
body.xla_output_shape.tuple_shapes(ctx->num_inputs());
}
if (!xla::ShapeUtil::Compatible(body_input_shape, body_output_shape)) {
return errors::InvalidArgument(
"Input and output shapes of loop body do not match: ",
xla::ShapeUtil::HumanString(body_input_shape), " vs. ",
xla::ShapeUtil::HumanString(body_output_shape));
}
return absl::OkStatus();
}
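// Wraps the compiled condition so it returns the bare predicate (element 0
// of the tuple the compiled function produces), as xla::While requires.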
absl::StatusOr<xla::XlaComputation> BuildWrappedCond(
XlaOpKernelContext* ctx, const XlaCompiler::CompilationResult& cond) {
xla::Shape cond_input_shape = cond.xla_input_shapes[0];
std::unique_ptr<xla::XlaBuilder> cb =
ctx->builder()->CreateSubBuilder("cond_wrapper");
auto inputs = xla::Parameter(cb.get(), 0, cond_input_shape, "inputs");
auto outputs = xla::Call(cb.get(), *cond.computation, {inputs});
xla::GetTupleElement(outputs, 0);
return cb->Build();
}
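// Wraps the compiled body so its output matches the loop-carried state:
// compile-time-constant outputs are dropped, and where the body yields a
// static size for a dimension that is dynamic on input, SetDimensionSize
// restores the dynamic annotation so input and output shapes stay identical.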
absl::StatusOr<xla::XlaComputation> BuildWrappedBody(
XlaOpKernelContext* ctx, const XlaCompiler::CompilationResult& body,
const std::vector<bool>& compile_time_const_arg_indices,
int num_compile_time_const_args, bool has_token_input_output) {
if (num_compile_time_const_args <= 0 &&
body.xla_input_shapes[0] == body.xla_output_shape) {
return xla::XlaComputation(body.computation->proto());
}
std::unique_ptr<xla::XlaBuilder> cb =
ctx->builder()->CreateSubBuilder("body_wrapper");
xla::Shape body_input_shape = body.xla_input_shapes[0];
auto inputs = xla::Parameter(cb.get(), 0, body_input_shape, "inputs");
auto outputs = xla::Call(cb.get(), *body.computation, {inputs});
std::vector<xla::XlaOp> non_compile_time_const_outputs;
int input_num = 0;
for (int i = 0; i < compile_time_const_arg_indices.size(); i++) {
if (!compile_time_const_arg_indices[i]) {
xla::XlaOp output = xla::GetTupleElement(outputs, i);
const xla::Shape& input_shape = body_input_shape.tuple_shapes(input_num);
const xla::Shape& output_shape = body.xla_output_shape.tuple_shapes(i);
TF_RET_CHECK(xla::ShapeUtil::Compatible(input_shape, output_shape));
if (input_shape != output_shape) {
TF_ASSIGN_OR_RETURN(xla::ShapeTree<xla::XlaOp> disassembled_tuple,
xla::DisassembleTuple(output));
disassembled_tuple.ForEachMutableElement(
[&](const xla::ShapeIndex& index, xla::XlaOp* element) {
const xla::Shape& output_subshape =
xla::ShapeUtil::GetSubshape(output_shape, index);
if (output_subshape.IsArray()) {
const xla::Shape& input_subshape =
xla::ShapeUtil::GetSubshape(input_shape, index);
for (int d = 0; d < output_subshape.rank(); ++d) {
if (input_subshape.is_dynamic_dimension(d) &&
!output_subshape.is_dynamic_dimension(d)) {
*element = xla::SetDimensionSize(
*element,
xla::ConstantR0(
cb.get(),
static_cast<int32_t>(output_shape.dimensions()[d])),
d);
}
}
}
});
output =
xla::AssembleTuple(output.builder(), std::move(disassembled_tuple));
}
non_compile_time_const_outputs.push_back(output);
++input_num;
}
}
if (has_token_input_output) {
non_compile_time_const_outputs.push_back(
xla::GetTupleElement(outputs, ctx->num_outputs()));
}
xla::Tuple(cb.get(), non_compile_time_const_outputs);
return cb->Build();
}
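// Emits the xla::While op and pads its result tuple back out to the kernel's
// full output arity, re-inserting the original inputs at compile-time
// constant positions.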
xla::XlaOp BuildWhile(XlaOpKernelContext* ctx,
const xla::XlaComputation& wrapped_cond,
const xla::XlaComputation& wrapped_body,
const xla::XlaOp initial_values,
const std::vector<int>& input_mapping,
const std::vector<bool>& compile_time_const_arg_indices,
int num_compile_time_const_args,
bool has_token_input_output) {
xla::XlaOp while_result =
xla::While(wrapped_cond, wrapped_body, initial_values);
std::vector<xla::XlaOp> padded_while_outputs(ctx->num_outputs());
int while_result_index = 0;
for (int i = 0; i < ctx->num_inputs(); i++) {
if (!compile_time_const_arg_indices[i]) {
padded_while_outputs[input_mapping[while_result_index]] =
xla::GetTupleElement(while_result, while_result_index);
while_result_index++;
} else {
padded_while_outputs[i] = ctx->Input(i);
}
}
if (has_token_input_output) {
padded_while_outputs.push_back(xla::GetTupleElement(
while_result, ctx->num_inputs() - num_compile_time_const_args));
}
return xla::Tuple(ctx->builder(), padded_while_outputs);
}
}
XlaWhileOp::XlaWhileOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
const NameAttrList* name_attr;
OP_REQUIRES_OK(ctx, ctx->GetAttr("cond", &name_attr));
cond_name_attr_ = *name_attr;
OP_REQUIRES_OK(ctx, ctx->GetAttr("body", &name_attr));
body_name_attr_ = *name_attr;
if (!ctx->GetAttr(kXlaTokenInputNodesAttrName, &token_input_nodes_).ok()) {
has_token_input_output_ = false;
} else {
has_token_input_output_ = !token_input_nodes_.empty();
}
if (ctx->HasAttr(kPropagateCompileTimeConsts)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kPropagateCompileTimeConsts,
&propagate_compile_time_consts_));
}
if (!ctx->GetAttr(kXlaOriginalOutsideCompilationNodeName,
&original_node_name_)
.ok())
original_node_name_ = name();
}
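// Compiles the loop body (twice if the first pass found uninitialized
// resources, TensorArrays, or TensorLists whose shapes only emerge from the
// body's outputs), compiles the condition, checks shape compatibility,
// builds the while op, and unpacks its results into outputs, resource
// updates, and the optional token.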
void XlaWhileOp::Compile(XlaOpKernelContext* ctx) {
VLOG(1) << "WhileOp::Compile";
OP_REQUIRES_OK(ctx, VerifyResourceArgsGroupedAtEnd(ctx, body_name_attr_));
std::vector<XlaCompiler::Argument> arguments;
bool has_uninitialized_vars;
bool has_tensor_arrays;
bool has_uninitialized_tensor_lists;
OP_REQUIRES_OK(ctx, MakeXlaCompilerArgumentsFromInputs(
ctx, &arguments, &has_uninitialized_vars,
&has_tensor_arrays, &has_uninitialized_tensor_lists));
xla::XlaBuilder* builder = ctx->builder();
XlaCompiler* compiler = ctx->compiler();
std::vector<bool> compile_time_const_arg_indices(ctx->num_inputs());
int num_compile_time_const_args = 0;
if (propagate_compile_time_consts_) {
OP_REQUIRES_OK(ctx, ConvertLoopInvariantsToConst(
ctx, body_name_attr_, cond_name_attr_, &arguments,
&compile_time_const_arg_indices,
&num_compile_time_const_args, compiler->client()));
}
VLOG(1) << "Compiling body";
XlaCompiler::CompileOptions body_options;
body_options.use_tuple_arg = true;
body_options.return_updated_values_for_all_resources = true;
body_options.is_entry_computation = false;
body_options.add_token_input_output = has_token_input_output_;
auto body = std::make_unique<XlaCompiler::CompilationResult>();
OP_REQUIRES_OK(ctx, compiler->CompileFunction(body_options, body_name_attr_,
arguments, body.get()));
OP_REQUIRES_OK(
ctx, ctx->xla_context()->RecordCollectiveInfoFromNestedCompilationResult(
*body.get()));
if (has_uninitialized_vars || has_tensor_arrays ||
has_uninitialized_tensor_lists) {
VLOG(2) << "Recompiling loop body: has_uninitialized_vars: "
<< has_uninitialized_vars
<< " has_tensor_arrays: " << has_tensor_arrays
<< " has_uninitialized_tensor_lists: "
<< has_uninitialized_tensor_lists;
for (int i = 0; i < body->resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& update = body->resource_updates[i];
XlaResource* resource;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(update.input_index, &resource));
XlaCompiler::Argument& arg = arguments[update.input_index];
if (!arg.initialized) {
VLOG(2) << "Update shape for argument " << update.input_index << " "
<< update.shape.DebugString();
arg.initialized = true;
arg.shape = update.shape;
OP_REQUIRES_OK(ctx,
resource->SetTypeAndShape(update.type, update.shape));
OP_REQUIRES_OK(ctx, resource->SetZeroValue(builder));
}
for (const string& grad_source : update.tensor_array_gradients_accessed) {
VLOG(4) << "TensorArray " << resource->name() << " accessed gradient "
<< grad_source;
XlaResource* gradient;
OP_REQUIRES_OK(ctx, resource->GetOrCreateTensorArrayGradient(
grad_source, builder, &gradient));
}
for (const auto& gradient : resource->tensor_array_gradients()) {
arg.tensor_array_gradients.insert(gradient.first);
}
}
xla::Shape body_output_shape = body->xla_output_shape;
OP_REQUIRES(ctx, body_output_shape.IsTuple(),
errors::FailedPrecondition(
"xla_output_shape of while body must be a tuple."));
for (int i = 0; i < arguments.size(); i++) {
XlaCompiler::Argument& arg = arguments[i];
if (arg.initialized || !IsTensorListInput(ctx, i)) {
continue;
}
arg.shape = body_output_shape.tuple_shapes(i);
arg.initialized = true;
}
VLOG(1) << "Recompiling body with corrected resource shapes";
*body = {};
OP_REQUIRES_OK(ctx, compiler->CompileFunction(body_options, body_name_attr_,
arguments, body.get()));
}
VLOG(1) << "Compiling condition";
XlaCompiler::CompileOptions cond_options;
cond_options.use_tuple_arg = true;
cond_options.is_entry_computation = false;
cond_options.add_token_input_output = has_token_input_output_;
XlaCompiler::CompilationResult cond;
OP_REQUIRES_OK(ctx, compiler->CompileFunction(cond_options, cond_name_attr_,
arguments, &cond));
OP_REQUIRES(ctx, body->xla_input_shapes.size() == 1,
errors::FailedPrecondition("Expected one input shape"));
xla::Shape body_input_shape = body->xla_input_shapes[0];
OP_REQUIRES(ctx, body_input_shape.IsTuple(),
errors::FailedPrecondition("Expected tuple shape"));
OP_REQUIRES(ctx, cond.xla_input_shapes.size() == 1,
errors::FailedPrecondition("Expected one input shape"));
xla::Shape cond_input_shape = cond.xla_input_shapes[0];
OP_REQUIRES(ctx, cond_input_shape.IsTuple(),
errors::FailedPrecondition("Expected tuple shape"));
VLOG(2) << "Body shape: " << xla::ShapeUtil::HumanString(body_input_shape)
<< " -> " << xla::ShapeUtil::HumanString(body->xla_output_shape);
VLOG(2) << "Cond shape: " << xla::ShapeUtil::HumanString(cond_input_shape)
<< " -> " << xla::ShapeUtil::HumanString(cond.xla_output_shape);
OP_REQUIRES(ctx,
xla::ShapeUtil::Compatible(body_input_shape, cond_input_shape),
errors::InvalidArgument(
"Input shapes of loop body and condition do not match: ",
xla::ShapeUtil::HumanString(body_input_shape), " vs. ",
xla::ShapeUtil::HumanString(cond_input_shape)));
OP_REQUIRES_OK(ctx, VerifyBodyInputAndOutputShapeMatch(
ctx, compile_time_const_arg_indices, *body.get(),
has_token_input_output_));
xla::Shape expected_cond_output_shape_without_side_effect =
xla::ShapeUtil::MakeTupleShape(
{xla::ShapeUtil::MakeShape(xla::PRED, {})});
xla::Shape expected_cond_output_shape_with_side_effect =
xla::ShapeUtil::MakeTupleShape({xla::ShapeUtil::MakeShape(xla::PRED, {}),
xla::ShapeUtil::MakeTokenShape()});
OP_REQUIRES(ctx,
xla::ShapeUtil::Compatible(
cond.xla_output_shape,
expected_cond_output_shape_without_side_effect) ||
xla::ShapeUtil::Compatible(
cond.xla_output_shape,
expected_cond_output_shape_with_side_effect),
errors::InvalidArgument(
"Output shape of loop condition should be (pred[]) or "
"(pred[], token[]), got: ",
xla::ShapeUtil::HumanString(cond.xla_output_shape)));
int num_inputs = body->input_mapping.size();
std::vector<xla::XlaOp> inputs(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
int input_num = body->input_mapping[i];
if (has_token_input_output_ && i == num_inputs - 1) {
std::vector<xla::XlaOp> token_inputs;
token_inputs.reserve(token_input_nodes_.size());
for (const string& node_name : token_input_nodes_) {
auto token_or = compiler->GetNodeToken(node_name);
OP_REQUIRES_OK(ctx, token_or.status());
token_inputs.push_back(token_or.value());
}
inputs[i] = xla::AfterAll(builder, token_inputs);
} else if (ctx->input_type(input_num) == DT_RESOURCE) {
XlaResource* resource;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(input_num, &resource));
OP_REQUIRES_OK(ctx, resource->Pack(&inputs[i], builder));
} else if (IsTensorListInput(ctx, input_num)) {
xla::XlaOp input = ctx->Input(input_num);
auto input_shape_or = ctx->builder()->GetShape(input);
OP_REQUIRES_OK(ctx, input_shape_or.status());
xla::Shape input_shape = input_shape_or.value();
const xla::Shape& list_shape = body_input_shape.tuple_shapes(i);
if (input_shape != list_shape) {
std::vector<std::vector<xla::XlaOp>> list_dynamic_dims;
for (int i = 0; i < list_shape.tuple_shapes_size() - 1; ++i) {
std::vector<xla::XlaOp> dynamic_dims;
const xla::Shape& shape = list_shape.tuple_shapes(i);
if (shape.is_dynamic_dimension(0)) {
xla::XlaOp leading_dim_size = xla::GetDimensionSize(input, 0);
dynamic_dims.push_back(leading_dim_size);
} else {
int32_t dim_size = shape.dimensions(0);
dynamic_dims.push_back(
xla::ConstantR0<int32>(ctx->builder(), dim_size));
}
for (int64_t dim = 1; dim < shape.dimensions_size(); ++dim) {
int32_t dim_size = shape.dimensions(dim);
if (shape.is_dynamic_dimension(dim)) {
dim_size = 0;
}
dynamic_dims.push_back(
xla::ConstantR0<int32_t>(ctx->builder(), dim_size));
}
list_dynamic_dims.push_back(dynamic_dims);
}
OP_REQUIRES_OK(
ctx, CreateZerosTensorListWithShape(ctx->builder(), list_shape,
list_dynamic_dims, &inputs[i]));
} else {
inputs[i] = ctx->Input(input_num);
}
} else {
inputs[i] = ctx->Input(input_num);
}
}
xla::XlaOp init = xla::Tuple(builder, inputs);
VLOG(1) << "Building while loop";
absl::StatusOr<xla::XlaComputation> cond_result = BuildWrappedCond(ctx, cond);
OP_REQUIRES_OK(ctx, cond_result.status());
xla::XlaComputation wrapped_cond = std::move(cond_result.value());
absl::StatusOr<xla::XlaComputation> body_result =
BuildWrappedBody(ctx, *body.get(), compile_time_const_arg_indices,
num_compile_time_const_args, has_token_input_output_);
OP_REQUIRES_OK(ctx, body_result.status());
xla::XlaComputation wrapped_body = std::move(body_result.value());
xla::XlaOp while_result =
BuildWhile(ctx, wrapped_cond, wrapped_body, init, body->input_mapping,
compile_time_const_arg_indices, num_compile_time_const_args,
has_token_input_output_);
int resource_index = 0;
for (int i = 0; i < ctx->num_outputs(); ++i) {
if (ctx->input_type(i) != DT_RESOURCE) {
if (IsTensorListInput(ctx, i)) {
ctx->SetTensorListOutput(i, xla::GetTupleElement(while_result, i));
} else {
ctx->SetOutput(i, xla::GetTupleElement(while_result, i));
}
++resource_index;
} else {
break;
}
}
if (has_token_input_output_) {
xla::XlaOp token_output =
xla::GetTupleElement(while_result, ctx->num_outputs());
auto shape_or = builder->GetShape(token_output);
OP_REQUIRES_OK(ctx, shape_or.status());
OP_REQUIRES(ctx, shape_or.value().IsToken(),
errors::FailedPrecondition(
"Token output is not token type: ",
xla::ShapeUtil::HumanString(shape_or.value())));
OP_REQUIRES_OK(ctx,
compiler->SetNodeToken(original_node_name_, token_output));
}
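  // Write the loop body's resource variable updates back into the resources.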
for (int i = 0; i < body->resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& update = body->resource_updates[i];
XlaResource* resource;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(update.input_index, &resource));
if (update.modified) {
int pos = resource_index + i;
OP_REQUIRES_OK(ctx,
resource->SetFromPack(
arguments[update.input_index].tensor_array_gradients,
xla::GetTupleElement(while_result, pos), builder));
}
VLOG(2) << "Loop-carried variable: pos: " << update.input_index
<< " name: " << resource->name() << " modified: " << update.modified
<< " type: " << DataTypeString(update.type)
<< " shape: " << update.shape.DebugString();
ctx->op_kernel_context()->set_output(
update.input_index,
ctx->op_kernel_context()->input(update.input_index));
}
VLOG(1) << "Done building while loop";
}
REGISTER_XLA_OP(Name("While").AllowResourceTypes().AllowVariantTypes(),
XlaWhileOp);
REGISTER_XLA_OP(Name("StatelessWhile").AllowResourceTypes().AllowVariantTypes(),
XlaWhileOp);
REGISTER_XLA_OP(Name("XlaWhile").AllowResourceTypes().AllowVariantTypes(),
XlaWhileOp);
} | #include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
namespace tensorflow {
namespace {
class WhileOpTest : public OpsTestBase {
protected:
WhileOpTest() {}
void SetUp() override {
stream_executor::test_util::PopulateDefaultPlatform(&platform_,
&platform_fns_);
stream_executor::test_util::PopulateDefaultDeviceFns(&device_fns_);
stream_executor::test_util::PopulateDefaultStreamExecutor(&se_);
stream_executor::test_util::PopulateDefaultTimerFns(&timer_fns_);
}
void TearDown() override {}
SP_Platform platform_;
SP_PlatformFns platform_fns_;
SP_DeviceFns device_fns_;
SP_StreamExecutor se_;
SP_TimerFns timer_fns_;
};
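// Loop condition x <= N, built with _HostCast ops so the test exercises the
// pluggable device's device-to-host copy path.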
FunctionDef LessThanOrEqualToNWithCast(int64_t N) {
typedef FunctionDefHelper FDH;
const Tensor kN = test::AsScalar<int64_t>(N);
return FDH::Define(
"LessThanOrEqualToNWithCast",
{"x: T"},
{"z: bool"},
{"T: {float, double, int32, int64}"},
{
{{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT64}}},
{{"y"}, "_HostCast", {"N"}, {{"SrcT", DT_INT64}, {"DstT", DT_INT32}}},
{{"x_cst"}, "_HostCast", {"x"}, {{"SrcT", "$T"}, {"DstT", DT_INT32}}},
{{"z"}, "LessEqual", {"x_cst", "y"}, {{"T", DT_INT32}}},
});
}
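// Loop body x * 2, again routed through _HostCast ops.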
FunctionDef XTimesTwoWithCast() {
typedef FunctionDefHelper FDH;
const Tensor kTwo = test::AsScalar<int64_t>(2);
return FDH::Define(
"XTimesTwoWithCast",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"two_cst"},
"_HostCast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT32}}},
{{"x_cst"}, "_HostCast", {"x"}, {{"SrcT", "$T"}, {"DstT", DT_INT32}}},
{{"y_cast"}, "Mul", {"x_cst", "two_cst"}, {{"T", DT_INT32}}},
{{"y"},
"_HostCast",
{"y_cast"},
{{"SrcT", DT_INT32}, {"DstT", "$T"}}},
});
}
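// Runs While(cond: x <= 8, body: x *= 2) from 1.0 on a fake pluggable device;
// 1 -> 2 -> 4 -> 8 -> 16, so the expected output is 16.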
TEST_F(WhileOpTest, WhileOpCPUBuildWithPluggableDevice) {
const std::string platform_name = "MY_TEST";
const std::string platform_type = "FAKE";
platform_.name = platform_name.c_str();
platform_.type = platform_type.c_str();
static bool memcpy_d2h_called = false;
se_.memcpy_dtoh = [](const SP_Device* device, SP_Stream stream,
void* host_dst, const SP_DeviceMemoryBase* device_src,
uint64_t size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
memcpy_d2h_called = true;
std::memcpy(host_dst, device_src->opaque, size);
};
se_.memcpy_htod = [](const SP_Device* const device, SP_Stream stream,
SP_DeviceMemoryBase* const device_dst,
const void* host_src, uint64_t size,
TF_Status* const status) {
TF_SetStatus(status, TF_OK, "");
std::memcpy(device_dst->opaque, host_src, size);
};
se_.host_memory_allocate = [](const SP_Device* const device, uint64_t size) {
#if EIGEN_MAX_ALIGN_BYTES == 0
return malloc(size);
#else
return tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES);
#endif
};
se_.host_memory_deallocate = [](const SP_Device* const device, void* mem) {
free(mem);
};
se_.allocate = [](const SP_Device* const device, uint64_t size,
int64_t memory_space, SP_DeviceMemoryBase* const mem) {
mem->struct_size = SP_DEVICE_MEMORY_BASE_STRUCT_SIZE;
#if EIGEN_MAX_ALIGN_BYTES == 0
mem->opaque = malloc(size);
#else
mem->opaque = tensorflow::port::AlignedMalloc(size, EIGEN_MAX_ALIGN_BYTES);
#endif
mem->size = size;
};
se_.deallocate = [](const SP_Device* const device,
SP_DeviceMemoryBase* const mem) {
free(mem->opaque);
mem->opaque = nullptr;
mem->size = 0;
};
static SE_EventStatus event_status = SE_EVENT_COMPLETE;
se_.create_event = [](const SP_Device* const device, SP_Event* event,
TF_Status* const status) -> void {
*event = new SP_Event_st(666);
};
se_.destroy_event = [](const SP_Device* const device,
SP_Event event) -> void { delete event; };
se_.get_event_status = [](const SP_Device* const device,
SP_Event event) -> SE_EventStatus {
EXPECT_EQ(event->event_id, 666);
return event_status;
};
std::unique_ptr<stream_executor::CPlatform> cplatform(
new stream_executor::CPlatform(
std::move(platform_), stream_executor::test_util::DestroyPlatform,
std::move(platform_fns_),
stream_executor::test_util::DestroyPlatformFns,
std::move(device_fns_), std::move(se_), std::move(timer_fns_)));
TF_CHECK_OK(
stream_executor::PlatformManager::RegisterPlatform(std::move(cplatform)));
DeviceFactory::Register(
platform_type, new PluggableDeviceFactory(platform_type, platform_name),
220, true);
std::unique_ptr<Device> plug_device(
DeviceFactory::NewDevice(platform_type, {}, "/job:a/replica:0"));
OpsTestBase::SetDevice(platform_type.c_str(), std::move(plug_device));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDef x_times_two = XTimesTwoWithCast();
FunctionDef less_than_or_eq = LessThanOrEqualToNWithCast(8);
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = x_times_two;
*f_lib_proto.add_function() = less_than_or_eq;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToNWithCast");
(*cond_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT);
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwoWithCast");
(*body_func.mutable_func()->mutable_attr())["T"].set_type(DT_FLOAT);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* node;
TF_EXPECT_OK(NodeBuilder("while_test", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_FLOAT})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Finalize(root.graph(), &node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(node)), Output(node));
TF_ASSERT_OK(root.DoShapeInference(node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
ClientSession session(root);
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1.f));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(c.node())}, &out_tensors));
ASSERT_EQ(memcpy_d2h_called, true);
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<float>()(), 16.f);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/while_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/while_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4d8a9bc1-5e56-4b9a-aa95-836d48aa551c | cpp | tensorflow/tensorflow | segment_reduction_ops | tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc | tensorflow/core/kernels/segment_reduction_ops_test.cc | #include <vector>
#include "tensorflow/compiler/tf2xla/lib/scatter.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
class SegmentReduce : public XlaOpKernel {
public:
explicit SegmentReduce(OpKernelConstruction* ctx, bool indices_are_sorted)
: XlaOpKernel(ctx), indices_are_sorted_(indices_are_sorted) {
DataType dtype;
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype));
OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(dtype, &type_));
}
virtual xla::XlaOp InitialValue(xla::XlaBuilder* builder) = 0;
virtual xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) = 0;
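  // Lowers a segment reduction to a scatter: broadcast the reduction's
  // identity into a [num_segments, ...] buffer, then scatter-combine the data
  // into it.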
void Compile(XlaOpKernelContext* ctx) override {
auto data = ctx->Input(0);
TensorShape data_shape = ctx->InputShape(0);
auto indices = ctx->Input(1);
TensorShape indices_shape = ctx->InputShape(1);
int64_t num_segments;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsIntScalar(
2, &num_segments, xla::ValueInferenceMode::kUpperBound));
OP_REQUIRES(ctx, data_shape.dims() >= indices_shape.dims(),
errors::InvalidArgument(type_string(),
" requires that indices' rank be"
" less than or equal to data's rank."));
for (int d = 0; d < indices_shape.dims(); ++d) {
OP_REQUIRES(
ctx, (data_shape.dim_size(d) == indices_shape.dim_size(d)),
errors::InvalidArgument(type_string(),
" requires indices shape to be prefix"
" of data_shape, but dimension ",
d, " differs ", data_shape.dim_size(d),
" vs. ", indices_shape.dim_size(d)));
}
xla::XlaBuilder* builder = ctx->builder();
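    // The output replaces the indexed prefix of data's shape with a single
    // num_segments dimension.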
TensorShape buffer_shape = data_shape;
buffer_shape.RemoveDimRange(0, indices_shape.dims());
buffer_shape.InsertDim(0, num_segments);
auto buffer =
xla::Broadcast(InitialValue(builder), buffer_shape.dim_sizes());
std::vector<xla::XlaOp> buffer_dims;
std::vector<bool> buffer_dims_are_dynamic;
bool num_segments_is_dynamic;
OP_REQUIRES_OK(
ctx, ctx->ResolveInputDynamismIntoPred(2, &num_segments_is_dynamic));
buffer_dims.insert(buffer_dims.begin(), ctx->Input(2));
buffer_dims_are_dynamic.insert(buffer_dims_are_dynamic.begin(),
num_segments_is_dynamic);
for (int64_t i = indices_shape.dims(); i < data_shape.dims(); ++i) {
buffer_dims.push_back(xla::GetDimensionSize(data, i));
buffer_dims_are_dynamic.push_back(
ctx->InputXlaShape(0)->is_dynamic_dimension(i));
}
for (int64_t i = 0; i < buffer_dims.size(); ++i) {
if (buffer_dims_are_dynamic[i]) {
buffer = xla::SetDimensionSize(buffer, buffer_dims[i], i);
}
}
auto combiner = [this](xla::XlaOp a, xla::XlaOp b,
xla::XlaBuilder* builder) { return Combine(a, b); };
auto result = XlaScatter(buffer, data, indices,
false, indices_are_sorted_,
combiner, builder);
OP_REQUIRES_OK(ctx, result.status());
ctx->SetOutput(0, result.value());
}
protected:
xla::PrimitiveType type_;
bool indices_are_sorted_;
};
template <bool indices_are_sorted>
class SegmentSum : public SegmentReduce {
public:
explicit SegmentSum(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override { return a + b; };
};
REGISTER_XLA_OP(Name("SegmentSumV2").CompileTimeConstantInput("num_segments"),
SegmentSum<true>);
REGISTER_XLA_OP(
Name("UnsortedSegmentSum").CompileTimeConstantInput("num_segments"),
SegmentSum<false>);
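// For reference: UnsortedSegmentSum computes output[s] = sum of data[i] over
// all i with indices[i] == s; empty segments keep the initial value (zero).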
template <bool indices_are_sorted>
class SegmentProd : public SegmentReduce {
public:
explicit SegmentProd(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::One(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override { return a * b; };
};
REGISTER_XLA_OP(
Name("UnsortedSegmentProd").CompileTimeConstantInput("num_segments"),
SegmentProd<false>);
REGISTER_XLA_OP(Name("SegmentProdV2").CompileTimeConstantInput("num_segments"),
SegmentProd<true>);
template <bool indices_are_sorted>
class SegmentMin : public SegmentReduce {
public:
explicit SegmentMin(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MaxFiniteValue(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override {
return xla::Min(a, b);
};
};
REGISTER_XLA_OP(
Name("UnsortedSegmentMin").CompileTimeConstantInput("num_segments"),
SegmentMin<false>);
REGISTER_XLA_OP(Name("SegmentMinV2").CompileTimeConstantInput("num_segments"),
SegmentMin<true>);
template <bool indices_are_sorted>
class SegmentMax : public SegmentReduce {
public:
explicit SegmentMax(OpKernelConstruction* ctx)
: SegmentReduce(ctx, indices_are_sorted) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MinFiniteValue(builder, type_);
};
xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override {
return xla::Max(a, b);
};
};
REGISTER_XLA_OP(
Name("UnsortedSegmentMax").CompileTimeConstantInput("num_segments"),
SegmentMax<false>);
REGISTER_XLA_OP(Name("SegmentMaxV2").CompileTimeConstantInput("num_segments"),
SegmentMax<true>);
}
} | #include <functional>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
static void BM_UnsortedSegmentReduction(::testing::benchmark::State& state,
const string& reduction, int num_rows,
int num_cols, int segment_size) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
absl::InlinedVector<TensorValue, 4> reduction_inputs;
TensorShape shape1({num_rows, num_cols});
Tensor input(DT_FLOAT, shape1);
reduction_inputs.push_back({nullptr, &input});
TensorShape shape2({num_rows});
Tensor indices(DT_INT32, shape2);
test::FillFn<int>(&indices,
[&segment_size](int i) -> int { return i % segment_size; });
reduction_inputs.push_back({nullptr, &indices});
Tensor num_segments(DT_INT32, TensorShape({}));
num_segments.scalar<int>()() = segment_size;
reduction_inputs.push_back({nullptr, &num_segments});
NodeDef reduction_node_def;
TF_CHECK_OK(NodeDefBuilder(reduction, reduction)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Finalize(&reduction_node_def));
Status status;
std::unique_ptr<OpKernel> reduction_op(
CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(),
                     reduction_node_def, TF_GRAPH_DEF_VERSION, &status));
  TF_CHECK_OK(status);
OpKernelContext::Params params;
params.device = device.get();
params.frame_iter = FrameAndIter(0, 0);
params.inputs = reduction_inputs;
params.op_kernel = reduction_op.get();
std::vector<AllocatorAttributes> attrs;
test::SetOutputAttrs(¶ms, &attrs);
std::unique_ptr<OpKernelContext> reduction_context(
new OpKernelContext(¶ms));
reduction_op->Compute(reduction_context.get());
TF_CHECK_OK(reduction_context->status());
for (auto s : state) {
delete reduction_context->release_output(0).tensor;
reduction_op->Compute(reduction_context.get());
}
int64_t bytes_per_iter =
static_cast<int64_t>(num_rows * num_cols * sizeof(float));
state.SetBytesProcessed(bytes_per_iter * state.iterations());
}
#define BM_UnsortedReduce(O, R, C, S) \
static void BM_##O##_##R##_##C##_##S(::testing::benchmark::State& state) { \
BM_UnsortedSegmentReduction(state, #O, R, C, S); \
} \
BENCHMARK(BM_##O##_##R##_##C##_##S);
#define BM_UnsortedReduce_Arg(R, C, S) \
BM_UnsortedReduce(UnsortedSegmentSum, R, C, S);
BM_UnsortedReduce_Arg(4096, 1024, 1);
BM_UnsortedReduce_Arg(4096, 1024, 128);
template <typename Index>
static void BM_SegmentReduction(::testing::benchmark::State& state,
const string& reduction, Index num_rows,
Index num_cols, Index segment_size) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
absl::InlinedVector<TensorValue, 4> reduction_inputs;
TensorShape shape1({num_rows, num_cols});
Tensor input1(DT_FLOAT, shape1);
reduction_inputs.push_back({nullptr, &input1});
TensorShape shape2({num_rows});
Tensor input2(DataTypeToEnum<Index>::v(), shape2);
test::FillFn<Index>(&input2, [&num_rows, &segment_size](Index i) -> Index {
return std::min(i / segment_size, num_rows - 1);
});
reduction_inputs.push_back({nullptr, &input2});
NodeDef reduction_node_def;
TF_CHECK_OK(NodeDefBuilder(reduction, reduction)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DataTypeToEnum<Index>::v()))
.Finalize(&reduction_node_def));
Status status;
std::unique_ptr<OpKernel> reduction_op(
CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(),
                     reduction_node_def, TF_GRAPH_DEF_VERSION, &status));
  TF_CHECK_OK(status);
OpKernelContext::Params params;
params.device = device.get();
params.frame_iter = FrameAndIter(0, 0);
params.inputs = reduction_inputs;
params.op_kernel = reduction_op.get();
std::vector<AllocatorAttributes> attrs;
test::SetOutputAttrs(¶ms, &attrs);
std::unique_ptr<OpKernelContext> reduction_context(
new OpKernelContext(¶ms));
reduction_op->Compute(reduction_context.get());
TF_CHECK_OK(reduction_context->status());
for (auto s : state) {
delete reduction_context->release_output(0).tensor;
reduction_op->Compute(reduction_context.get());
}
int64_t bytes_per_iter =
static_cast<int64_t>(num_rows * num_cols * sizeof(float));
state.SetBytesProcessed(bytes_per_iter * state.iterations());
}
#define BM_Reduce(O, R, C, S) \
static void BM_Reduce_##O##_##R##_##C##_##S##_int32( \
::testing::benchmark::State & state) { \
BM_SegmentReduction<int32>(state, #O, R, C, S); \
} \
static void BM_Reduce_##O##_##R##_##C##_##S##_int64( \
::testing::benchmark::State & state) { \
BM_SegmentReduction<int64_t>(state, #O, R, C, S); \
} \
BENCHMARK(BM_Reduce_##O##_##R##_##C##_##S##_int32); \
BENCHMARK(BM_Reduce_##O##_##R##_##C##_##S##_int64);
#define BM_Reduce_Arg(R, C, S) \
BM_Reduce(SegmentSum, R, C, S); \
BM_Reduce(SegmentMean, R, C, S);
BM_Reduce_Arg(64, 32, 1);
BM_Reduce_Arg(4096, 128, 1);
BM_Reduce_Arg(16, 8, 2);
BM_Reduce_Arg(64, 32, 2);
BM_Reduce_Arg(4096, 32, 2);
BM_Reduce_Arg(4096, 128, 2);
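// Benchmarks SparseSegmentMeanGrad; `uniqueness` sets the fraction of distinct
// gradient indices (1.0 = all unique, 0.01 = heavy duplication).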
template <DataType T>
static void SparseSegmentMeanGradHelper(::testing::benchmark::State& state,
float uniqueness, int size) {
typedef typename EnumToDataType<T>::Type DT;
Graph* g = new Graph(OpRegistry::Global());
CHECK_LE(uniqueness, 1.0);
CHECK_GT(uniqueness, 0.0);
const int kNumIndices = size;
Tensor indices(DT_INT32, TensorShape({kNumIndices}));
auto indices_flat = indices.flat<int32>();
Tensor segments(DT_INT32, TensorShape({kNumIndices}));
auto segments_flat = segments.flat<int32>();
int kUniqueIndices = uniqueness * kNumIndices;
Tensor output_dim0(DT_INT32, TensorShape({}));
output_dim0.scalar<int32>()() = kUniqueIndices;
for (int i = 0; i < kNumIndices; ++i) {
indices_flat(i) = (i * 31) % kUniqueIndices;
segments_flat(i) = i * .8;
}
const int kDim1 = segments_flat(kNumIndices - 1) + 1;
const int kDim2 = 128;
Tensor input(T, TensorShape({kDim1, kDim2}));
input.flat<DT>().setRandom();
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "SparseSegmentMeanGrad")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, indices))
.Input(test::graph::Constant(g, segments))
.Input(test::graph::Constant(g, output_dim0))
.Attr("T", T)
.Finalize(g, &node));
test::Benchmark("cpu", g, false).Run(state);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(kDim1 * kDim2) * sizeof(float));
}
static void BM_SparseSegmentMeanGrad_Low_FP32(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_FLOAT>(state, 1.0, size);
}
static void BM_SparseSegmentMeanGrad_High_FP32(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_FLOAT>(state, 0.01, size);
}
static void BM_SparseSegmentMeanGrad_Low_BF16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_BFLOAT16>(state, 1.0, size);
}
static void BM_SparseSegmentMeanGrad_High_BF16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_BFLOAT16>(state, 0.01, size);
}
static void BM_SparseSegmentMeanGrad_Low_FP16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_HALF>(state, 1.0, size);
}
static void BM_SparseSegmentMeanGrad_High_FP16(
::testing::benchmark::State& state) {
const int size = state.range(0);
return SparseSegmentMeanGradHelper<DT_HALF>(state, 0.01, size);
}
BENCHMARK(BM_SparseSegmentMeanGrad_Low_FP32)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_High_FP32)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_Low_BF16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_High_BF16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_Low_FP16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
BENCHMARK(BM_SparseSegmentMeanGrad_High_FP16)
->UseRealTime()
->Arg(1000)
->Arg(100000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/segment_reduction_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
97f5f733-75dc-4de4-9034-7cc1e1a54823 | cpp | tensorflow/tensorflow | matmul_op | tensorflow/compiler/tf2xla/kernels/matmul_op.cc | tensorflow/core/kernels/matmul_op_test.cc | #include <array>
#include <optional>
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
constexpr std::array<DataType, 10> kMatmulTypes = {
{DT_HALF, DT_BFLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128,
DT_INT32, DT_INT64, DT_INT16, DT_INT8}};
class MatMulOp : public XlaOpKernel {
public:
explicit MatMulOp(OpKernelConstruction* ctx, bool is_sparse = false)
: XlaOpKernel(ctx),
is_sparse_(is_sparse),
grad_a_(false),
grad_b_(false) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_a", &transpose_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_b", &transpose_b_));
if (!is_sparse) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad_a", &grad_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad_b", &grad_b_));
}
if (is_sparse) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("Ta", &a_type_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Tb", &b_type_));
bool dummy_is_sparse;
OP_REQUIRES_OK(ctx, ctx->GetAttr("a_is_sparse", &dummy_is_sparse));
OP_REQUIRES_OK(ctx, ctx->GetAttr("b_is_sparse", &dummy_is_sparse));
}
}
~MatMulOp() override = default;
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape a_shape = ctx->InputShape(0);
const TensorShape b_shape = ctx->InputShape(1);
OP_REQUIRES(ctx, a_shape.dims() == b_shape.dims(),
errors::InvalidArgument("In[0] and In[1] has different ndims: ",
a_shape.DebugString(), " vs. ",
b_shape.DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsMatrix(a_shape),
errors::InvalidArgument("In[0] is not a matrix. Instead it has shape ",
a_shape.DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsMatrix(b_shape),
errors::InvalidArgument("In[1] is not a matrix. Instead it has shape ",
b_shape.DebugString()));
int first_index = transpose_a_ ? 0 : 1;
int second_index = transpose_b_ ? 1 : 0;
OP_REQUIRES(ctx,
a_shape.dim_size(first_index) == b_shape.dim_size(second_index),
errors::InvalidArgument(
"Matrix size-incompatible: In[0]: ", a_shape.DebugString(),
", In[1]: ", b_shape.DebugString()));
xla::XlaOp a = ctx->Input(0);
xla::XlaOp b = ctx->Input(1);
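    // SparseMatMul is compiled as a dense matmul; upcast bfloat16 operands to
    // float first.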
if (is_sparse_) {
if (a_type_ == DT_BFLOAT16) {
a = xla::ConvertElementType(a, xla::F32);
}
if (b_type_ == DT_BFLOAT16) {
b = xla::ConvertElementType(b, xla::F32);
}
}
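    // Honor the global TensorFloat-32 setting: when TF32 execution is
    // disabled, request the highest-precision dot implementation.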
xla::PrecisionConfig::Precision precision =
tsl::tensor_float_32_execution_enabled()
? xla::PrecisionConfig::DEFAULT
: xla::PrecisionConfig::HIGHEST;
ctx->SetOutput(0, xla::BatchDot(a, transpose_a_, b, transpose_b_, precision,
std::nullopt, grad_a_, grad_b_));
}
private:
bool is_sparse_;
bool transpose_a_;
bool transpose_b_;
bool grad_a_;
bool grad_b_;
DataType a_type_;
DataType b_type_;
};
REGISTER_XLA_OP(Name("MatMul").TypeConstraint("T", kMatmulTypes), MatMulOp);
class SparseMatMulOp : public MatMulOp {
public:
explicit SparseMatMulOp(OpKernelConstruction* ctx) : MatMulOp(ctx, true) {}
~SparseMatMulOp() override = default;
};
REGISTER_XLA_OP(Name("SparseMatMul"), SparseMatMulOp);
}
} | #include <functional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/platform/status.h"
#if TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace tensorflow {
namespace {
template <typename T>
class FusedMatMulOpTest : public OpsTestBase {
protected:
static constexpr auto kTValueType = DataTypeToEnum<T>::value;
using BiasAddGraphRunner =
std::function<bool(const Tensor& lhs_data, const Tensor& rhs_data,
const Tensor& bias_data, Tensor* out)>;
void RunAndFetch(const tensorflow::Scope& root, const string& fetch,
Tensor* output, bool allow_gpu_device,
const NodeDef* fetch_node = nullptr,
absl::Status* last_status = nullptr) {
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
if (fetch_node) {
*graph.add_node() = *fetch_node;
}
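    // Disable Grappler rewrites so the graph under test runs exactly as
    // constructed (in particular, remapping must not fuse or unfuse ops).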
tensorflow::SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
tensorflow::RewriterConfig* cfg =
session_options.config.mutable_graph_options()
->mutable_rewrite_options();
cfg->set_constant_folding(tensorflow::RewriterConfig::OFF);
cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF);
cfg->set_remapping(tensorflow::RewriterConfig::OFF);
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
std::vector<DeviceAttributes> available_devices;
TF_ASSERT_OK(session->ListDevices(&available_devices))
<< "Failed to get available session devices";
const bool has_gpu_device =
absl::c_any_of(available_devices, [](const DeviceAttributes& device) {
return device.device_type() == DEVICE_GPU;
});
const bool place_all_on_gpu = allow_gpu_device && has_gpu_device;
const string device = place_all_on_gpu ? "/device:GPU:0" : "/device:CPU:0";
for (NodeDef& mutable_node : *graph.mutable_node()) {
mutable_node.set_device(device);
}
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
auto res = session->Run({}, {fetch}, {}, &unfused_tensors);
if (last_status != nullptr) {
*last_status = res;
} else {
TF_ASSERT_OK(res);
}
if (!unfused_tensors.empty()) {
*output = unfused_tensors[0];
}
}
void RunMatMulWithBias(const Tensor& lhs_data, const Tensor& rhs_data,
const Tensor& bias_data, bool transpose_a,
bool transpose_b, Tensor* output,
bool allow_gpu_device = false) {
Scope root = tensorflow::Scope::NewRootScope();
ops::MatMul matmul = ops::MatMul(
root.WithOpName("matmul"),
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)),
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data)),
ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), matmul,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
RunAndFetch(root, "with_bias", output, allow_gpu_device);
}
void RunMatMulWithBiasAndActivation(
const Tensor& lhs_data, const Tensor& rhs_data, const Tensor& bias_data,
bool transpose_a, bool transpose_b, const string& activation_type,
Tensor* output, bool allow_gpu_device = false) {
Scope root = tensorflow::Scope::NewRootScope();
ops::MatMul matmul = ops::MatMul(
root.WithOpName("matmul"),
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)),
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data)),
ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), matmul,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
if (activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "GeluExact") {
VLOG(0) << "ERROR: GeluExact is yet not available!!";
ops::Identity(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Sigmoid") {
ops::Sigmoid(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Tanh") {
ops::Tanh(root.WithOpName("with_activation"), with_bias);
} else {
ops::Identity(root.WithOpName("with_activation"), with_bias);
}
RunAndFetch(root, "with_activation", output, allow_gpu_device);
}
void RunFusedMatMulOp(const Tensor& lhs_data, const Tensor& rhs_data,
const std::vector<Tensor>& args_data,
const std::vector<string>& fused_ops, bool transpose_a,
bool transpose_b, Tensor* output,
bool allow_gpu_device = false,
bool* test_skipped = nullptr) {
Scope root = tensorflow::Scope::NewRootScope();
DataType dtype = DataTypeToEnum<T>::v();
int num_args = static_cast<int>(args_data.size());
Output lhs =
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data));
Output rhs =
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data));
std::vector<NodeDefBuilder::NodeOut> args;
for (int i = 0; i < num_args; ++i) {
Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)),
Input::Initializer(args_data[i]));
args.emplace_back(arg.name(), 0, dtype);
}
NodeDef fused_matmul;
TF_EXPECT_OK(NodeDefBuilder("fused_matmul", "_FusedMatMul")
.Input({lhs.name(), 0, dtype})
.Input({rhs.name(), 0, dtype})
.Input(args)
.Attr("num_args", num_args)
.Attr("T", dtype)
.Attr("fused_ops", fused_ops)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Finalize(&fused_matmul));
absl::Status last_status;
RunAndFetch(root, fused_matmul.name(), output, allow_gpu_device,
&fused_matmul, &last_status);
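    // On some GPUs the backend may find no fused algorithm; treat the
    // "No algorithm worked!" status below as a skip rather than a failure.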
std::string what = "No algorithm worked!";
bool skip = absl::StrContains(last_status.message(), what);
if (test_skipped != nullptr) {
*test_skipped = skip;
}
if (skip) {
GTEST_SKIP() << what;
} else {
TF_ASSERT_OK(last_status);
}
}
void VerifyBiasAddTensorsNear(int m, int k, int n, bool transpose_a,
bool transpose_b,
const BiasAddGraphRunner& run_default,
const BiasAddGraphRunner& run_fused) {
DataType dtype = DataTypeToEnum<T>::v();
Tensor lhs(dtype, {transpose_a ? k : m, transpose_a ? m : k});
lhs.flat<T>() = lhs.flat<T>().setRandom();
Tensor rhs(dtype, {transpose_b ? n : k, transpose_b ? k : n});
rhs.flat<T>() = rhs.flat<T>().setRandom();
rhs.flat<T>() -= rhs.flat<T>().constant(static_cast<T>(0.5f));
const int bias_size = n;
Tensor bias(dtype, {bias_size});
bias.flat<T>() = bias.flat<T>().setRandom();
bias.flat<T>() += bias.flat<T>().constant(static_cast<T>(0.5f));
Tensor matmul;
Tensor fused_matmul;
run_default(lhs, rhs, bias, &matmul);
bool skipped = run_fused(lhs, rhs, bias, &fused_matmul);
if (!skipped) {
ASSERT_EQ(matmul.dtype(), fused_matmul.dtype());
ASSERT_EQ(matmul.shape(), fused_matmul.shape());
double atol = this->kTValueType == DT_HALF ? 1e-3 : 1e-5;
double rtol = this->kTValueType == DT_HALF ? 1e-3 : -1.0;
test::ExpectClose(matmul, fused_matmul, atol, rtol);
}
}
void VerifyMatMulWithBias(int m, int k, int n, bool transpose_a,
bool transpose_b) {
VLOG(2) << "=== VerifyMatMulWithBias (" << m << ", " << k << ", " << n
<< ", " << (int)transpose_a << ", " << (int)transpose_b << ") ===";
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunMatMulWithBias(input_data, filter_data, bias_data, transpose_a,
transpose_b, out, true);
return false;
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
bool skipped = false;
RunFusedMatMulOp(input_data, filter_data, {bias_data}, {"BiasAdd"},
transpose_a, transpose_b, out,
true, &skipped);
return skipped;
};
VerifyBiasAddTensorsNear(m, k, n, transpose_a, transpose_b, run_default,
run_fused);
}
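  // Note: the "Conv2D" in this helper's name appears to be carried over from
  // the conv tests it was adapted from; it actually verifies fused
  // MatMul + BiasAdd + activation.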
void VerifyConv2DWithBiasAndActivation(int m, int k, int n, bool transpose_a,
bool transpose_b,
const string& activation) {
bool use_gpu_device =
activation == "Relu" || (this->kTValueType == DT_HALF);
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunMatMulWithBiasAndActivation(input_data, filter_data, bias_data,
transpose_a, transpose_b, activation,
out, use_gpu_device);
return false;
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
bool skipped = false;
RunFusedMatMulOp(input_data, filter_data, {bias_data},
{"BiasAdd", activation}, transpose_a, transpose_b,
out, use_gpu_device, &skipped);
return skipped;
};
VerifyBiasAddTensorsNear(m, k, n, transpose_a, transpose_b, run_default,
run_fused);
}
};
template <typename T>
class FusedMatMulWithBiasOpTest : public FusedMatMulOpTest<T> {};
TYPED_TEST_SUITE_P(FusedMatMulWithBiasOpTest);
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x128x64) {
this->VerifyMatMulWithBias(256, 128, 64, false, false);
this->VerifyMatMulWithBias(256, 128, 64, true, false);
this->VerifyMatMulWithBias(256, 128, 64, false, true);
this->VerifyMatMulWithBias(256, 128, 64, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x256) {
this->VerifyMatMulWithBias(1, 256, 256, false, false);
this->VerifyMatMulWithBias(1, 256, 256, true, false);
this->VerifyMatMulWithBias(1, 256, 256, false, true);
this->VerifyMatMulWithBias(1, 256, 256, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x256x1) {
this->VerifyMatMulWithBias(256, 256, 1, false, false);
this->VerifyMatMulWithBias(256, 256, 1, true, false);
this->VerifyMatMulWithBias(256, 256, 1, false, true);
this->VerifyMatMulWithBias(256, 256, 1, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x1) {
this->VerifyMatMulWithBias(1, 256, 1, false, false);
}
static auto GetActivations(DataType dtype) {
switch (dtype) {
case DT_HALF:
return std::vector{ "Tanh", "Sigmoid"};
default:
return std::vector{"Relu", "Relu6", "Elu", "LeakyRelu"};
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x128x64WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, false, false,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, true, false,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, false, true,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, true, true,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x256WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(1, 256, 256, false, false,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x256x1WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(256, 256, 1, false, false,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x1WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(1, 256, 1, false, false,
activation);
}
}
REGISTER_TYPED_TEST_SUITE_P(FusedMatMulWithBiasOpTest,
MatMul256x128x64,
MatMul1x256x256,
MatMul256x256x1,
MatMul1x256x1,
MatMul256x128x64WithActivation,
MatMul1x256x256WithActivation,
MatMul256x256x1WithActivation,
MatMul1x256x1WithActivation);
using FusedBiasAddDataTypes = ::testing::Types<float, Eigen::half>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedMatMulWithBiasOpTest,
FusedBiasAddDataTypes);
template <typename T>
static Graph* Matmul(int m, int k, int n, bool transpose_a, bool transpose_b,
DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, transpose_a ? TensorShape({k, m}) : TensorShape({m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, transpose_b ? TensorShape({n, k}) : TensorShape({k, n}));
in1.flat<T>().setRandom();
test::graph::Matmul(g, test::graph::Constant(g, in0),
test::graph::Constant(g, in1), transpose_a, transpose_b);
return g;
}
#define BM_MatmulDev(M, K, N, TA, TB, T, TFTYPE, DEVICE) \
static void BM_Matmul##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Matmul<T>(M, K, N, TA, TB, TFTYPE)).Run(state); \
state.SetItemsProcessed(state.iterations() * M * K * N * 2); \
} \
BENCHMARK(BM_Matmul##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE) \
->MeasureProcessCPUTime();
#ifdef GOOGLE_CUDA
#define BM_Matmul(M, K, N, TA, TB) \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, cpu); \
BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, cpu); \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, gpu); \
  BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, gpu);
#else
#define BM_Matmul(M, K, N, TA, TB) \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, cpu); \
BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, cpu);
#endif
BM_Matmul(1, 512, 512, false, false);
BM_Matmul(8, 512, 512, false, false);
BM_Matmul(16, 512, 512, false, false);
BM_Matmul(128, 512, 512, false, false);
BM_Matmul(1, 1024, 1024, false, false);
BM_Matmul(8, 1024, 1024, false, false);
BM_Matmul(16, 1024, 1024, false, false);
BM_Matmul(128, 1024, 1024, false, false);
BM_Matmul(4096, 4096, 4096, false, false);
BM_Matmul(1, 1024, 1024, false, true);
BM_Matmul(8, 1024, 1024, false, true);
BM_Matmul(16, 1024, 1024, false, true);
BM_Matmul(128, 1024, 1024, false, true);
BM_Matmul(1, 200, 10000, false, false);
BM_Matmul(8, 200, 10000, false, false);
BM_Matmul(20, 200, 10000, false, false);
BM_Matmul(20, 200, 20000, false, false);
BM_Matmul(1, 10000, 200, false, true);
BM_Matmul(1, 10000, 200, false, false);
BM_Matmul(8, 10000, 200, false, true);
BM_Matmul(20, 10000, 200, false, true);
BM_Matmul(20, 20000, 200, false, true);
BM_Matmul(50, 50, 1, false, false);
BM_Matmul(50, 50, 1, true, false);
BM_Matmul(50, 50, 1, false, true);
BM_Matmul(50, 50, 1, true, true);
BM_Matmul(500, 500, 1, false, false);
BM_Matmul(500, 500, 1, true, false);
BM_Matmul(500, 500, 1, false, true);
BM_Matmul(500, 500, 1, true, true);
BM_Matmul(2000, 2000, 1, false, false);
BM_Matmul(2000, 2000, 1, true, false);
BM_Matmul(2000, 2000, 1, false, true);
BM_Matmul(2000, 2000, 1, true, true);
BM_Matmul(1, 50, 50, false, false);
BM_Matmul(1, 50, 50, true, false);
BM_Matmul(1, 50, 50, false, true);
BM_Matmul(1, 50, 50, true, true);
BM_Matmul(1, 500, 500, false, false);
BM_Matmul(1, 500, 500, true, false);
BM_Matmul(1, 500, 500, false, true);
BM_Matmul(1, 500, 500, true, true);
BM_Matmul(1, 2000, 2000, false, false);
BM_Matmul(1, 2000, 2000, true, false);
BM_Matmul(1, 2000, 2000, false, true);
BM_Matmul(1, 2000, 2000, true, true);
BM_Matmul(50, 1, 50, false, false);
BM_Matmul(50, 1, 50, true, false);
BM_Matmul(50, 1, 50, false, true);
BM_Matmul(50, 1, 50, true, true);
BM_Matmul(500, 1, 500, false, false);
BM_Matmul(500, 1, 500, true, false);
BM_Matmul(500, 1, 500, false, true);
BM_Matmul(500, 1, 500, true, true);
BM_Matmul(2000, 1, 2000, false, false);
BM_Matmul(2000, 1, 2000, true, false);
BM_Matmul(2000, 1, 2000, false, true);
BM_Matmul(2000, 1, 2000, true, true);
Node* BroadcastTo(Graph* g, Node* input, Node* shape) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
.Input(input)
.Input(shape)
.Finalize(g, &ret));
return ret;
}
Node* BatchMatmulV2(Graph* g, Node* in0, Node* in1, bool adj_x, bool adj_y) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BatchMatMulV2")
.Input(in0)
.Input(in1)
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(g, &ret));
return ret;
}
template <typename T>
static Graph* BatchMatmul(int b, int m, int k, int n, bool adjoint_a,
bool adjoint_b, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, adjoint_a ? TensorShape({b, k, m}) : TensorShape({b, m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, adjoint_b ? TensorShape({b, n, k}) : TensorShape({b, k, n}));
in1.flat<T>().setRandom();
test::graph::BatchMatmul(g, test::graph::Constant(g, in0),
test::graph::Constant(g, in1), adjoint_a, adjoint_b);
return g;
}
template <typename T>
static Graph* BatchMatmulWithBroadcast(int b0, int b1, int m, int k, int n,
bool manual_broadcast, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, TensorShape({b0, m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, TensorShape({b1, k, n}));
in1.flat<T>().setRandom();
Tensor broadcasted_in0_shape(DT_INT64, TensorShape({3}));
Tensor broadcasted_in1_shape(DT_INT64, TensorShape({3}));
Node* in0_node = nullptr;
Node* in1_node = nullptr;
if (manual_broadcast) {
for (int i = 0; i < 3; ++i) {
auto vec0 = broadcasted_in0_shape.vec<int64_t>();
auto vec1 = broadcasted_in1_shape.vec<int64_t>();
vec0(i) = (i == 0 ? std::max(b0, b1) : in0.shape().dim_size(i));
vec1(i) = (i == 0 ? std::max(b0, b1) : in1.shape().dim_size(i));
}
in0_node = BroadcastTo(g, test::graph::Constant(g, in0),
test::graph::Constant(g, broadcasted_in0_shape));
in1_node = BroadcastTo(g, test::graph::Constant(g, in1),
test::graph::Constant(g, broadcasted_in1_shape));
} else {
in0_node = test::graph::Constant(g, in0);
in1_node = test::graph::Constant(g, in1);
}
BatchMatmulV2(g, in0_node, in1_node, false, false);
return g;
}
#define BM_BatchMatmulDev(B, M, K, N, TA, TB, T, TFTYPE, DEVICE) \
static void \
BM_BatchMatmul##_##B##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, BatchMatmul<T>(B, M, K, N, TA, TB, TFTYPE), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * M * K * N * 2); \
} \
BENCHMARK( \
BM_BatchMatmul##_##B##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE) \
->MeasureProcessCPUTime();
#define BM_BatchMatmul(B, M, K, N, TA, TB) \
BM_BatchMatmulDev(B, M, K, N, TA, TB, float, DT_FLOAT, cpu);
#define BM_BatchMatmulBCastDev(B1, B2, M, K, N, MB, T, TT, D) \
static void \
BM_BatchMatmulBCast##_##B1##_##B2##_##M##_##K##_##N##_##MB##_##TT##_##D( \
::testing::benchmark::State& state) { \
test::Benchmark(#D, BatchMatmulWithBroadcast<T>(B1, B2, M, K, N, MB, TT), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * std::max(B1, B2) * M * K * \
N * 2); \
} \
BENCHMARK( \
BM_BatchMatmulBCast##_##B1##_##B2##_##M##_##K##_##N##_##MB##_##TT##_##D) \
->MeasureProcessCPUTime();
#define BM_BatchMatmulBCast(B1, B2, M, K, N, MB) \
BM_BatchMatmulBCastDev(B1, B2, M, K, N, MB, float, DT_FLOAT, cpu);
BM_BatchMatmulBCast(1, 128, 1, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 1, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 1, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 1, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 128, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 128, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 128, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 128, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 512, 512, 512, true);
BM_BatchMatmulBCast(1, 128, 512, 512, 512, false);
BM_BatchMatmulBCast(128, 1, 512, 512, 512, true);
BM_BatchMatmulBCast(128, 1, 512, 512, 512, false);
BM_BatchMatmulBCast(1, 128, 1024, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 1024, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 1024, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 1024, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 10000, 200, 1, true);
BM_BatchMatmulBCast(1, 128, 10000, 200, 1, false);
BM_BatchMatmulBCast(128, 1, 10000, 200, 1, true);
BM_BatchMatmulBCast(128, 1, 10000, 200, 1, false);
BM_BatchMatmulBCast(1, 128, 1, 200, 10000, true);
BM_BatchMatmulBCast(1, 128, 1, 200, 10000, false);
BM_BatchMatmulBCast(128, 1, 1, 200, 10000, true);
BM_BatchMatmulBCast(128, 1, 1, 200, 10000, false);
BM_BatchMatmul(1, 1, 1024, 1024, false, false);
BM_BatchMatmul(1, 8, 1024, 1024, false, false);
BM_BatchMatmul(1, 16, 1024, 1024, false, false);
BM_BatchMatmul(1, 128, 1024, 1024, false, false);
BM_BatchMatmul(2, 1, 1024, 1024, false, false);
BM_BatchMatmul(2, 8, 1024, 1024, false, false);
BM_BatchMatmul(2, 16, 1024, 1024, false, false);
BM_BatchMatmul(2, 128, 1024, 1024, false, false);
BM_BatchMatmul(8, 1, 1024, 1024, false, false);
BM_BatchMatmul(8, 8, 1024, 1024, false, false);
BM_BatchMatmul(8, 16, 1024, 1024, false, false);
BM_BatchMatmul(8, 128, 1024, 1024, false, false);
BM_BatchMatmul(32, 1, 1024, 1024, false, false);
BM_BatchMatmul(32, 8, 1024, 1024, false, false);
BM_BatchMatmul(32, 16, 1024, 1024, false, false);
BM_BatchMatmul(32, 128, 1024, 1024, false, false);
BM_BatchMatmul(1, 32, 32, 32, false, false);
BM_BatchMatmul(1, 128, 128, 128, false, false);
BM_BatchMatmul(1, 256, 256, 256, false, false);
BM_BatchMatmul(1, 1024, 1024, 1024, false, false);
BM_BatchMatmul(1, 2048, 2048, 2048, false, false);
BM_BatchMatmul(2, 32, 32, 32, false, false);
BM_BatchMatmul(2, 128, 128, 128, false, false);
BM_BatchMatmul(2, 256, 256, 256, false, false);
BM_BatchMatmul(2, 1024, 1024, 1024, false, false);
BM_BatchMatmul(2, 2048, 2048, 2048, false, false);
BM_BatchMatmul(4, 32, 32, 32, false, false);
BM_BatchMatmul(4, 128, 128, 128, false, false);
BM_BatchMatmul(4, 256, 256, 256, false, false);
BM_BatchMatmul(4, 1024, 1024, 1024, false, false);
BM_BatchMatmul(4, 2048, 2048, 2048, false, false);
BM_BatchMatmul(8, 32, 32, 32, false, false);
BM_BatchMatmul(8, 128, 128, 128, false, false);
BM_BatchMatmul(8, 256, 256, 256, false, false);
BM_BatchMatmul(8, 1024, 1024, 1024, false, false);
BM_BatchMatmul(8, 2048, 2048, 2048, false, false);
BM_BatchMatmul(32, 32, 32, 32, false, false);
BM_BatchMatmul(32, 128, 128, 128, false, false);
BM_BatchMatmul(32, 256, 256, 256, false, false);
BM_BatchMatmul(32, 1024, 1024, 1024, false, false);
BM_BatchMatmul(32, 2048, 2048, 2048, false, false);
BM_BatchMatmul(1, 10000, 200, 1, false, false);
BM_BatchMatmul(8, 10000, 200, 1, false, false);
BM_BatchMatmul(32, 10000, 200, 1, false, false);
BM_BatchMatmul(1, 10000, 200, 1, true, false);
BM_BatchMatmul(8, 10000, 200, 1, true, false);
BM_BatchMatmul(32, 10000, 200, 1, true, false);
BM_BatchMatmul(1, 10000, 200, 1, false, true);
BM_BatchMatmul(8, 10000, 200, 1, false, true);
BM_BatchMatmul(32, 10000, 200, 1, false, true);
BM_BatchMatmul(1, 10000, 200, 1, true, true);
BM_BatchMatmul(8, 10000, 200, 1, true, true);
BM_BatchMatmul(32, 10000, 200, 1, true, true);
BM_BatchMatmul(1, 1, 200, 10000, false, false);
BM_BatchMatmul(8, 1, 200, 10000, false, false);
BM_BatchMatmul(32, 1, 200, 10000, false, false);
BM_BatchMatmul(1, 1, 200, 10000, true, false);
BM_BatchMatmul(8, 1, 200, 10000, true, false);
BM_BatchMatmul(32, 1, 200, 10000, true, false);
BM_BatchMatmul(1, 1, 200, 10000, false, true);
BM_BatchMatmul(8, 1, 200, 10000, false, true);
BM_BatchMatmul(32, 1, 200, 10000, false, true);
BM_BatchMatmul(1, 1, 200, 10000, true, true);
BM_BatchMatmul(8, 1, 200, 10000, true, true);
BM_BatchMatmul(32, 1, 200, 10000, true, true);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/matmul_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/matmul_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f02a4423-d7f6-48a3-99fa-ecbd4b421227 | cpp | tensorflow/tensorflow | cwise_ops | tensorflow/compiler/tf2xla/kernels/cwise_ops.cc | tensorflow/core/kernels/cwise_ops_test.cc | #include "tensorflow/compiler/tf2xla/kernels/cwise_ops.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
void XlaBinaryOp::Compile(XlaOpKernelContext* ctx) {
TensorShape lhs_shape = ctx->InputShape(0);
TensorShape rhs_shape = ctx->InputShape(1);
xla::Shape lhs_xla_shape = ctx->InputXlaShape(0).value();
xla::Shape rhs_xla_shape = ctx->InputXlaShape(1).value();
auto lhs_handle = ctx->Input(0);
auto rhs_handle = ctx->Input(1);
if (lhs_shape.dims() == rhs_shape.dims()) {
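    // With equal ranks, dynamic or size-1 dimensions may still disagree;
    // reconcile them (slice, pad, or broadcast) so the shapes below are
    // broadcast-compatible.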
auto reconcile_tensor_mismatched_dims = [ctx](
xla::XlaOp lhs, xla::XlaOp rhs,
const xla::Shape& lhs_xla_shape,
const xla::Shape& rhs_xla_shape,
TensorShape* lhs_tensor_shape) {
for (int64_t i = 0; i < lhs_xla_shape.rank(); ++i) {
if (lhs_xla_shape.is_dynamic_dimension(i)) {
if (!rhs_xla_shape.is_dynamic_dimension(i) &&
lhs_xla_shape.dimensions(i) > rhs_xla_shape.dimensions(i) &&
rhs_xla_shape.dimensions(i) != 1) {
auto size = xla::GetDimensionSize(lhs, i);
lhs = xla::SliceInDim(lhs, 0, rhs_xla_shape.dimensions(i), 1,
i);
lhs_tensor_shape->set_dim(i, rhs_xla_shape.dimensions(i));
lhs = xla::SetDimensionSize(lhs, size, i);
}
if (rhs_xla_shape.is_dynamic_dimension(i) &&
lhs_xla_shape.dimensions(i) < rhs_xla_shape.dimensions(i) &&
rhs_xla_shape.dimensions(i) != 1 &&
lhs_xla_shape.dimensions(i) != 1) {
auto size = xla::GetDimensionSize(lhs, i);
int64_t diff =
rhs_xla_shape.dimensions(i) - lhs_xla_shape.dimensions(i);
lhs = xla::PadInDim(
lhs, xla::Zero(ctx->builder(), lhs_xla_shape.element_type()), i,
0, diff);
lhs_tensor_shape->set_dim(i, rhs_xla_shape.dimensions(i));
lhs = xla::SetDimensionSize(lhs, size, i);
}
if (lhs_xla_shape.dimensions(i) == 1 &&
rhs_xla_shape.dimensions(i) != 1) {
auto size = xla::GetDimensionSize(lhs, i);
lhs = xla::RemoveDynamicDimension(lhs, i);
std::vector<int64_t> dimensions(lhs_xla_shape.dimensions().begin(),
lhs_xla_shape.dimensions().end());
dimensions[i] = rhs_xla_shape.dimensions(i);
std::vector<int64_t> broadcast_dimensions(lhs_xla_shape.rank());
absl::c_iota(broadcast_dimensions, 0);
lhs = xla::BroadcastInDim(lhs, dimensions, broadcast_dimensions);
xla::XlaOp rhs_size;
if (rhs_xla_shape.is_dynamic_dimension(i)) {
rhs_size = xla::GetDimensionSize(rhs, i);
} else {
rhs_size = xla::ConstantR0<int32_t>(lhs.builder(),
rhs_xla_shape.dimensions(i));
}
size = xla::Mul(size, rhs_size);
lhs = xla::SetDimensionSize(lhs, size, i);
lhs_tensor_shape->set_dim(i, rhs_xla_shape.dimensions(i));
}
}
}
return lhs;
};
lhs_handle = reconcile_tensor_mismatched_dims(
lhs_handle, rhs_handle, lhs_xla_shape, rhs_xla_shape, &lhs_shape);
rhs_handle = reconcile_tensor_mismatched_dims(
rhs_handle, lhs_handle, rhs_xla_shape, lhs_xla_shape, &rhs_shape);
}
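  // Validate the (possibly adjusted) shapes with TF's broadcast helper.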
BCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape),
false);
if (!bcast.IsValid()) {
ctx->SetStatus(absl::InvalidArgumentError(
absl::StrCat("Incompatible shapes: ", lhs_shape.DebugString(), " vs. ",
rhs_shape.DebugString())));
return;
}
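  // When ranks differ, the lower-rank operand's dimensions align with the
  // trailing dimensions of the higher-rank one; record that mapping for the
  // concrete op's Computation.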
std::vector<int64_t> extend_dimension;
int max_rank = std::max(lhs_shape.dims(), rhs_shape.dims());
int min_rank = std::min(lhs_shape.dims(), rhs_shape.dims());
if (min_rank != max_rank) {
for (int i = 0; i < min_rank; ++i) {
extend_dimension.push_back(max_rank - min_rank + i);
}
}
xla::XlaOp output =
Computation(ctx, lhs_handle, lhs_shape.dim_sizes(), rhs_handle,
rhs_shape.dim_sizes(), bcast, extend_dimension);
ctx->SetOutput(0, output);
}
std::pair<xla::XlaOp, xla::XlaOp> XlaBinaryOp::Broadcast(
xla::XlaOp lhs, xla::XlaOp rhs, const BCast& broadcast_helper) {
auto lhs_output = BroadcastTo(lhs, broadcast_helper.output_shape());
if (!lhs_output.ok()) {
xla::XlaOp error = lhs.builder()->ReportError(lhs_output.status());
return {error, error};
}
auto rhs_output = BroadcastTo(rhs, broadcast_helper.output_shape());
if (!rhs_output.ok()) {
xla::XlaOp error = rhs.builder()->ReportError(rhs_output.status());
return {error, error};
}
return {lhs_output.value(), rhs_output.value()};
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
template <typename T>
static Graph* Unary(const string& func, int num, DataType dtype) {
Graph* g = new Graph(OpRegistry::Global());
Tensor data(dtype, TensorShape({64, 64, num / (64 * 64)}));
CHECK_GT(data.NumElements(), 0);
data.flat<T>().setRandom();
test::graph::Unary(g, func, test::graph::Constant(g, data), 0);
return g;
}
const int kRows = 100000;
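// Rows and cols are packed into a single benchmark argument; these helpers
// round-trip the encoding.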
int RowsAndColsArg(int r, int c) { return r * kRows + c; }
int RowsFromArg(int arg) { return (arg / kRows); }
int ColsFromArg(int arg) { return (arg % kRows); }
#define BM_UNARY(DEVICE, FUNC, T, TYPE) \
void BM_##DEVICE##_##FUNC##_##TYPE(::testing::benchmark::State& state) { \
const int num = state.range(0); \
test::Benchmark(#DEVICE, Unary<T>(#FUNC, num, TYPE), \
false) \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(T)); \
} \
BENCHMARK(BM_##DEVICE##_##FUNC##_##TYPE) \
->UseRealTime() \
->Range(4 << 10, 1 << 20);
BM_UNARY(cpu, LeakyRelu, float, DT_FLOAT);
BM_UNARY(cpu, LeakyRelu, bfloat16, DT_BFLOAT16);
BM_UNARY(cpu, Floor, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Floor, float, DT_FLOAT);
#endif
BM_UNARY(cpu, Floor, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Floor, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Conj, std::complex<float>, DT_COMPLEX64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Conj, std::complex<float>, DT_COMPLEX64);
#endif
BM_UNARY(cpu, Conj, std::complex<double>, DT_COMPLEX128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Conj, std::complex<double>, DT_COMPLEX128);
#endif
BM_UNARY(cpu, Rint, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Rint, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Rint, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Rint, float, DT_FLOAT);
#endif
BM_UNARY(cpu, Round, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Round, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Round, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Round, float, DT_FLOAT);
#endif
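// Builds a graph that applies a binary op between a large random tensor and a
// scalar, exercising the scalar-broadcast fast path.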
Graph* BinaryScalar(int num, const string& func) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, TensorShape({}));
rhs.flat<float>().setRandom();
test::graph::Binary(g, func, test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BINARY_SCALAR(DEVICE, FUNC) \
void BM_##DEVICE##_##FUNC##_scalar(::testing::benchmark::State& state) { \
const int num = state.range(0); \
\
test::Benchmark(#DEVICE, BinaryScalar(num, #FUNC), \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_##FUNC##_scalar) \
->Arg(1 << 12) \
->Arg(1 << 13) \
->Arg(1 << 14) \
->Arg((1 << 15) - (1 << 13)) \
->Arg(1 << 15) \
->Arg((1 << 15) + (1 << 14)) \
->Arg(1 << 16) \
->Arg((1 << 17) - (1 << 15)) \
->Arg(1 << 17) \
->Arg((1 << 17) + (1 << 16)) \
->Arg(1 << 18) \
->Arg(1 << 19) \
->Arg(1 << 20);
BM_BINARY_SCALAR(cpu, Less);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, Less);
#endif
BM_BINARY_SCALAR(cpu, Add);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, Add);
#endif
BM_BINARY_SCALAR(cpu, DivNoNan);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, DivNoNan);
#endif
#undef BM_BINARY_SCALAR
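// Three equivalent ways to compute x^3, compared below by BM_CUBE: a single
// Pow with exponent 3, two chained Muls, and a Mul of x with Square(x).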
Graph* CubeWithPow3(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, TensorShape({}));
rhs.flat<float>().setConstant(3);
test::graph::Binary(g, "Pow", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
Graph* CubeWithTwoMuls(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
auto* x = test::graph::Constant(g, lhs);
auto* inner = test::graph::Binary(g, "Mul", x, x);
test::graph::Binary(g, "Mul", x, inner);
return g;
}
Graph* CubeWithMulSquare(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
auto* x = test::graph::Constant(g, lhs);
auto* inner = test::graph::Unary(g, "Square", x);
test::graph::Binary(g, "Mul", test::graph::Constant(g, lhs), inner);
return g;
}
#define BM_CUBE(DEVICE, Impl) \
void BM_##DEVICE##_Cube_##Impl(::testing::benchmark::State& state) { \
const int num = state.range(0); \
\
    test::Benchmark(#DEVICE, Impl(num), /*old_benchmark_api=*/false)         \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_Cube_##Impl) \
->UseRealTime() \
->Arg(1 << 12) \
->Arg(1 << 16) \
->Arg(1 << 20);
BM_CUBE(cpu, CubeWithPow3);
BM_CUBE(cpu, CubeWithTwoMuls);
BM_CUBE(cpu, CubeWithMulSquare);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_CUBE(gpu, CubeWithPow3);
BM_CUBE(gpu, CubeWithTwoMuls);
BM_CUBE(gpu, CubeWithMulSquare);
#endif
#undef BM_CUBE
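// BiasAdd benchmark: adds a length-`cols` bias vector to a rows x cols matrix.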
template <class T>
Graph* BiasAdd(int rows, int cols, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(type, TensorShape({rows, cols}));
lhs.template flat<T>().setRandom();
TensorShape rhs_shape;
rhs_shape = TensorShape({cols});
Tensor rhs(type, rhs_shape);
rhs.template flat<T>().setRandom();
test::graph::Binary(g, "BiasAdd", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, R, C) \
void BM_##DEVICE##_##C_TYPE##_BiasAdd_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
test::Benchmark(#DEVICE, BiasAdd<C_TYPE>(rows, cols, TF_TYPE), \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_##DEVICE##_##C_TYPE##_BiasAdd_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BIAS_ADD_ALL(DEVICE, C_TYPE, TF_TYPE) \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 512, 2048); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 512, 4096); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 2048, 512); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 4096, 512);
using Eigen::half;
BM_BIAS_ADD_ALL(cpu, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_ALL(gpu, float, DT_FLOAT);
#endif
BM_BIAS_ADD_ALL(cpu, half, DT_HALF);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_ALL(gpu, half, DT_HALF);
#endif
#undef BM_BIAS_ADD_ALL
#undef BM_BIAS_ADD
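// BiasAddGrad reduces the incoming gradient over every dimension except the
// channel dimension; the data format decides whether channels are the leading
// (NCHW) or trailing (NHWC) dimension of the input.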
template <class T>
Graph* BiasAddGrad(int rows, int cols, int channels, DataType type,
TensorFormat format) {
Graph* g = new Graph(OpRegistry::Global());
TensorShape lhs_shape;
if (format == FORMAT_NCHW) {
lhs_shape = TensorShape({channels, rows, cols});
} else {
lhs_shape = TensorShape({rows, cols, channels});
}
Tensor lhs(type, lhs_shape);
lhs.template flat<T>().setRandom();
Node* n;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BiasAddGrad")
.Attr("data_format", ToString(format))
.Input(test::graph::Constant(g, lhs), 0)
.Finalize(g, &n));
return g;
}
#define BM_BIAS_ADD_GRAD(DEVICE, FMT, C_TYPE, TF_TYPE, R, C, CH) \
void BM_##DEVICE##_##FMT##_##C_TYPE##_BiasAddGrad_R##R##_C##C##_CH##CH( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
const int channels = state.range(1); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark( \
#DEVICE, \
BiasAddGrad<C_TYPE>(rows, cols, channels, TF_TYPE, FORMAT_##FMT), \
        /*old_benchmark_api=*/false)                                         \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols * channels; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_##DEVICE##_##FMT##_##C_TYPE##_BiasAddGrad_R##R##_C##C##_CH##CH) \
->ArgPair(RowsAndColsArg(R, C), CH);
#define BM_BIAS_ADD_GRAD_ALL(DEVICE, FORMAT, C_TYPE, TF_TYPE) \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 64, 64, 64); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 512, 512, 4); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 512, 512, 1); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 4096, 4096, 4); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 4096, 4096, 1);
using Eigen::half;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NCHW, float, DT_FLOAT);
BM_BIAS_ADD_GRAD_ALL(gpu, NCHW, half, DT_HALF);
#endif
BM_BIAS_ADD_GRAD_ALL(cpu, NHWC, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NHWC, float, DT_FLOAT);
#endif
BM_BIAS_ADD_GRAD_ALL(cpu, NHWC, half, DT_HALF);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NHWC, half, DT_HALF);
#endif
#undef BM_BIAS_ADD_GRAD_ALL
#undef BM_BIAS_ADD_GRAD
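// BcastAdd builds an Add whose operands broadcast in one of four patterns,
// selected by `dim`: 0 = per-row scalar ({rows,1} against {rows,cols}),
// 1 = per-column vector ({cols}), 2 = outer sum of a column and a row vector,
// 3 = the same outer sum with the operands swapped.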
Graph* BcastAdd(int rows, int cols, int dim) {
Graph* g = new Graph(OpRegistry::Global());
TensorShape lhs_shape, rhs_shape;
if (dim == 0) {
lhs_shape = TensorShape({rows, cols});
rhs_shape = TensorShape({rows, 1});
} else if (dim == 1) {
lhs_shape = TensorShape({rows, cols});
rhs_shape = TensorShape({cols});
} else if (dim == 2) {
lhs_shape = TensorShape({rows, 1});
rhs_shape = TensorShape({1, cols});
} else {
lhs_shape = TensorShape({1, cols});
rhs_shape = TensorShape({rows, 1});
}
Tensor lhs(DT_FLOAT, lhs_shape);
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, rhs_shape);
rhs.flat<float>().setRandom();
test::graph::Binary(g, "Add", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BCAST_ADD_ROW(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddRow_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 0), \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddRow_R##R##_C##C)->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_ROW_ALL(DEVICE) \
BM_BCAST_ADD_ROW(DEVICE, 512, 2048); \
BM_BCAST_ADD_ROW(DEVICE, 512, 4096); \
BM_BCAST_ADD_ROW(DEVICE, 2048, 512); \
BM_BCAST_ADD_ROW(DEVICE, 4096, 512);
BM_BCAST_ADD_ROW_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_ROW_ALL(gpu);
#endif
#undef BM_BCAST_ADD_ROW_ALL
#undef BM_BCAST_ADD_ROW
#define BM_BCAST_ADD_COL(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCol_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 1), \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
\
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCol_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_COL_ALL(DEVICE) \
BM_BCAST_ADD_COL(DEVICE, 512, 2048); \
BM_BCAST_ADD_COL(DEVICE, 512, 4096); \
BM_BCAST_ADD_COL(DEVICE, 2048, 512); \
BM_BCAST_ADD_COL(DEVICE, 4096, 512);
BM_BCAST_ADD_COL_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_COL_ALL(gpu);
#endif
#undef BM_BCAST_ADD_COL_ALL
#undef BM_BCAST_ADD_COL
#define BM_BCAST_ADD_CROSS_RC(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCrossRC_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 2), \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
\
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCrossRC_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_CROSS_RC_ALL(DEVICE) \
BM_BCAST_ADD_CROSS_RC(DEVICE, 512, 2048); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 512, 4096); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 2048, 512); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 4096, 512);
BM_BCAST_ADD_CROSS_RC_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_CROSS_RC_ALL(gpu);
#endif
#undef BM_BCAST_ADD_CROSS_RC_ALL
#undef BM_BCAST_ADD_CROSS_RC
#define BM_BCAST_ADD_CROSS_CR(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCrossCR_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark(#DEVICE, BcastAdd(rows, cols, 3), \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCrossCR_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_CROSS_CR_ALL(DEVICE) \
BM_BCAST_ADD_CROSS_CR(DEVICE, 512, 2048); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 512, 4096); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 2048, 512); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 4096, 512);
BM_BCAST_ADD_CROSS_CR_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_CROSS_CR_ALL(gpu);
#endif
#undef BM_BCAST_ADD_CROSS_CR_ALL
#undef BM_BCAST_ADD_CROSS_CR
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/cwise_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/cwise_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcf5ff3d-be74-493f-a8b0-af3125c467b6 | cpp | tensorflow/tensorflow | matrix_triangular_solve_op | tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc | tensorflow/core/kernels/linalg/matrix_triangular_solve_op_test.cc | #include <tuple>
#include <utility>
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/util/matmul_bcast.h"
namespace tensorflow {
namespace {
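// XLA kernel for MatrixTriangularSolve: validates batch-broadcast
// compatibility and that the coefficient matrix is square, broadcasts both
// operands to a common batch shape, then lowers to xla::TriangularSolve.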
class MatrixTriangularSolveOp : public XlaOpKernel {
public:
explicit MatrixTriangularSolveOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("lower", &lower_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("adjoint", &adjoint_));
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape lhs_shape = ctx->InputShape(0);
const TensorShape rhs_shape = ctx->InputShape(1);
MatMulBCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape));
if (!bcast.IsValid()) {
ctx->SetStatus(errors::InvalidArgument(
"Incompatible shapes: ", lhs_shape.DebugString(), " vs. ",
rhs_shape.DebugString()));
return;
}
auto lhs_size = lhs_shape.dims();
OP_REQUIRES(
ctx,
lhs_shape.dim_size(lhs_size - 1) == lhs_shape.dim_size(lhs_size - 2),
errors::InvalidArgument("The coefficient matrix must be square in "
"the inner-most two dimensions: ",
lhs_shape.DebugString()));
xla::XlaOp a = ctx->Input(0);
xla::XlaOp b = ctx->Input(1);
std::tie(a, b) = Broadcast(a, lhs_shape, b, rhs_shape, bcast);
    auto result = xla::TriangularSolve(
        a, b, /*left_side=*/true,
        /*lower=*/lower_, /*unit_diagonal=*/false,
        /*transpose_a=*/
        adjoint_ ? xla::TriangularSolveOptions::ADJOINT
                 : xla::TriangularSolveOptions::NO_TRANSPOSE);
ctx->SetOutput(0, result);
}
private:
static std::pair<xla::XlaOp, xla::XlaOp> Broadcast(
xla::XlaOp lhs, const TensorShape& lhs_shape, xla::XlaOp rhs,
const TensorShape& rhs_shape, const MatMulBCast& broadcast_helper);
bool lower_;
bool adjoint_;
};
std::pair<xla::XlaOp, xla::XlaOp>
MatrixTriangularSolveOp::Broadcast(xla::XlaOp lhs, const TensorShape& lhs_shape,
xla::XlaOp rhs, const TensorShape& rhs_shape,
const MatMulBCast& broadcast_helper) {
int64_t m = lhs_shape.dim_size(lhs_shape.dims() - 1);
int64_t n = rhs_shape.dim_size(rhs_shape.dims() - 1);
TensorShape lhs_broadcast_shape(broadcast_helper.output_batch_shape());
lhs_broadcast_shape.AddDim(m);
lhs_broadcast_shape.AddDim(m);
auto lhs_output = BroadcastTo(lhs, lhs_broadcast_shape.dim_sizes());
if (!lhs_output.ok()) {
xla::XlaOp error = lhs.builder()->ReportError(lhs_output.status());
return {error, error};
}
TensorShape rhs_broadcast_shape(broadcast_helper.output_batch_shape());
rhs_broadcast_shape.AddDim(m);
rhs_broadcast_shape.AddDim(n);
auto rhs_output = BroadcastTo(rhs, rhs_broadcast_shape.dim_sizes());
if (!rhs_output.ok()) {
xla::XlaOp error = rhs.builder()->ReportError(rhs_output.status());
return {error, error};
}
return {lhs_output.value(), rhs_output.value()};
}
REGISTER_XLA_OP(Name("MatrixTriangularSolve"), MatrixTriangularSolveOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/broadcast_to_op.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
Node* BroadcastTo(Graph* g, Node* input, Node* shape) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
.Input(input)
.Input(shape)
.Attr("Tidx", DT_INT64)
.Finalize(g, &ret));
return ret;
}
Node* MatrixTriangularSolve(Graph* g, Node* in0, Node* in1, bool adjoint) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "MatrixTriangularSolve")
.Input(in0)
.Input(in1)
.Attr("lower", true)
.Attr("adjoint", adjoint)
.Finalize(g, &ret));
return ret;
}
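// Builds a batched triangular solve benchmark graph. The coefficient diagonal
// is nudged away from zero to keep the solve well conditioned; when
// `manual_broadcast` is set, batch dimensions are expanded with explicit
// BroadcastTo ops instead of relying on the op's implicit broadcasting.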
template <typename T>
static Graph* MatrixTriangularSolveWithBroadcast(int64_t b0, int64_t b1,
int64_t m, int64_t n,
bool manual_broadcast,
DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, TensorShape({b0, m, m}));
in0.flat<T>().setRandom();
auto matrix = Eigen::Map<
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>(
in0.flat<T>().data(), in0.dim_size(1), in0.dim_size(2));
matrix.diagonal() =
(matrix.diagonal().cwiseAbs().array() + static_cast<T>(0.5));
Tensor in1(type, TensorShape({b1, m, n}));
in1.flat<T>().setRandom();
Tensor broadcasted_in0_shape(DT_INT64, TensorShape({3}));
Tensor broadcasted_in1_shape(DT_INT64, TensorShape({3}));
Node* in0_node = nullptr;
Node* in1_node = nullptr;
if (manual_broadcast) {
auto vec0 = broadcasted_in0_shape.vec<int64_t>();
auto vec1 = broadcasted_in1_shape.vec<int64_t>();
for (int i = 0; i < 3; ++i) {
vec0(i) = (i == 0 ? std::max(b0, b1) : in0.shape().dim_size(i));
vec1(i) = (i == 0 ? std::max(b0, b1) : in1.shape().dim_size(i));
}
in0_node = BroadcastTo(g, test::graph::Constant(g, in0),
test::graph::Constant(g, broadcasted_in0_shape));
in1_node = BroadcastTo(g, test::graph::Constant(g, in1),
test::graph::Constant(g, broadcasted_in1_shape));
} else {
in0_node = test::graph::Constant(g, in0);
in1_node = test::graph::Constant(g, in1);
}
MatrixTriangularSolve(g, in0_node, in1_node, false);
return g;
}
#define BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, T, TT, D) \
static void \
BM_MatrixTriangularSolve##_##B1##_##B2##_##M##_##N##_##MB##_##TT##_##D( \
::testing::benchmark::State& state) { \
state.SetItemsProcessed(state.iterations() * std::max(B1, B2) * M * M * \
N * 2); \
test::Benchmark( \
#D, MatrixTriangularSolveWithBroadcast<T>(B1, B2, M, N, MB, TT), \
        /*old_benchmark_api=*/false)                                         \
.Run(state); \
} \
BENCHMARK( \
BM_MatrixTriangularSolve##_##B1##_##B2##_##M##_##N##_##MB##_##TT##_##D);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define BM_MatrixTriangularSolve(B1, B2, M, N, MB) \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, float, DT_FLOAT, cpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, double, DT_DOUBLE, cpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, float, DT_FLOAT, gpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, double, DT_DOUBLE, gpu);
#else
#define BM_MatrixTriangularSolve(B1, B2, M, N, MB) \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, float, DT_FLOAT, cpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, double, DT_DOUBLE, cpu);
#endif
BM_MatrixTriangularSolve(32, 32, 512, 512, true);
BM_MatrixTriangularSolve(32, 32, 512, 512, false);
BM_MatrixTriangularSolve(1, 32, 512, 512, true);
BM_MatrixTriangularSolve(1, 32, 512, 512, false);
BM_MatrixTriangularSolve(32, 1, 512, 512, true);
BM_MatrixTriangularSolve(32, 1, 512, 512, false);
BM_MatrixTriangularSolve(128, 128, 512, 512, true);
BM_MatrixTriangularSolve(128, 128, 512, 512, false);
BM_MatrixTriangularSolve(1, 128, 512, 512, true);
BM_MatrixTriangularSolve(1, 128, 512, 512, false);
BM_MatrixTriangularSolve(128, 1, 512, 512, true);
BM_MatrixTriangularSolve(128, 1, 512, 512, false);
BM_MatrixTriangularSolve(1, 128, 1024, 1024, true);
BM_MatrixTriangularSolve(1, 128, 1024, 1024, false);
BM_MatrixTriangularSolve(128, 1, 1024, 1024, true);
BM_MatrixTriangularSolve(128, 1, 1024, 1024, false);
BM_MatrixTriangularSolve(1, 128, 200, 1, true);
BM_MatrixTriangularSolve(1, 128, 200, 1, false);
BM_MatrixTriangularSolve(128, 1, 200, 1, true);
BM_MatrixTriangularSolve(128, 1, 200, 1, false);
BM_MatrixTriangularSolve(1, 128, 200, 10000, true);
BM_MatrixTriangularSolve(1, 128, 200, 10000, false);
BM_MatrixTriangularSolve(128, 1, 200, 10000, true);
BM_MatrixTriangularSolve(128, 1, 200, 10000, false);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/linalg/matrix_triangular_solve_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a5dcfca-2d54-4353-bcdd-af9c476f0366 | cpp | tensorflow/tensorflow | rng_converter_utils | tensorflow/compiler/tf2xla/kernels/rng_converter_utils.cc | tensorflow/compiler/tf2xla/kernels/rng_converter_utils_test.cc | #include "tensorflow/compiler/tf2xla/kernels/rng_converter_utils.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
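// Maps an XLA RNG algorithm onto the corresponding TensorFlow algorithm;
// RNG_DEFAULT (and anything unrecognized) becomes auto-select.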
Algorithm ToTensorflowAlgorithm(xla::RandomAlgorithm alg) {
switch (alg) {
case xla::RandomAlgorithm::RNG_PHILOX:
return RNG_ALG_PHILOX;
case xla::RandomAlgorithm::RNG_THREE_FRY:
return RNG_ALG_THREEFRY;
case xla::RandomAlgorithm::RNG_DEFAULT:
default:
return RNG_ALG_AUTO_SELECT;
}
}
xla::RandomAlgorithm DefaultRngAlgForDeviceType(
absl::string_view device_type_string) {
if (device_type_string == DEVICE_GPU_XLA_JIT ||
device_type_string == DEVICE_CPU_XLA_JIT) {
return xla::RandomAlgorithm::RNG_PHILOX;
} else {
return xla::RandomAlgorithm::RNG_DEFAULT;
}
}
} | #include "tensorflow/compiler/tf2xla/kernels/rng_converter_utils.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
namespace {
TEST(RngConverterUtilsTest, DefaultRngForCPUEqualsGPU) {
EXPECT_EQ(DefaultRngAlgForDeviceType(DEVICE_CPU_XLA_JIT),
DefaultRngAlgForDeviceType(DEVICE_GPU_XLA_JIT));
}
TEST(RngConverterUtilsTest, UnknownDeviceIsDefault) {
EXPECT_EQ(DefaultRngAlgForDeviceType("UNKNOWN DEVICE"),
xla::RandomAlgorithm::RNG_DEFAULT);
}
TEST(RngConverterUtilsTest, TensorflowAutoSelects) {
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_DEFAULT),
tensorflow::RNG_ALG_AUTO_SELECT);
}
TEST(RngConverterUtilsTest, ToTensorflow) {
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_PHILOX),
tensorflow::RNG_ALG_PHILOX);
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_THREE_FRY),
tensorflow::RNG_ALG_THREEFRY);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/rng_converter_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/rng_converter_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3aee648e-9f7e-4188-b1ab-86279bc91b57 | cpp | tensorflow/tensorflow | register_common_dialects | tensorflow/compiler/mlir/register_common_dialects.cc | tensorflow/compiler/mlir/register_common_dialects_test.cc | #include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllExtensions.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.h"
#include "xla/mlir/framework/ir/xla_framework.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
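// Registers every dialect the common TF/MLIR tooling may encounter, including
// TF, TFLite, StableHLO/MHLO, kernel-gen, quantization, and core MLIR
// dialects plus their extensions.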
void RegisterCommonToolingDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::registerAllDialects(registry);
mlir::registerAllExtensions(registry);
mlir::stablehlo::registerAllDialects(registry);
registry.insert<mlir::TFL::TensorFlowLiteDialect>();
registry.insert<mlir::kernel_gen::tf_framework::TFFrameworkDialect>();
registry.insert<mlir::quant::QuantDialect>();
registry.insert<mlir::quantfork::QuantizationForkDialect>();
registry.insert<mlir::shape::ShapeDialect>();
registry.insert<mlir::tensor::TensorDialect>();
registry.insert<mlir::tosa::TosaDialect>();
registry.insert<mlir::xla_framework::XLAFrameworkDialect,
mlir::TF::TensorFlowDialect, mlir::tf_type::TFTypeDialect>();
}
}; | #include "tensorflow/compiler/mlir/register_common_dialects.h"
#include <gtest/gtest.h>
#include "mlir/IR/DialectRegistry.h"
namespace mlir {
namespace {
TEST(RegisterCommonDialectsTest, DoesntCrash) {
mlir::DialectRegistry registry;
mlir::RegisterCommonToolingDialects(registry);
EXPECT_FALSE(registry.getDialectNames().empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/register_common_dialects.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/register_common_dialects_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a088a86b-daf2-4447-b1ba-c4c90bd22950 | cpp | tensorflow/tensorflow | mlir_graph_optimization_pass | tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc | tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc | #include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
auto* mlir_function_pass_fallback_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_function_pass_fallback_count",
"Track success/failure of MLIR pass runs when fallback used",
"status");
auto* mlir_graph_optimization_pass_fallback_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count",
"Track success/failure of MLIR graph optimization pass runs when fallback "
"used",
"status");
auto* mlir_function_pass_graph_conversion_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_function_pass_graph_conversion_count",
"Track success/failure of Graph to MLIR conversions in function "
"optimization pass",
"status");
constexpr char kSuccess[] = "kSuccess";
constexpr char kFailure[] = "kFailure";
static inline absl::string_view StringRefToView(llvm::StringRef ref) {
return {ref.data(), ref.size()};
}
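// Dumps `module` to a uniquely named .mlir file under the directory named by
// the dump-dir environment variable; all failures are logged and swallowed.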
static void DumpModule(mlir::ModuleOp module, std::string file_prefix) {
std::string prefix = GetDumpDirFromEnvVar();
if (prefix.empty()) return;
auto* env = tensorflow::Env::Default();
auto status = env->RecursivelyCreateDir(prefix);
if (!status.ok()) {
LOG(WARNING) << "cannot create directory '" << prefix
<< "': " << status.message();
return;
}
prefix += "/" + file_prefix;
if (!tensorflow::Env::Default()->CreateUniqueFileName(&prefix, ".mlir")) {
LOG(WARNING) << "cannot create unique filename, won't dump MLIR module.";
return;
}
std::unique_ptr<WritableFile> file_writer;
status = env->NewWritableFile(prefix, &file_writer);
if (!status.ok()) {
LOG(WARNING) << "cannot open file '" << prefix << "': " << status.message();
return;
}
std::string txt_module;
{
llvm::raw_string_ostream os(txt_module);
module.print(os);
}
status = file_writer->Append(txt_module);
if (!status.ok()) {
LOG(WARNING) << "error writing to file '" << prefix
<< "': " << status.message();
return;
}
(void)file_writer->Close();
VLOG(1) << "Dumped MLIR module to " << prefix;
}
MlirOptimizationPassRegistry& MlirOptimizationPassRegistry::Global() {
static auto* global = new MlirOptimizationPassRegistry();
return *global;
}
static void RegisterDialects(mlir::DialectRegistry& registry) {
registry.insert<mlir::arith::ArithDialect,
mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect,
mlir::shape::ShapeDialect,
mlir::tf_device::TensorFlowDeviceDialect,
mlir::tf_executor::TensorFlowExecutorDialect>();
mlir::func::registerAllExtensions(registry);
}
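// Drives all registered MLIR optimization passes over a function graph:
// computes each pass's state, imports the graph to MLIR, runs enabled passes
// directly and fallback-enabled passes on a clone (so a failing fallback pass
// leaves the module untouched), and exports back to a Graph only if some pass
// actually changed the module.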
Status MlirFunctionOptimizationPass::Run(
const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) {
MlirOptimizationPassState overall_state = MlirOptimizationPassState::Disabled;
std::vector<MlirOptimizationPassState> per_pass_state;
per_pass_state.reserve(registry_->passes().size());
int num_passes_enabled = 0, num_passes_disabled = 0,
num_passes_fallback_enabled = 0;
for (const auto& pass_registration : registry_->passes()) {
MlirOptimizationPassState pass_state = pass_registration.pass->GetPassState(
&device_set, config_proto, **graph, *flib_def);
per_pass_state.push_back(pass_state);
switch (pass_state) {
case MlirOptimizationPassState::FallbackEnabled: {
if (overall_state != MlirOptimizationPassState::Enabled)
overall_state = MlirOptimizationPassState::FallbackEnabled;
++num_passes_fallback_enabled;
break;
}
case MlirOptimizationPassState::Enabled: {
overall_state = MlirOptimizationPassState::Enabled;
++num_passes_enabled;
break;
}
case MlirOptimizationPassState::Disabled: {
++num_passes_disabled;
break;
}
}
}
if (overall_state == MlirOptimizationPassState::Disabled) {
if (VLOG_IS_ON(1)) {
LOG_FIRST_N(INFO, 1)
<< "None of the MLIR Optimization Passes are enabled "
<< "(registered " << registry_->passes().size() << ")";
}
return absl::OkStatus();
}
if (VLOG_IS_ON(1)) {
LOG_FIRST_N(INFO, 1) << "MLIR Graph Optimization Passes."
<< " Enabled: " << num_passes_enabled
<< ", Disabled: " << num_passes_disabled
<< ", FallbackEnabled: " << num_passes_fallback_enabled
<< ", Total: " << registry_->passes().size();
}
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
RegisterDialects(registry);
mlir::MLIRContext context(registry);
GraphImportConfig import_config;
import_config.graph_as_function = true;
import_config.control_outputs = *control_ret_node_names;
import_config.upgrade_legacy = true;
import_config.enable_shape_inference = false;
import_config.xla_compile_device_type =
function_options.xla_compile_device_type;
import_config.enable_soft_placement = function_options.allow_soft_placement;
static const char* kTfMlirCategory = "TfMlir";
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kTfMlirCategory, "convert_graph_to_mlir"});
auto module_ref_status = ConvertGraphToMlir(**graph, debug_info, *flib_def,
import_config, &context);
mlir_function_pass_graph_conversion_count
->GetCell(absl::StatusCodeToString(module_ref_status.status().code()))
->IncrementBy(1);
timings.ReportAndStop();
if (!module_ref_status.ok()) {
if (overall_state == MlirOptimizationPassState::Enabled) {
return module_ref_status.status();
}
LOG(WARNING) << "Failed to convert graph to MLIR: "
<< module_ref_status.status()
<< " , continuing without MlirOptimizationPass because "
"fallback enabled.";
return absl::OkStatus();
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
std::move(module_ref_status.value());
AddDevicesToOp(*module_ref, &device_set);
int per_pass_state_index = 0;
bool is_module_updated = false;
for (auto& pass_registration : registry_->passes()) {
llvm::StringRef name = pass_registration.pass->name();
if (DEBUG_DATA_DUMPER()->ShouldDump(function_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
function_name, kDebugGroupMain,
llvm::formatv("mlir_{0}_before", name)),
*module_ref, llvm::StringRef(), nullptr);
}
Status pass_status = absl::OkStatus();
auto pass_state = per_pass_state[per_pass_state_index++];
if (pass_state == MlirOptimizationPassState::Enabled) {
VLOG(2) << "Run MLIR graph optimization pass: " << StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
timings.Reset({kTfMlirCategory, name.str()});
pass_status = pass_registration.pass->Run(
function_name, config_proto, *module_ref, **graph, *flib_def);
timings.ReportAndStop();
if (pass_status.ok()) {
VLOG(2) << "Finished MLIR graph optimization pass: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
is_module_updated = true;
}
} else if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
VLOG(2) << "Run MLIR graph optimization pass with fallback: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
auto module_ref_clone = module_ref->clone();
timings.Reset({kTfMlirCategory, name.str() + "_fallback"});
pass_status = pass_registration.pass->Run(
function_name, config_proto, module_ref_clone, **graph, *flib_def);
timings.ReportAndStop();
if (pass_status.ok()) {
VLOG(2) << "Finished MLIR graph optimization pass with fallback: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
module_ref = module_ref_clone;
is_module_updated = true;
} else {
module_ref_clone->destroy();
}
} else {
VLOG(2) << "MLIR graph optimization pass: " << StringRefToView(name)
<< " is disabled and will not be run.";
}
if (!pass_status.ok()) {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
LOG(WARNING) << StringRefToView(name)
<< " pass failed, continuing without the pass because the "
"pass has fallback enabled";
mlir_function_pass_fallback_count->GetCell(kFailure)->IncrementBy(1);
} else if (pass_state == MlirOptimizationPassState::Enabled) {
return pass_status;
}
} else {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
mlir_function_pass_fallback_count->GetCell(kSuccess)->IncrementBy(1);
}
}
if (DEBUG_DATA_DUMPER()->ShouldDump(function_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
::tensorflow::DumpMlirOpToFile(DEBUG_DATA_DUMPER()->GetDumpFilename(
function_name, kDebugGroupMain,
llvm::formatv("mlir_{0}_after", name)),
*module_ref, llvm::StringRef(), nullptr);
}
}
if (!is_module_updated) {
VLOG(2) << "MLIR module is not updated. Using the original graph. "
<< "Do not convert mlir module back to graph";
return absl::OkStatus();
}
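  // Convert the updated MLIR module back into a Graph and recompute the
  // control return node names from the exported control-ret nodes.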
GraphExportConfig export_config;
absl::flat_hash_set<Node*> control_ret_nodes;
timings.Reset({kTfMlirCategory, "convert_mlir_to_graph"});
Status status = tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
*module_ref, export_config, graph, flib_def, &control_ret_nodes);
if (!status.ok()) {
errors::AppendToMessage(&status,
"Error converting MLIR module back to graph");
return status;
}
timings.ReportAndStop();
control_ret_node_names->clear();
control_ret_node_names->reserve(control_ret_nodes.size());
for (const auto* node : control_ret_nodes)
control_ret_node_names->push_back(node->name());
*control_rets_updated = true;
return absl::OkStatus();
}
MlirV1CompatOptimizationPassRegistry&
MlirV1CompatOptimizationPassRegistry::Global() {
static auto* global = new MlirV1CompatOptimizationPassRegistry();
return *global;
}
Status MlirV1CompatGraphOptimizationPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.is_function_graph || !registry_->pass()) return absl::OkStatus();
auto pass = registry_->pass();
auto pass_state =
pass->GetPassState(options.device_set, options.session_options->config,
**options.graph, *options.flib_def);
if (pass_state == MlirOptimizationPassState::Disabled) {
LOG_FIRST_N(INFO, 1) << "MLIR V1 optimization pass is not enabled";
return absl::OkStatus();
}
LOG_FIRST_N(INFO, 1) << "Running MLIR Graph Optimization V1 Compat Pass";
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
RegisterDialects(registry);
mlir::MLIRContext context(registry);
GraphImportConfig import_config;
import_config.upgrade_legacy = true;
import_config.restrict_functionalization_to_compiled_nodes = true;
auto module_ref_status = ConvertGraphToMlir(
**options.graph, debug_info, *options.flib_def, import_config, &context);
if (!module_ref_status.ok()) {
if (pass_state == MlirOptimizationPassState::Enabled) {
return module_ref_status.status();
}
LOG(WARNING) << "Failed to convert graph to MLIR: "
<< module_ref_status.status()
<< " , continuing without MlirOptimizationPass because "
"fallback enabled.";
return absl::OkStatus();
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
std::move(module_ref_status.value());
AddDevicesToOp(*module_ref, options.device_set);
auto module_ref_clone = module_ref->clone();
llvm::StringRef name = pass->name();
VLOG(2) << "Run MLIR V1 graph optimization pass: " << StringRefToView(name);
if (VLOG_IS_ON(1)) {
DumpModule(*module_ref, llvm::formatv("mlir_{0}_before_", name));
}
Status pass_status = pass->Run(options, *module_ref);
bool is_module_updated = !mlir::OperationEquivalence::isEquivalentTo(
module_ref_clone, *module_ref,
mlir::OperationEquivalence::Flags::IgnoreLocations);
module_ref_clone->destroy();
if (!pass_status.ok()) {
if (pass_state == MlirOptimizationPassState::Enabled) return pass_status;
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
LOG(WARNING) << StringRefToView(name)
<< " pass failed, continuing without the pass because the "
"pass has fallback enabled";
mlir_graph_optimization_pass_fallback_count->GetCell(kFailure)
->IncrementBy(1);
return absl::OkStatus();
}
} else {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
mlir_graph_optimization_pass_fallback_count->GetCell(kSuccess)
->IncrementBy(1);
}
}
if (VLOG_IS_ON(1)) {
DumpModule(*module_ref, llvm::formatv("mlir_{0}_after_", name));
}
if (!is_module_updated) {
VLOG(2) << "MLIR module is not updated. Using the original graph. "
<< "Do not convert mlir module back to graph";
return absl::OkStatus();
}
GraphExportConfig export_config;
absl::flat_hash_set<Node*> control_ret_nodes;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
*module_ref, export_config, options.graph, options.flib_def,
&control_ret_nodes),
"Error converting MLIR module back to graph");
return absl::OkStatus();
}
} | #include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::Test;
constexpr char kOk[] = "OK";
constexpr char kInvalidArgument[] = "INVALID_ARGUMENT";
constexpr char kSuccess[] = "kSuccess";
constexpr char kFailure[] = "kFailure";
class MockMlirOptimizationPass : public MlirOptimizationPass {
public:
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
MOCK_METHOD(Status, Run,
(const std::string& function_name,
const ConfigProto& config_proto, mlir::ModuleOp module,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(override));
};
class MockMlirV1CompatOptimizationPass : public MlirV1CompatOptimizationPass {
public:
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
MOCK_METHOD(Status, Run,
(const GraphOptimizationPassOptions& options,
mlir::ModuleOp module),
(override));
};
class ModifyMlirModulePass : public MlirOptimizationPass {
public:
explicit ModifyMlirModulePass(Status run_status) : run_status_(run_status) {}
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
Status Run(const std::string& function_name, const ConfigProto& config_proto,
mlir::ModuleOp module, const Graph& graph,
const FunctionLibraryDefinition& function_library) override {
mlir::Builder b(module.getContext());
auto producer = b.getNamedAttr("producer", b.getI32IntegerAttr(0));
auto min_consumer = b.getNamedAttr("min_consumer", b.getI32IntegerAttr(0));
auto bad_consumers =
b.getNamedAttr("bad_consumers", b.getI32ArrayAttr({1, 2, 3, 4}));
module->setAttr("tf.versions",
b.getDictionaryAttr(llvm::ArrayRef<mlir::NamedAttribute>(
{producer, min_consumer, bad_consumers})));
return run_status_;
}
Status run_status_;
};
FunctionDef XTimesTwo() {
const Tensor kTwo = test::AsScalar<int64>(2);
return FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
}
class MlirGraphOptimizationPassTest : public Test {
public:
void Init(Status pass_run_result,
const std::vector<MlirOptimizationPassState>& pass_states) {
graph_ = std::make_unique<Graph>(OpRegistry::Global());
int pass_priority = 0;
for (const MlirOptimizationPassState& pass_state : pass_states) {
auto optimization_pass =
std::make_unique<NiceMock<MockMlirOptimizationPass>>();
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
ON_CALL(*optimization_pass, Run(_, _, _, _, _))
.WillByDefault(Return(pass_run_result));
MlirOptimizationPassRegistry::Global().Add(pass_priority++,
std::move(optimization_pass));
pass_result_expected_[pass_state][pass_run_result.ok()]++;
}
flib_ = std::make_unique<FunctionLibraryDefinition>(graph_->flib_def());
}
void AddModuleModificationPass(MlirOptimizationPassState pass_state,
Status run_status) {
auto optimization_pass =
std::make_unique<NiceMock<ModifyMlirModulePass>>(run_status);
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
MlirOptimizationPassRegistry::Global().Add(10,
std::move(optimization_pass));
pass_result_expected_[pass_state][run_status.ok()]++;
}
void TearDown() override {
MlirOptimizationPassRegistry::Global().ClearPasses();
}
void verifyGraph(const GraphDef& original_graph_def, bool changed = false) {
#if defined(PLATFORM_GOOGLE)
GraphDef resulted_graph_def;
graph_->ToGraphDef(&resulted_graph_def);
if (changed)
EXPECT_THAT(resulted_graph_def,
Not(::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def))));
else
EXPECT_THAT(resulted_graph_def,
::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def)));
#endif
}
void verifyCounters() {
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kSuccess),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[true]);
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kFailure),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 1);
}
ConfigProto config_proto_;
FunctionOptimizationPass::FunctionOptions function_options_;
MlirFunctionOptimizationPass function_optimization_pass_;
DeviceSet device_set_;
std::unique_ptr<Graph> graph_;
std::unique_ptr<FunctionLibraryDefinition> flib_;
std::vector<std::string> control_ret_node_names_;
bool control_rets_updated_{false};
monitoring::testing::CellReader<int64_t> mlir_function_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_graph_optimization_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_function_pass_graph_conversion_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_graph_conversion_count");
std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
pass_result_expected_;
};
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsNoFallback) {
Init(Status(absl::StatusCode::kAborted, "aborted"),
{MlirOptimizationPassState::Enabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
Status(absl::StatusCode::kAborted, "aborted"));
verifyGraph(original_graph_def);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsDisabledFallback) {
Init(Status(absl::StatusCode::kAborted, "aborted"),
{MlirOptimizationPassState::Disabled,
MlirOptimizationPassState::FallbackEnabled});
FunctionDefLibrary flib;
*flib.add_function() = XTimesTwo();
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
graph_ = std::make_unique<Graph>(flib_def);
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
Status(absl::StatusCode::kAborted, "aborted"));
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
verifyGraph(original_graph_def);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassDoesNotFailFallback) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
absl::OkStatus());
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
verifyGraph(original_graph_def, true);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, GraphDoesntConvertUpdatesCounter) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
graph_ = std::make_unique<Graph>(OpRegistry::Global());
control_ret_node_names_.push_back("foo");
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
absl::OkStatus());
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 0);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kInvalidArgument),
1);
}
TEST(MlirOptimizationPassRegistry, RegisterPassesWithTheSamePriorityFails) {
MlirOptimizationPassRegistry::Global().Add(
0, std::make_unique<NiceMock<MockMlirOptimizationPass>>());
EXPECT_DEATH(MlirOptimizationPassRegistry::Global().Add(
0, std::make_unique<NiceMock<MockMlirOptimizationPass>>()),
"Pass priority must be unique.");
}
TEST(MlirV1CompatOptimizationPassRegistry, RegisterMultiplePassesFails) {
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>());
EXPECT_DEATH(
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>()),
"Only a single pass can be registered");
}
class MlirGraphOptimizationV1PassTest : public Test {
public:
void Init(Status pass_run_result,
const std::vector<MlirOptimizationPassState>& pass_states) {
graph_ = std::make_unique<Graph>(OpRegistry::Global());
MlirV1CompatOptimizationPassRegistry::Global().ClearPass();
for (const MlirOptimizationPassState& pass_state : pass_states) {
auto optimization_pass =
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>();
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
ON_CALL(*optimization_pass, Run(_, _))
.WillByDefault(Return(pass_run_result));
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::move(optimization_pass));
pass_result_expected_[pass_state][pass_run_result.ok()]++;
}
flib_ = std::make_unique<FunctionLibraryDefinition>(graph_->flib_def());
InitGraphOptions();
}
void verifyGraph(const GraphDef& original_graph_def, bool changed = false) {
#if defined(PLATFORM_GOOGLE)
GraphDef resulted_graph_def;
graph_->ToGraphDef(&resulted_graph_def);
if (changed)
EXPECT_THAT(resulted_graph_def,
Not(::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def))));
else
EXPECT_THAT(resulted_graph_def,
::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def)));
#endif
}
void InitGraphOptions() {
session_options_.config = config_proto_;
graph_optimization_pass_options_.device_set = &device_set_;
graph_optimization_pass_options_.session_options = &session_options_;
graph_optimization_pass_options_.graph = &graph_;
graph_optimization_pass_options_.flib_def = flib_.get();
}
void verifyCounters() {
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kSuccess),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kFailure),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 0);
}
void TearDown() override {
MlirV1CompatOptimizationPassRegistry::Global().ClearPass();
}
ConfigProto config_proto_;
FunctionOptimizationPass::FunctionOptions function_options_;
MlirV1CompatGraphOptimizationPass function_optimization_pass_;
DeviceSet device_set_;
std::unique_ptr<Graph> graph_;
std::unique_ptr<FunctionLibraryDefinition> flib_;
std::vector<std::string> control_ret_node_names_;
bool control_rets_updated_{false};
SessionOptions session_options_;
tensorflow::GraphOptimizationPassOptions graph_optimization_pass_options_;
std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
pass_result_expected_;
monitoring::testing::CellReader<int64_t> mlir_function_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_graph_optimization_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_function_pass_graph_conversion_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_graph_conversion_count");
};
TEST_F(MlirGraphOptimizationV1PassTest, OptimizationPassDoesNotFailFallback) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
EXPECT_EQ(function_optimization_pass_.Run(graph_optimization_pass_options_),
absl::OkStatus());
verifyGraph(original_graph_def, false);
verifyCounters();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8017d096-35ec-4ee1-9fbc-145ef10823f3 | cpp | tensorflow/tensorflow | legalize_tf | tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc | tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include <memory>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/variant.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/reproducer.pb.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tsl/lib/monitoring/sampler.h"
#include "xla/xla.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using tpu::FunctionToHloArgs;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
auto* phase2_bridge_compilation_time = tsl::monitoring::Sampler<1>::New(
{"/tensorflow/core/tf2xla/api/v2/phase2_compilation_time",
"The wall-clock time spent on executing graphs in milliseconds.",
"configuration"},
{tsl::monitoring::Buckets::Exponential(1, 1.5, 45)});
constexpr char kBridgeComponent[] = "TFXLABridge";
constexpr char kFullBridge[] = "full_bridge";
namespace {
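// The old graph compiler handles two cases: the caller passed
// FunctionToHloArgs rather than an MLIR module, or the MLIR bridge rollout is
// explicitly disabled for this computation.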
bool ShouldFallbackToGraphCompiler(
const std::variant<MlirToHloArgs, FunctionToHloArgs>& computation) {
if (computation.index() == 1) return true;
return std::get<0>(computation).rollout_state ==
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
}
void DumpComputationInput(
const tpu::TPUCompileMetadataProto& metadata,
const std::vector<tensorflow::TensorShape>& arg_shapes,
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>
computation) {
if (!VLOG_IS_ON(2)) {
return;
}
tensorflow::mlir::tf2xla::internal::LegalizeMlirToHloReproducer reproducer;
*reproducer.mutable_compile_metadata() = metadata;
for (const auto& shape : arg_shapes) {
shape.AsProto(reproducer.add_input_shapes());
}
switch (computation.index()) {
case 0:
reproducer.set_mlir_module(
std::string(std::get<0>(computation).mlir_module));
break;
case 1: {
auto input = std::get<1>(computation);
*reproducer.mutable_function_def_library() = input.flib_def->ToProto();
} break;
default:
VLOG(2) << "LegalizeMlirToHlo computation input: unknown";
break;
}
std::string string_reproducer;
tensorflow::protobuf::TextFormat::PrintToString(reproducer,
&string_reproducer);
DumpRawStringToFile("legalize_tf_reproducer.textproto", string_reproducer);
}
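// Reconstructs an xla::HloModule from the compilation result's proto and
// dumps the text of every computation under `name`. A no-op unless VLOG(2)
// is on or the debug data dumper matches `name`.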
Status DumpHloCompilationResult(std::string_view name,
XlaCompilationResult* compilation_result) {
if (!VLOG_IS_ON(2) &&
!DEBUG_DATA_DUMPER()->ShouldDump(std::string(name), kDebugGroupMain)) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
auto hlo_module_config,
xla::HloModule::CreateModuleConfigFromProto(
compilation_result->computation->proto(), xla::DebugOptions()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::HloModule> hlo_module,
xla::HloModule::CreateFromProto(compilation_result->computation->proto(),
hlo_module_config));
std::string all_computations;
for (auto computation : hlo_module->computations()) {
all_computations += computation->ToString() + "\n\n";
}
tensorflow::DumpRawStringToFile(name, all_computations);
return absl::OkStatus();
}
}
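// Main entry point: legalizes the TF computation to XLA HLO. Uses the graph
// compiler when fallback is requested, otherwise the combined MLIR and
// XlaBuilder bridge. Wall-clock time is recorded to the phase2 compilation
// time streamz on every path via the scope-exit timer below.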
absl::StatusOr<tensorflow::XlaCompilationResult> LegalizeMlirToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
std::vector<std::unique_ptr<::mlir::Pass>>& custom_legalization_passes,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client) {
CompilationTimer timer;
auto record_time = llvm::make_scope_exit([&timer] {
phase2_bridge_compilation_time->GetCell(kFullBridge)
->Add(timer.ElapsedCyclesInMilliseconds());
});
auto compilation_result = std::make_unique<XlaCompilationResult>();
DumpComputationInput(metadata, arg_shapes, computation);
if (ShouldFallbackToGraphCompiler(computation)) {
TF_RETURN_IF_ERROR(tf2xla::v1::CompileTensorflowGraphToHlo(
computation, metadata, use_tuple_args, shape_determination_fns,
arg_shapes, arg_core_mapping, per_core_arg_shapes, client,
compilation_result.get()));
DumpHloCompilationResult("legalize_tf_fallback.hlo",
compilation_result.get())
.IgnoreError();
return *compilation_result;
}
auto combined_bridge_status = internal::LegalizeTfToHlo(
std::get<0>(computation), metadata, use_tuple_args, device_type,
shape_determination_fns, arg_shapes, arg_core_mapping,
per_core_arg_shapes, custom_legalization_passes, client,
compilation_result.get());
if (combined_bridge_status.ok()) {
VLOG(1) << "Successfully compiled MLIR computation to XLA HLO using "
"Combined MLIR and XlaBuilder Bridge.";
DumpHloCompilationResult("legalize_tf_combined_bridge.hlo",
compilation_result.get())
.IgnoreError();
return *compilation_result;
}
return combined_bridge_status.status();
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/monitoring/test_utils.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Not;
using ::testing::TestWithParam;
using tpu::FunctionToHloArgs;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kCompilationTimeStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_time";
static constexpr char kFullBridge[] = "full_bridge";
static constexpr char kCompilationStatusStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_status";
static const char kMlirWithFallbackModeSuccess[] =
"kMlirWithFallbackModeSuccess";
static const char kMlirWithFallbackModeFailure[] =
"kMlirWithFallbackModeFailure";
static const char kOldBridgeMlirFilteredFailure[] =
"kOldBridgeMlirFilteredFailure";
static const char kOldBridgeWithFallbackModeFailure[] =
"kOldBridgeWithFallbackModeFailure";
static const char kOldBridgeMlirFilteredSuccess[] =
"kOldBridgeMlirFilteredSuccess";
static const char kOldBridgeWithFallbackModeSuccess[] =
"kOldBridgeWithFallbackModeSuccess";
static const char kMlirCombinedMlirSuccess[] = "kMlirCombinedMlirSuccess";
static const char kMlirCombinedMlirFailure[] = "kMlirCombinedMlirFailure";
static const char kMlirCombinedOldSuccess[] = "kMlirCombinedOldSuccess";
static const char kMlirCombinedOldFailure[] = "kMlirCombinedOldFailure";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
func.return
}
})";
static constexpr char kBadMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = tf.Unknown() -> ()
func.return %0
}
})";
static constexpr char kUnsupportedMlirBridgeModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%cst0 = "tf.Const"(){ value = dense<0> : tensor<3x5xi1>} : () -> tensor<3x5xi1>
%0 = "tf.Where"(%cst0) : (tensor<3x5xi1>) -> tensor<?x2xi64>
func.return
}
})";
absl::StatusOr<XlaCompiler::CompilationResult> CompileMlirModule(
const char* mlir_module_str,
ConfigProto::Experimental::MlirBridgeRollout rollout_state) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state = rollout_state;
mlir_to_hlo_args.mlir_module = mlir_module_str;
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
tensorflow::tf2xla::internal::ConfigureMetadata(mlir_module_str, arg_shapes,
metadata_proto)
.IgnoreError();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
return LegalizeMlirToHlo(mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
custom_legalization_passes,
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes, client);
}
TEST(LegalizeTFTest, RecordsStreamzForSuccessfulLegalizeWithMlirBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
EXPECT_EQ(compilation_status.Delta(kMlirWithFallbackModeFailure), 0);
}
TEST(LegalizeTFTest, MatMul) {
static constexpr char kMatMulModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<5x11xf32>) {
%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<5x7xf32>} : () -> tensor<5x7xf32>
%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<11x7xf32>} : () -> tensor<11x7xf32>
%1 = "tf.MatMul"(%arg0, %arg1) {transpose_a = false, transpose_b = true} : (tensor<5x7xf32>, tensor<11x7xf32>) -> tensor<5x11xf32>
func.return %1 : tensor<5x11xf32>
}
})";
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMatMulModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
}
struct MatMulTestCase {
std::string mat_mul_method;
};
using BatchMatMulTest = TestWithParam<MatMulTestCase>;
TEST_P(BatchMatMulTest, BatchMatMul) {
const MatMulTestCase& test_case = GetParam();
static constexpr char kMatMulModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<1x4x4xf32>) {
%%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<1x4x2xf32>} : () -> tensor<1x4x2xf32>
%%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<1x2x4xf32>} : () -> tensor<1x2x4xf32>
%%1 = "tf.%s"(%%arg0, %%arg1) {T = f32, adj_x = false, adj_y = false, grad_x = false, grad_y = false, device = ""} : (tensor<1x4x2xf32>, tensor<1x2x4xf32>) -> tensor<1x4x4xf32>
func.return %%1 : tensor<1x4x4xf32>
}
})";
std::string mat_mul_method =
absl::StrFormat(kMatMulModuleStr, test_case.mat_mul_method);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
mat_mul_method.c_str(),
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
}
INSTANTIATE_TEST_SUITE_P(
BatchMatMulTest, BatchMatMulTest,
::testing::ValuesIn<MatMulTestCase>({
{"BatchMatMul"},
{"BatchMatMulV2"},
{"BatchMatMulV3"},
}),
[](const ::testing::TestParamInfo<BatchMatMulTest::ParamType>& info) {
return info.param.mat_mul_method;
});
TEST(LegalizeTFTest, DumpsProducedHLO) {
Env* env = Env::Default();
std::string test_dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::vector<std::string> files;
TF_ASSERT_OK(env->GetChildren(test_dir, &files));
int original_files_size = files.size();
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
TF_ASSERT_OK(env->GetChildren(test_dir, &files));
EXPECT_THAT(files.size(), ::testing::Gt(original_files_size));
setenv("TF_DUMP_GRAPH_PREFIX", test_dir.c_str(), 0);
}
TEST(LegalizeTFTest, RecordsStreamzForFailedLegalizeWithMlirBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
auto result = CompileMlirModule(
kBadMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_FALSE(result.ok());
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirFailure), 1);
}
TEST(LegalizeTFTest, RecordsStreamzForSuccessWithCombinedBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
auto result = CompileMlirModule(
kUnsupportedMlirBridgeModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_TRUE(result.ok());
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirFailure), 0);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedOldSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedOldFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeMlirFilteredFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeWithFallbackModeFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeMlirFilteredSuccess), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeWithFallbackModeSuccess), 0);
}
TEST(LegalizeTFTest, RecordsStreamzForNoMlirFallback) {
FunctionDef my_func =
tensorflow::FunctionDefHelper::Create("empty", {}, {}, {}, {}, {});
tensorflow::FunctionDefLibrary fdef;
*(fdef.add_function()) = my_func;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), fdef);
OpInputList guaranteed_constants;
NameAttrList function;
FunctionToHloArgs function_to_hlo_args{&function,
&flib_def,
0,
{&guaranteed_constants}};
se::Platform* cpu_platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(cpu_platform).value();
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
absl::StatusOr<XlaCompiler::CompilationResult> compile_result =
LegalizeMlirToHlo(function_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_CPU_JIT",
custom_legalization_passes,
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes, client);
EXPECT_FALSE(compile_result.ok());
}
TEST(LegalizeTFTest, RecordsCompilationTimeForSuccessfulCompilation) {
CellReader<monitoring::testing::Histogram> compilation_time(
kCompilationTimeStreamzName);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED));
EXPECT_GT(compilation_time.Delta(kFullBridge).num(), 0);
}
TEST(LegalizeTFTest, SuccessfullyCompilesModulesWithReturnValues) {
static constexpr char kHasReturnValuesAndNoMetadataRetvals[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<2xi32>) {
%cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
return %cst : tensor<2xi32>
}
})";
auto compilation_result = CompileMlirModule(
kHasReturnValuesAndNoMetadataRetvals,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("opcode:.*constant"));
}
TEST(LegalizeTFTest, SkipsTensorListSetItemIfDimensionsTooLarge) {
static constexpr char kTensorListSetItemDimensionTooLarge[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<!tf_type.variant<tensor<64x1xbf16>>> {
%elem_shape = "tf.Const"() <{value = dense<-1> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%num_elements = "tf.Const"() <{value = dense<0> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%list = "tf.TensorListReserve"(%elem_shape, %num_elements) : (tensor<i32>, tensor<i32>) -> tensor<!tf_type.variant<tensor<64x1xbf16>>>
%index = "tf.Const"() <{value = dense<0> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%element = "tf.Const"() <{value = dense<0.0> : tensor<64x1xbf16>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<64x1xbf16>
%updated_list = "tf.TensorListSetItem"(%list, %index, %element) : (tensor<!tf_type.variant<tensor<64x1xbf16>>>, tensor<i32>, tensor<64x1xbf16>) -> tensor<!tf_type.variant<tensor<64x1xbf16>>>
return %updated_list : tensor<!tf_type.variant<tensor<64x1xbf16>>>
}
})";
auto compilation_result = CompileMlirModule(
kTensorListSetItemDimensionTooLarge,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
ASSERT_TRUE(compilation_result.ok());
ASSERT_THAT(compilation_result,
Not(ComputationProtoContains("%.*= \"tf.TensorListSetItem")));
ASSERT_THAT(compilation_result,
Not(ComputationProtoContains("%.*=.*DynamicUpdateSlice")));
}
TEST(LegalizeTFTest, LegalizesFunctionWithBoundedDynamicArg) {
static constexpr char kMlirModuleWithBoundedDynamicArgStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [3]>> ) -> (tensor<?xi32, #mhlo.type_extensions<bounds = [3]>>) {
func.return %arg0 : tensor<?xi32, #mhlo.type_extensions<bounds = [3]>>
}
})";
auto compilation_result = CompileMlirModule(
kMlirModuleWithBoundedDynamicArgStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
ASSERT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("element_type:.S32\n.*dimensions: 3"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
68532c9a-3dc4-4766-8829-0e780474d83e | cpp | tensorflow/tensorflow | error_collector_inst | tensorflow/compiler/mlir/lite/metrics/error_collector_inst.cc | tensorflow/compiler/mlir/lite/metrics/error_collector_inst_test.cc | #include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
namespace mlir {
namespace TFL {
namespace {
inline std::string extract_pass_name(const std::string &signature) {
const std::vector<std::string> &v = absl::StrSplit(signature, "::");
return v.back();
}
inline std::string extract_op_name_from_error_message(
const std::string &error_message) {
const std::string::size_type end_pos = error_message.find("' op");
if ((absl::StartsWith(error_message, "'tf.") ||
absl::StartsWith(error_message, "'tfl.")) &&
end_pos != std::string::npos) {
return error_message.substr(1, end_pos - 1);
}
return "";
}
const int kMaxAcceptedNoteSize = 1024;
}
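// The constructor installs a scoped diagnostic handler: for every Error
// diagnostic it recovers the op name (from the pre-pass location map or the
// message itself), pulls an error code out of any "Error code:" note, and
// reports a ConverterErrorData entry to the shared ErrorCollector. Errors
// with neither an op name nor a valid code accumulate into
// common_error_message_ instead.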
ErrorCollectorInstrumentation::ErrorCollectorInstrumentation(
MLIRContext *context)
: error_collector_(ErrorCollector::GetErrorCollector()) {
handler_ = std::make_unique<ScopedDiagnosticHandler>(
context, [this](Diagnostic &diag) {
if (diag.getSeverity() == DiagnosticSeverity::Error) {
Location loc = diag.getLocation();
std::string error_message = diag.str();
std::string op_name, error_code;
if (loc_to_name_.count(loc)) {
op_name = loc_to_name_[loc];
} else {
op_name = extract_op_name_from_error_message(diag.str());
}
for (const auto ¬e : diag.getNotes()) {
const std::string note_str = note.str();
if (absl::StartsWith(note_str, kErrorCodePrefix)) {
error_code = note_str.substr(sizeof(kErrorCodePrefix) - 1);
}
error_message += "\n";
if (note_str.size() <= kMaxAcceptedNoteSize) {
error_message += note_str;
} else {
error_message += note_str.substr(0, kMaxAcceptedNoteSize);
error_message += "...";
}
}
ErrorCode error_code_enum = ConverterErrorData::UNKNOWN;
bool has_valid_error_code =
ConverterErrorData::ErrorCode_Parse(error_code, &error_code_enum);
if (!op_name.empty() || has_valid_error_code) {
error_collector_->ReportError(NewConverterErrorData(
pass_name_, error_message, error_code_enum, op_name, loc));
} else {
common_error_message_ += diag.str();
common_error_message_ += "\n";
}
}
return failure();
});
}
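// Before each pass: remember the location of every tf./tfl. op so later
// diagnostics can be mapped back to an op name, and clear stale errors.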
void ErrorCollectorInstrumentation::runBeforePass(Pass *pass,
Operation *module) {
auto collectOps = [this](Operation *op) {
const auto &op_name = op->getName().getStringRef().str();
if (absl::StartsWith(op_name, "tf.") || absl::StartsWith(op_name, "tfl.")) {
loc_to_name_.emplace(op->getLoc(), op_name);
}
};
for (auto ®ion : module->getRegions()) {
region.walk(collectOps);
}
pass_name_ = extract_pass_name(pass->getName().str());
error_collector_->Clear();
}
void ErrorCollectorInstrumentation::runAfterPass(Pass *pass,
Operation *module) {
loc_to_name_.clear();
pass_name_.clear();
common_error_message_.clear();
error_collector_->Clear();
}
void ErrorCollectorInstrumentation::runAfterPassFailed(Pass *pass,
Operation *module) {
if (error_collector_->CollectedErrors().empty() &&
!common_error_message_.empty()) {
error_collector_->ReportError(NewConverterErrorData(
pass_name_, common_error_message_, ConverterErrorData::UNKNOWN,
"", module->getLoc()));
}
loc_to_name_.clear();
pass_name_.clear();
common_error_message_.clear();
}
}
} | #include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include <cstddef>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/lite/metrics/converter_error_data.pb.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFL {
namespace {
using tsl::StatusOr;
class MockSuccessPass
: public PassWrapper<MockSuccessPass, OperationPass<ModuleOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MockSuccessPass)
explicit MockSuccessPass() = default;
private:
void runOnOperation() override {
getOperation().walk([](Operation* nestedOp) {
nestedOp->emitError()
<< "Error at " << nestedOp->getName().getStringRef().str() << " op";
});
};
};
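// Emits an error tagged with ERROR_NEEDS_FLEX_OPS for every tf.* op and then
// signals failure, exercising the instrumentation's failure path.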
class MockFailurePass
: public PassWrapper<MockFailurePass, OperationPass<ModuleOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MockFailurePass)
explicit MockFailurePass() = default;
private:
void runOnOperation() override {
getOperation().walk([](Operation* nestedOp) {
if (nestedOp->getName().getStringRef().str().rfind("tf.") != std::string::npos) {
AttachErrorCode(
nestedOp->emitError()
<< "Failed at " << nestedOp->getName().getStringRef().str()
<< " op",
tflite::metrics::ConverterErrorData::ERROR_NEEDS_FLEX_OPS);
}
});
signalPassFailure();
};
};
absl::StatusOr<OwningOpRef<mlir::ModuleOp>> LoadModule(
MLIRContext* context, const std::string& file_name) {
std::string error_message;
auto file = openInputFile(file_name, &error_message);
if (!file) {
return tensorflow::errors::InvalidArgument("failed to open input file");
}
llvm::SourceMgr source_mgr;
source_mgr.AddNewSourceBuffer(std::move(file), llvm::SMLoc());
return OwningOpRef<mlir::ModuleOp>(
parseSourceFile<mlir::ModuleOp>(source_mgr, context));
}
TEST(ErrorCollectorTest, TestSuccessPass) {
std::string input_file = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/lite/metrics/testdata/strided_slice.mlir");
MLIRContext context;
context.getOrLoadDialect<mlir::func::FuncDialect>();
context.getOrLoadDialect<TF::TensorFlowDialect>();
context.enableMultithreading();
auto module = LoadModule(&context, input_file);
EXPECT_EQ(module.ok(), true);
PassManager pm(module.value().get()->getName(),
OpPassManager::Nesting::Implicit);
pm.addPass(std::make_unique<MockSuccessPass>());
pm.addInstrumentation(
std::make_unique<ErrorCollectorInstrumentation>(&context));
EXPECT_EQ(succeeded(pm.run(module.value().get())), true);
auto collected_errors =
ErrorCollector::GetErrorCollector()->CollectedErrors();
EXPECT_EQ(collected_errors.size(), 0);
}
TEST(ErrorCollectorTest, TestFailurePass) {
using tflite::metrics::ConverterErrorData;
MLIRContext context;
context.getOrLoadDialect<mlir::func::FuncDialect>();
context.getOrLoadDialect<TF::TensorFlowDialect>();
const std::string input_file =
"tensorflow/compiler/mlir/lite/metrics/testdata/strided_slice.mlir";
auto input_file_id = StringAttr::get(&context, input_file);
context.enableMultithreading();
auto module =
LoadModule(&context, tensorflow::GetDataDependencyFilepath(input_file));
EXPECT_EQ(module.ok(), true);
PassManager pm(module.value().get()->getName(),
OpPassManager::Nesting::Implicit);
pm.addPass(std::make_unique<MockSuccessPass>());
pm.addPass(std::make_unique<MockFailurePass>());
pm.addInstrumentation(
std::make_unique<ErrorCollectorInstrumentation>(&context));
EXPECT_EQ(succeeded(pm.run(module.value().get())), false);
auto collected_errors =
ErrorCollector::GetErrorCollector()->CollectedErrors();
EXPECT_EQ(collected_errors.size(), 3);
EXPECT_EQ(collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.Const op\nsee current operation: %0 = "
"\"tf.Const\"() <{value = dense<1> : tensor<4xi32>}> : () -> "
"tensor<4xi32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.Const",
mlir::FileLineColLoc::get(input_file_id, 2, 9))),
1);
EXPECT_EQ(collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.Const op\nsee current operation: %1 = "
"\"tf.Const\"() <{value = dense<0> : tensor<4xi32>}> : () -> "
"tensor<4xi32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.Const",
mlir::FileLineColLoc::get(input_file_id, 2, 9))),
1);
EXPECT_EQ(
collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.StridedSlice op\nsee current operation: %2 = "
"\"tf.StridedSlice\"(%arg0, %1, %1, %0) <{begin_mask = 11 : "
"i64, ellipsis_mask = 0 : i64, end_mask = 11 : i64, new_axis_mask = "
"4 : i64, shrink_axis_mask = 0 : i64}> {device = \"\"} : "
"(tensor<*xf32>, tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) "
"-> tensor<*xf32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.StridedSlice",
mlir::FileLineColLoc::get(input_file_id, 4, 10))),
1);
std::vector<std::string> locations;
for (const auto& error : collected_errors) {
EXPECT_TRUE(error.has_location());
locations.push_back(error.location().DebugString());
}
EXPECT_THAT(locations, Each(testing::HasSubstr("CALLSITELOC")));
EXPECT_THAT(locations, Each(testing::HasSubstr(input_file)));
EXPECT_THAT(locations, Contains(testing::HasSubstr("line: 2")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("column: 9")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("line: 4")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("column: 10")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/metrics/error_collector_inst.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/metrics/error_collector_inst_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d43e90d3-982d-493d-b392-cc60f915b654 | cpp | tensorflow/tensorflow | sparsify_model | tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc | tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc | #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include <cstdint>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/tools/optimize/reduced_precision_metadata.h"
#include "tensorflow/compiler/mlir/lite/transforms/dense_to_sparse_pass.h"
#include "tensorflow/compiler/mlir/lite/transforms/pass_registry_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir {
namespace lite {
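// Round-trips the model through MLIR: re-packs the input ModelT into a
// flatbuffer, imports it, runs the DenseToSparse pass, and exports the result
// into `builder`, preserving any reduced-precision metadata entry.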
absl::Status SparsifyModel(const tflite::ModelT& input_model,
flatbuffers::FlatBufferBuilder* builder) {
MLIRContext context;
StatusScopedDiagnosticHandler statusHandler(&context,
true);
flatbuffers::FlatBufferBuilder input_builder;
flatbuffers::Offset<tflite::Model> input_model_location =
tflite::Model::Pack(input_builder, &input_model);
tflite::FinishModelBuffer(input_builder, input_model_location);
std::string serialized_model(
reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
input_builder.GetSize());
OwningOpRef<mlir::ModuleOp> module = tflite::FlatBufferToMlir(
serialized_model, &context, UnknownLoc::get(&context));
if (!module) {
LOG(ERROR) << "Couldn't import flatbuffer to MLIR.";
return absl::InternalError("Couldn't import flatbuffer to MLIR.");
}
PassManager pm((*module)->getName(), OpPassManager::Nesting::Implicit);
pm.addPass(TFL::Create<TFL::DenseToSparsePass>());
if (failed(pm.run(module.get()))) {
LOG(ERROR) << "Failed to sparsify: "
<< statusHandler.ConsumeStatus().message();
return absl::InternalError(absl::StrCat(
"Failed to sparsify: ", statusHandler.ConsumeStatus().message()));
}
std::string result;
tflite::FlatbufferExportOptions options;
options.converter_flags.set_force_select_tf_ops(false);
options.converter_flags.set_enable_select_tf_ops(true);
options.converter_flags.set_allow_custom_ops(true);
for (const auto& metadata : input_model.metadata) {
if (metadata->name != tflite::optimize::kTfLiteReducedPrecisionKey) {
continue;
}
const auto& data = input_model.buffers[metadata->buffer]->data;
options.metadata[metadata->name] = std::string(data.begin(), data.end());
break;
}
if (!tflite::MlirToFlatBufferTranslateFunction(module.get(), options,
&result)) {
LOG(ERROR) << "Failed to export MLIR to flatbuffer.";
return absl::InternalError("Failed to export MLIR to flatbuffer.");
}
builder->PushFlatBuffer(reinterpret_cast<const uint8_t*>(result.data()),
result.size());
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include <stdint.h>
#include <cstdarg>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/tools/optimize/reduced_precision_metadata.h"
namespace mlir {
namespace lite {
namespace {
TEST(SparsifyModelTest, MetadataIsAddedToOutputModel) {
std::string expected_key = tflite::optimize::kTfLiteReducedPrecisionKey;
std::string expected_value = "test_data";
auto input_fbm = mlir::TFL::FlatBufferModelAbslError::BuildFromFile(
"tensorflow/compiler/mlir/lite/sparsity/testdata/"
"sparse_tensor.bin");
tflite::ModelT input_model;
input_fbm->GetModel()->UnPackTo(&input_model);
auto model_metadata_buffer = std::make_unique<tflite::BufferT>();
model_metadata_buffer->data =
std::vector<uint8_t>(expected_value.begin(), expected_value.end());
input_model.buffers.push_back(std::move(model_metadata_buffer));
auto metadata_t = std::make_unique<tflite::MetadataT>();
metadata_t->name = tflite::optimize::kTfLiteReducedPrecisionKey;
metadata_t->buffer = input_model.buffers.size() - 1;
input_model.metadata.push_back(std::move(metadata_t));
flatbuffers::FlatBufferBuilder output_builder;
ASSERT_TRUE(SparsifyModel(input_model, &output_builder).ok());
auto output_fbm = mlir::TFL::FlatBufferModelAbslError::BuildFromBuffer(
reinterpret_cast<const char*>(output_builder.GetCurrentBufferPointer()),
output_builder.GetSize());
tflite::ModelT output_model;
output_fbm->GetModel()->UnPackTo(&output_model);
std::map<std::string, std::string> output_metadata;
for (const auto& metadata : output_model.metadata) {
const auto& data = output_model.buffers[metadata->buffer]->data;
output_metadata[metadata->name] = std::string(data.begin(), data.end());
}
EXPECT_THAT(output_metadata,
testing::Contains(testing::Pair(expected_key, expected_value)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd514cad-fa71-41a4-8900-55dae1563782 | cpp | tensorflow/tensorflow | execution_metadata_exporter | tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc | tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc | #include "tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.h"
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/string.h"
#include "flatbuffers/vector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/runtime_metadata_generated.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace tflite {
namespace {
bool IsConst(mlir::Operation* op) {
return llvm::isa<mlir::arith::ConstantOp, mlir::TF::ConstOp,
mlir::TFL::ConstOp, mlir::TFL::QConstOp>(op);
}
bool IsOpSupported(mlir::Operation* op, const std::string& hardware) {
auto* device_hardware = mlir::TFL::tac::GetTargetHardware(hardware);
if (device_hardware == nullptr) return false;
return device_hardware->IsOpSupported(op);
}
bool HasValidHardwareTarget(mlir::Operation* op) {
return IsOpSupported(op, "CPU");
}
std::optional<std::string> GetDeviceName(mlir::Operation* op) {
if (IsConst(op)) return std::nullopt;
if (llvm::isa<mlir::func::ReturnOp, mlir::quantfork::StatisticsOp>(op))
return std::nullopt;
if (!HasValidHardwareTarget(op)) return std::nullopt;
auto device = op->getAttrOfType<mlir::StringAttr>(mlir::TFL::tac::kDevice);
if (device == nullptr) return std::nullopt;
llvm::StringRef device_name_str = device.getValue();
return device_name_str.str();
}
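// Reads the op's "per_device_costs" dictionary attribute into a vector
// indexed by the hardware lookup table. Returns nullopt if the attribute is
// absent or any known hardware lacks a cost entry.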
std::optional<std::vector<float>> GetPerDeviceCosts(
const std::map<std::string, uint8_t>& hardware_map, mlir::Operation* op) {
auto device_costs_attr =
op->getAttrOfType<mlir::DictionaryAttr>("per_device_costs");
if (device_costs_attr == nullptr) return std::nullopt;
std::vector<float> device_costs(hardware_map.size(), -1.f);
for (const auto& kv : hardware_map) {
auto cost_attr = device_costs_attr.getNamed(kv.first);
if (!cost_attr.has_value()) return std::nullopt;
float cost = mlir::dyn_cast_or_null<mlir::FloatAttr>(cost_attr->getValue())
.getValueAsDouble();
device_costs[kv.second] = cost;
}
return device_costs;
}
flatbuffers::Offset<SubgraphMetadata> CreateSubgraphMetadata(
const std::map<std::string, uint8_t>& hardware_map, mlir::Region* Region,
flatbuffers::FlatBufferBuilder* builder) {
auto& block = Region->front();
int index = 0;
std::vector<flatbuffers::Offset<tflite::OpMetadata>> ops;
for (auto& inst : block) {
if (IsConst(&inst)) continue;
if (llvm::isa<mlir::func::ReturnOp, mlir::quantfork::StatisticsOp>(&inst))
continue;
auto device_name = GetDeviceName(&inst);
if (device_name.has_value()) {
auto per_device_cost = GetPerDeviceCosts(hardware_map, &inst);
flatbuffers::Offset<flatbuffers::Vector<float>> per_device_cost_offset;
if (per_device_cost.has_value()) {
per_device_cost_offset = builder->CreateVector(*per_device_cost);
}
OpMetadataBuilder op_builder(*builder);
op_builder.add_index(index);
uint8_t hardware = hardware_map.at(*device_name);
op_builder.add_hardware(hardware);
if (per_device_cost.has_value()) {
op_builder.add_op_costs(per_device_cost_offset);
}
ops.push_back(op_builder.Finish());
}
index++;
}
return CreateSubgraphMetadata(*builder, builder->CreateVector(ops));
}
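// Walks every function, assigns each distinct device name a stable uint8_t
// index, and emits the hardware name table that per-op cost vectors index
// into.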
flatbuffers::Offset<tflite::HardwareMetadata>
CreateHardwareMetadataAndPopulateLookupTable(
std::vector<mlir::func::FuncOp>* funcs,
flatbuffers::FlatBufferBuilder* builder,
std::map<std::string, uint8_t>* hardware_names) {
uint8_t index = 0;
for (auto& func : *funcs) {
func.walk([&hardware_names, &index](mlir::Operation* op) {
auto device_name = GetDeviceName(op);
if (!device_name.has_value()) return;
auto iter = hardware_names->find(*device_name);
if (iter == hardware_names->end()) {
hardware_names->insert({*device_name, index++});
}
});
}
std::vector<flatbuffers::Offset<flatbuffers::String>> hardwares;
for (const auto& kv : *hardware_names) {
hardwares.push_back(builder->CreateString(kv.first));
}
return CreateHardwareMetadata(*builder, builder->CreateVector(hardwares));
}
}
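// Serializes per-op device placement and cost annotations for all functions
// (main first) into a RuntimeMetadata flatbuffer, returned as a string.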
std::optional<std::string> ExportRuntimeMetadata(mlir::ModuleOp module) {
mlir::func::FuncOp main_fn = module.lookupSymbol<mlir::func::FuncOp>("main");
if (!main_fn) return std::string("");
flatbuffers::FlatBufferBuilder fb_builder;
std::vector<mlir::func::FuncOp> funcs;
funcs.push_back(main_fn);
module.walk([&](mlir::func::FuncOp fn) {
if (fn != main_fn) {
funcs.push_back(fn);
}
});
std::map<std::string, uint8_t> hardware_map;
flatbuffers::Offset<tflite::HardwareMetadata> hardware_metadata_offset =
CreateHardwareMetadataAndPopulateLookupTable(&funcs, &fb_builder,
&hardware_map);
std::vector<flatbuffers::Offset<SubgraphMetadata>> subgraphs_metadata;
subgraphs_metadata.reserve(funcs.size());
for (auto& func : funcs) {
subgraphs_metadata.push_back(
CreateSubgraphMetadata(hardware_map, &func.getBody(), &fb_builder));
}
auto runtime_metadata =
CreateRuntimeMetadata(fb_builder, hardware_metadata_offset,
fb_builder.CreateVector(subgraphs_metadata));
fb_builder.Finish(runtime_metadata);
return std::string(
reinterpret_cast<const char*>(fb_builder.GetBufferPointer()),
fb_builder.GetSize());
}
} | #include "tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/string.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/runtime_metadata_generated.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
namespace tflite {
std::string CreateRuntimeMetadata() {
flatbuffers::FlatBufferBuilder fb_builder;
std::vector<flatbuffers::Offset<flatbuffers::String>> device_names = {
fb_builder.CreateString("GPU"), fb_builder.CreateString("CPU")};
const auto hardwares =
CreateHardwareMetadata(fb_builder, fb_builder.CreateVector(device_names));
const auto ops = {
CreateOpMetadata(fb_builder, 0, 0,
fb_builder.CreateVector(std::vector<float>({1.0, 5.0}))),
CreateOpMetadata(fb_builder, 1, 0,
fb_builder.CreateVector(std::vector<float>({1.0, 5.0}))),
CreateOpMetadata(fb_builder, 2, 0,
fb_builder.CreateVector(std::vector<float>({1.0, 5.0}))),
CreateOpMetadata(
fb_builder, 3, 1,
fb_builder.CreateVector(std::vector<float>({-1.0, 2.0}))),
};
const auto subgraphs = {CreateSubgraphMetadata(
fb_builder, fb_builder.CreateVector(ops.begin(), ops.size()))};
const auto metadata = CreateRuntimeMetadata(
fb_builder, hardwares,
fb_builder.CreateVector(subgraphs.begin(), subgraphs.size()));
fb_builder.Finish(metadata);
return std::string(
reinterpret_cast<const char*>(fb_builder.GetBufferPointer()),
fb_builder.GetSize());
}
void Verify(const RuntimeMetadata* result, const RuntimeMetadata* expected) {
EXPECT_EQ(result->subgraph_metadata()->size(),
expected->subgraph_metadata()->size());
for (int i = 0; i < result->subgraph_metadata()->size(); ++i) {
auto result_subgraph_metadata =
result->subgraph_metadata()->GetAs<SubgraphMetadata>(i);
auto expected_subgraph_metadata =
expected->subgraph_metadata()->GetAs<SubgraphMetadata>(i);
if (expected_subgraph_metadata->op_metadata() == nullptr &&
result_subgraph_metadata->op_metadata() == nullptr) {
return;
}
ASSERT_EQ(expected_subgraph_metadata->op_metadata()->size(),
result_subgraph_metadata->op_metadata()->size());
for (int j = 0; j < expected_subgraph_metadata->op_metadata()->size();
++j) {
auto result_op_metadata =
result_subgraph_metadata->op_metadata()->GetAs<OpMetadata>(j);
auto expected_op_metadata =
expected_subgraph_metadata->op_metadata()->GetAs<OpMetadata>(j);
EXPECT_EQ(result_op_metadata->index(), expected_op_metadata->index());
EXPECT_EQ(result_op_metadata->hardware(),
expected_op_metadata->hardware());
EXPECT_EQ(result_op_metadata->op_costs()->size(),
expected_op_metadata->op_costs()->size());
for (int i = 0; i < result_op_metadata->op_costs()->size(); ++i) {
EXPECT_FLOAT_EQ(result_op_metadata->op_costs()->Get(i),
expected_op_metadata->op_costs()->Get(i));
}
}
}
}
TEST(ExporterTest, Valid) {
const std::string kMLIR = R"(
func.func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> tensor<2x1xf32> {
%0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
%1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
%2 = "tfl.add"(%arg0, %arg3) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
%3 = "tfl.pack"(%1, %2) {axis = 0 : i32, per_device_costs = {CPU = 2.0 : f32, GPU = -1.0 : f32}, values_count = 2 : i32, tac.device = "CPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
func.return %3 : tensor<2x1xf32>
})";
const std::string kExpectedFB = CreateRuntimeMetadata();
mlir::DialectRegistry registry;
registry.insert<mlir::TFL::TensorFlowLiteDialect, mlir::arith::ArithDialect,
mlir::func::FuncDialect>();
mlir::MLIRContext context(registry);
auto module = mlir::OwningOpRef<mlir::ModuleOp>(
mlir::parseSourceString<mlir::ModuleOp>(kMLIR, &context));
auto module_op = module.get();
auto serialized_result_fb = ExportRuntimeMetadata(module_op);
const auto* result = GetRuntimeMetadata(serialized_result_fb.value().c_str());
const auto* expected = GetRuntimeMetadata(kExpectedFB.c_str());
ASSERT_TRUE(result != nullptr);
ASSERT_TRUE(result->subgraph_metadata() != nullptr);
ASSERT_TRUE(expected->subgraph_metadata() != nullptr);
Verify(result, expected);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ff0ecb1-9acc-458b-8c6c-6d2736f471f6 | cpp | tensorflow/tensorflow | metadata_util | tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.cc | tensorflow/compiler/mlir/lite/experimental/remat/metadata_util_test.cc | #include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include <string>
#include <utility>
#include <vector>
namespace {
constexpr int kMod = (1 << 7);
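// Unsigned values are written as base-128 varints, least-significant group
// first, with the high bit of each byte flagging a continuation (LEB128
// style). Signed values are zigzag-mapped first (0, -1, 1, -2, ... ->
// 0, 1, 2, 3, ...) so small magnitudes stay short.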
void Serialize(std::string* out, uint32_t value) {
for (; value >= kMod; value /= kMod) {
out->push_back(value % kMod + kMod);
}
out->push_back(value);
}
bool Parse(const char** data, size_t* size, uint32_t* out) {
*out = 0;
uint32_t mul = 1;
for (bool done = false; !done;
mul *= kMod, done = !(**data & kMod), ++*data, --*size) {
if (*size == 0) {
return false;
}
*out += static_cast<unsigned char>(**data) % kMod * mul;
}
return true;
}
void Serialize(std::string* out, int32_t value) {
Serialize(out, static_cast<uint32_t>(
value < 0 ? static_cast<uint32_t>(-(value + 1)) * 2 + 1
: static_cast<uint32_t>(value) * 2));
}
bool Parse(const char** data, size_t* size, int32_t* out) {
uint32_t value = 0;
if (!Parse(data, size, &value)) {
return false;
}
const int32_t magnitude = value / 2;
*out = (value % 2) ? (-magnitude - 1) : magnitude;
return true;
}
template <class First, class Second>
void Serialize(std::string* out, const std::pair<First, Second>& in) {
Serialize(out, in.first);
Serialize(out, in.second);
}
template <class First, class Second>
bool Parse(const char** data, size_t* size, std::pair<First, Second>* out) {
return Parse(data, size, &(out->first)) && Parse(data, size, &(out->second));
}
template <class Value>
void Serialize(std::string* out, const std::vector<Value>& in) {
Serialize(out, static_cast<uint32_t>(in.size()));
for (const auto& val : in) {
Serialize(out, val);
}
}
template <class T>
bool Parse(const char** data, size_t* size, std::vector<T>* out) {
uint32_t num_elems = 0;
if (!Parse(data, size, &num_elems)) {
return false;
}
out->assign(num_elems, T{});
for (auto& elem : *out) {
if (!Parse(data, size, &elem)) {
return false;
}
}
return true;
}
}
namespace tflite {
std::string SerializeModelControlDependencies(
const ModelControlDependencies& in) {
std::string out;
Serialize(&out, kModelControlDependenciesMetadataVersion);
Serialize(&out, in);
return out;
}
bool ParseModelControlDependencies(const char* data, size_t size,
ModelControlDependencies* out) {
out->clear();
uint32_t version = 0;
return Parse(&data, &size, &version) &&
(version == kModelControlDependenciesMetadataVersion) &&
Parse(&data, &size, out) && (size == 0);
}
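// Example round trip with hypothetical values (a usage sketch, not from the
// original source):
//   ModelControlDependencies deps = {{{0, 1}, {1, 2}}};
//   std::string bytes = SerializeModelControlDependencies(deps);
//   ModelControlDependencies parsed;
//   bool ok = ParseModelControlDependencies(bytes.data(), bytes.size(),
//                                           &parsed);  // ok && parsed == deps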
} | #include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include <cstdint>
#include <limits>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
class MetadataSerializerTest : public ::testing::Test {
protected:
static constexpr auto kHuge = std::numeric_limits<int32_t>::max();
static constexpr auto kTiny = std::numeric_limits<int32_t>::min();
std::string RoundTrip(const ModelControlDependencies &in) const {
ModelControlDependencies out = {{{-1, -1}}};
const std::string serialized =
tflite::SerializeModelControlDependencies(in);
return tflite::ParseModelControlDependencies(serialized.data(),
serialized.size(), &out)
? (out == in) ? "ok" : "mismatch"
: "malformed";
}
};
TEST_F(MetadataSerializerTest, nothing) { EXPECT_THAT(RoundTrip({}), "ok"); }
TEST_F(MetadataSerializerTest, something) {
EXPECT_THAT(
RoundTrip({{{1, 2}, {2, 3}, {4, 5}},
{},
{{kHuge, kTiny}, {kTiny, kHuge}, {kHuge - 1, kTiny + 1}},
{{1, 0}}}),
"ok");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/remat/metadata_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |